author	Sergei Golubchik <serg@mariadb.org>	2016-06-28 22:01:55 +0200
committer	Sergei Golubchik <serg@mariadb.org>	2016-06-28 22:01:55 +0200
commit	3361aee591b1eb8c676f60887ffc535cd509890a (patch)
tree	54a65f83ba7d9293e6f8e8281ad920fbae6eb823 /storage
parent	6ce20fb2b9fe57330c797694b9dbea4028f40d7c (diff)
parent	0fdb17e6c3f50ae22eb97b6363bcbd8b0cd9e040 (diff)
download	mariadb-git-3361aee591b1eb8c676f60887ffc535cd509890a.tar.gz
Merge branch '10.0' into 10.1
Diffstat (limited to 'storage')
-rw-r--r-- storage/connect/.gitignore | 264
-rw-r--r-- storage/connect/CMakeLists.txt | 17
-rw-r--r-- storage/connect/JdbcApacheInterface.class | bin 0 -> 15357 bytes
-rw-r--r-- storage/connect/JdbcApacheInterface.java | 709
-rw-r--r-- storage/connect/JdbcDSInterface.class | bin 0 -> 16175 bytes
-rw-r--r-- storage/connect/JdbcDSInterface.java | 743
-rw-r--r-- storage/connect/JdbcInterface.class | bin 0 -> 15215 bytes
-rw-r--r-- storage/connect/JdbcInterface.java | 712
-rw-r--r-- storage/connect/ha_connect.cc | 67
-rw-r--r-- storage/connect/inihandl.c | 7
-rw-r--r-- storage/connect/jsonudf.cpp | 442
-rw-r--r-- storage/connect/jsonudf.h | 4
-rw-r--r-- storage/connect/mycat.cc | 14
-rw-r--r-- storage/connect/myconn.cpp | 32
-rw-r--r-- storage/connect/myconn.h | 6
-rw-r--r-- storage/connect/mysql-test/connect/disabled.def | 16
-rw-r--r-- storage/connect/mysql-test/connect/r/jdbc.result | 269
-rw-r--r-- storage/connect/mysql-test/connect/r/jdbc_new.result | 216
-rw-r--r-- storage/connect/mysql-test/connect/r/jdbc_oracle.result | 70
-rw-r--r-- storage/connect/mysql-test/connect/r/jdbc_postgresql.result | 65
-rw-r--r-- storage/connect/mysql-test/connect/std_data/girls.txt | 5
-rw-r--r-- storage/connect/mysql-test/connect/t/jdbc.test | 143
-rw-r--r-- storage/connect/mysql-test/connect/t/jdbc_new.test | 179
-rw-r--r-- storage/connect/mysql-test/connect/t/jdbc_oracle.test | 56
-rw-r--r-- storage/connect/mysql-test/connect/t/jdbc_postgresql.test | 53
-rw-r--r-- storage/connect/mysql-test/connect/t/jdbconn.inc | 31
-rw-r--r-- storage/connect/mysql-test/connect/t/jdbconn_cleanup.inc | 6
-rw-r--r-- storage/connect/plgdbutl.cpp | 31
-rw-r--r-- storage/connect/plugutil.c | 4
-rw-r--r-- storage/connect/reldef.cpp | 5
-rw-r--r-- storage/connect/tabcol.cpp | 2
-rw-r--r-- storage/connect/tabtbl.cpp | 8
-rw-r--r-- storage/innobase/btr/btr0btr.cc | 16
-rw-r--r-- storage/innobase/btr/btr0cur.cc | 16
-rw-r--r-- storage/innobase/btr/btr0sea.cc | 4
-rw-r--r-- storage/innobase/buf/buf0buddy.cc | 6
-rw-r--r-- storage/innobase/buf/buf0buf.cc | 4
-rw-r--r-- storage/innobase/buf/buf0dump.cc | 8
-rw-r--r-- storage/innobase/buf/buf0flu.cc | 6
-rw-r--r-- storage/innobase/buf/buf0lru.cc | 18
-rw-r--r-- storage/innobase/data/data0data.cc | 7
-rw-r--r-- storage/innobase/dict/dict0crea.cc | 26
-rw-r--r-- storage/innobase/dict/dict0dict.cc | 14
-rw-r--r-- storage/innobase/dict/dict0load.cc | 26
-rw-r--r-- storage/innobase/dict/dict0mem.cc | 4
-rw-r--r-- storage/innobase/dict/dict0stats_bg.cc | 4
-rw-r--r-- storage/innobase/fil/fil0fil.cc | 6
-rw-r--r-- storage/innobase/fsp/fsp0fsp.cc | 39
-rw-r--r-- storage/innobase/fts/fts0blex.cc | 26
-rw-r--r-- storage/innobase/fts/fts0fts.cc | 66
-rw-r--r-- storage/innobase/fts/fts0opt.cc | 52
-rw-r--r-- storage/innobase/fts/fts0que.cc | 28
-rw-r--r-- storage/innobase/fts/fts0tlex.cc | 26
-rwxr-xr-x storage/innobase/fts/make_parser.sh | 28
-rw-r--r-- storage/innobase/handler/ha_innodb.cc | 103
-rw-r--r-- storage/innobase/handler/ha_innodb.h | 18
-rw-r--r-- storage/innobase/handler/handler0alter.cc | 94
-rw-r--r-- storage/innobase/ibuf/ibuf0ibuf.cc | 138
-rw-r--r-- storage/innobase/include/api0api.h | 4
-rw-r--r-- storage/innobase/include/btr0btr.h | 78
-rw-r--r-- storage/innobase/include/btr0btr.ic | 20
-rw-r--r-- storage/innobase/include/btr0cur.h | 36
-rw-r--r-- storage/innobase/include/btr0pcur.h | 4
-rw-r--r-- storage/innobase/include/btr0sea.h | 4
-rw-r--r-- storage/innobase/include/btr0types.h | 18
-rw-r--r-- storage/innobase/include/buf0buddy.h | 6
-rw-r--r-- storage/innobase/include/buf0buddy.ic | 6
-rw-r--r-- storage/innobase/include/buf0buf.h | 70
-rw-r--r-- storage/innobase/include/buf0flu.h | 8
-rw-r--r-- storage/innobase/include/buf0lru.h | 10
-rw-r--r-- storage/innobase/include/data0data.h | 84
-rw-r--r-- storage/innobase/include/data0data.ic | 8
-rw-r--r-- storage/innobase/include/dict0boot.h | 8
-rw-r--r-- storage/innobase/include/dict0crea.h | 8
-rw-r--r-- storage/innobase/include/dict0crea.ic | 4
-rw-r--r-- storage/innobase/include/dict0dict.h | 248
-rw-r--r-- storage/innobase/include/dict0dict.ic | 9
-rw-r--r-- storage/innobase/include/dict0load.h | 4
-rw-r--r-- storage/innobase/include/dict0mem.h | 16
-rw-r--r-- storage/innobase/include/dict0stats.h | 10
-rw-r--r-- storage/innobase/include/dict0stats_bg.h | 4
-rw-r--r-- storage/innobase/include/dyn0dyn.h | 22
-rw-r--r-- storage/innobase/include/dyn0dyn.ic | 4
-rw-r--r-- storage/innobase/include/fil0fil.h | 14
-rw-r--r-- storage/innobase/include/fsp0fsp.h | 18
-rw-r--r-- storage/innobase/include/fts0ast.h | 10
-rw-r--r-- storage/innobase/include/fts0fts.h | 46
-rw-r--r-- storage/innobase/include/fts0priv.h | 88
-rw-r--r-- storage/innobase/include/fts0priv.ic | 4
-rw-r--r-- storage/innobase/include/ha_prototypes.h | 22
-rw-r--r-- storage/innobase/include/handler0alter.h | 10
-rw-r--r-- storage/innobase/include/ibuf0ibuf.h | 44
-rw-r--r-- storage/innobase/include/lock0lock.h | 36
-rw-r--r-- storage/innobase/include/lock0priv.h | 6
-rw-r--r-- storage/innobase/include/log0recv.h | 4
-rw-r--r-- storage/innobase/include/mach0data.h | 38
-rw-r--r-- storage/innobase/include/mem0mem.h | 4
-rw-r--r-- storage/innobase/include/mem0mem.ic | 6
-rw-r--r-- storage/innobase/include/mtr0mtr.h | 13
-rw-r--r-- storage/innobase/include/mtr0mtr.ic | 4
-rw-r--r-- storage/innobase/include/os0file.h | 12
-rw-r--r-- storage/innobase/include/os0sync.h | 2
-rw-r--r-- storage/innobase/include/os0thread.h | 4
-rw-r--r-- storage/innobase/include/page0cur.h | 8
-rw-r--r-- storage/innobase/include/page0page.h | 57
-rw-r--r-- storage/innobase/include/page0types.h | 10
-rw-r--r-- storage/innobase/include/page0zip.h | 51
-rw-r--r-- storage/innobase/include/pars0pars.h | 6
-rw-r--r-- storage/innobase/include/read0read.h | 4
-rw-r--r-- storage/innobase/include/rem0cmp.h | 6
-rw-r--r-- storage/innobase/include/rem0rec.h | 152
-rw-r--r-- storage/innobase/include/rem0rec.ic | 5
-rw-r--r-- storage/innobase/include/row0ftsort.h | 8
-rw-r--r-- storage/innobase/include/row0import.h | 8
-rw-r--r-- storage/innobase/include/row0ins.h | 14
-rw-r--r-- storage/innobase/include/row0log.h | 32
-rw-r--r-- storage/innobase/include/row0merge.h | 34
-rw-r--r-- storage/innobase/include/row0mysql.h | 45
-rw-r--r-- storage/innobase/include/row0purge.h | 8
-rw-r--r-- storage/innobase/include/row0quiesce.h | 8
-rw-r--r-- storage/innobase/include/row0row.h | 30
-rw-r--r-- storage/innobase/include/row0sel.h | 6
-rw-r--r-- storage/innobase/include/row0uins.h | 4
-rw-r--r-- storage/innobase/include/row0umod.h | 4
-rw-r--r-- storage/innobase/include/row0upd.h | 18
-rw-r--r-- storage/innobase/include/row0vers.h | 6
-rw-r--r-- storage/innobase/include/srv0srv.h | 6
-rw-r--r-- storage/innobase/include/srv0start.h | 6
-rw-r--r-- storage/innobase/include/sync0arr.h | 4
-rw-r--r-- storage/innobase/include/sync0rw.h | 6
-rw-r--r-- storage/innobase/include/sync0rw.ic | 4
-rw-r--r-- storage/innobase/include/sync0sync.h | 10
-rw-r--r-- storage/innobase/include/trx0rec.h | 14
-rw-r--r-- storage/innobase/include/trx0roll.h | 16
-rw-r--r-- storage/innobase/include/trx0sys.h | 6
-rw-r--r-- storage/innobase/include/trx0trx.h | 24
-rw-r--r-- storage/innobase/include/trx0undo.h | 15
-rw-r--r-- storage/innobase/include/univ.i | 11
-rw-r--r-- storage/innobase/include/ut0byte.h | 8
-rw-r--r-- storage/innobase/include/ut0dbg.h | 4
-rw-r--r-- storage/innobase/include/ut0mem.h | 4
-rw-r--r-- storage/innobase/include/ut0rnd.h | 12
-rw-r--r-- storage/innobase/include/ut0ut.h | 8
-rw-r--r-- storage/innobase/lock/lock0lock.cc | 18
-rw-r--r-- storage/innobase/lock/lock0wait.cc | 4
-rw-r--r-- storage/innobase/log/log0log.cc | 6
-rw-r--r-- storage/innobase/log/log0recv.cc | 6
-rw-r--r-- storage/innobase/mem/mem0dbg.cc | 6
-rw-r--r-- storage/innobase/mtr/mtr0mtr.cc | 8
-rw-r--r-- storage/innobase/os/os0file.cc | 10
-rw-r--r-- storage/innobase/page/page0page.cc | 4
-rw-r--r-- storage/innobase/page/page0zip.cc | 15
-rw-r--r-- storage/innobase/pars/lexyy.cc | 10
-rwxr-xr-x storage/innobase/pars/make_flex.sh | 12
-rw-r--r-- storage/innobase/pars/pars0pars.cc | 8
-rw-r--r-- storage/innobase/rem/rem0cmp.cc | 19
-rw-r--r-- storage/innobase/rem/rem0rec.cc | 12
-rw-r--r-- storage/innobase/row/row0ftsort.cc | 18
-rw-r--r-- storage/innobase/row/row0import.cc | 26
-rw-r--r-- storage/innobase/row/row0ins.cc | 30
-rw-r--r-- storage/innobase/row/row0log.cc | 34
-rw-r--r-- storage/innobase/row/row0merge.cc | 32
-rw-r--r-- storage/innobase/row/row0mysql.cc | 45
-rw-r--r-- storage/innobase/row/row0purge.cc | 24
-rw-r--r-- storage/innobase/row/row0quiesce.cc | 16
-rw-r--r-- storage/innobase/row/row0row.cc | 18
-rw-r--r-- storage/innobase/row/row0sel.cc | 20
-rw-r--r-- storage/innobase/row/row0uins.cc | 10
-rw-r--r-- storage/innobase/row/row0umod.cc | 27
-rw-r--r-- storage/innobase/row/row0undo.cc | 7
-rw-r--r-- storage/innobase/row/row0upd.cc | 25
-rw-r--r-- storage/innobase/srv/srv0srv.cc | 16
-rw-r--r-- storage/innobase/srv/srv0start.cc | 10
-rw-r--r-- storage/innobase/sync/sync0sync.cc | 6
-rw-r--r-- storage/innobase/trx/trx0purge.cc | 4
-rw-r--r-- storage/innobase/trx/trx0rec.cc | 10
-rw-r--r-- storage/innobase/trx/trx0roll.cc | 8
-rw-r--r-- storage/innobase/trx/trx0trx.cc | 14
-rw-r--r-- storage/innobase/trx/trx0undo.cc | 14
-rw-r--r-- storage/maria/ma_checkpoint.c | 20
-rw-r--r-- storage/maria/ma_loghandler.c | 11
-rw-r--r-- storage/maria/ma_pagecache.c | 32
-rw-r--r-- storage/maria/ma_pagecache.h | 40
-rw-r--r-- storage/maria/ma_servicethread.c | 51
-rw-r--r-- storage/maria/ma_servicethread.h | 7
-rw-r--r-- storage/maria/ma_sort.c | 318
-rw-r--r-- storage/maria/maria_def.h | 3
-rw-r--r-- storage/mroonga/ha_mroonga.cpp | 5
-rw-r--r-- storage/myisam/mysql-test/storage_engine/alter_table_online.rdiff | 36
-rw-r--r-- storage/myisam/sort.c | 315
-rw-r--r-- storage/oqgraph/oqgraph_shim.h | 2
-rw-r--r-- storage/tokudb/CMakeLists.txt | 20
-rw-r--r-- storage/tokudb/PerconaFT/CMakeLists.txt | 3
-rw-r--r-- storage/tokudb/PerconaFT/CTestCustom.cmake.in (renamed from storage/tokudb/PerconaFT/CTestCustom.cmake) | 0
-rw-r--r-- storage/tokudb/PerconaFT/README.md | 8
-rw-r--r-- storage/tokudb/PerconaFT/buildheader/make_tdb.cc | 5
-rw-r--r-- storage/tokudb/PerconaFT/cmake_modules/TokuMergeLibs.cmake | 3
-rw-r--r-- storage/tokudb/PerconaFT/cmake_modules/TokuSetupCompiler.cmake | 34
-rw-r--r-- storage/tokudb/PerconaFT/ft/CMakeLists.txt | 1
-rw-r--r-- storage/tokudb/PerconaFT/ft/ft-flusher.cc | 6
-rw-r--r-- storage/tokudb/PerconaFT/ft/ft-internal.h | 5
-rw-r--r-- storage/tokudb/PerconaFT/ft/ft-ops.cc | 11
-rw-r--r-- storage/tokudb/PerconaFT/ft/ft-ops.h | 9
-rw-r--r-- storage/tokudb/PerconaFT/ft/ft-recount-rows.cc | 115
-rw-r--r-- storage/tokudb/PerconaFT/ft/ft-status.cc | 36
-rw-r--r-- storage/tokudb/PerconaFT/ft/ft-test-helpers.cc | 29
-rw-r--r-- storage/tokudb/PerconaFT/ft/ft.cc | 44
-rw-r--r-- storage/tokudb/PerconaFT/ft/ft.h | 6
-rw-r--r-- storage/tokudb/PerconaFT/ft/leafentry.h | 88
-rw-r--r-- storage/tokudb/PerconaFT/ft/loader/loader.cc | 121
-rw-r--r-- storage/tokudb/PerconaFT/ft/logger/log_upgrade.cc | 5
-rw-r--r-- storage/tokudb/PerconaFT/ft/logger/logger.h | 1
-rw-r--r-- storage/tokudb/PerconaFT/ft/logger/recover.h | 2
-rw-r--r-- storage/tokudb/PerconaFT/ft/node.cc | 475
-rw-r--r-- storage/tokudb/PerconaFT/ft/node.h | 67
-rw-r--r-- storage/tokudb/PerconaFT/ft/serialize/ft-serialize.cc | 13
-rw-r--r-- storage/tokudb/PerconaFT/ft/serialize/ft_layout_version.h | 1
-rw-r--r-- storage/tokudb/PerconaFT/ft/tests/CMakeLists.txt | 2
-rw-r--r-- storage/tokudb/PerconaFT/ft/tests/make-tree.cc | 13
-rw-r--r-- storage/tokudb/PerconaFT/ft/tests/msnfilter.cc | 70
-rw-r--r-- storage/tokudb/PerconaFT/ft/tests/orthopush-flush.cc | 94
-rw-r--r-- storage/tokudb/PerconaFT/ft/tests/test-upgrade-recovery-logs.cc | 2
-rwxr-xr-x storage/tokudb/PerconaFT/ft/tests/upgrade.data/upgrade-recovery-logs-24-clean/log000000000000.tokulog24 | bin 0 -> 131 bytes
-rwxr-xr-x storage/tokudb/PerconaFT/ft/tests/upgrade.data/upgrade-recovery-logs-24-dirty/log000000000000.tokulog24 | bin 0 -> 94 bytes
-rwxr-xr-x storage/tokudb/PerconaFT/ft/tests/upgrade.data/upgrade-recovery-logs-25-clean/log000000000000.tokulog25 | bin 0 -> 131 bytes
-rwxr-xr-x storage/tokudb/PerconaFT/ft/tests/upgrade.data/upgrade-recovery-logs-25-dirty/log000000000000.tokulog25 | bin 0 -> 94 bytes
-rwxr-xr-x storage/tokudb/PerconaFT/ft/tests/upgrade.data/upgrade-recovery-logs-26-clean/log000000000000.tokulog26 | bin 0 -> 131 bytes
-rwxr-xr-x storage/tokudb/PerconaFT/ft/tests/upgrade.data/upgrade-recovery-logs-26-dirty/log000000000000.tokulog26 | bin 0 -> 94 bytes
-rwxr-xr-x storage/tokudb/PerconaFT/ft/tests/upgrade.data/upgrade-recovery-logs-27-clean/log000000000000.tokulog27 | bin 0 -> 131 bytes
-rwxr-xr-x storage/tokudb/PerconaFT/ft/tests/upgrade.data/upgrade-recovery-logs-27-dirty/log000000000000.tokulog27 | bin 0 -> 94 bytes
-rw-r--r-- storage/tokudb/PerconaFT/ft/tests/upgrade.data/upgrade-recovery-logs-28-clean/log000000000000.tokulog28 | bin 0 -> 131 bytes
-rw-r--r-- storage/tokudb/PerconaFT/ft/tests/upgrade.data/upgrade-recovery-logs-28-dirty/log000000000000.tokulog28 | bin 0 -> 94 bytes
-rw-r--r-- storage/tokudb/PerconaFT/ft/tests/upgrade.data/upgrade-recovery-logs-29-clean/log000000000000.tokulog29 | bin 0 -> 131 bytes
-rw-r--r-- storage/tokudb/PerconaFT/ft/tests/upgrade.data/upgrade-recovery-logs-29-dirty/log000000000000.tokulog29 | bin 0 -> 94 bytes
-rw-r--r-- storage/tokudb/PerconaFT/ft/tests/verify-bad-msn.cc | 13
-rw-r--r-- storage/tokudb/PerconaFT/ft/tests/verify-bad-pivots.cc | 15
-rw-r--r-- storage/tokudb/PerconaFT/ft/tests/verify-dup-in-leaf.cc | 15
-rw-r--r-- storage/tokudb/PerconaFT/ft/tests/verify-dup-pivots.cc | 15
-rw-r--r-- storage/tokudb/PerconaFT/ft/tests/verify-misrouted-msgs.cc | 15
-rw-r--r-- storage/tokudb/PerconaFT/ft/tests/verify-unsorted-leaf.cc | 15
-rw-r--r-- storage/tokudb/PerconaFT/ft/tests/verify-unsorted-pivots.cc | 15
-rw-r--r-- storage/tokudb/PerconaFT/ft/txn/rollback-apply.cc | 1
-rw-r--r-- storage/tokudb/PerconaFT/ft/txn/txn.cc | 19
-rw-r--r-- storage/tokudb/PerconaFT/ft/txn/txn_manager.cc | 21
-rw-r--r-- storage/tokudb/PerconaFT/ft/txn/txn_manager.h | 9
-rw-r--r-- storage/tokudb/PerconaFT/ft/ule.cc | 1169
-rw-r--r-- storage/tokudb/PerconaFT/ftcxx/tests/CMakeLists.txt | 10
-rw-r--r-- storage/tokudb/PerconaFT/portability/toku_pthread.h | 33
-rw-r--r-- storage/tokudb/PerconaFT/portability/toku_time.h | 10
-rw-r--r-- storage/tokudb/PerconaFT/scripts/run.stress-tests.py | 26
-rw-r--r-- storage/tokudb/PerconaFT/src/export.map | 1
-rw-r--r-- storage/tokudb/PerconaFT/src/indexer-undo-do.cc | 2
-rw-r--r-- storage/tokudb/PerconaFT/src/tests/CMakeLists.txt | 2
-rw-r--r-- storage/tokudb/PerconaFT/src/tests/rollback-inconsistency.cc | 161
-rw-r--r-- storage/tokudb/PerconaFT/src/tests/stat64-root-changes.cc | 4
-rw-r--r-- storage/tokudb/PerconaFT/src/tests/test_db_rowcount.cc | 523
-rw-r--r-- storage/tokudb/PerconaFT/src/tests/txn_manager_handle_snapshot_atomicity.cc | 217
-rw-r--r-- storage/tokudb/PerconaFT/src/ydb.cc | 4
-rw-r--r-- storage/tokudb/PerconaFT/src/ydb.h | 3
-rw-r--r-- storage/tokudb/PerconaFT/src/ydb_db.cc | 20
-rw-r--r-- storage/tokudb/ha_tokudb.cc | 3083
-rw-r--r-- storage/tokudb/ha_tokudb.h | 451
-rw-r--r-- storage/tokudb/ha_tokudb_admin.cc | 1003
-rw-r--r-- storage/tokudb/ha_tokudb_alter_56.cc | 1246
-rw-r--r-- storage/tokudb/ha_tokudb_alter_common.cc | 444
-rw-r--r-- storage/tokudb/ha_tokudb_update.cc | 536
-rw-r--r-- storage/tokudb/hatoku_cmp.cc | 420
-rw-r--r-- storage/tokudb/hatoku_cmp.h | 6
-rw-r--r-- storage/tokudb/hatoku_defines.h | 368
-rw-r--r-- storage/tokudb/hatoku_hton.cc | 2180
-rw-r--r-- storage/tokudb/hatoku_hton.h | 485
-rw-r--r-- storage/tokudb/mysql-test/rpl/disabled.def | 1
-rw-r--r-- storage/tokudb/mysql-test/rpl/r/rpl_parallel_tokudb_delete_pk.result | 7
-rw-r--r-- storage/tokudb/mysql-test/rpl/r/rpl_parallel_tokudb_update_pk_uc0_lookup0.result | 7
-rw-r--r-- storage/tokudb/mysql-test/rpl/r/rpl_parallel_tokudb_write_pk.result | 4
-rw-r--r-- storage/tokudb/mysql-test/rpl/r/rpl_tokudb_delete_pk.result | 7
-rw-r--r-- storage/tokudb/mysql-test/rpl/r/rpl_tokudb_delete_pk_lookup1.result | 7
-rw-r--r-- storage/tokudb/mysql-test/rpl/r/rpl_tokudb_mixed_dml.result | 3
-rw-r--r-- storage/tokudb/mysql-test/rpl/r/rpl_tokudb_read_only_ff.result | 7
-rw-r--r-- storage/tokudb/mysql-test/rpl/r/rpl_tokudb_read_only_ft.result | 7
-rw-r--r-- storage/tokudb/mysql-test/rpl/r/rpl_tokudb_read_only_tf.result | 7
-rw-r--r-- storage/tokudb/mysql-test/rpl/r/rpl_tokudb_read_only_tt.result | 7
-rw-r--r-- storage/tokudb/mysql-test/rpl/r/rpl_tokudb_update_pk_uc0_lookup0.result | 7
-rw-r--r-- storage/tokudb/mysql-test/rpl/r/rpl_tokudb_update_pk_uc0_lookup1.result | 7
-rw-r--r-- storage/tokudb/mysql-test/rpl/r/rpl_tokudb_update_pk_uc1_lookup0.result | 7
-rw-r--r-- storage/tokudb/mysql-test/rpl/r/rpl_tokudb_update_pk_uc1_lookup1.result | 7
-rw-r--r-- storage/tokudb/mysql-test/rpl/r/rpl_tokudb_update_unique_uc0_lookup0.result | 7
-rw-r--r-- storage/tokudb/mysql-test/rpl/r/rpl_tokudb_update_unique_uc0_lookup1.result | 7
-rw-r--r-- storage/tokudb/mysql-test/rpl/r/rpl_tokudb_write_pk.result | 4
-rw-r--r-- storage/tokudb/mysql-test/rpl/r/rpl_tokudb_write_pk_uc1.result | 4
-rw-r--r-- storage/tokudb/mysql-test/rpl/r/rpl_tokudb_write_unique.result | 4
-rw-r--r-- storage/tokudb/mysql-test/rpl/r/rpl_tokudb_write_unique_uc1.result | 4
-rw-r--r-- storage/tokudb/mysql-test/rpl/t/disabled.def | 1
-rw-r--r-- storage/tokudb/mysql-test/rpl/t/rpl_parallel_tokudb.test | 1
-rw-r--r-- storage/tokudb/mysql-test/rpl/t/rpl_parallel_tokudb_delete_pk-slave.opt | 9
-rw-r--r-- storage/tokudb/mysql-test/rpl/t/rpl_parallel_tokudb_delete_pk.test | 2
-rw-r--r-- storage/tokudb/mysql-test/rpl/t/rpl_parallel_tokudb_update_pk_uc0_lookup0-slave.opt | 8
-rw-r--r-- storage/tokudb/mysql-test/rpl/t/rpl_parallel_tokudb_update_pk_uc0_lookup0.test | 2
-rw-r--r-- storage/tokudb/mysql-test/rpl/t/rpl_parallel_tokudb_write_pk-slave.opt | 4
-rw-r--r-- storage/tokudb/mysql-test/rpl/t/rpl_parallel_tokudb_write_pk.test | 2
-rw-r--r-- storage/tokudb/mysql-test/rpl/t/rpl_tokudb_delete_pk-slave.opt | 2
-rw-r--r-- storage/tokudb/mysql-test/rpl/t/rpl_tokudb_delete_pk.test | 2
-rw-r--r-- storage/tokudb/mysql-test/rpl/t/rpl_tokudb_delete_pk_lookup1-slave.opt | 2
-rw-r--r-- storage/tokudb/mysql-test/rpl/t/rpl_tokudb_delete_pk_lookup1.test | 2
-rw-r--r-- storage/tokudb/mysql-test/rpl/t/rpl_tokudb_mixed_dml-master.opt | 2
-rw-r--r-- storage/tokudb/mysql-test/rpl/t/rpl_tokudb_mixed_dml.test | 3
-rw-r--r-- storage/tokudb/mysql-test/rpl/t/rpl_tokudb_read_only_ff-slave.opt | 2
-rw-r--r-- storage/tokudb/mysql-test/rpl/t/rpl_tokudb_read_only_ff.test | 2
-rw-r--r-- storage/tokudb/mysql-test/rpl/t/rpl_tokudb_read_only_ft-slave.opt | 2
-rw-r--r-- storage/tokudb/mysql-test/rpl/t/rpl_tokudb_read_only_ft.test | 2
-rw-r--r-- storage/tokudb/mysql-test/rpl/t/rpl_tokudb_read_only_tf-slave.opt | 2
-rw-r--r-- storage/tokudb/mysql-test/rpl/t/rpl_tokudb_read_only_tf.test | 2
-rw-r--r-- storage/tokudb/mysql-test/rpl/t/rpl_tokudb_read_only_tt-slave.opt | 2
-rw-r--r-- storage/tokudb/mysql-test/rpl/t/rpl_tokudb_read_only_tt.test | 2
-rw-r--r-- storage/tokudb/mysql-test/rpl/t/rpl_tokudb_update_pk_uc0_lookup0-slave.opt | 2
-rw-r--r-- storage/tokudb/mysql-test/rpl/t/rpl_tokudb_update_pk_uc0_lookup0.test | 2
-rw-r--r-- storage/tokudb/mysql-test/rpl/t/rpl_tokudb_update_pk_uc0_lookup1-slave.opt | 2
-rw-r--r-- storage/tokudb/mysql-test/rpl/t/rpl_tokudb_update_pk_uc0_lookup1.test | 2
-rw-r--r-- storage/tokudb/mysql-test/rpl/t/rpl_tokudb_update_pk_uc1_lookup0-slave.opt | 2
-rw-r--r-- storage/tokudb/mysql-test/rpl/t/rpl_tokudb_update_pk_uc1_lookup0.test | 2
-rw-r--r-- storage/tokudb/mysql-test/rpl/t/rpl_tokudb_update_pk_uc1_lookup1-slave.opt | 2
-rw-r--r-- storage/tokudb/mysql-test/rpl/t/rpl_tokudb_update_pk_uc1_lookup1.test | 2
-rw-r--r-- storage/tokudb/mysql-test/rpl/t/rpl_tokudb_update_unique_uc0_lookup0-slave.opt | 2
-rw-r--r-- storage/tokudb/mysql-test/rpl/t/rpl_tokudb_update_unique_uc0_lookup0.test | 2
-rw-r--r-- storage/tokudb/mysql-test/rpl/t/rpl_tokudb_update_unique_uc0_lookup1-slave.opt | 2
-rw-r--r-- storage/tokudb/mysql-test/rpl/t/rpl_tokudb_update_unique_uc0_lookup1.test | 2
-rw-r--r-- storage/tokudb/mysql-test/rpl/t/rpl_tokudb_write_pk-slave.opt | 2
-rw-r--r-- storage/tokudb/mysql-test/rpl/t/rpl_tokudb_write_pk.test | 2
-rw-r--r-- storage/tokudb/mysql-test/rpl/t/rpl_tokudb_write_pk_uc1-slave.opt | 2
-rw-r--r-- storage/tokudb/mysql-test/rpl/t/rpl_tokudb_write_pk_uc1.test | 2
-rw-r--r-- storage/tokudb/mysql-test/rpl/t/rpl_tokudb_write_unique-slave.opt | 2
-rw-r--r-- storage/tokudb/mysql-test/rpl/t/rpl_tokudb_write_unique.test | 2
-rw-r--r-- storage/tokudb/mysql-test/rpl/t/rpl_tokudb_write_unique_uc1-slave.opt | 2
-rw-r--r-- storage/tokudb/mysql-test/rpl/t/rpl_tokudb_write_unique_uc1.test | 2
-rw-r--r-- storage/tokudb/mysql-test/rpl/t/suite.opt | 1
-rw-r--r-- storage/tokudb/mysql-test/tokudb/disabled.def | 8
-rw-r--r-- storage/tokudb/mysql-test/tokudb/include/cluster_key.inc | 138
-rw-r--r-- storage/tokudb/mysql-test/tokudb/r/background_job_manager.result | 122
-rw-r--r-- storage/tokudb/mysql-test/tokudb/r/card_add_drop.result | 22
-rw-r--r-- storage/tokudb/mysql-test/tokudb/r/card_add_index.result | 16
-rw-r--r-- storage/tokudb/mysql-test/tokudb/r/card_auto_analyze_lots.result | 805
-rw-r--r-- storage/tokudb/mysql-test/tokudb/r/card_drop_index.result | 10
-rw-r--r-- storage/tokudb/mysql-test/tokudb/r/card_drop_index_2.result | 10
-rw-r--r-- storage/tokudb/mysql-test/tokudb/r/card_drop_pk.result | 16
-rw-r--r-- storage/tokudb/mysql-test/tokudb/r/card_pk_2.result | 6
-rw-r--r-- storage/tokudb/mysql-test/tokudb/r/card_pk_sk.result | 8
-rw-r--r-- storage/tokudb/mysql-test/tokudb/r/card_scale_percent.result | 42
-rw-r--r-- storage/tokudb/mysql-test/tokudb/r/card_sk.result | 6
-rw-r--r-- storage/tokudb/mysql-test/tokudb/r/card_sk_2.result | 8
-rw-r--r-- storage/tokudb/mysql-test/tokudb/r/cluster_2968-0.result | 2
-rw-r--r-- storage/tokudb/mysql-test/tokudb/r/cluster_2968-1.result | 4
-rw-r--r-- storage/tokudb/mysql-test/tokudb/r/cluster_2968-2.result | 6
-rw-r--r-- storage/tokudb/mysql-test/tokudb/r/cluster_2968-3.result | 4
-rw-r--r-- storage/tokudb/mysql-test/tokudb/r/cluster_key.result | 36
-rw-r--r-- storage/tokudb/mysql-test/tokudb/r/cluster_key_part.result | 652
-rw-r--r-- storage/tokudb/mysql-test/tokudb/r/i_s_tokudb_lock_waits_released.result | 4
-rw-r--r-- storage/tokudb/mysql-test/tokudb/r/i_s_tokudb_lock_waits_timeout.result | 4
-rw-r--r-- storage/tokudb/mysql-test/tokudb/r/i_s_tokudb_locks.result | 24
-rw-r--r-- storage/tokudb/mysql-test/tokudb/r/i_s_tokudb_locks_released.result | 2
-rw-r--r-- storage/tokudb/mysql-test/tokudb/r/i_s_tokudb_trx.result | 21
-rw-r--r-- storage/tokudb/mysql-test/tokudb/r/type_bit.result | 2
-rw-r--r-- storage/tokudb/mysql-test/tokudb/t/background_job_manager.opt | 1
-rw-r--r-- storage/tokudb/mysql-test/tokudb/t/background_job_manager.test | 139
-rw-r--r-- storage/tokudb/mysql-test/tokudb/t/card_auto_analyze_lots.test | 82
-rw-r--r-- storage/tokudb/mysql-test/tokudb/t/card_scale_percent.test | 56
-rw-r--r-- storage/tokudb/mysql-test/tokudb/t/cluster_key.test | 136
-rw-r--r-- storage/tokudb/mysql-test/tokudb/t/cluster_key_part.test | 48
-rw-r--r-- storage/tokudb/mysql-test/tokudb/t/disabled.def | 28
-rw-r--r-- storage/tokudb/mysql-test/tokudb/t/i_s_tokudb_lock_waits_released.test | 4
-rw-r--r-- storage/tokudb/mysql-test/tokudb/t/i_s_tokudb_lock_waits_timeout.test | 11
-rw-r--r-- storage/tokudb/mysql-test/tokudb/t/i_s_tokudb_locks.test | 9
-rw-r--r-- storage/tokudb/mysql-test/tokudb/t/i_s_tokudb_locks_released.test | 2
-rw-r--r-- storage/tokudb/mysql-test/tokudb/t/i_s_tokudb_trx.test | 17
-rw-r--r-- storage/tokudb/mysql-test/tokudb/t/suite.opt | 1
-rw-r--r-- storage/tokudb/mysql-test/tokudb_add_index/disabled.def | 0
-rw-r--r-- storage/tokudb/mysql-test/tokudb_add_index/t/suite.opt | 1
-rw-r--r-- storage/tokudb/mysql-test/tokudb_alter_table/disabled.def | 6
-rw-r--r-- storage/tokudb/mysql-test/tokudb_alter_table/t/disabled.def | 8
-rw-r--r-- storage/tokudb/mysql-test/tokudb_alter_table/t/suite.opt | 1
-rw-r--r-- storage/tokudb/mysql-test/tokudb_backup/r/tokudb_backup_exclude.result | 31
-rw-r--r-- storage/tokudb/mysql-test/tokudb_backup/r/tokudb_backup_set_last_error.result | 20
-rw-r--r-- storage/tokudb/mysql-test/tokudb_backup/t/suite.opt | 1
-rw-r--r-- storage/tokudb/mysql-test/tokudb_backup/t/tokudb_backup_exclude.test | 69
-rw-r--r-- storage/tokudb/mysql-test/tokudb_backup/t/tokudb_backup_set_last_error.test | 32
-rw-r--r-- storage/tokudb/mysql-test/tokudb_bugs/disabled.def | 14
-rw-r--r-- storage/tokudb/mysql-test/tokudb_bugs/r/5585.result | 6
-rw-r--r-- storage/tokudb/mysql-test/tokudb_bugs/r/db233.result | 37
-rw-r--r-- storage/tokudb/mysql-test/tokudb_bugs/r/db397_delete_trigger.result | 12
-rw-r--r-- storage/tokudb/mysql-test/tokudb_bugs/r/db397_insert_trigger.result | 8
-rw-r--r-- storage/tokudb/mysql-test/tokudb_bugs/r/db397_update_trigger.result | 12
-rw-r--r-- storage/tokudb/mysql-test/tokudb_bugs/r/db739_replace.result | 2
-rw-r--r-- storage/tokudb/mysql-test/tokudb_bugs/r/db756_card_part_hash.result | 8
-rw-r--r-- storage/tokudb/mysql-test/tokudb_bugs/r/db756_card_part_hash_1.result | 12
-rw-r--r-- storage/tokudb/mysql-test/tokudb_bugs/r/db756_card_part_hash_1_pick.result | 8
-rw-r--r-- storage/tokudb/mysql-test/tokudb_bugs/r/db756_card_part_hash_2.result | 12
-rw-r--r-- storage/tokudb/mysql-test/tokudb_bugs/r/db756_card_part_hash_2_pick.result | 6
-rw-r--r-- storage/tokudb/mysql-test/tokudb_bugs/r/db757_part_alter_analyze.result | 26
-rw-r--r-- storage/tokudb/mysql-test/tokudb_bugs/r/db917.result | 14
-rw-r--r-- storage/tokudb/mysql-test/tokudb_bugs/r/db938.result | 35
-rw-r--r-- storage/tokudb/mysql-test/tokudb_bugs/r/db945.result | 15
-rw-r--r-- storage/tokudb/mysql-test/tokudb_bugs/t/5585-master.opt | 2
-rw-r--r-- storage/tokudb/mysql-test/tokudb_bugs/t/5585.test | 9
-rw-r--r-- storage/tokudb/mysql-test/tokudb_bugs/t/db233.test | 70
-rw-r--r-- storage/tokudb/mysql-test/tokudb_bugs/t/db397_delete_trigger.test | 6
-rw-r--r-- storage/tokudb/mysql-test/tokudb_bugs/t/db397_insert_trigger.test | 6
-rw-r--r-- storage/tokudb/mysql-test/tokudb_bugs/t/db397_update_trigger.test | 4
-rw-r--r-- storage/tokudb/mysql-test/tokudb_bugs/t/db917.test | 23
-rw-r--r-- storage/tokudb/mysql-test/tokudb_bugs/t/db938.test | 77
-rw-r--r-- storage/tokudb/mysql-test/tokudb_bugs/t/db945.test | 24
-rw-r--r-- storage/tokudb/mysql-test/tokudb_bugs/t/disabled.def | 16
-rw-r--r-- storage/tokudb/mysql-test/tokudb_bugs/t/suite.opt | 1
-rw-r--r-- storage/tokudb/mysql-test/tokudb_bugs/t/xa-3.test | 6
-rw-r--r-- storage/tokudb/mysql-test/tokudb_bugs/t/xa-4.test | 6
-rw-r--r-- storage/tokudb/mysql-test/tokudb_bugs/t/xa-6.test | 6
-rw-r--r-- storage/tokudb/mysql-test/tokudb_mariadb/r/mdev5426.result | 2
-rw-r--r-- storage/tokudb/mysql-test/tokudb_mariadb/r/mrr.result | 4
-rw-r--r-- storage/tokudb/mysql-test/tokudb_parts/disabled.def | 3
-rw-r--r-- storage/tokudb/mysql-test/tokudb_parts/r/partition_alter4_tokudb.result | 192
-rw-r--r-- storage/tokudb/mysql-test/tokudb_parts/r/partition_auto_increment_tokudb.result | 4
-rw-r--r-- storage/tokudb/mysql-test/tokudb_parts/t/disabled.def | 2
-rw-r--r-- storage/tokudb/mysql-test/tokudb_parts/t/partition_auto_increment_tokudb-master.opt | 1
-rw-r--r-- storage/tokudb/mysql-test/tokudb_parts/t/partition_auto_increment_tokudb.test | 5
-rw-r--r-- storage/tokudb/mysql-test/tokudb_parts/t/suite.opt | 1
-rw-r--r-- storage/tokudb/mysql-test/tokudb_rpl/r/rpl_rfr_disable_on_expl_pk_absence.result | 47
-rw-r--r-- storage/tokudb/mysql-test/tokudb_rpl/t/rpl_rfr_disable_on_expl_pk_absence-slave.opt | 1
-rw-r--r-- storage/tokudb/mysql-test/tokudb_rpl/t/rpl_rfr_disable_on_expl_pk_absence.test | 48
-rw-r--r-- storage/tokudb/mysql-test/tokudb_sys_vars/r/tokudb_analyze_delete_fraction.result | 73
-rw-r--r-- storage/tokudb/mysql-test/tokudb_sys_vars/r/tokudb_analyze_in_background_basic.result | 99
-rw-r--r-- storage/tokudb/mysql-test/tokudb_sys_vars/r/tokudb_analyze_mode_basic.result | 89
-rw-r--r-- storage/tokudb/mysql-test/tokudb_sys_vars/r/tokudb_analyze_throttle_basic.result | 61
-rw-r--r-- storage/tokudb/mysql-test/tokudb_sys_vars/r/tokudb_analyze_time_basic.result | 61
-rw-r--r-- storage/tokudb/mysql-test/tokudb_sys_vars/r/tokudb_auto_analyze.result | 61
-rw-r--r-- storage/tokudb/mysql-test/tokudb_sys_vars/r/tokudb_cardinality_scale_percent_basic.result | 36
-rw-r--r-- storage/tokudb/mysql-test/tokudb_sys_vars/r/tokudb_pk_insert_mode_basic.result | 85
-rw-r--r-- storage/tokudb/mysql-test/tokudb_sys_vars/t/suite.opt | 1
-rw-r--r-- storage/tokudb/mysql-test/tokudb_sys_vars/t/tokudb_analyze_delete_fraction.test | 56
-rw-r--r-- storage/tokudb/mysql-test/tokudb_sys_vars/t/tokudb_analyze_in_background_basic.test | 80
-rw-r--r-- storage/tokudb/mysql-test/tokudb_sys_vars/t/tokudb_analyze_mode_basic.test | 72
-rw-r--r-- storage/tokudb/mysql-test/tokudb_sys_vars/t/tokudb_analyze_throttle_basic.test | 50
-rw-r--r-- storage/tokudb/mysql-test/tokudb_sys_vars/t/tokudb_analyze_time_basic.test | 50
-rw-r--r-- storage/tokudb/mysql-test/tokudb_sys_vars/t/tokudb_auto_analyze.test | 50
-rw-r--r-- storage/tokudb/mysql-test/tokudb_sys_vars/t/tokudb_cardinality_scale_percent_basic.test | 32
-rw-r--r-- storage/tokudb/mysql-test/tokudb_sys_vars/t/tokudb_pk_insert_mode_basic.test | 51
-rw-r--r-- storage/tokudb/tokudb_background.cc | 253
-rw-r--r-- storage/tokudb/tokudb_background.h | 212
-rw-r--r-- storage/tokudb/tokudb_buffer.h | 93
-rw-r--r-- storage/tokudb/tokudb_card.h | 211
-rw-r--r-- storage/tokudb/tokudb_debug.h | 227
-rw-r--r-- storage/tokudb/tokudb_information_schema.cc | 1210
-rw-r--r-- storage/tokudb/tokudb_information_schema.h | 50
-rw-r--r-- storage/tokudb/tokudb_math.h | 42
-rw-r--r-- storage/tokudb/tokudb_memory.h | 102
-rw-r--r-- storage/tokudb/tokudb_status.h | 442
-rw-r--r-- storage/tokudb/tokudb_sysvars.cc | 1149
-rw-r--r-- storage/tokudb/tokudb_sysvars.h | 173
-rw-r--r-- storage/tokudb/tokudb_thread.cc | 35
-rw-r--r-- storage/tokudb/tokudb_thread.h | 597
-rw-r--r-- storage/tokudb/tokudb_time.h | 73
-rw-r--r-- storage/tokudb/tokudb_txn.h | 155
-rw-r--r-- storage/tokudb/tokudb_update_fun.cc | 977
-rw-r--r-- storage/xtradb/buf/buf0buf.cc | 20
-rw-r--r-- storage/xtradb/buf/buf0lru.cc | 19
-rw-r--r-- storage/xtradb/dict/dict0crea.cc | 16
-rw-r--r-- storage/xtradb/fil/fil0fil.cc | 30
-rw-r--r-- storage/xtradb/fts/fts0fts.cc | 270
-rw-r--r-- storage/xtradb/fts/fts0opt.cc | 81
-rw-r--r-- storage/xtradb/ha/ha0ha.cc | 4
-rw-r--r-- storage/xtradb/handler/ha_innodb.cc | 57
-rw-r--r-- storage/xtradb/handler/handler0alter.cc | 18
-rw-r--r-- storage/xtradb/ibuf/ibuf0ibuf.cc | 2
-rw-r--r-- storage/xtradb/include/fil0fil.h | 5
-rw-r--r-- storage/xtradb/include/fts0fts.h | 24
-rw-r--r-- storage/xtradb/include/fts0types.h | 10
-rw-r--r-- storage/xtradb/include/log0log.h | 26
-rw-r--r-- storage/xtradb/include/log0log.ic | 28
-rw-r--r-- storage/xtradb/include/log0online.h | 15
-rw-r--r-- storage/xtradb/include/log0recv.h | 33
-rw-r--r-- storage/xtradb/include/log0recv.ic | 16
-rw-r--r-- storage/xtradb/include/os0file.h | 6
-rw-r--r-- storage/xtradb/include/os0sync.h | 128
-rw-r--r-- storage/xtradb/include/row0mysql.h | 3
-rw-r--r-- storage/xtradb/include/srv0srv.h | 20
-rw-r--r-- storage/xtradb/include/sync0rw.h | 8
-rw-r--r-- storage/xtradb/include/sync0rw.ic | 14
-rw-r--r-- storage/xtradb/include/sync0sync.h | 5
-rw-r--r-- storage/xtradb/include/sync0sync.ic | 2
-rw-r--r-- storage/xtradb/include/univ.i | 4
-rw-r--r-- storage/xtradb/lock/lock0lock.cc | 11
-rw-r--r-- storage/xtradb/log/log0log.cc | 23
-rw-r--r-- storage/xtradb/log/log0online.cc | 67
-rw-r--r-- storage/xtradb/log/log0recv.cc | 334
-rw-r--r-- storage/xtradb/mem/mem0pool.cc | 1
-rw-r--r-- storage/xtradb/os/os0file.cc | 61
-rw-r--r-- storage/xtradb/os/os0sync.cc | 247
-rw-r--r-- storage/xtradb/os/os0thread.cc | 15
-rw-r--r-- storage/xtradb/row/row0merge.cc | 7
-rw-r--r-- storage/xtradb/row/row0mysql.cc | 22
-rw-r--r-- storage/xtradb/srv/srv0conc.cc | 4
-rw-r--r-- storage/xtradb/srv/srv0srv.cc | 104
-rw-r--r-- storage/xtradb/srv/srv0start.cc | 51
-rw-r--r-- storage/xtradb/sync/sync0arr.cc | 12
-rw-r--r-- storage/xtradb/sync/sync0rw.cc | 16
-rw-r--r-- storage/xtradb/sync/sync0sync.cc | 36
-rw-r--r-- storage/xtradb/trx/trx0i_s.cc | 2
-rw-r--r-- storage/xtradb/trx/trx0roll.cc | 2
505 files changed, 24900 insertions, 9642 deletions
diff --git a/storage/connect/.gitignore b/storage/connect/.gitignore
new file mode 100644
index 00000000000..e2fa07ee143
--- /dev/null
+++ b/storage/connect/.gitignore
@@ -0,0 +1,264 @@
+# Edited by Olivier Bertrand
+*-t
+*.a
+*.ctest
+*.o
+*.reject
+*.so
+*.so.*
+*.spec
+*~
+*.bak
+*.log
+*.cmake
+*.tgz
+*.msg
+.*.swp
+*.ninja
+.ninja_*
+.gdb_history
+
+CMakeFiles/
+connect.dir/
+connect.dir-Copie/
+Debug/
+MinSizeRel/
+Release/
+RelWithDebInfo/
+
+# C and C++
+
+# Compiled Object files
+*.slo
+*.lo
+*.o
+*.ko
+*.obj
+*.elf
+*.exp
+*.manifest
+*.dep
+*.idb
+*.res
+
+# Precompiled Headers
+*.gch
+*.pch
+
+# Compiled Static libraries
+*.lib
+*.a
+*.la
+*.lai
+*.lo
+
+# Compiled Dynamic libraries
+*.so
+*.so.*
+*.dylib
+*.dll
+
+# Executables
+*.exe
+*.out
+*.app
+*.i*86
+*.x86_64
+*.hex
+
+
+## Ignore Visual Studio temporary files, build results, and
+## files generated by popular Visual Studio add-ons.
+
+# User-specific files
+*.suo
+*.user
+*.userosscache
+*.sln.docstates
+*.ncb
+*.sln
+
+*.vcproj
+*.vcproj.*
+*.vcproj.*.*
+*.vcproj.*.*.*
+*.vcxproj
+*.vcxproj.*
+*.vcxproj.*.*
+*.vcxproj.*.*.*
+
+# Build results
+[Dd]ebug/
+[Dd]ebugPublic/
+[Rr]elease/
+[Rr]eleases/
+x64/
+x86/
+build/
+bld/
+[Bb]in/
+[Oo]bj/
+
+# Roslyn cache directories
+*.ide/
+
+# MSTest test Results
+[Tt]est[Rr]esult*/
+[Bb]uild[Ll]og.*
+
+#NUNIT
+*.VisualState.xml
+TestResult.xml
+
+# Build Results of an ATL Project
+[Dd]ebugPS/
+[Rr]eleasePS/
+dlldata.c
+
+*_i.c
+*_p.c
+*_i.h
+*.ilk
+*.meta
+*.obj
+*.pch
+*.pdb
+*.pgc
+*.pgd
+*.rsp
+*.sbr
+*.tlb
+*.tli
+*.tlh
+*.tmp
+*.tmp_proj
+*.log
+*.vspscc
+*.vssscc
+.builds
+*.pidb
+*.svclog
+*.scc
+
+# Chutzpah Test files
+_Chutzpah*
+
+# Visual C++ cache files
+ipch/
+*.aps
+*.ncb
+*.opensdf
+*.sdf
+*.cachefile
+
+# Visual Studio profiler
+*.psess
+*.vsp
+*.vspx
+
+# TFS 2012 Local Workspace
+$tf/
+
+# Guidance Automation Toolkit
+*.gpState
+
+# ReSharper is a .NET coding add-in
+_ReSharper*/
+*.[Rr]e[Ss]harper
+*.DotSettings.user
+
+# JustCode is a .NET coding add-in
+.JustCode
+
+# TeamCity is a build add-in
+_TeamCity*
+
+# DotCover is a Code Coverage Tool
+*.dotCover
+
+# NCrunch
+_NCrunch_*
+.*crunch*.local.xml
+
+# MightyMoose
+*.mm.*
+AutoTest.Net/
+
+# Web workbench (sass)
+.sass-cache/
+
+# Installshield output folder
+[Ee]xpress/
+
+# DocProject is a documentation generator add-in
+DocProject/buildhelp/
+DocProject/Help/*.HxT
+DocProject/Help/*.HxC
+DocProject/Help/*.hhc
+DocProject/Help/*.hhk
+DocProject/Help/*.hhp
+DocProject/Help/Html2
+DocProject/Help/html
+
+# Click-Once directory
+publish/
+
+# Publish Web Output
+*.[Pp]ublish.xml
+*.azurePubxml
+# TODO: Comment the next line if you want to checkin your web deploy settings
+# but database connection strings (with potential passwords) will be unencrypted
+*.pubxml
+*.publishproj
+
+# NuGet Packages
+*.nupkg
+# The packages folder can be ignored because of Package Restore
+**/packages/*
+# except build/, which is used as an MSBuild target.
+!**/packages/build/
+# If using the old MSBuild-Integrated Package Restore, uncomment this:
+#!**/packages/repositories.config
+
+# Windows Azure Build Output
+csx/
+*.build.csdef
+
+# Windows Store app package directory
+AppPackages/
+
+# Others
+# sql/
+*.Cache
+ClientBin/
+[Ss]tyle[Cc]op.*
+~$*
+*~
+*.dbmdl
+*.dbproj.schemaview
+*.pfx
+*.publishsettings
+node_modules/
+
+# RIA/Silverlight projects
+Generated_Code/
+
+# Backup & report files from converting an old project file
+# to a newer Visual Studio version. Backup files are not needed,
+# because we have git ;-)
+_UpgradeReport_Files/
+Backup*/
+UpgradeLog*.XML
+UpgradeLog*.htm
+
+# SQL Server files
+*.mdf
+*.ldf
+
+# Business Intelligence projects
+*.rdl.data
+*.bim.layout
+*.bim_*.settings
+
+# Microsoft Fakes
+FakesAssemblies/
diff --git a/storage/connect/CMakeLists.txt b/storage/connect/CMakeLists.txt
index db7d178a9e4..c6b808acc60 100644
--- a/storage/connect/CMakeLists.txt
+++ b/storage/connect/CMakeLists.txt
@@ -240,14 +240,21 @@ OPTION(CONNECT_WITH_JDBC "Compile CONNECT storage engine with JDBC support" ON)
IF(CONNECT_WITH_JDBC)
# TODO: detect Java SDK and the presence of JDBC connectors
+ # TODO: Find how to compile and install the java wrapper class
# Find required libraries and include directories
- # Find how to compile and install the JdbcInterface.java class
- IF(JDBC_OK)
- INCLUDE_DIRECTORIES(${JDBC_INCLUDE_DIR})
+ FIND_PACKAGE(Java 1.6)
+ FIND_PACKAGE(JNI)
+ IF (JAVA_FOUND AND JNI_FOUND)
+ INCLUDE_DIRECTORIES(${JAVA_INCLUDE_PATH})
+ INCLUDE_DIRECTORIES(${JAVA_INCLUDE_PATH2})
+ # SET(JDBC_LIBRARY ${JAVA_JVM_LIBRARY})
+ SET(CONNECT_SOURCES ${CONNECT_SOURCES}
+ JdbcInterface.java JdbcInterface.class
+ JdbcDSInterface.java JdbcDSInterface.class
+ JdbcApacheInterface.java JdbcApacheInterface.class
+ jdbconn.cpp tabjdbc.cpp jdbconn.h tabjdbc.h jdbccat.h)
add_definitions(-DJDBC_SUPPORT)
- SET(CONNECT_SOURCES ${CONNECT_SOURCES}
- JdbcInterface.java tabjdbc.cpp jdbconn.cpp jdbccat.h jdbconn.h tabjdbc.h)
ELSE()
SET(JDBC_LIBRARY "")
ENDIF()
diff --git a/storage/connect/JdbcApacheInterface.class b/storage/connect/JdbcApacheInterface.class
new file mode 100644
index 00000000000..acd4258e3d3
--- /dev/null
+++ b/storage/connect/JdbcApacheInterface.class
Binary files differ
diff --git a/storage/connect/JdbcApacheInterface.java b/storage/connect/JdbcApacheInterface.java
new file mode 100644
index 00000000000..fdbc5bff203
--- /dev/null
+++ b/storage/connect/JdbcApacheInterface.java
@@ -0,0 +1,709 @@
+import java.math.*;
+import java.sql.*;
+import java.util.Collections;
+import java.util.Hashtable;
+import java.util.List;
+
+import org.apache.commons.dbcp2.BasicDataSource;
+
+public class JdbcApacheInterface {
+ boolean DEBUG = false;
+ String Errmsg = "No error";
+ Connection conn = null;
+ DatabaseMetaData dbmd = null;
+ Statement stmt = null;
+ PreparedStatement pstmt = null;
+ ResultSet rs = null;
+ ResultSetMetaData rsmd = null;
+ static Hashtable<String,BasicDataSource> pool = new Hashtable<String, BasicDataSource>();
+
+ // === Constructors/finalize =========================================
+ public JdbcApacheInterface() {
+ this(true);
+ } // end of default constructor
+
+ public JdbcApacheInterface(boolean b) {
+ DEBUG = b;
+ } // end of constructor
+
+ private void SetErrmsg(Exception e) {
+ if (DEBUG)
+ System.out.println(e.getMessage());
+
+ Errmsg = e.toString();
+ } // end of SetErrmsg
+
+ private void SetErrmsg(String s) {
+ if (DEBUG)
+ System.out.println(s);
+
+ Errmsg = s;
+ } // end of SetErrmsg
+
+ public String GetErrmsg() {
+ String err = Errmsg;
+
+ Errmsg = "No error";
+ return err;
+ } // end of GetErrmsg
+
+ public int JdbcConnect(String[] parms, int fsize, boolean scrollable) {
+ int rc = 0;
+ String url = parms[1];
+ BasicDataSource ds = null;
+
+ if (url == null) {
+ SetErrmsg("URL cannot be null");
+ return -1;
+ } // endif url
+
+ try {
+ if ((ds = pool.get(url)) == null) {
+ ds = new BasicDataSource();
+ ds.setDriverClassName(parms[0]);
+ ds.setUrl(url);
+ ds.setUsername(parms[2]);
+ ds.setPassword(parms[3]);
+ pool.put(url, ds);
+ } // endif ds
+
+ // Get a connection from the data source
+ conn = ds.getConnection();
+
+			// Get the database metadata object
+ dbmd = conn.getMetaData();
+
+ // Get a statement from the connection
+ if (scrollable)
+ stmt = conn.createStatement(java.sql.ResultSet.TYPE_SCROLL_INSENSITIVE, java.sql.ResultSet.CONCUR_READ_ONLY);
+ else
+ stmt = conn.createStatement(java.sql.ResultSet.TYPE_FORWARD_ONLY, java.sql.ResultSet.CONCUR_READ_ONLY);
+
+ if (DEBUG)
+ System.out.println("Statement type = " + stmt.getResultSetType()
+ + " concurrency = " + stmt.getResultSetConcurrency());
+
+ if (DEBUG) // Get the fetch size of a statement
+ System.out.println("Default fetch size = " + stmt.getFetchSize());
+
+ if (fsize != 0) {
+ // Set the fetch size
+ stmt.setFetchSize(fsize);
+
+ if (DEBUG)
+ System.out.println("New fetch size = " + stmt.getFetchSize());
+
+ } // endif fsize
+
+ } catch (SQLException se) {
+ SetErrmsg(se);
+ rc = -2;
+ } catch( Exception e ) {
+ SetErrmsg(e);
+ rc = -3;
+ } // end try/catch
+
+ return rc;
+ } // end of JdbcConnect
+
+ public int CreatePrepStmt(String sql) {
+ int rc = 0;
+
+ try {
+ pstmt = conn.prepareStatement(sql);
+ } catch (SQLException se) {
+ SetErrmsg(se);
+ rc = -1;
+ } catch (Exception e) {
+ SetErrmsg(e);
+ rc = -2;
+ } // end try/catch
+
+ return rc;
+ } // end of CreatePrepStmt
+
+ public void SetStringParm(int i, String s) {
+ try {
+ pstmt.setString(i, s);
+ } catch (Exception e) {
+ SetErrmsg(e);
+ } // end try/catch
+
+ } // end of SetStringParm
+
+ public void SetIntParm(int i, int n) {
+ try {
+ pstmt.setInt(i, n);
+ } catch (Exception e) {
+ SetErrmsg(e);
+ } // end try/catch
+
+ } // end of SetIntParm
+
+ public void SetShortParm(int i, short n) {
+ try {
+ pstmt.setShort(i, n);
+ } catch (Exception e) {
+ SetErrmsg(e);
+ } // end try/catch
+
+ } // end of SetShortParm
+
+ public void SetBigintParm(int i, long n) {
+ try {
+ pstmt.setLong(i, n);
+ } catch (Exception e) {
+ SetErrmsg(e);
+ } // end try/catch
+
+ } // end of SetBigintParm
+
+ public void SetFloatParm(int i, float f) {
+ try {
+ pstmt.setFloat(i, f);
+ } catch (Exception e) {
+ SetErrmsg(e);
+ } // end try/catch
+
+ } // end of SetFloatParm
+
+ public void SetDoubleParm(int i, double d) {
+ try {
+ pstmt.setDouble(i, d);
+ } catch (Exception e) {
+ SetErrmsg(e);
+ } // end try/catch
+
+ } // end of SetDoubleParm
+
+ public void SetTimestampParm(int i, Timestamp t) {
+ try {
+ pstmt.setTimestamp(i, t);
+ } catch (Exception e) {
+ SetErrmsg(e);
+ } // end try/catch
+
+ } // end of SetTimestampParm
+
+ public int ExecutePrep() {
+ int n = -3;
+
+ if (pstmt != null) try {
+ n = pstmt.executeUpdate();
+ } catch (SQLException se) {
+ SetErrmsg(se);
+ n = -1;
+ } catch (Exception e) {
+ SetErrmsg(e);
+ n = -2;
+ } //end try/catch
+
+ return n;
+ } // end of ExecutePrep
+
+ public boolean ClosePrepStmt() {
+ boolean b = false;
+
+ if (pstmt != null) try {
+ pstmt.close();
+ pstmt = null;
+ } catch (SQLException se) {
+ SetErrmsg(se);
+ b = true;
+ } catch (Exception e) {
+ SetErrmsg(e);
+ b = true;
+ } // end try/catch
+
+ return b;
+ } // end of ClosePrepStmt
+
+ public int JdbcDisconnect() {
+ int rc = 0;
+
+ // Cancel pending statement
+ if (stmt != null)
+ try {
+ System.out.println("Cancelling statement");
+ stmt.cancel();
+ } catch(SQLException se) {
+ SetErrmsg(se);
+ rc += 1;
+ } // nothing more we can do
+
+ // Close the statement and the connection
+ if (rs != null)
+ try {
+ if (DEBUG)
+ System.out.println("Closing result set");
+
+ rs.close();
+ } catch(SQLException se) {
+ SetErrmsg(se);
+		rc += 2;
+ } // nothing more we can do
+
+ if (stmt != null)
+ try {
+ if (DEBUG)
+ System.out.println("Closing statement");
+
+ stmt.close();
+ } catch(SQLException se) {
+ SetErrmsg(se);
+ rc += 4;
+ } // nothing more we can do
+
+ ClosePrepStmt();
+
+ if (conn != null)
+ try {
+ if (DEBUG)
+ System.out.println("Closing connection");
+
+ conn.close();
+ } catch (SQLException se) {
+ SetErrmsg(se);
+ rc += 8;
+ } //end try/catch
+
+ if (DEBUG)
+ System.out.println("All closed");
+
+ return rc;
+ } // end of JdbcDisconnect
+
+ public int GetMaxValue(int n) {
+ int m = 0;
+
+ try {
+ switch (n) {
+ case 1: // Max columns in table
+ m = dbmd.getMaxColumnsInTable();
+ break;
+ case 2: // Max catalog name length
+ m = dbmd.getMaxCatalogNameLength();
+ break;
+ case 3: // Max schema name length
+ m = dbmd.getMaxSchemaNameLength();
+ break;
+ case 4: // Max table name length
+ m = dbmd.getMaxTableNameLength();
+ break;
+ case 5: // Max column name length
+ m = dbmd.getMaxColumnNameLength();
+ break;
+ } // endswitch n
+
+ } catch(Exception e) {
+ SetErrmsg(e);
+ m = -1;
+ } // end try/catch
+
+ return m;
+ } // end of GetMaxValue
+
+ public int GetColumns(String[] parms) {
+ int ncol = 0;
+
+ try {
+ if (rs != null) rs.close();
+ rs = dbmd.getColumns(parms[0], parms[1], parms[2], parms[3]);
+
+ if (rs != null) {
+ rsmd = rs.getMetaData();
+ ncol = rsmd.getColumnCount();
+ } // endif rs
+
+ } catch(SQLException se) {
+ SetErrmsg(se);
+ } // end try/catch
+
+ return ncol;
+ } // end of GetColumns
+
+ public int GetTables(String[] parms) {
+ int ncol = 0;
+ String[] typ = null;
+
+ if (parms[3] != null) {
+ typ = new String[1];
+ typ[0] = parms[3];
+ } // endif parms
+
+ try {
+ if (rs != null) rs.close();
+ rs = dbmd.getTables(parms[0], parms[1], parms[2], typ);
+
+ if (rs != null) {
+ rsmd = rs.getMetaData();
+ ncol = rsmd.getColumnCount();
+ } // endif rs
+
+ } catch(SQLException se) {
+ SetErrmsg(se);
+ } // end try/catch
+
+ return ncol;
+  } // end of GetTables
+
+ public int Execute(String query) {
+ int n = 0;
+
+ if (DEBUG)
+ System.out.println("Executing '" + query + "'");
+
+ try {
+ boolean b = stmt.execute(query);
+
+ if (b == false) {
+ n = stmt.getUpdateCount();
+ if (rs != null) rs.close();
+ } // endif b
+
+ if (DEBUG)
+ System.out.println("Query '" + query + "' executed: n = " + n);
+
+ } catch (SQLException se) {
+ SetErrmsg(se);
+ n = -1;
+ } catch (Exception e) {
+ SetErrmsg(e);
+ n = -2;
+ } //end try/catch
+
+ return n;
+ } // end of Execute
+
+ public int GetResult() {
+ int ncol = 0;
+
+ try {
+ rs = stmt.getResultSet();
+
+ if (rs != null) {
+ rsmd = rs.getMetaData();
+ ncol = rsmd.getColumnCount();
+
+ if (DEBUG)
+ System.out.println("Result set has " + rsmd.getColumnCount() + " column(s)");
+
+ } // endif rs
+
+ } catch (SQLException se) {
+ SetErrmsg(se);
+ ncol = -1;
+ } catch (Exception e) {
+ SetErrmsg(e);
+ ncol = -2;
+ } //end try/catch
+
+ return ncol;
+ } // end of GetResult
+
+ public int ExecuteQuery(String query) {
+ int ncol = 0;
+
+ if (DEBUG)
+ System.out.println("Executing query '" + query + "'");
+
+ try {
+ rs = stmt.executeQuery(query);
+ rsmd = rs.getMetaData();
+ ncol = rsmd.getColumnCount();
+
+ if (DEBUG) {
+ System.out.println("Query '" + query + "' executed successfully");
+ System.out.println("Result set has " + rsmd.getColumnCount() + " column(s)");
+ } // endif DEBUG
+
+ } catch (SQLException se) {
+ SetErrmsg(se);
+ ncol = -1;
+ } catch (Exception e) {
+ SetErrmsg(e);
+ ncol = -2;
+ } //end try/catch
+
+ return ncol;
+ } // end of ExecuteQuery
+
+ public int ExecuteUpdate(String query) {
+ int n = 0;
+
+ if (DEBUG)
+ System.out.println("Executing update query '" + query + "'");
+
+ try {
+ n = stmt.executeUpdate(query);
+
+ if (DEBUG)
+ System.out.println("Update Query '" + query + "' executed: n = " + n);
+
+ } catch (SQLException se) {
+ SetErrmsg(se);
+ n = -1;
+ } catch (Exception e) {
+ SetErrmsg(e);
+ n = -2;
+ } //end try/catch
+
+ return n;
+ } // end of ExecuteUpdate
+
+ public int ReadNext() {
+ if (rs != null) {
+ try {
+ return rs.next() ? 1 : 0;
+ } catch (SQLException se) {
+ SetErrmsg(se);
+ return -1;
+ } //end try/catch
+
+ } else
+ return 0;
+
+ } // end of ReadNext
+
+ public boolean Fetch(int row) {
+ if (rs != null) {
+ try {
+ return rs.absolute(row);
+ } catch (SQLException se) {
+ SetErrmsg(se);
+ return false;
+ } //end try/catch
+
+ } else
+ return false;
+
+ } // end of Fetch
+
+ public String ColumnName(int n) {
+ if (rsmd == null) {
+ System.out.println("No result metadata");
+ } else try {
+ return rsmd.getColumnLabel(n);
+ } catch (SQLException se) {
+ SetErrmsg(se);
+ } //end try/catch
+
+ return null;
+ } // end of ColumnName
+
+ public int ColumnType(int n, String name) {
+ if (rsmd == null) {
+ System.out.println("No result metadata");
+ } else try {
+ if (n == 0)
+ n = rs.findColumn(name);
+
+ return rsmd.getColumnType(n);
+ } catch (SQLException se) {
+ SetErrmsg(se);
+ } //end try/catch
+
+ return 666; // Not a type
+ } // end of ColumnType
+
+ public String ColumnDesc(int n, int[] val) {
+ if (rsmd == null) {
+ System.out.println("No result metadata");
+ return null;
+ } else try {
+ val[0] = rsmd.getColumnType(n);
+ val[1] = rsmd.getPrecision(n);
+ val[2] = rsmd.getScale(n);
+ val[3] = rsmd.isNullable(n);
+ return rsmd.getColumnLabel(n);
+ } catch (SQLException se) {
+ SetErrmsg(se);
+ } //end try/catch
+
+ return null;
+ } // end of ColumnDesc
+
+ public String StringField(int n, String name) {
+ if (rs == null) {
+ System.out.println("No result set");
+ } else try {
+ return (n > 0) ? rs.getString(n) : rs.getString(name);
+ } catch (SQLException se) {
+ SetErrmsg(se);
+ } //end try/catch
+
+ return null;
+ } // end of StringField
+
+ public int IntField(int n, String name) {
+ if (rs == null) {
+ System.out.println("No result set");
+ } else try {
+ return (n > 0) ? rs.getInt(n) : rs.getInt(name);
+ } catch (SQLException se) {
+ SetErrmsg(se);
+ } //end try/catch
+
+ return 0;
+ } // end of IntField
+
+ public long BigintField(int n, String name) {
+ if (rs == null) {
+ System.out.println("No result set");
+ } else try {
+ BigDecimal bigDecimal = (n > 0) ? rs.getBigDecimal(n) : rs.getBigDecimal(name);
+ return bigDecimal != null ? bigDecimal.longValue() : 0;
+ } catch (SQLException se) {
+ SetErrmsg(se);
+ } //end try/catch
+
+ return 0;
+  } // end of BigintField
+
+ public double DoubleField(int n, String name) {
+ if (rs == null) {
+ System.out.println("No result set");
+ } else try {
+ return (n > 0) ? rs.getDouble(n) : rs.getDouble(name);
+ } catch (SQLException se) {
+ SetErrmsg(se);
+ } //end try/catch
+
+ return 0.;
+ } // end of DoubleField
+
+ public float FloatField(int n, String name) {
+ if (rs == null) {
+ System.out.println("No result set");
+ } else try {
+ return (n > 0) ? rs.getFloat(n) : rs.getFloat(name);
+ } catch (SQLException se) {
+ SetErrmsg(se);
+ } //end try/catch
+
+ return 0;
+ } // end of FloatField
+
+ public boolean BooleanField(int n, String name) {
+ if (rs == null) {
+ System.out.println("No result set");
+ } else try {
+ return (n > 0) ? rs.getBoolean(n) : rs.getBoolean(name);
+ } catch (SQLException se) {
+ SetErrmsg(se);
+ } //end try/catch
+
+ return false;
+ } // end of BooleanField
+
+ public Date DateField(int n, String name) {
+ if (rs == null) {
+ System.out.println("No result set");
+ } else try {
+ return (n > 0) ? rs.getDate(n) : rs.getDate(name);
+ } catch (SQLException se) {
+ SetErrmsg(se);
+ } //end try/catch
+
+ return null;
+ } // end of DateField
+
+ public Time TimeField(int n, String name) {
+ if (rs == null) {
+ System.out.println("No result set");
+ } else try {
+ return (n > 0) ? rs.getTime(n) : rs.getTime(name);
+ } catch (SQLException se) {
+ SetErrmsg(se);
+ } //end try/catch
+
+ return null;
+ } // end of TimeField
+
+ public Timestamp TimestampField(int n, String name) {
+ if (rs == null) {
+ System.out.println("No result set");
+ } else try {
+ return (n > 0) ? rs.getTimestamp(n) : rs.getTimestamp(name);
+ } catch (SQLException se) {
+ SetErrmsg(se);
+ } //end try/catch
+
+ return null;
+ } // end of TimestampField
+
+ public String ObjectField(int n, String name) {
+ if (rs == null) {
+ System.out.println("No result set");
+ } else try {
+ return (n > 0) ? rs.getObject(n).toString() : rs.getObject(name).toString();
+ } catch (SQLException se) {
+ SetErrmsg(se);
+ } //end try/catch
+
+ return null;
+ } // end of ObjectField
+
+ public int GetDrivers(String[] s, int mxs) {
+ int n = 0;
+ List<Driver> drivers = Collections.list(DriverManager.getDrivers());
+ int size = Math.min(mxs, drivers.size());
+
+ for (int i = 0; i < size; i++) {
+ Driver driver = (Driver)drivers.get(i);
+
+ // Get name of driver
+ s[n++] = driver.getClass().getName();
+
+ // Get version info
+ s[n++] = driver.getMajorVersion() + "." + driver.getMinorVersion();
+ s[n++] = driver.jdbcCompliant() ? "Yes" : "No";
+ s[n++] = driver.toString();
+ } // endfor i
+
+ return size;
+ } // end of GetDrivers
+
+ /**
+ * Adds the specified path to the java library path
+ * from Fahd Shariff blog
+ *
+ * @param pathToAdd the path to add
+ static public int addLibraryPath(String pathToAdd) {
+ System.out.println("jpath = " + pathToAdd);
+
+ try {
+ Field usrPathsField = ClassLoader.class.getDeclaredField("usr_paths");
+ usrPathsField.setAccessible(true);
+
+ //get array of paths
+ String[] paths = (String[])usrPathsField.get(null);
+
+ //check if the path to add is already present
+ for (String path : paths) {
+ System.out.println("path = " + path);
+
+ if (path.equals(pathToAdd))
+ return -5;
+
+ } // endfor path
+
+ //add the new path
+ String[] newPaths = Arrays.copyOf(paths, paths.length + 1);
+ newPaths[paths.length] = pathToAdd;
+ usrPathsField.set(null, newPaths);
+ System.setProperty("java.library.path",
+ System.getProperty("java.library.path") + File.pathSeparator + pathToAdd);
+ Field fieldSysPath = ClassLoader.class.getDeclaredField("sys_paths");
+ fieldSysPath.setAccessible(true);
+ fieldSysPath.set(null, null);
+ } catch (Exception e) {
+ SetErrmsg(e);
+ return -1;
+ } // end try/catch
+
+ return 0;
+ } // end of addLibraryPath
+ */
+
+} // end of class JdbcApacheInterface
diff --git a/storage/connect/JdbcDSInterface.class b/storage/connect/JdbcDSInterface.class
new file mode 100644
index 00000000000..d56c04bd81f
--- /dev/null
+++ b/storage/connect/JdbcDSInterface.class
Binary files differ
diff --git a/storage/connect/JdbcDSInterface.java b/storage/connect/JdbcDSInterface.java
new file mode 100644
index 00000000000..09f545bfb74
--- /dev/null
+++ b/storage/connect/JdbcDSInterface.java
@@ -0,0 +1,743 @@
+import java.math.*;
+import java.sql.*;
+import java.util.Collections;
+import java.util.Hashtable;
+import java.util.List;
+
+import javax.sql.DataSource;
+
+import org.mariadb.jdbc.MariaDbDataSource;
+import org.postgresql.jdbc2.optional.PoolingDataSource;
+import com.mysql.cj.jdbc.MysqlDataSource;
+import oracle.jdbc.pool.OracleDataSource;
+
+public class JdbcDSInterface {
+ boolean DEBUG = false;
+ String Errmsg = "No error";
+ Connection conn = null;
+ DatabaseMetaData dbmd = null;
+ Statement stmt = null;
+ PreparedStatement pstmt = null;
+ ResultSet rs = null;
+ ResultSetMetaData rsmd = null;
+ Hashtable<String,DataSource> dst = null;
+
+ // === Constructors/finalize =========================================
+ public JdbcDSInterface() {
+ this(true);
+ } // end of default constructor
+
+ public JdbcDSInterface(boolean b) {
+ DEBUG = b;
+ dst = new Hashtable<String, DataSource>();
+ } // end of constructor
+
+ private void SetErrmsg(Exception e) {
+ if (DEBUG)
+ System.out.println(e.getMessage());
+
+ Errmsg = e.toString();
+ } // end of SetErrmsg
+
+ private void SetErrmsg(String s) {
+ if (DEBUG)
+ System.out.println(s);
+
+ Errmsg = s;
+ } // end of SetErrmsg
+
+ public String GetErrmsg() {
+ String err = Errmsg;
+
+ Errmsg = "No error";
+ return err;
+ } // end of GetErrmsg
+
+ public int JdbcConnect(String[] parms, int fsize, boolean scrollable) {
+ int rc = 0;
+ String url = parms[1];
+ DataSource ds = null;
+ MysqlDataSource mds = null;
+ MariaDbDataSource ads = null;
+ OracleDataSource ods = null;
+ PoolingDataSource pds = null;
+
+ if (url == null) {
+ SetErrmsg("URL cannot be null");
+ return -1;
+    } // endif url
+
+ try {
+ if ((ds = dst.get(url)) == null) {
+ if (url.toLowerCase().contains("mysql")) {
+ mds = new MysqlDataSource();
+ mds.setURL(url);
+ mds.setUser(parms[2]);
+ mds.setPassword(parms[3]);
+ ds = mds;
+ } else if (url.toLowerCase().contains("mariadb")) {
+ ads = new MariaDbDataSource();
+ ads.setUrl(url);
+ ads.setUser(parms[2]);
+ ads.setPassword(parms[3]);
+ ds = ads;
+ } else if (url.toLowerCase().contains("oracle")) {
+ ods = new OracleDataSource();
+ ods.setURL(url);
+ ods.setUser(parms[2]);
+ ods.setPassword(parms[3]);
+ ds = ods;
+ } else if (url.toLowerCase().contains("postgresql")) {
+ pds = new PoolingDataSource();
+ pds.setUrl(url);
+ pds.setUser(parms[2]);
+ pds.setPassword(parms[3]);
+ ds = pds;
+ } else {
+ SetErrmsg("Unsupported driver");
+ return -4;
+ } // endif driver
+
+ dst.put(url, ds);
+ } // endif ds
+
+ // Get a connection from the data source
+ conn = ds.getConnection();
+
+ // Get the data base meta data object
+			// Get the database metadata object
+
+ // Get a statement from the connection
+ if (scrollable)
+ stmt = conn.createStatement(java.sql.ResultSet.TYPE_SCROLL_INSENSITIVE, java.sql.ResultSet.CONCUR_READ_ONLY);
+ else
+ stmt = conn.createStatement(java.sql.ResultSet.TYPE_FORWARD_ONLY, java.sql.ResultSet.CONCUR_READ_ONLY);
+
+ if (DEBUG)
+ System.out.println("Statement type = " + stmt.getResultSetType()
+ + " concurrency = " + stmt.getResultSetConcurrency());
+
+ if (DEBUG) // Get the fetch size of a statement
+ System.out.println("Default fetch size = " + stmt.getFetchSize());
+
+ if (fsize != 0) {
+ // Set the fetch size
+ stmt.setFetchSize(fsize);
+
+ if (DEBUG)
+ System.out.println("New fetch size = " + stmt.getFetchSize());
+
+ } // endif fsize
+
+ } catch (SQLException se) {
+ SetErrmsg(se);
+ rc = -2;
+ } catch( Exception e ) {
+ SetErrmsg(e);
+ rc = -3;
+ } // end try/catch
+
+ return rc;
+ } // end of JdbcConnect
+
+ public int CreatePrepStmt(String sql) {
+ int rc = 0;
+
+ try {
+ pstmt = conn.prepareStatement(sql);
+ } catch (SQLException se) {
+ SetErrmsg(se);
+ rc = -1;
+ } catch (Exception e) {
+ SetErrmsg(e);
+ rc = -2;
+ } // end try/catch
+
+ return rc;
+ } // end of CreatePrepStmt
+
+ public void SetStringParm(int i, String s) {
+ try {
+ pstmt.setString(i, s);
+ } catch (Exception e) {
+ SetErrmsg(e);
+ } // end try/catch
+
+ } // end of SetStringParm
+
+ public void SetIntParm(int i, int n) {
+ try {
+ pstmt.setInt(i, n);
+ } catch (Exception e) {
+ SetErrmsg(e);
+ } // end try/catch
+
+ } // end of SetIntParm
+
+ public void SetShortParm(int i, short n) {
+ try {
+ pstmt.setShort(i, n);
+ } catch (Exception e) {
+ SetErrmsg(e);
+ } // end try/catch
+
+ } // end of SetShortParm
+
+ public void SetBigintParm(int i, long n) {
+ try {
+ pstmt.setLong(i, n);
+ } catch (Exception e) {
+ SetErrmsg(e);
+ } // end try/catch
+
+ } // end of SetBigintParm
+
+ public void SetFloatParm(int i, float f) {
+ try {
+ pstmt.setFloat(i, f);
+ } catch (Exception e) {
+ SetErrmsg(e);
+ } // end try/catch
+
+ } // end of SetFloatParm
+
+ public void SetDoubleParm(int i, double d) {
+ try {
+ pstmt.setDouble(i, d);
+ } catch (Exception e) {
+ SetErrmsg(e);
+ } // end try/catch
+
+ } // end of SetDoubleParm
+
+ public void SetTimestampParm(int i, Timestamp t) {
+ try {
+ pstmt.setTimestamp(i, t);
+ } catch (Exception e) {
+ SetErrmsg(e);
+ } // end try/catch
+
+ } // end of SetTimestampParm
+
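+ // Run the prepared statement; returns its update count, or a negative
+ // code (-3 when no statement was prepared, -1/-2 on exception).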
+ public int ExecutePrep() {
+ int n = -3;
+
+ if (pstmt != null) try {
+ n = pstmt.executeUpdate();
+ } catch (SQLException se) {
+ SetErrmsg(se);
+ n = -1;
+ } catch (Exception e) {
+ SetErrmsg(e);
+ n = -2;
+ } //end try/catch
+
+ return n;
+ } // end of ExecutePrep
+
+ public boolean ClosePrepStmt() {
+ boolean b = false;
+
+ if (pstmt != null) try {
+ pstmt.close();
+ pstmt = null;
+ } catch (SQLException se) {
+ SetErrmsg(se);
+ b = true;
+ } catch (Exception e) {
+ SetErrmsg(e);
+ b = true;
+ } // end try/catch
+
+ return b;
+ } // end of ClosePrepStmt
+
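+ // Close everything tied to the connection. Failed steps are flagged in
+ // rc: 1 cancel, 2 result set, 4 statement, 8 connection.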
+ public int JdbcDisconnect() {
+ int rc = 0;
+
+ // Cancel pending statement
+ if (stmt != null)
+ try {
+ System.out.println("Cancelling statement");
+ stmt.cancel();
+ } catch(SQLException se) {
+ SetErrmsg(se);
+ rc += 1;
+ } // nothing more we can do
+
+ // Close the statement and the connection
+ if (rs != null)
+ try {
+ if (DEBUG)
+ System.out.println("Closing result set");
+
+ rs.close();
+ } catch(SQLException se) {
+ SetErrmsg(se);
+ rc += 2;
+ } // nothing more we can do
+
+ if (stmt != null)
+ try {
+ if (DEBUG)
+ System.out.println("Closing statement");
+
+ stmt.close();
+ } catch(SQLException se) {
+ SetErrmsg(se);
+ rc += 4;
+ } // nothing more we can do
+
+ ClosePrepStmt();
+
+ if (conn != null)
+ try {
+ if (DEBUG)
+ System.out.println("Closing connection");
+
+ conn.close();
+ } catch (SQLException se) {
+ SetErrmsg(se);
+ rc += 8;
+ } //end try/catch
+
+ if (DEBUG)
+ System.out.println("All closed");
+
+ return rc;
+ } // end of JdbcDisconnect
+
+ public int GetMaxValue(int n) {
+ int m = 0;
+
+ try {
+ switch (n) {
+ case 1: // Max columns in table
+ m = dbmd.getMaxColumnsInTable();
+ break;
+ case 2: // Max catalog name length
+ m = dbmd.getMaxCatalogNameLength();
+ break;
+ case 3: // Max schema name length
+ m = dbmd.getMaxSchemaNameLength();
+ break;
+ case 4: // Max table name length
+ m = dbmd.getMaxTableNameLength();
+ break;
+ case 5: // Max column name length
+ m = dbmd.getMaxColumnNameLength();
+ break;
+ } // endswitch n
+
+ } catch(Exception e) {
+ SetErrmsg(e);
+ m = -1;
+ } // end try/catch
+
+ return m;
+ } // end of GetMaxValue
+
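+ // Catalog queries: parms holds {catalog, schema pattern, table pattern,
+ // column pattern or table type}; the DatabaseMetaData result set is kept
+ // in rs and its column count is returned (0 when nothing was obtained).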
+ public int GetColumns(String[] parms) {
+ int ncol = 0;
+
+ try {
+ if (rs != null) rs.close();
+ rs = dbmd.getColumns(parms[0], parms[1], parms[2], parms[3]);
+
+ if (rs != null) {
+ rsmd = rs.getMetaData();
+ ncol = rsmd.getColumnCount();
+ } // endif rs
+
+ } catch(SQLException se) {
+ SetErrmsg(se);
+ } // end try/catch
+
+ return ncol;
+ } // end of GetColumns
+
+ public int GetTables(String[] parms) {
+ int ncol = 0;
+ String[] typ = null;
+
+ if (parms[3] != null) {
+ typ = new String[1];
+ typ[0] = parms[3];
+ } // endif parms
+
+ try {
+ if (rs != null) rs.close();
+ rs = dbmd.getTables(parms[0], parms[1], parms[2], typ);
+
+ if (rs != null) {
+ rsmd = rs.getMetaData();
+ ncol = rsmd.getColumnCount();
+ } // endif rs
+
+ } catch(SQLException se) {
+ SetErrmsg(se);
+ } // end try/catch
+
+ return ncol;
+ } // end of GetTables
+
+ public int Execute(String query) {
+ int n = 0;
+
+ if (DEBUG)
+ System.out.println("Executing '" + query + "'");
+
+ try {
+ boolean b = stmt.execute(query);
+
+ if (!b) {
+ n = stmt.getUpdateCount();
+ if (rs != null) rs.close();
+ } // endif b
+
+ if (DEBUG)
+ System.out.println("Query '" + query + "' executed: n = " + n);
+
+ } catch (SQLException se) {
+ SetErrmsg(se);
+ n = -1;
+ } catch (Exception e) {
+ SetErrmsg(e);
+ n = -2;
+ } //end try/catch
+
+ return n;
+ } // end of Execute
+
+ public int GetResult() {
+ int ncol = 0;
+
+ try {
+ rs = stmt.getResultSet();
+
+ if (rs != null) {
+ rsmd = rs.getMetaData();
+ ncol = rsmd.getColumnCount();
+
+ if (DEBUG)
+ System.out.println("Result set has " + rsmd.getColumnCount() + " column(s)");
+
+ } // endif rs
+
+ } catch (SQLException se) {
+ SetErrmsg(se);
+ ncol = -1;
+ } catch (Exception e) {
+ SetErrmsg(e);
+ ncol = -2;
+ } //end try/catch
+
+ return ncol;
+ } // end of GetResult
+
+ public int ExecuteQuery(String query) {
+ int ncol = 0;
+
+ if (DEBUG)
+ System.out.println("Executing query '" + query + "'");
+
+ try {
+ rs = stmt.executeQuery(query);
+ rsmd = rs.getMetaData();
+ ncol = rsmd.getColumnCount();
+
+ if (DEBUG) {
+ System.out.println("Query '" + query + "' executed successfully");
+ System.out.println("Result set has " + rsmd.getColumnCount() + " column(s)");
+ } // endif DEBUG
+
+ } catch (SQLException se) {
+ SetErrmsg(se);
+ ncol = -1;
+ } catch (Exception e) {
+ SetErrmsg(e);
+ ncol = -2;
+ } //end try/catch
+
+ return ncol;
+ } // end of ExecuteQuery
+
+ public int ExecuteUpdate(String query) {
+ int n = 0;
+
+ if (DEBUG)
+ System.out.println("Executing update query '" + query + "'");
+
+ try {
+ n = stmt.executeUpdate(query);
+
+ if (DEBUG)
+ System.out.println("Update Query '" + query + "' executed: n = " + n);
+
+ } catch (SQLException se) {
+ SetErrmsg(se);
+ n = -1;
+ } catch (Exception e) {
+ SetErrmsg(e);
+ n = -2;
+ } //end try/catch
+
+ return n;
+ } // end of ExecuteUpdate
+
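+ // Row stepping: ReadNext returns 1 while rows remain, 0 at end, -1 on
+ // error; Fetch positions on an absolute row and so requires the statement
+ // to have been created scrollable.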
+ public int ReadNext() {
+ if (rs != null) {
+ try {
+ return rs.next() ? 1 : 0;
+ } catch (SQLException se) {
+ SetErrmsg(se);
+ return -1;
+ } //end try/catch
+
+ } else
+ return 0;
+
+ } // end of ReadNext
+
+ public boolean Fetch(int row) {
+ if (rs != null) {
+ try {
+ return rs.absolute(row);
+ } catch (SQLException se) {
+ SetErrmsg(se);
+ return false;
+ } //end try/catch
+
+ } else
+ return false;
+
+ } // end of Fetch
+
+ public String ColumnName(int n) {
+ if (rsmd == null) {
+ System.out.println("No result metadata");
+ } else try {
+ return rsmd.getColumnLabel(n);
+ } catch (SQLException se) {
+ SetErrmsg(se);
+ } //end try/catch
+
+ return null;
+ } // end of ColumnName
+
+ public int ColumnType(int n, String name) {
+ if (rsmd == null) {
+ System.out.println("No result metadata");
+ } else try {
+ if (n == 0)
+ n = rs.findColumn(name);
+
+ return rsmd.getColumnType(n);
+ } catch (SQLException se) {
+ SetErrmsg(se);
+ } //end try/catch
+
+ return 666; // Not a type
+ } // end of ColumnType
+
+ public String ColumnDesc(int n, int[] val) {
+ if (rsmd == null) {
+ System.out.println("No result metadata");
+ return null;
+ } else try {
+ val[0] = rsmd.getColumnType(n);
+ val[1] = rsmd.getPrecision(n);
+ val[2] = rsmd.getScale(n);
+ val[3] = rsmd.isNullable(n);
+ return rsmd.getColumnLabel(n);
+ } catch (SQLException se) {
+ SetErrmsg(se);
+ } //end try/catch
+
+ return null;
+ } // end of ColumnDesc
+
+ public String StringField(int n, String name) {
+ if (rs == null) {
+ System.out.println("No result set");
+ } else try {
+ return (n > 0) ? rs.getString(n) : rs.getString(name);
+ } catch (SQLException se) {
+ SetErrmsg(se);
+ } //end try/catch
+
+ return null;
+ } // end of StringField
+
+ public int IntField(int n, String name) {
+ if (rs == null) {
+ System.out.println("No result set");
+ } else try {
+ return (n > 0) ? rs.getInt(n) : rs.getInt(name);
+ } catch (SQLException se) {
+ SetErrmsg(se);
+ } //end try/catch
+
+ return 0;
+ } // end of IntField
+
+ public long BigintField(int n, String name) {
+ if (rs == null) {
+ System.out.println("No result set");
+ } else try {
+ BigDecimal bigDecimal = (n > 0) ? rs.getBigDecimal(n) : rs.getBigDecimal(name);
+ return bigDecimal != null ? bigDecimal.longValue() : 0;
+ } catch (SQLException se) {
+ SetErrmsg(se);
+ } //end try/catch
+
+ return 0;
+ } // end of BigintField
+
+ public double DoubleField(int n, String name) {
+ if (rs == null) {
+ System.out.println("No result set");
+ } else try {
+ return (n > 0) ? rs.getDouble(n) : rs.getDouble(name);
+ } catch (SQLException se) {
+ SetErrmsg(se);
+ } //end try/catch
+
+ return 0.;
+ } // end of DoubleField
+
+ public float FloatField(int n, String name) {
+ if (rs == null) {
+ System.out.println("No result set");
+ } else try {
+ return (n > 0) ? rs.getFloat(n) : rs.getFloat(name);
+ } catch (SQLException se) {
+ SetErrmsg(se);
+ } //end try/catch
+
+ return 0;
+ } // end of FloatField
+
+ public boolean BooleanField(int n, String name) {
+ if (rs == null) {
+ System.out.println("No result set");
+ } else try {
+ return (n > 0) ? rs.getBoolean(n) : rs.getBoolean(name);
+ } catch (SQLException se) {
+ SetErrmsg(se);
+ } //end try/catch
+
+ return false;
+ } // end of BooleanField
+
+ public Date DateField(int n, String name) {
+ if (rs == null) {
+ System.out.println("No result set");
+ } else try {
+ return (n > 0) ? rs.getDate(n) : rs.getDate(name);
+ } catch (SQLException se) {
+ SetErrmsg(se);
+ } //end try/catch
+
+ return null;
+ } // end of DateField
+
+ public Time TimeField(int n, String name) {
+ if (rs == null) {
+ System.out.println("No result set");
+ } else try {
+ return (n > 0) ? rs.getTime(n) : rs.getTime(name);
+ } catch (SQLException se) {
+ SetErrmsg(se);
+ } //end try/catch
+
+ return null;
+ } // end of TimeField
+
+ public Timestamp TimestampField(int n, String name) {
+ if (rs == null) {
+ System.out.println("No result set");
+ } else try {
+ return (n > 0) ? rs.getTimestamp(n) : rs.getTimestamp(name);
+ } catch (SQLException se) {
+ SetErrmsg(se);
+ } //end try/catch
+
+ return null;
+ } // end of TimestampField
+
+ public String ObjectField(int n, String name) {
+ if (rs == null) {
+ System.out.println("No result set");
+ } else try {
+ return (n > 0) ? rs.getObject(n).toString() : rs.getObject(name).toString();
+ } catch (SQLException se) {
+ SetErrmsg(se);
+ } //end try/catch
+
+ return null;
+ } // end of ObjectField
+
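+ // Each reported driver fills four consecutive slots of s: class name,
+ // version "major.minor", JDBC compliance, description; the caller must
+ // size s for four entries per driver.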
+ public int GetDrivers(String[] s, int mxs) {
+ int n = 0;
+ List<Driver> drivers = Collections.list(DriverManager.getDrivers());
+ int size = Math.min(mxs, drivers.size());
+
+ for (int i = 0; i < size; i++) {
+ Driver driver = (Driver)drivers.get(i);
+
+ // Get name of driver
+ s[n++] = driver.getClass().getName();
+
+ // Get version info
+ s[n++] = driver.getMajorVersion() + "." + driver.getMinorVersion();
+ s[n++] = driver.jdbcCompliant() ? "Yes" : "No";
+ s[n++] = driver.toString();
+ } // endfor i
+
+ return size;
+ } // end of GetDrivers
+
+ /**
+ * Adds the specified path to the java library path
+ * from Fahd Shariff blog
+ *
+ * @param pathToAdd the path to add
+ static public int addLibraryPath(String pathToAdd) {
+ System.out.println("jpath = " + pathToAdd);
+
+ try {
+ Field usrPathsField = ClassLoader.class.getDeclaredField("usr_paths");
+ usrPathsField.setAccessible(true);
+
+ //get array of paths
+ String[] paths = (String[])usrPathsField.get(null);
+
+ //check if the path to add is already present
+ for (String path : paths) {
+ System.out.println("path = " + path);
+
+ if (path.equals(pathToAdd))
+ return -5;
+
+ } // endfor path
+
+ //add the new path
+ String[] newPaths = Arrays.copyOf(paths, paths.length + 1);
+ newPaths[paths.length] = pathToAdd;
+ usrPathsField.set(null, newPaths);
+ System.setProperty("java.library.path",
+ System.getProperty("java.library.path") + File.pathSeparator + pathToAdd);
+ Field fieldSysPath = ClassLoader.class.getDeclaredField("sys_paths");
+ fieldSysPath.setAccessible(true);
+ fieldSysPath.set(null, null);
+ } catch (Exception e) {
+ SetErrmsg(e);
+ return -1;
+ } // end try/catch
+
+ return 0;
+ } // end of addLibraryPath
+ */
+
+} // end of class JdbcDSInterface
diff --git a/storage/connect/JdbcInterface.class b/storage/connect/JdbcInterface.class
new file mode 100644
index 00000000000..8c5ba6439f3
--- /dev/null
+++ b/storage/connect/JdbcInterface.class
Binary files differ
diff --git a/storage/connect/JdbcInterface.java b/storage/connect/JdbcInterface.java
new file mode 100644
index 00000000000..f9a6e734454
--- /dev/null
+++ b/storage/connect/JdbcInterface.java
@@ -0,0 +1,712 @@
+import java.math.*;
+import java.sql.*;
+//import java.util.Arrays;
+import java.util.Collections;
+import java.util.List;
+//import java.io.File;
+//import java.lang.reflect.Field;
+
+public class JdbcInterface {
+ boolean DEBUG = false;
+ String Errmsg = "No error";
+ Connection conn = null;
+ DatabaseMetaData dbmd = null;
+ Statement stmt = null;
+ PreparedStatement pstmt = null;
+ ResultSet rs = null;
+ ResultSetMetaData rsmd = null;
+
+ // === Constructors/finalize =========================================
+ public JdbcInterface() {
+ this(true);
+ } // end of default constructor
+
+ public JdbcInterface(boolean b) {
+ DEBUG = b;
+ } // end of constructor
+
+ private void SetErrmsg(Exception e) {
+ if (DEBUG)
+ System.out.println(e.getMessage());
+
+ Errmsg = e.toString();
+ } // end of SetErrmsg
+
+ public String GetErrmsg() {
+ String err = Errmsg;
+
+ Errmsg = "No error";
+ return err;
+ } // end of GetErrmsg
+
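+ // Generic connection path: load the driver class named in parms[0] when
+ // given, then connect through DriverManager using the URL and optional
+ // credentials in parms[1..3]. Returns 0 on success, -1 class not found,
+ // -2 SQL error, -3 other exception.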
+ public int JdbcConnect(String[] parms, int fsize, boolean scrollable) {
+ int rc = 0;
+
+ if (DEBUG)
+ System.out.println("In JdbcInterface: driver=" + parms[0]);
+
+ try {
+ if (DEBUG)
+ System.out.println("In try block");
+
+ if (parms[0] != null && !parms[0].isEmpty()) {
+ if (DEBUG)
+ System.out.println("Loading class" + parms[0]);
+
+ Class.forName(parms[0]); //loads the driver
+ } // endif driver
+
+ if (DEBUG)
+ System.out.println("URL=" + parms[1]);
+
+ if (parms[2] != null && !parms[2].isEmpty()) {
+ if (DEBUG)
+ System.out.println("user=" + parms[2] + " pwd=" + parms[3]);
+
+ conn = DriverManager.getConnection(parms[1], parms[2], parms[3]);
+ } else
+ conn = DriverManager.getConnection(parms[1]);
+
+ if (DEBUG)
+ System.out.println("Connection " + conn.toString() + " established");
+
+ // Get the data base meta data object
+ dbmd = conn.getMetaData();
+
+ // Get a statement from the connection
+ if (scrollable)
+ stmt = conn.createStatement(java.sql.ResultSet.TYPE_SCROLL_INSENSITIVE, java.sql.ResultSet.CONCUR_READ_ONLY);
+ else
+ stmt = conn.createStatement(java.sql.ResultSet.TYPE_FORWARD_ONLY, java.sql.ResultSet.CONCUR_READ_ONLY);
+
+ if (DEBUG)
+ System.out.println("Statement type = " + stmt.getResultSetType()
+ + " concurrency = " + stmt.getResultSetConcurrency());
+
+ if (DEBUG) // Get the fetch size of a statement
+ System.out.println("Default fetch size = " + stmt.getFetchSize());
+
+ if (fsize != 0) {
+ // Set the fetch size
+ stmt.setFetchSize(fsize);
+
+ if (DEBUG)
+ System.out.println("New fetch size = " + stmt.getFetchSize());
+
+ } // endif fsize
+
+ } catch(ClassNotFoundException e) {
+ SetErrmsg(e);
+ rc = -1;
+ } catch (SQLException se) {
+ SetErrmsg(se);
+ rc = -2;
+ } catch( Exception e ) {
+ SetErrmsg(e);
+ rc = -3;
+ } // end try/catch
+
+ return rc;
+ } // end of JdbcConnect
+
+ public int CreatePrepStmt(String sql) {
+ int rc = 0;
+
+ try {
+ pstmt = conn.prepareStatement(sql);
+ } catch (SQLException se) {
+ SetErrmsg(se);
+ rc = -1;
+ } catch (Exception e) {
+ SetErrmsg(e);
+ rc = -2;
+ } // end try/catch
+
+ return rc;
+ } // end of CreatePrepStmt
+
+ public void SetStringParm(int i, String s) {
+ try {
+ pstmt.setString(i, s);
+ } catch (Exception e) {
+ SetErrmsg(e);
+ } // end try/catch
+
+ } // end of SetStringParm
+
+ public void SetIntParm(int i, int n) {
+ try {
+ pstmt.setInt(i, n);
+ } catch (Exception e) {
+ SetErrmsg(e);
+ } // end try/catch
+
+ } // end of SetIntParm
+
+ public void SetShortParm(int i, short n) {
+ try {
+ pstmt.setShort(i, n);
+ } catch (Exception e) {
+ SetErrmsg(e);
+ } // end try/catch
+
+ } // end of SetShortParm
+
+ public void SetBigintParm(int i, long n) {
+ try {
+ pstmt.setLong(i, n);
+ } catch (Exception e) {
+ SetErrmsg(e);
+ } // end try/catch
+
+ } // end of SetBigintParm
+
+ public void SetFloatParm(int i, float f) {
+ try {
+ pstmt.setFloat(i, f);
+ } catch (Exception e) {
+ SetErrmsg(e);
+ } // end try/catch
+
+ } // end of SetFloatParm
+
+ public void SetDoubleParm(int i, double d) {
+ try {
+ pstmt.setDouble(i, d);
+ } catch (Exception e) {
+ SetErrmsg(e);
+ } // end try/catch
+
+ } // end of SetDoubleParm
+
+ public void SetTimestampParm(int i, Timestamp t) {
+ try {
+ pstmt.setTimestamp(i, t);
+ } catch (Exception e) {
+ SetErrmsg(e);
+ } // end try/catch
+
+ } // end of SetTimestampParm
+
+ public int ExecutePrep() {
+ int n = -3;
+
+ if (pstmt != null) try {
+ n = pstmt.executeUpdate();
+ } catch (SQLException se) {
+ SetErrmsg(se);
+ n = -1;
+ } catch (Exception e) {
+ SetErrmsg(e);
+ n = -2;
+ } //end try/catch
+
+ return n;
+ } // end of ExecutePrep
+
+ public boolean ClosePrepStmt() {
+ boolean b = false;
+
+ if (pstmt != null) try {
+ pstmt.close();
+ pstmt = null;
+ } catch (SQLException se) {
+ SetErrmsg(se);
+ b = true;
+ } catch (Exception e) {
+ SetErrmsg(e);
+ b = true;
+ } // end try/catch
+
+ return b;
+ } // end of ClosePrepStmt
+
+ public int JdbcDisconnect() {
+ int rc = 0;
+
+ // Cancel pending statement
+ if (stmt != null)
+ try {
+ System.out.println("Cancelling statement");
+ stmt.cancel();
+ } catch(SQLException se) {
+ SetErrmsg(se);
+ rc += 1;
+ } // nothing more we can do
+
+ // Close the statement and the connection
+ if (rs != null)
+ try {
+ if (DEBUG)
+ System.out.println("Closing result set");
+
+ rs.close();
+ } catch(SQLException se) {
+ SetErrmsg(se);
+ rc += 2;
+ } // nothing more we can do
+
+ if (stmt != null)
+ try {
+ if (DEBUG)
+ System.out.println("Closing statement");
+
+ stmt.close();
+ } catch(SQLException se) {
+ SetErrmsg(se);
+ rc += 4;
+ } // nothing more we can do
+
+ ClosePrepStmt();
+
+ if (conn != null)
+ try {
+ if (DEBUG)
+ System.out.println("Closing connection");
+
+ conn.close();
+ } catch (SQLException se) {
+ SetErrmsg(se);
+ rc += 8;
+ } //end try/catch
+
+ if (DEBUG)
+ System.out.println("All closed");
+
+ return rc;
+ } // end of JdbcDisconnect
+
+ public int GetMaxValue(int n) {
+ int m = 0;
+
+ try {
+ switch (n) {
+ case 1: // Max columns in table
+ m = dbmd.getMaxColumnsInTable();
+ break;
+ case 2: // Max catalog name length
+ m = dbmd.getMaxCatalogNameLength();
+ break;
+ case 3: // Max schema name length
+ m = dbmd.getMaxSchemaNameLength();
+ break;
+ case 4: // Max table name length
+ m = dbmd.getMaxTableNameLength();
+ break;
+ case 5: // Max column name length
+ m = dbmd.getMaxColumnNameLength();
+ break;
+ } // endswitch n
+
+ } catch(Exception e) {
+ SetErrmsg(e);
+ m = -1;
+ } // end try/catch
+
+ return m;
+ } // end of GetMaxValue
+
+ public int GetColumns(String[] parms) {
+ int ncol = 0;
+
+ try {
+ if (rs != null) rs.close();
+ rs = dbmd.getColumns(parms[0], parms[1], parms[2], parms[3]);
+
+ if (rs != null) {
+ rsmd = rs.getMetaData();
+ ncol = rsmd.getColumnCount();
+ } // endif rs
+
+ } catch(SQLException se) {
+ SetErrmsg(se);
+ } // end try/catch
+
+ return ncol;
+ } // end of GetColumns
+
+ public int GetTables(String[] parms) {
+ int ncol = 0;
+ String[] typ = null;
+
+ if (parms[3] != null) {
+ typ = new String[1];
+ typ[0] = parms[3];
+ } // endif parms
+
+ try {
+ if (rs != null) rs.close();
+ rs = dbmd.getTables(parms[0], parms[1], parms[2], typ);
+
+ if (rs != null) {
+ rsmd = rs.getMetaData();
+ ncol = rsmd.getColumnCount();
+ } // endif rs
+
+ } catch(SQLException se) {
+ SetErrmsg(se);
+ } // end try/catch
+
+ return ncol;
+ } // end of GetTables
+
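+ // Execute runs any SQL text: it returns the update count for non-queries
+ // and 0 when a result set is pending, which is then picked up by GetResult.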
+ public int Execute(String query) {
+ int n = 0;
+
+ if (DEBUG)
+ System.out.println("Executing '" + query + "'");
+
+ try {
+ boolean b = stmt.execute(query);
+
+ if (!b) {
+ n = stmt.getUpdateCount();
+ if (rs != null) rs.close();
+ } // endif b
+
+ if (DEBUG)
+ System.out.println("Query '" + query + "' executed: n = " + n);
+
+ } catch (SQLException se) {
+ SetErrmsg(se);
+ n = -1;
+ } catch (Exception e) {
+ SetErrmsg(e);
+ n = -2;
+ } //end try/catch
+
+ return n;
+ } // end of Execute
+
+ public int GetResult() {
+ int ncol = 0;
+
+ try {
+ rs = stmt.getResultSet();
+
+ if (rs != null) {
+ rsmd = rs.getMetaData();
+ ncol = rsmd.getColumnCount();
+
+ if (DEBUG)
+ System.out.println("Result set has " + rsmd.getColumnCount() + " column(s)");
+
+ } // endif rs
+
+ } catch (SQLException se) {
+ SetErrmsg(se);
+ ncol = -1;
+ } catch (Exception e) {
+ SetErrmsg(e);
+ ncol = -2;
+ } //end try/catch
+
+ return ncol;
+ } // end of GetResult
+
+ public int ExecuteQuery(String query) {
+ int ncol = 0;
+
+ if (DEBUG)
+ System.out.println("Executing query '" + query + "'");
+
+ try {
+ rs = stmt.executeQuery(query);
+ rsmd = rs.getMetaData();
+ ncol = rsmd.getColumnCount();
+
+ if (DEBUG) {
+ System.out.println("Query '" + query + "' executed successfully");
+ System.out.println("Result set has " + rsmd.getColumnCount() + " column(s)");
+ } // endif DEBUG
+
+ } catch (SQLException se) {
+ SetErrmsg(se);
+ ncol = -1;
+ } catch (Exception e) {
+ SetErrmsg(e);
+ ncol = -2;
+ } //end try/catch
+
+ return ncol;
+ } // end of ExecuteQuery
+
+ public int ExecuteUpdate(String query) {
+ int n = 0;
+
+ if (DEBUG)
+ System.out.println("Executing update query '" + query + "'");
+
+ try {
+ n = stmt.executeUpdate(query);
+
+ if (DEBUG)
+ System.out.println("Update Query '" + query + "' executed: n = " + n);
+
+ } catch (SQLException se) {
+ SetErrmsg(se);
+ n = -1;
+ } catch (Exception e) {
+ SetErrmsg(e);
+ n = -2;
+ } //end try/catch
+
+ return n;
+ } // end of ExecuteUpdate
+
+ public int ReadNext() {
+ if (rs != null) {
+ try {
+ return rs.next() ? 1 : 0;
+ } catch (SQLException se) {
+ SetErrmsg(se);
+ return -1;
+ } //end try/catch
+
+ } else
+ return 0;
+
+ } // end of ReadNext
+
+ public boolean Fetch(int row) {
+ if (rs != null) {
+ try {
+ return rs.absolute(row);
+ } catch (SQLException se) {
+ SetErrmsg(se);
+ return false;
+ } //end try/catch
+
+ } else
+ return false;
+
+ } // end of Fetch
+
+ public String ColumnName(int n) {
+ if (rsmd == null) {
+ System.out.println("No result metadata");
+ } else try {
+ return rsmd.getColumnLabel(n);
+ } catch (SQLException se) {
+ SetErrmsg(se);
+ } //end try/catch
+
+ return null;
+ } // end of ColumnName
+
+ public int ColumnType(int n, String name) {
+ if (rsmd == null) {
+ System.out.println("No result metadata");
+ } else try {
+ if (n == 0)
+ n = rs.findColumn(name);
+
+ return rsmd.getColumnType(n);
+ } catch (SQLException se) {
+ SetErrmsg(se);
+ } //end try/catch
+
+ return 666; // Not a type
+ } // end of ColumnType
+
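+ // Describe column n through the val output array: val[0] type, val[1]
+ // precision, val[2] scale, val[3] nullability; the column label is returned.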
+ public String ColumnDesc(int n, int[] val) {
+ if (rsmd == null) {
+ System.out.println("No result metadata");
+ return null;
+ } else try {
+ val[0] = rsmd.getColumnType(n);
+ val[1] = rsmd.getPrecision(n);
+ val[2] = rsmd.getScale(n);
+ val[3] = rsmd.isNullable(n);
+ return rsmd.getColumnLabel(n);
+ } catch (SQLException se) {
+ SetErrmsg(se);
+ } //end try/catch
+
+ return null;
+ } // end of ColumnDesc
+
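+ // Field accessors: when n > 0 the value is fetched by 1-based column
+ // index, otherwise by column label; on error the type's neutral value
+ // (null, 0 or false) is returned after the message has been recorded.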
+ public String StringField(int n, String name) {
+ if (rs == null) {
+ System.out.println("No result set");
+ } else try {
+ return (n > 0) ? rs.getString(n) : rs.getString(name);
+ } catch (SQLException se) {
+ SetErrmsg(se);
+ } //end try/catch
+
+ return null;
+ } // end of StringField
+
+ public int IntField(int n, String name) {
+ if (rs == null) {
+ System.out.println("No result set");
+ } else try {
+ return (n > 0) ? rs.getInt(n) : rs.getInt(name);
+ } catch (SQLException se) {
+ SetErrmsg(se);
+ } //end try/catch
+
+ return 0;
+ } // end of IntField
+
+ public long BigintField(int n, String name) {
+ if (rs == null) {
+ System.out.println("No result set");
+ } else try {
+ BigDecimal bigDecimal = (n > 0) ? rs.getBigDecimal(n) : rs.getBigDecimal(name);
+ return bigDecimal != null ? bigDecimal.longValue() : 0;
+ } catch (SQLException se) {
+ SetErrmsg(se);
+ } //end try/catch
+
+ return 0;
+ } // end of BigintField
+
+ public double DoubleField(int n, String name) {
+ if (rs == null) {
+ System.out.println("No result set");
+ } else try {
+ return (n > 0) ? rs.getDouble(n) : rs.getDouble(name);
+ } catch (SQLException se) {
+ SetErrmsg(se);
+ } //end try/catch
+
+ return 0.;
+ } // end of DoubleField
+
+ public float FloatField(int n, String name) {
+ if (rs == null) {
+ System.out.println("No result set");
+ } else try {
+ return (n > 0) ? rs.getFloat(n) : rs.getFloat(name);
+ } catch (SQLException se) {
+ SetErrmsg(se);
+ } //end try/catch
+
+ return 0;
+ } // end of FloatField
+
+ public boolean BooleanField(int n, String name) {
+ if (rs == null) {
+ System.out.println("No result set");
+ } else try {
+ return (n > 0) ? rs.getBoolean(n) : rs.getBoolean(name);
+ } catch (SQLException se) {
+ SetErrmsg(se);
+ } //end try/catch
+
+ return false;
+ } // end of BooleanField
+
+ public Date DateField(int n, String name) {
+ if (rs == null) {
+ System.out.println("No result set");
+ } else try {
+ return (n > 0) ? rs.getDate(n) : rs.getDate(name);
+ } catch (SQLException se) {
+ SetErrmsg(se);
+ } //end try/catch
+
+ return null;
+ } // end of DateField
+
+ public Time TimeField(int n, String name) {
+ if (rs == null) {
+ System.out.println("No result set");
+ } else try {
+ return (n > 0) ? rs.getTime(n) : rs.getTime(name);
+ } catch (SQLException se) {
+ SetErrmsg(se);
+ } //end try/catch
+
+ return null;
+ } // end of TimeField
+
+ public Timestamp TimestampField(int n, String name) {
+ if (rs == null) {
+ System.out.println("No result set");
+ } else try {
+ return (n > 0) ? rs.getTimestamp(n) : rs.getTimestamp(name);
+ } catch (SQLException se) {
+ SetErrmsg(se);
+ } //end try/catch
+
+ return null;
+ } // end of TimestampField
+
+ public String ObjectField(int n, String name) {
+ if (rs == null) {
+ System.out.println("No result set");
+ } else try {
+ return (n > 0) ? rs.getObject(n).toString() : rs.getObject(name).toString();
+ } catch (SQLException se) {
+ SetErrmsg(se);
+ } //end try/catch
+
+ return null;
+ } // end of ObjectField
+
+ public int GetDrivers(String[] s, int mxs) {
+ int n = 0;
+ List<Driver> drivers = Collections.list(DriverManager.getDrivers());
+ int size = Math.min(mxs, drivers.size());
+
+ for (int i = 0; i < size; i++) {
+ Driver driver = (Driver)drivers.get(i);
+
+ // Get name of driver
+ s[n++] = driver.getClass().getName();
+
+ // Get version info
+ s[n++] = driver.getMajorVersion() + "." + driver.getMinorVersion();
+ s[n++] = driver.jdbcCompliant() ? "Yes" : "No";
+ s[n++] = driver.toString();
+ } // endfor i
+
+ return size;
+ } // end of GetDrivers
+
+ /**
+ * Adds the specified path to the java library path
+ * from Fahd Shariff blog
+ *
+ * @param pathToAdd the path to add
+ static public int addLibraryPath(String pathToAdd) {
+ System.out.println("jpath = " + pathToAdd);
+
+ try {
+ Field usrPathsField = ClassLoader.class.getDeclaredField("usr_paths");
+ usrPathsField.setAccessible(true);
+
+ //get array of paths
+ String[] paths = (String[])usrPathsField.get(null);
+
+ //check if the path to add is already present
+ for (String path : paths) {
+ System.out.println("path = " + path);
+
+ if (path.equals(pathToAdd))
+ return -5;
+
+ } // endfor path
+
+ //add the new path
+ String[] newPaths = Arrays.copyOf(paths, paths.length + 1);
+ newPaths[paths.length] = pathToAdd;
+ usrPathsField.set(null, newPaths);
+ System.setProperty("java.library.path",
+ System.getProperty("java.library.path") + File.pathSeparator + pathToAdd);
+ Field fieldSysPath = ClassLoader.class.getDeclaredField("sys_paths");
+ fieldSysPath.setAccessible(true);
+ fieldSysPath.set(null, null);
+ } catch (Exception e) {
+ SetErrmsg(e);
+ return -1;
+ } // end try/catch
+
+ return 0;
+ } // end of addLibraryPath
+ */
+
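+ // Logical call sequence (illustrative sketch only; the actual calls are
+ // made from the CONNECT engine through JNI):
+ //   JdbcInterface jdbc = new JdbcInterface(false);
+ //   jdbc.JdbcConnect(new String[] {driver, url, user, pwd}, 0, false);
+ //   int ncol = jdbc.ExecuteQuery("SELECT ...");
+ //   while (jdbc.ReadNext() > 0)
+ //     ... jdbc.StringField(1, null) ...
+ //   jdbc.JdbcDisconnect();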
+} // end of class JdbcInterface
diff --git a/storage/connect/ha_connect.cc b/storage/connect/ha_connect.cc
index 4aef3af9946..eaebd88b0cd 100644
--- a/storage/connect/ha_connect.cc
+++ b/storage/connect/ha_connect.cc
@@ -119,6 +119,7 @@
#undef OFFSET
#define NOPARSE
+#define NJDBC
#if defined(UNIX)
#include "osutil.h"
#endif // UNIX
@@ -169,7 +170,7 @@
#define JSONMAX 10 // JSON Default max grp size
extern "C" {
- char version[]= "Version 1.04.0006 March 12, 2016";
+ char version[]= "Version 1.04.0006 May 08, 2016";
#if defined(__WIN__)
char compver[]= "Version 1.04.0006 " __DATE__ " " __TIME__;
char slash= '\\';
@@ -190,6 +191,18 @@ extern "C" {
} // extern "C"
#endif // XMSG
+#if defined(JDBC_SUPPORT)
+ char *JvmPath;
+ char *ClassPath;
+ char *Wrapper;
+#endif // JDBC_SUPPORT
+
+#if defined(__WIN__)
+CRITICAL_SECTION parsec; // Used calling the Flex parser
+#else // !__WIN__
+pthread_mutex_t parmut = PTHREAD_MUTEX_INITIALIZER;
+#endif // !__WIN__
+
/***********************************************************************/
/* Utility functions. */
/***********************************************************************/
@@ -634,6 +647,7 @@ static int connect_init_func(void *p)
#if defined(__WIN__)
sql_print_information("CONNECT: %s", compver);
+ InitializeCriticalSection((LPCRITICAL_SECTION)&parsec);
#else // !__WIN__
sql_print_information("CONNECT: %s", version);
#endif // !__WIN__
@@ -660,6 +674,9 @@ static int connect_init_func(void *p)
DTVAL::SetTimeShift(); // Initialize time zone shift once for all
BINCOL::SetEndian(); // Initialize host endian setting
+#if defined(JDBC_SUPPORT)
+ JDBConn::SetJVM();
+#endif // JDBC_SUPPORT
DBUG_RETURN(0);
} // end of connect_init_func
@@ -676,11 +693,17 @@ static int connect_done_func(void *)
#ifdef LIBXML2_SUPPORT
XmlCleanupParserLib();
-#endif // LIBXML2_SUPPORT
+#endif // LIBXML2_SUPPORT
-#if !defined(__WIN__)
-//PROFILE_End(); Causes signal 11
-#endif // !__WIN__
+#ifdef JDBC_SUPPORT
+ JDBConn::ResetJVM();
+#endif // JDBC_SUPPORT
+
+#if defined(__WIN__)
+ DeleteCriticalSection((LPCRITICAL_SECTION)&parsec);
+#else // !__WIN__
+ PROFILE_End();
+#endif // !__WIN__
for (pc= user_connect::to_users; pc; pc= pn) {
if (pc->g)
@@ -1937,7 +1960,7 @@ int ha_connect::MakeRecord(char *buf)
if (trace > 1)
htrc("Maps: read=%08X write=%08X vcol=%08X defr=%08X defw=%08X\n",
*table->read_set->bitmap, *table->write_set->bitmap,
- *table->vcol_set->bitmap,
+ (table->vcol_set) ? *table->vcol_set->bitmap : 0,
*table->def_read_set.bitmap, *table->def_write_set.bitmap);
// Avoid asserts in field::store() for columns that are not updated
@@ -5177,7 +5200,7 @@ static int connect_assisted_discovery(handlerton *, THD* thd,
spc= (!sep) ? ',' : *sep;
qch= topt->qchar ? *topt->qchar : (signed)topt->quoted >= 0 ? '"' : 0;
hdr= (int)topt->header;
- tbl= topt->tablist;
+ tbl= topt->tablist;
col= topt->colist;
if (topt->oplist) {
@@ -5585,7 +5608,7 @@ static int connect_assisted_discovery(handlerton *, THD* thd,
len= crp->Length;
dec= crp->Prec;
flg= crp->Flag;
- v= crp->Var;
+ v= (crp->Kdata->IsUnsigned()) ? 'U' : crp->Var;
tm= (crp->Kdata->IsNullable()) ? 0 : NOT_NULL_FLAG;
if (!len && typ == TYPE_STRING)
@@ -6831,6 +6854,27 @@ static MYSQL_SYSVAR_STR(errmsg_dir_path, msg_path,
"../../../../storage/connect/"); // for testing
#endif // XMSG
+#if defined(JDBC_SUPPORT)
+static MYSQL_SYSVAR_STR(jvm_path, JvmPath,
+ PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_MEMALLOC,
+ "Path to the directory where is the JVM lib",
+ // check_jvm_path, update_jvm_path,
+ NULL, NULL, NULL);
+
+static MYSQL_SYSVAR_STR(class_path, ClassPath,
+ PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_MEMALLOC,
+ "Java class path",
+ // check_class_path, update_class_path,
+ NULL, NULL, NULL);
+
+static MYSQL_SYSVAR_STR(java_wrapper, Wrapper,
+ PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_MEMALLOC,
+ "Java wrapper class",
+ // check_class_path, update_class_path,
+ NULL, NULL, "JdbcInterface");
+#endif // JDBC_SUPPORT
+
+
static struct st_mysql_sys_var* connect_system_variables[]= {
MYSQL_SYSVAR(xtrace),
MYSQL_SYSVAR(conv_size),
@@ -6848,7 +6892,12 @@ static struct st_mysql_sys_var* connect_system_variables[]= {
MYSQL_SYSVAR(errmsg_dir_path),
#endif // XMSG
MYSQL_SYSVAR(json_grp_size),
- NULL
+#if defined(JDBC_SUPPORT)
+ MYSQL_SYSVAR(jvm_path),
+ MYSQL_SYSVAR(class_path),
+ MYSQL_SYSVAR(java_wrapper),
+#endif // JDBC_SUPPORT
+ NULL
};
maria_declare_plugin(connect)
diff --git a/storage/connect/inihandl.c b/storage/connect/inihandl.c
index 542b807f899..46102557b20 100644
--- a/storage/connect/inihandl.c
+++ b/storage/connect/inihandl.c
@@ -622,13 +622,16 @@ void PROFILE_End(void)
if (trace)
htrc("PROFILE_End: CurProfile=%p N=%d\n", CurProfile, N_CACHED_PROFILES);
+ if (!CurProfile) // Sergey Vojtovich
+ return;
+
/* Close all opened files and free the cache structure */
for (i = 0; i < N_CACHED_PROFILES; i++) {
if (trace)
htrc("MRU=%s i=%d\n", SVP(MRUProfile[i]->filename), i);
- CurProfile = MRUProfile[i];
- PROFILE_ReleaseFile();
+// CurProfile = MRUProfile[i]; Sergey Vojtovich
+// PROFILE_ReleaseFile(); see MDEV-9997
free(MRUProfile[i]);
} // endfor i
diff --git a/storage/connect/jsonudf.cpp b/storage/connect/jsonudf.cpp
index 7b82ba2d627..e94d3817926 100644
--- a/storage/connect/jsonudf.cpp
+++ b/storage/connect/jsonudf.cpp
@@ -1,6 +1,6 @@
/****************** jsonudf C++ Program Source Code File (.CPP) ******************/
-/* PROGRAM NAME: jsonudf Version 1.3 */
-/* (C) Copyright to the author Olivier BERTRAND 2015 */
+/* PROGRAM NAME: jsonudf Version 1.4 */
+/* (C) Copyright to the author Olivier BERTRAND 2015-2016 */
/* This program are the JSON User Defined Functions . */
/*********************************************************************************/
@@ -1433,7 +1433,7 @@ static my_bool CheckMemory(PGLOBAL g, UDF_INIT *initid, UDF_ARGS *args, uint n,
char *p = args->args[0];
// Is this a file name?
- if (!strchr("[{ \t\r\n", *p) && (len = GetFileLength(p)))
+ if (p && !strchr("[{ \t\r\n", *p) && (len = GetFileLength(p)))
ml += len * (M + 1);
else
ml += args->lengths[0] * M;
@@ -1805,7 +1805,20 @@ my_bool json_array_add_values_init(UDF_INIT *initid, UDF_ARGS *args, char *messa
} else
CalcLen(args, false, reslen, memlen);
- return JsonInit(initid, args, message, true, reslen, memlen);
+ if (!JsonInit(initid, args, message, true, reslen, memlen)) {
+ PGLOBAL g = (PGLOBAL)initid->ptr;
+
+ // This is a constant function
+ g->N = (initid->const_item) ? 1 : 0;
+
+ // This is to avoid double execution when using prepared statements
+ if (IsJson(args, 0) > 1)
+ initid->const_item = 0;
+
+ return false;
+ } else
+ return true;
+
} // end of json_array_add_values_init
char *json_array_add_values(UDF_INIT *initid, UDF_ARGS *args, char *result,
@@ -1850,7 +1863,7 @@ char *json_array_add_values(UDF_INIT *initid, UDF_ARGS *args, char *result,
} // endif str
// Keep result of constant function
- g->Xchk = (initid->const_item) ? str : NULL;
+ g->Xchk = (g->N) ? str : NULL;
} else
str = (char*)g->Xchk;
@@ -1873,7 +1886,7 @@ void json_array_add_values_deinit(UDF_INIT* initid)
/*********************************************************************************/
my_bool json_array_add_init(UDF_INIT *initid, UDF_ARGS *args, char *message)
{
- unsigned long reslen, memlen;
+ unsigned long reslen, memlen;
if (args->arg_count < 2) {
strcpy(message, "This function must have at least 2 arguments");
@@ -1884,7 +1897,20 @@ my_bool json_array_add_init(UDF_INIT *initid, UDF_ARGS *args, char *message)
} else
CalcLen(args, false, reslen, memlen, true);
- return JsonInit(initid, args, message, true, reslen, memlen);
+ if (!JsonInit(initid, args, message, true, reslen, memlen)) {
+ PGLOBAL g = (PGLOBAL)initid->ptr;
+
+ // This is a constant function
+ g->N = (initid->const_item) ? 1 : 0;
+
+ // This is to avoid double execution when using prepared statements
+ if (IsJson(args, 0) > 1)
+ initid->const_item = 0;
+
+ return false;
+ } else
+ return true;
+
} // end of json_array_add_init
char *json_array_add(UDF_INIT *initid, UDF_ARGS *args, char *result,
@@ -1930,7 +1956,7 @@ char *json_array_add(UDF_INIT *initid, UDF_ARGS *args, char *result,
if (!str)
str = MakePSZ(g, args, 0);
- if (initid->const_item)
+ if (g->N)
// Keep result of constant function
g->Xchk = str;
@@ -1966,7 +1992,20 @@ my_bool json_array_delete_init(UDF_INIT *initid, UDF_ARGS *args, char *message)
} else
CalcLen(args, false, reslen, memlen, true);
- return JsonInit(initid, args, message, true, reslen, memlen);
+ if (!JsonInit(initid, args, message, true, reslen, memlen)) {
+ PGLOBAL g = (PGLOBAL)initid->ptr;
+
+ // This is a constant function
+ g->N = (initid->const_item) ? 1 : 0;
+
+ // This is to avoid double execution when using prepared statements
+ if (IsJson(args, 0) > 1)
+ initid->const_item = 0;
+
+ return false;
+ } else
+ return true;
+
} // end of json_array_delete_init
char *json_array_delete(UDF_INIT *initid, UDF_ARGS *args, char *result,
@@ -2008,7 +2047,7 @@ char *json_array_delete(UDF_INIT *initid, UDF_ARGS *args, char *result,
if (!str)
str = MakePSZ(g, args, 0);
- if (initid->const_item)
+ if (g->N)
// Keep result of constant function
g->Xchk = str;
@@ -2184,7 +2223,20 @@ my_bool json_object_add_init(UDF_INIT *initid, UDF_ARGS *args, char *message)
} else
CalcLen(args, true, reslen, memlen, true);
- return JsonInit(initid, args, message, true, reslen, memlen);
+ if (!JsonInit(initid, args, message, true, reslen, memlen)) {
+ PGLOBAL g = (PGLOBAL)initid->ptr;
+
+ // This is a constant function
+ g->N = (initid->const_item) ? 1 : 0;
+
+ // This is to avoid double execution when using prepared statements
+ if (IsJson(args, 0) > 1)
+ initid->const_item = 0;
+
+ return false;
+ } else
+ return true;
+
} // end of json_object_add_init
char *json_object_add(UDF_INIT *initid, UDF_ARGS *args, char *result,
@@ -2227,7 +2279,7 @@ char *json_object_add(UDF_INIT *initid, UDF_ARGS *args, char *result,
if (!str)
str = MakePSZ(g, args, 0);
- if (initid->const_item)
+ if (g->N)
// Keep result of constant function
g->Xchk = str;
@@ -2266,7 +2318,20 @@ my_bool json_object_delete_init(UDF_INIT *initid, UDF_ARGS *args, char *message)
} else
CalcLen(args, true, reslen, memlen, true);
- return JsonInit(initid, args, message, true, reslen, memlen);
+ if (!JsonInit(initid, args, message, true, reslen, memlen)) {
+ PGLOBAL g = (PGLOBAL)initid->ptr;
+
+ // This is a constant function
+ g->N = (initid->const_item) ? 1 : 0;
+
+ // This is to avoid double execution when using prepared statements
+ if (IsJson(args, 0) > 1)
+ initid->const_item = 0;
+
+ return false;
+ } else
+ return true;
+
} // end of json_object_delete_init
char *json_object_delete(UDF_INIT *initid, UDF_ARGS *args, char *result,
@@ -2307,7 +2372,7 @@ char *json_object_delete(UDF_INIT *initid, UDF_ARGS *args, char *result,
if (!str)
str = MakePSZ(g, args, 0);
- if (initid->const_item)
+ if (g->N)
// Keep result of constant function
g->Xchk = str;
@@ -2605,7 +2670,20 @@ my_bool json_item_merge_init(UDF_INIT *initid, UDF_ARGS *args, char *message)
} else
CalcLen(args, false, reslen, memlen, true);
- return JsonInit(initid, args, message, true, reslen, memlen);
+ if (!JsonInit(initid, args, message, true, reslen, memlen)) {
+ PGLOBAL g = (PGLOBAL)initid->ptr;
+
+ // This is a constant function
+ g->N = (initid->const_item) ? 1 : 0;
+
+ // This is to avoid double execution when using prepared statements
+ if (IsJson(args, 0) > 1)
+ initid->const_item = 0;
+
+ return false;
+ } else
+ return true;
+
} // end of json_item_merge_init
char *json_item_merge(UDF_INIT *initid, UDF_ARGS *args, char *result,
@@ -2651,7 +2729,7 @@ char *json_item_merge(UDF_INIT *initid, UDF_ARGS *args, char *result,
if (!str)
str = MakePSZ(g, args, 0);
- if (initid->const_item)
+ if (g->N)
// Keep result of constant function
g->Xchk = str;
@@ -3538,37 +3616,9 @@ void jsoncontains_path_deinit(UDF_INIT* initid)
} // end of jsoncontains_path_deinit
/*********************************************************************************/
-/* Set Json items of a Json document according to path. */
+/* This function is used by the json_set/insert/update_item functions. */
/*********************************************************************************/
-my_bool json_set_item_init(UDF_INIT *initid, UDF_ARGS *args, char *message)
-{
- unsigned long reslen, memlen;
- int n = IsJson(args, 0);
-
- if (!(args->arg_count % 2)) {
- strcpy(message, "This function must have an odd number of arguments");
- return true;
- } else if (!n && args->arg_type[0] != STRING_RESULT) {
- strcpy(message, "First argument must be a json item");
- return true;
- } else
- CalcLen(args, false, reslen, memlen);
-
- if (n == 2 && args->args[0]) {
- char fn[_MAX_PATH];
- long fl;
-
- memcpy(fn, args->args[0], args->lengths[0]);
- fn[args->lengths[0]] = 0;
- fl = GetFileLength(fn);
- memlen += fl * 3;
- } else if (n != 3)
- memlen += args->lengths[0] * 3;
-
- return JsonInit(initid, args, message, true, reslen, memlen);
-} // end of json_set_item_init
-
-char *json_set_item(UDF_INIT *initid, UDF_ARGS *args, char *result,
+char *handle_item(UDF_INIT *initid, UDF_ARGS *args, char *result,
unsigned long *res_length, char *is_null, char *error)
{
char *p, *path, *str = NULL;
@@ -3580,18 +3630,22 @@ char *json_set_item(UDF_INIT *initid, UDF_ARGS *args, char *result,
PGLOBAL g = (PGLOBAL)initid->ptr;
PGLOBAL gb = GetMemPtr(g, args, 0);
- if (g->N) {
+ if (g->Alchecked) {
str = (char*)g->Activityp;
goto fin;
- } else if (initid->const_item)
- g->N = 1;
+ } else if (g->N)
+ g->Alchecked = 1;
- if (!strcmp(result, "$insert"))
+ if (!strcmp(result, "$set"))
+ w = 0;
+ else if (!strcmp(result, "$insert"))
w = 1;
else if (!strcmp(result, "$update"))
w = 2;
- else
- w = 0;
+ else {
+ PUSH_WARNING("Logical error, please contact CONNECT developer");
+ goto err;
+ } // endelse
// Save stack and allocation environment and prepare error return
if (g->jump_level == MAX_JUMP) {
@@ -3656,14 +3710,14 @@ char *json_set_item(UDF_INIT *initid, UDF_ARGS *args, char *result,
if (!(str = MakeResult(g, args, jsp, INT_MAX32)))
str = MakePSZ(g, args, 0);
- if (initid->const_item)
+ if (g->N)
// Keep result of constant function
g->Activityp = (PACTIVITY)str;
- err:
+err:
g->jump_level--;
- fin:
+fin:
if (!str) {
*is_null = 1;
*res_length = 0;
@@ -3671,6 +3725,58 @@ char *json_set_item(UDF_INIT *initid, UDF_ARGS *args, char *result,
*res_length = strlen(str);
return str;
+} // end of handle_item
+
+/*********************************************************************************/
+/* Set Json items of a Json document according to path. */
+/*********************************************************************************/
+my_bool json_set_item_init(UDF_INIT *initid, UDF_ARGS *args, char *message)
+{
+ unsigned long reslen, memlen;
+ int n = IsJson(args, 0);
+
+ if (!(args->arg_count % 2)) {
+ strcpy(message, "This function must have an odd number of arguments");
+ return true;
+ } else if (!n && args->arg_type[0] != STRING_RESULT) {
+ strcpy(message, "First argument must be a json item");
+ return true;
+ } else
+ CalcLen(args, false, reslen, memlen);
+
+ if (n == 2 && args->args[0]) {
+ char fn[_MAX_PATH];
+ long fl;
+
+ memcpy(fn, args->args[0], args->lengths[0]);
+ fn[args->lengths[0]] = 0;
+ fl = GetFileLength(fn);
+ memlen += fl * 3;
+ } else if (n != 3)
+ memlen += args->lengths[0] * 3;
+
+ if (!JsonInit(initid, args, message, true, reslen, memlen)) {
+ PGLOBAL g = (PGLOBAL)initid->ptr;
+
+ // This is a constant function
+ g->N = (initid->const_item) ? 1 : 0;
+
+ // This is to avoid double execution when using prepared statements
+ if (IsJson(args, 0) > 1)
+ initid->const_item = 0;
+
+ g->Alchecked = 0;
+ return false;
+ } else
+ return true;
+
+} // end of json_set_item_init
+
+char *json_set_item(UDF_INIT *initid, UDF_ARGS *args, char *result,
+ unsigned long *res_length, char *is_null, char *p)
+{
+ strcpy(result, "$set");
+ return handle_item(initid, args, result, res_length, is_null, p);
} // end of json_set_item
void json_set_item_deinit(UDF_INIT* initid)
@@ -3690,7 +3796,7 @@ char *json_insert_item(UDF_INIT *initid, UDF_ARGS *args, char *result,
unsigned long *res_length, char *is_null, char *p)
{
strcpy(result, "$insert");
- return json_set_item(initid, args, result, res_length, is_null, p);
+ return handle_item(initid, args, result, res_length, is_null, p);
} // end of json_insert_item
void json_insert_item_deinit(UDF_INIT* initid)
@@ -3710,7 +3816,7 @@ char *json_update_item(UDF_INIT *initid, UDF_ARGS *args, char *result,
unsigned long *res_length, char *is_null, char *p)
{
strcpy(result, "$update");
- return json_set_item(initid, args, result, res_length, is_null, p);
+ return handle_item(initid, args, result, res_length, is_null, p);
} // end of json_update_item
void json_update_item_deinit(UDF_INIT* initid)
@@ -3728,8 +3834,8 @@ my_bool json_file_init(UDF_INIT *initid, UDF_ARGS *args, char *message)
if (args->arg_count < 1 || args->arg_count > 4) {
strcpy(message, "This function only accepts 1 to 4 arguments");
return true;
- } else if (!args->args[0] || args->arg_type[0] != STRING_RESULT) {
- strcpy(message, "First argument must be a constant string (file name)");
+ } else if (args->arg_type[0] != STRING_RESULT) {
+ strcpy(message, "First argument must be a string (file name)");
return true;
} // endif's args[0]
@@ -3747,7 +3853,12 @@ my_bool json_file_init(UDF_INIT *initid, UDF_ARGS *args, char *message)
initid->maybe_null = 1;
CalcLen(args, false, reslen, memlen);
- fl = GetFileLength(args->args[0]);
+
+ if (args->args[0])
+ fl = GetFileLength(args->args[0]);
+ else
+ fl = 100; // What can be done here?
+
reslen += fl;
if (initid->const_item)
@@ -4006,7 +4117,18 @@ void jbin_array_deinit(UDF_INIT* initid)
/*********************************************************************************/
my_bool jbin_array_add_values_init(UDF_INIT *initid, UDF_ARGS *args, char *message)
{
- return json_array_add_values_init(initid, args, message);
+ unsigned long reslen, memlen;
+
+ if (args->arg_count < 2) {
+ strcpy(message, "This function must have at least 2 arguments");
+ return true;
+ } else if (!IsJson(args, 0) && args->arg_type[0] != STRING_RESULT) {
+ strcpy(message, "First argument must be a json string or item");
+ return true;
+ } else
+ CalcLen(args, false, reslen, memlen);
+
+ return JsonInit(initid, args, message, true, reslen, memlen);
} // end of jbin_array_add_values_init
char *jbin_array_add_values(UDF_INIT *initid, UDF_ARGS *args, char *result,
@@ -4076,7 +4198,18 @@ void jbin_array_add_values_deinit(UDF_INIT* initid)
/*********************************************************************************/
my_bool jbin_array_add_init(UDF_INIT *initid, UDF_ARGS *args, char *message)
{
- return json_array_add_init(initid, args, message);
+ unsigned long reslen, memlen;
+
+ if (args->arg_count < 2) {
+ strcpy(message, "This function must have at least 2 arguments");
+ return true;
+ } else if (!IsJson(args, 0)) {
+ strcpy(message, "First argument must be a json item");
+ return true;
+ } else
+ CalcLen(args, false, reslen, memlen, true);
+
+ return JsonInit(initid, args, message, true, reslen, memlen);
} // end of jbin_array_add_init
char *jbin_array_add(UDF_INIT *initid, UDF_ARGS *args, char *result,
@@ -4146,8 +4279,19 @@ void jbin_array_add_deinit(UDF_INIT* initid)
/*********************************************************************************/
my_bool jbin_array_delete_init(UDF_INIT *initid, UDF_ARGS *args, char *message)
{
- return json_array_delete_init(initid, args, message);
-} // end of jbin_array_delete_init
+ unsigned long reslen, memlen;
+
+ if (args->arg_count < 2) {
+ strcpy(message, "This function must have at least 2 arguments");
+ return true;
+ } else if (!IsJson(args, 0)) {
+ strcpy(message, "First argument must be a json item");
+ return true;
+ } else
+ CalcLen(args, false, reslen, memlen, true);
+
+ return JsonInit(initid, args, message, true, reslen, memlen);
+ } // end of jbin_array_delete_init
char *jbin_array_delete(UDF_INIT *initid, UDF_ARGS *args, char *result,
unsigned long *res_length, char *is_null, char *error)
@@ -4369,8 +4513,19 @@ void jbin_object_key_deinit(UDF_INIT* initid)
/*********************************************************************************/
my_bool jbin_object_add_init(UDF_INIT *initid, UDF_ARGS *args, char *message)
{
- return json_object_add_init(initid, args, message);
-} // end of jbin_object_add_init
+ unsigned long reslen, memlen;
+
+ if (args->arg_count < 2) {
+ strcpy(message, "This function must have at least 2 arguments");
+ return true;
+ } else if (!IsJson(args, 0)) {
+ strcpy(message, "First argument must be a json item");
+ return true;
+ } else
+ CalcLen(args, true, reslen, memlen, true);
+
+ return JsonInit(initid, args, message, true, reslen, memlen);
+ } // end of jbin_object_add_init
char *jbin_object_add(UDF_INIT *initid, UDF_ARGS *args, char *result,
unsigned long *res_length, char *is_null, char *error)
@@ -4435,8 +4590,22 @@ void jbin_object_add_deinit(UDF_INIT* initid)
/*********************************************************************************/
my_bool jbin_object_delete_init(UDF_INIT *initid, UDF_ARGS *args, char *message)
{
- return json_object_delete_init(initid, args, message);
-} // end of jbin_object_delete_init
+ unsigned long reslen, memlen;
+
+ if (args->arg_count < 2) {
+ strcpy(message, "This function must have 2 or 3 arguments");
+ return true;
+ } else if (!IsJson(args, 0)) {
+ strcpy(message, "First argument must be a json item");
+ return true;
+ } else if (args->arg_type[1] != STRING_RESULT) {
+ strcpy(message, "Second argument must be a key string");
+ return true;
+ } else
+ CalcLen(args, true, reslen, memlen, true);
+
+ return JsonInit(initid, args, message, true, reslen, memlen);
+ } // end of jbin_object_delete_init
char *jbin_object_delete(UDF_INIT *initid, UDF_ARGS *args, char *result,
unsigned long *res_length, char *is_null, char *error)
@@ -4645,8 +4814,22 @@ void jbin_get_item_deinit(UDF_INIT* initid)
/*********************************************************************************/
my_bool jbin_item_merge_init(UDF_INIT *initid, UDF_ARGS *args, char *message)
{
- return json_item_merge_init(initid, args, message);
-} // end of jbin_item_merge_init
+ unsigned long reslen, memlen;
+
+ if (args->arg_count < 2) {
+ strcpy(message, "This function must have at least 2 arguments");
+ return true;
+ } else if (!IsJson(args, 0)) {
+ strcpy(message, "First argument must be a json item");
+ return true;
+ } else if (!IsJson(args, 1)) {
+ strcpy(message, "Second argument must be a json item");
+ return true;
+ } else
+ CalcLen(args, false, reslen, memlen, true);
+
+ return JsonInit(initid, args, message, true, reslen, memlen);
+ } // end of jbin_item_merge_init
char *jbin_item_merge(UDF_INIT *initid, UDF_ARGS *args, char *result,
unsigned long *res_length, char *is_null, char *error)
@@ -4706,14 +4889,9 @@ void jbin_item_merge_deinit(UDF_INIT* initid)
} // end of jbin_item_merge_deinit
/*********************************************************************************/
-/* Set Json items of a Json document according to path. */
+/* This function is used by the jbin_set/insert/update functions. */
/*********************************************************************************/
-my_bool jbin_set_item_init(UDF_INIT *initid, UDF_ARGS *args, char *message)
-{
- return json_set_item_init(initid, args, message);
-} // end of jbin_set_item_init
-
-char *jbin_set_item(UDF_INIT *initid, UDF_ARGS *args, char *result,
+char *bin_handle_item(UDF_INIT *initid, UDF_ARGS *args, char *result,
unsigned long *res_length, char *is_null, char *error)
{
char *p, *path;
@@ -4732,12 +4910,16 @@ char *jbin_set_item(UDF_INIT *initid, UDF_ARGS *args, char *result,
} else if (initid->const_item)
g->N = 1;
- if (!strcmp(result, "$insert"))
+ if (!strcmp(result, "$set"))
+ w = 0;
+ else if (!strcmp(result, "$insert"))
w = 1;
else if (!strcmp(result, "$update"))
w = 2;
- else
- w = 0;
+ else {
+ PUSH_WARNING("Logical error, please contact CONNECT developer");
+ goto fin;
+ } // endelse
if (!g->Xchk) {
if (CheckMemory(g, initid, args, 1, true, false, true)) {
@@ -4792,7 +4974,7 @@ char *jbin_set_item(UDF_INIT *initid, UDF_ARGS *args, char *result,
// Keep result of constant function
g->Activityp = (PACTIVITY)bsp;
- fin:
+fin:
if (!bsp) {
*is_null = 1;
*res_length = 0;
@@ -4800,6 +4982,44 @@ char *jbin_set_item(UDF_INIT *initid, UDF_ARGS *args, char *result,
*res_length = sizeof(BSON);
return (char*)bsp;
+} // end of bin_handle_item
+
+/*********************************************************************************/
+/* Set Json items of a Json document according to path. */
+/*********************************************************************************/
+my_bool jbin_set_item_init(UDF_INIT *initid, UDF_ARGS *args, char *message)
+{
+ unsigned long reslen, memlen;
+ int n = IsJson(args, 0);
+
+ if (!(args->arg_count % 2)) {
+ strcpy(message, "This function must have an odd number of arguments");
+ return true;
+ } else if (!n && args->arg_type[0] != STRING_RESULT) {
+ strcpy(message, "First argument must be a json item");
+ return true;
+ } else
+ CalcLen(args, false, reslen, memlen);
+
+ if (n == 2 && args->args[0]) {
+ char fn[_MAX_PATH];
+ long fl;
+
+ memcpy(fn, args->args[0], args->lengths[0]);
+ fn[args->lengths[0]] = 0;
+ fl = GetFileLength(fn);
+ memlen += fl * 3;
+ } else if (n != 3)
+ memlen += args->lengths[0] * 3;
+
+ return JsonInit(initid, args, message, true, reslen, memlen);
+ } // end of jbin_set_item_init
+
+char *jbin_set_item(UDF_INIT *initid, UDF_ARGS *args, char *result,
+ unsigned long *res_length, char *is_null, char *p)
+{
+ strcpy(result, "$set");
+ return bin_handle_item(initid, args, result, res_length, is_null, p);
} // end of jbin_set_item
void jbin_set_item_deinit(UDF_INIT* initid)
@@ -4819,7 +5039,7 @@ char *jbin_insert_item(UDF_INIT *initid, UDF_ARGS *args, char *result,
unsigned long *res_length, char *is_null, char *p)
{
strcpy(result, "$insert");
- return jbin_set_item(initid, args, result, res_length, is_null, p);
+ return bin_handle_item(initid, args, result, res_length, is_null, p);
} // end of jbin_insert_item
void jbin_insert_item_deinit(UDF_INIT* initid)
@@ -4839,7 +5059,7 @@ char *jbin_update_item(UDF_INIT *initid, UDF_ARGS *args, char *result,
unsigned long *res_length, char *is_null, char *p)
{
strcpy(result, "$update");
- return jbin_set_item(initid, args, result, res_length, is_null, p);
+ return bin_handle_item(initid, args, result, res_length, is_null, p);
} // end of jbin_update_item
void jbin_update_item_deinit(UDF_INIT* initid)
@@ -4964,7 +5184,7 @@ my_bool json_serialize_init(UDF_INIT *initid, UDF_ARGS *args, char *message)
if (args->arg_count != 1) {
strcpy(message, "This function must have 1 argument");
return true;
- } else if (IsJson(args, 0) != 3) {
+ } else if (args->args[0] && IsJson(args, 0) != 3) {
strcpy(message, "Argument must be a Jbin tree");
return true;
} else
@@ -4974,21 +5194,27 @@ my_bool json_serialize_init(UDF_INIT *initid, UDF_ARGS *args, char *message)
} // end of json_serialize_init
char *json_serialize(UDF_INIT *initid, UDF_ARGS *args, char *result,
- unsigned long *res_length, char *, char *)
+ unsigned long *res_length, char *, char *error)
{
char *str;
PGLOBAL g = (PGLOBAL)initid->ptr;
if (!g->Xchk) {
- PBSON bsp = (PBSON)args->args[0];
+ if (IsJson(args, 0) == 3) {
+ PBSON bsp = (PBSON)args->args[0];
- JsonSubSet(g);
+ JsonSubSet(g);
- if (!(str = Serialize(g, bsp->Jsp, NULL, 0)))
- str = strcpy(result, g->Message);
+ if (!(str = Serialize(g, bsp->Jsp, NULL, 0)))
+ str = strcpy(result, g->Message);
+
+ // Keep result of constant function
+ g->Xchk = (initid->const_item) ? str : NULL;
+ } else {
+ *error = 1;
+ str = strcpy(result, "Argument is not a Jbin tree");
+ } // endif
- // Keep result of constant function
- g->Xchk = (initid->const_item) ? str : NULL;
} else
str = (char*)g->Xchk;
@@ -5000,3 +5226,37 @@ void json_serialize_deinit(UDF_INIT* initid)
{
JsonFreeMem((PGLOBAL)initid->ptr);
} // end of json_serialize_deinit
+
+/*********************************************************************************/
+/* Utility function returning an environment variable value. */
+/*********************************************************************************/
+my_bool envar_init(UDF_INIT *initid, UDF_ARGS *args, char *message)
+{
+ if (args->arg_count != 1) {
+ strcpy(message, "Unique argument must be an environment variable name");
+ return true;
+ } else {
+ initid->maybe_null = true;
+ return false;
+ } // endif count
+
+} // end of envar_init
+
+char *envar(UDF_INIT *initid, UDF_ARGS *args, char *result,
+ unsigned long *res_length, char *is_null, char *)
+{
+ char *str, name[256];
+ int n = MY_MIN(args->lengths[0], sizeof(name) - 1);
+
+ memcpy(name, args->args[0], n);
+ name[n] = 0;
+
+ if (!(str = getenv(name))) {
+ *res_length = 0;
+ *is_null = 1;
+ } else
+ *res_length = strlen(str);
+
+ return str;
+} // end of envar
+
diff --git a/storage/connect/jsonudf.h b/storage/connect/jsonudf.h
index b7e9b8ecabb..1406d9f2f2e 100644
--- a/storage/connect/jsonudf.h
+++ b/storage/connect/jsonudf.h
@@ -218,8 +218,12 @@ extern "C" {
DllExport my_bool json_serialize_init(UDF_INIT*, UDF_ARGS*, char*);
DllExport char *json_serialize(UDF_EXEC_ARGS);
DllExport void json_serialize_deinit(UDF_INIT*);
+
+ DllExport my_bool envar_init(UDF_INIT*, UDF_ARGS*, char*);
+ DllExport char *envar(UDF_EXEC_ARGS);
} // extern "C"
+
/*********************************************************************************/
/* Structure JPN. Used to make the locate path. */
/*********************************************************************************/
diff --git a/storage/connect/mycat.cc b/storage/connect/mycat.cc
index da8be207237..b4b03e6ba4a 100644
--- a/storage/connect/mycat.cc
+++ b/storage/connect/mycat.cc
@@ -109,19 +109,7 @@ PQRYRES OEMColumns(PGLOBAL g, PTOS topt, char *tab, char *db, bool info);
/***********************************************************************/
char *GetPluginDir(void)
{
- char *plugin_dir;
-
-#if defined(_WIN64)
- plugin_dir = (char *)GetProcAddress(GetModuleHandle(NULL),
- "?opt_plugin_dir@@3PADEA");
-#elif defined(_WIN32)
- plugin_dir = (char*)GetProcAddress(GetModuleHandle(NULL),
- "?opt_plugin_dir@@3PADA");
-#else
- plugin_dir = opt_plugin_dir;
-#endif
-
- return plugin_dir;
+ return opt_plugin_dir;
} // end of GetPluginDir
/***********************************************************************/
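The rewritten GetPluginDir relies on the plugin linking directly against the server's exported opt_plugin_dir symbol. The deleted branches resolved it at run time from the main executable instead, which is why they carried MSVC-decorated symbol names (the 32- and 64-bit builds decorate the same variable differently). A sketch of that lookup technique, kept for reference; the fallback to "." is an assumption:

#if defined(_WIN32)
#include <windows.h>

// Run-time lookup of a variable exported by the hosting executable.
// "?opt_plugin_dir@@3PADA" is the 32-bit MSVC-decorated name used above;
// the 64-bit build decorates it as "?opt_plugin_dir@@3PADEA".
static char *resolve_plugin_dir(void)
{
  char *dir = (char*)GetProcAddress(GetModuleHandle(NULL),
                                    "?opt_plugin_dir@@3PADA");
  return dir ? dir : (char*)".";   // assumption: current dir as fallback
}
#endif // _WIN32

Linking against the exported symbol works on every platform, which is what the one-line replacement depends on.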
diff --git a/storage/connect/myconn.cpp b/storage/connect/myconn.cpp
index e9bd64cf8e6..644ca019e4a 100644
--- a/storage/connect/myconn.cpp
+++ b/storage/connect/myconn.cpp
@@ -5,7 +5,7 @@
/* */
/* COPYRIGHT: */
/* ---------- */
-/* (C) Copyright to the author Olivier BERTRAND 2007-2015 */
+/* (C) Copyright to the author Olivier BERTRAND 2007-2016 */
/* */
/* WHAT THIS PROGRAM DOES: */
/* ----------------------- */
@@ -401,8 +401,10 @@ PQRYRES SrcColumns(PGLOBAL g, const char *host, const char *db,
MYSQLC::MYSQLC(void)
{
m_DB = NULL;
- m_Stmt = NULL;
- m_Res = NULL;
+#if defined (MYSQL_PREPARED_STATEMENTS)
+ m_Stmt = NULL;
+#endif // MYSQL_PREPARED_STATEMENTS
+ m_Res = NULL;
m_Rows = -1;
m_Row = NULL;
m_Fields = -1;
@@ -444,7 +446,10 @@ int MYSQLC::Open(PGLOBAL g, const char *host, const char *db,
return RC_FX;
} // endif m_DB
- // Removed to do like FEDERATED do
+ if (trace)
+ htrc("MYSQLC Open: m_DB=%.4X size=%d\n", m_DB, (int)sizeof(*m_DB));
+
+ // Removed to do like FEDERATED do
//mysql_options(m_DB, MYSQL_READ_DEFAULT_GROUP, "client-mariadb");
mysql_options(m_DB, MYSQL_OPT_USE_REMOTE_CONNECTION, NULL);
mysql_options(m_DB, MYSQL_OPT_CONNECT_TIMEOUT, &cto);
@@ -701,6 +706,11 @@ int MYSQLC::ExecSQL(PGLOBAL g, const char *query, int *w)
} else {
m_Fields = mysql_num_fields(m_Res);
m_Rows = (!m_Use) ? (int)mysql_num_rows(m_Res) : 0;
+
+ if (trace)
+ htrc("ExecSQL: m_Res=%.4X size=%d m_Fields=%d m_Rows=%d\n",
+ m_Res, sizeof(*m_Res), m_Fields, m_Rows);
+
} // endif m_Res
} else {
@@ -901,8 +911,12 @@ PQRYRES MYSQLC::GetResult(PGLOBAL g, bool pdb)
if (fld->flags & NOT_NULL_FLAG)
crp->Nulls = NULL;
else {
- crp->Nulls = (char*)PlugSubAlloc(g, NULL, m_Rows);
- memset(crp->Nulls, ' ', m_Rows);
+ if (m_Rows) {
+ crp->Nulls = (char*)PlugSubAlloc(g, NULL, m_Rows);
+ memset(crp->Nulls, ' ', m_Rows);
+ } // endif m_Rows
+
+ crp->Kdata->SetNullable(true);
} // endelse fld->flags
} // endfor fld
@@ -1013,7 +1027,11 @@ int MYSQLC::ExecSQLcmd(PGLOBAL g, const char *query, int *w)
void MYSQLC::Close(void)
{
FreeResult();
- mysql_close(m_DB);
+
+ if (trace)
+ htrc("MYSQLC Close: m_DB=%.4X\n", m_DB);
+
+ mysql_close(m_DB);
m_DB = NULL;
} // end of Close
diff --git a/storage/connect/myconn.h b/storage/connect/myconn.h
index 79f095f5c93..9ebd37527a6 100644
--- a/storage/connect/myconn.h
+++ b/storage/connect/myconn.h
@@ -90,8 +90,10 @@ class DllItem MYSQLC {
// Members
MYSQL *m_DB; // The return from MySQL connection
- MYSQL_STMT *m_Stmt; // Prepared statement handle
- MYSQL_RES *m_Res; // Points to MySQL Result
+#if defined (MYSQL_PREPARED_STATEMENTS)
+ MYSQL_STMT *m_Stmt; // Prepared statement handle
+#endif // MYSQL_PREPARED_STATEMENTS
+ MYSQL_RES *m_Res; // Points to MySQL Result
MYSQL_ROW m_Row; // Point to current row
int m_Rows; // The number of rows of the result
int N;
diff --git a/storage/connect/mysql-test/connect/disabled.def b/storage/connect/mysql-test/connect/disabled.def
index 67acb5dff29..4e07b5c0576 100644
--- a/storage/connect/mysql-test/connect/disabled.def
+++ b/storage/connect/mysql-test/connect/disabled.def
@@ -1 +1,15 @@
-json_udf_bin : broken upstream in --ps
+##############################################################################
+#
+# List the test cases that are to be disabled temporarily.
+#
+# Separate the test case name and the comment with ':'.
+#
+# <testcasename> : BUG#<xxxx> <date disabled> <disabler> <comment>
+#
+# Do not use any TAB characters for whitespace.
+#
+##############################################################################
+jdbc : Variable settings depend on machine configuration
+jdbc_new : Variable settings depend on machine configuration
+jdbc_oracle : Variable settings depend on machine configuration
+jdbc_postgresql : Variable settings depend on machine configuration
diff --git a/storage/connect/mysql-test/connect/r/jdbc.result b/storage/connect/mysql-test/connect/r/jdbc.result
new file mode 100644
index 00000000000..5e844bc9900
--- /dev/null
+++ b/storage/connect/mysql-test/connect/r/jdbc.result
@@ -0,0 +1,269 @@
+CREATE DATABASE connect;
+USE connect;
+CREATE TABLE t2 (
+id bigint not null,
+msg varchar(500),
+tm time,
+dt date,
+dtm datetime,
+ts timestamp);
+INSERT INTO t2 VALUES(455000000000, 'A very big number', '18:10:25', '2016-03-16', '1999-12-11 23:01:52', '2015-07-24 09:32:45');
+SELECT * FROM t2;
+id msg tm dt dtm ts
+455000000000 A very big number 18:10:25 2016-03-16 1999-12-11 23:01:52 2015-07-24 09:32:45
+#
+# Testing JDBC connection to MySQL driver
+#
+USE test;
+CREATE TABLE t1 ENGINE=CONNECT TABLE_TYPE=JDBC TABNAME=t2 CONNECTION='jdbc:mysql://localhost:PORT/connect?user=root';
+SELECT * FROM t1;
+id msg tm dt dtm ts
+455000000000 A very big number 18:10:25 2016-03-16 1999-12-11 23:01:52 2015-07-24 09:32:45
+INSERT INTO t1 VALUES(786325481247, 'Hello!', '19:45:03', '1933-08-10', '1985-11-12 09:02:44', '2014-06-17 10:32:01');
+Warnings:
+Note 1105 t2: 1 affected rows
+SELECT * FROM t1;
+id msg tm dt dtm ts
+455000000000 A very big number 18:10:25 2016-03-16 1999-12-11 23:01:52 2015-07-24 09:32:45
+786325481247 Hello! 19:45:03 1933-08-09 1985-11-12 09:02:44 2014-06-17 10:32:01
+DELETE FROM t1 WHERE msg = 'Hello!';
+Warnings:
+Note 1105 t2: 1 affected rows
+SELECT * FROM t1;
+id msg tm dt dtm ts
+455000000000 A very big number 18:10:25 2016-03-16 1999-12-11 23:01:52 2015-07-24 09:32:45
+DROP TABLE t1;
+#
+# Testing JDBC view
+#
+CREATE TABLE t1 ENGINE=CONNECT TABLE_TYPE=JDBC SRCDEF='select id, msg, tm, dt from t2' CONNECTION='jdbc:mysql://localhost:PORT/connect?user=root';
+SELECT * FROM t1;
+id msg tm dt
+455000000000 A very big number 18:10:25 2016-03-16
+SELECT msg, dt FROM t1;
+msg dt
+A very big number 2016-03-16
+DROP TABLE t1, connect.t2;
+#
+# Testing JDBC write operations
+#
+USE connect;
+CREATE TABLE boys (
+name CHAR(12) NOT NULL,
+city CHAR(11),
+birth DATE DATE_FORMAT='DD/MM/YYYY',
+hired DATE DATE_FORMAT='DD/MM/YYYY' flag=36)
+ENGINE=CONNECT TABLE_TYPE=FIX FILE_NAME='boys.txt' ENDING=1;
+SELECT * FROM boys;
+name city birth hired
+John Boston 1986-01-25 2010-06-02
+Henry Boston 1987-06-07 2008-04-01
+George San Jose 1981-08-10 2010-06-02
+Sam Chicago 1979-11-22 2007-10-10
+James Dallas 1992-05-13 2009-12-14
+Bill Boston 1986-09-11 2008-02-10
+USE test;
+CREATE TABLE t3 (
+name CHAR(12) NOT NULL,
+city CHAR(12),
+birth DATE,
+hired DATE);
+INSERT INTO t3 VALUES('Donald','Atlanta','1999-04-01','2016-03-31'),('Mick','New York','1980-01-20','2002-09-11');
+SELECT * FROM t3;
+name city birth hired
+Donald Atlanta 1999-04-01 2016-03-31
+Mick New York 1980-01-20 2002-09-11
+CREATE TABLE t1 ENGINE=CONNECT TABLE_TYPE=JDBC TABNAME=boys CONNECTION='jdbc:mysql://localhost:PORT/connect?user=root' OPTION_LIST='scrollable=1';
+SELECT * FROM t1;
+name city birth hired
+John Boston 1986-01-25 2010-06-02
+Henry Boston 1987-06-07 2008-04-01
+George San Jose 1981-08-10 2010-06-02
+Sam Chicago 1979-11-22 2007-10-10
+James Dallas 1992-05-13 2009-12-14
+Bill Boston 1986-09-11 2008-02-10
+UPDATE t1 SET city = 'Phoenix' WHERE name = 'Henry';
+Warnings:
+Note 1105 boys: 1 affected rows
+INSERT INTO t1 SELECT * FROM t3;
+Warnings:
+Note 1105 boys: 2 affected rows
+INSERT INTO t1 VALUES('Tom','Seatle','2002-03-15',NULL);
+Warnings:
+Note 1105 boys: 1 affected rows
+SELECT * FROM t1;
+name city birth hired
+John Boston 1986-01-25 2010-06-02
+Henry Phoenix 1987-06-07 2008-04-01
+George San Jose 1981-08-10 2010-06-02
+Sam Chicago 1979-11-22 2007-10-10
+James Dallas 1992-05-13 2009-12-14
+Bill Boston 1986-09-11 2008-02-10
+Donald Atlanta 1999-04-01 2016-03-31
+Mick New York 1980-01-20 2002-09-11
+Tom Seatle 2002-03-15 1970-01-01
+DROP TABLE t3;
+#
+# Testing JDBC join operations
+#
+CREATE TABLE t3 (
+name CHAR(9) NOT NULL,
+city CHAR(12) NOT NULL,
+age INT(2))
+engine=CONNECT table_type=FIX file_name='girls.txt';
+SELECT g.name, b.name, g.city FROM t3 g STRAIGHT_JOIN connect.boys b where g.city = b.city;
+name name city
+Mary John Boston
+Susan Sam Chicago
+Betty Sam Chicago
+Mary Bill Boston
+SELECT g.name, b.name, g.city FROM t3 g STRAIGHT_JOIN t1 b where g.city = b.city;
+name name city
+Mary John Boston
+Susan Sam Chicago
+Betty Sam Chicago
+Mary Bill Boston
+DROP TABLE t1, t3, connect.boys;
+#
+# Testing MariaDB JDBC driver
+#
+USE connect;
+CREATE TABLE emp (
+serialno CHAR(5) NOT NULL,
+name VARCHAR(12) NOT NULL FLAG=6,
+sex TINYINT(1) NOT NULL,
+title VARCHAR(15) NOT NULL FLAG=20,
+manager CHAR(5) NOT NULL,
+department CHAR(4) NOT NULL FLAG=41,
+secretary CHAR(5) NOT NULL FLAG=46,
+salary DOUBLE(8,2) NOT NULL FLAG=52)
+ENGINE=connect TABLE_TYPE=fix FILE_NAME='employee.dat' ENDING=1;
+SELECT * FROM emp;
+serialno name sex title manager department secretary salary
+74200 BANCROFT 2 SALESMAN 70012 0318 24888 9600.00
+02345 SMITH 1 ENGINEER 31416 2452 11111 9000.00
+78943 MERCHANT 1 SALESMAN 70012 0318 24888 8700.00
+07654 FUNNIGUY 1 ADMINISTRATOR 40567 0319 33333 8500.00
+45678 BUGHAPPY 1 PROGRAMMER 40567 0319 12345 8500.00
+34567 BIGHEAD 1 SCIENTIST 31416 2452 11111 8000.00
+77777 SHRINKY 2 ADMINISTRATOR 70012 0318 27845 7500.00
+74234 WALTER 1 ENGINEER 70012 0318 24888 7400.00
+56789 FODDERMAN 1 SALESMAN 40567 0319 12345 7000.00
+73452 TONGHO 1 ENGINEER 70012 0318 24888 6800.00
+22222 SHORTSIGHT 2 SECRETARY 87777 0021 5500.00
+55555 MESSIFUL 2 SECRETARY 40567 0319 12345 5000.50
+27845 HONEY 2 SECRETARY 70012 0318 24888 4900.00
+98765 GOOSEPEN 1 ADMINISTRATOR 07654 0319 33333 4700.00
+11111 CHERRY 2 SECRETARY 31416 2452 4500.00
+33333 MONAPENNY 2 SECRETARY 07654 0319 3800.00
+12345 KITTY 2 TYPIST 40567 0319 3000.45
+24888 PLUMHEAD 2 TYPIST 27845 0318 2800.00
+87777 STRONG 1 DIRECTOR 0021 22222 23000.00
+76543 BULLOZER 1 SALESMAN 40567 0319 12345 14800.00
+70012 WERTHER 1 DIRECTOR 87777 0318 27845 14500.00
+40567 QUINN 1 DIRECTOR 87777 0319 55555 14000.00
+31416 ORELLY 1 ENGINEER 87777 2452 11111 13400.00
+36666 BIGHORN 1 SCIENTIST 31416 2452 11111 11000.00
+00137 BROWNY 1 ENGINEER 40567 0319 12345 10500.00
+73111 WHEELFOR 1 SALESMAN 70012 0318 24888 10030.00
+00023 MARTIN 1 ENGINEER 40567 0319 12345 10000.00
+USE test;
+CREATE TABLE t1 ENGINE=CONNECT TABLE_TYPE=JDBC TABNAME=emp CONNECTION='jdbc:mariadb://localhost:PORT/connect?user=root';
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `serialno` char(5) NOT NULL,
+ `name` varchar(12) NOT NULL,
+ `sex` tinyint(3) NOT NULL,
+ `title` varchar(15) NOT NULL,
+ `manager` char(5) NOT NULL,
+ `department` char(4) NOT NULL,
+ `secretary` char(5) NOT NULL,
+ `salary` double(12,2) NOT NULL
+) ENGINE=CONNECT DEFAULT CHARSET=latin1 CONNECTION='jdbc:mariadb://localhost:PORT/connect?user=root' `TABLE_TYPE`='JDBC' `TABNAME`='emp'
+SELECT * FROM t1;
+serialno name sex title manager department secretary salary
+74200 BANCROFT 2 SALESMAN 70012 0318 24888 9600.00
+02345 SMITH 1 ENGINEER 31416 2452 11111 9000.00
+78943 MERCHANT 1 SALESMAN 70012 0318 24888 8700.00
+07654 FUNNIGUY 1 ADMINISTRATOR 40567 0319 33333 8500.00
+45678 BUGHAPPY 1 PROGRAMMER 40567 0319 12345 8500.00
+34567 BIGHEAD 1 SCIENTIST 31416 2452 11111 8000.00
+77777 SHRINKY 2 ADMINISTRATOR 70012 0318 27845 7500.00
+74234 WALTER 1 ENGINEER 70012 0318 24888 7400.00
+56789 FODDERMAN 1 SALESMAN 40567 0319 12345 7000.00
+73452 TONGHO 1 ENGINEER 70012 0318 24888 6800.00
+22222 SHORTSIGHT 2 SECRETARY 87777 0021 5500.00
+55555 MESSIFUL 2 SECRETARY 40567 0319 12345 5000.50
+27845 HONEY 2 SECRETARY 70012 0318 24888 4900.00
+98765 GOOSEPEN 1 ADMINISTRATOR 07654 0319 33333 4700.00
+11111 CHERRY 2 SECRETARY 31416 2452 4500.00
+33333 MONAPENNY 2 SECRETARY 07654 0319 3800.00
+12345 KITTY 2 TYPIST 40567 0319 3000.45
+24888 PLUMHEAD 2 TYPIST 27845 0318 2800.00
+87777 STRONG 1 DIRECTOR 0021 22222 23000.00
+76543 BULLOZER 1 SALESMAN 40567 0319 12345 14800.00
+70012 WERTHER 1 DIRECTOR 87777 0318 27845 14500.00
+40567 QUINN 1 DIRECTOR 87777 0319 55555 14000.00
+31416 ORELLY 1 ENGINEER 87777 2452 11111 13400.00
+36666 BIGHORN 1 SCIENTIST 31416 2452 11111 11000.00
+00137 BROWNY 1 ENGINEER 40567 0319 12345 10500.00
+73111 WHEELFOR 1 SALESMAN 70012 0318 24888 10030.00
+00023 MARTIN 1 ENGINEER 40567 0319 12345 10000.00
+SELECT name, title, salary FROM t1 WHERE sex = 1;
+name title salary
+SMITH ENGINEER 9000.00
+MERCHANT SALESMAN 8700.00
+FUNNIGUY ADMINISTRATOR 8500.00
+BUGHAPPY PROGRAMMER 8500.00
+BIGHEAD SCIENTIST 8000.00
+WALTER ENGINEER 7400.00
+FODDERMAN SALESMAN 7000.00
+TONGHO ENGINEER 6800.00
+GOOSEPEN ADMINISTRATOR 4700.00
+STRONG DIRECTOR 23000.00
+BULLOZER SALESMAN 14800.00
+WERTHER DIRECTOR 14500.00
+QUINN DIRECTOR 14000.00
+ORELLY ENGINEER 13400.00
+BIGHORN SCIENTIST 11000.00
+BROWNY ENGINEER 10500.00
+WHEELFOR SALESMAN 10030.00
+MARTIN ENGINEER 10000.00
+DROP TABLE t1, connect.emp;
+CREATE TABLE t2 (command varchar(128) not null,number int(5) not null flag=1,message varchar(255) flag=2) ENGINE=CONNECT TABLE_TYPE=JDBC CONNECTION='jdbc:mariadb://localhost:PORT/connect' OPTION_LIST='User=root,Execsrc=1';
+SELECT * FROM t2 WHERE command='drop table tx1';
+command number message
+drop table tx1 0 Execute: java.sql.SQLSyntaxErrorException: Unknown table 'connect.tx1'
+Query is : drop table tx1
+SELECT * FROM t2 WHERE command = 'create table tx1 (a int not null, b char(32), c double(8,2))';
+command number message
+create table tx1 (a int not null, b char(32), c double(8,2)) 0 Affected rows
+SELECT * FROM t2 WHERE command in ('insert into tx1 values(1,''The number one'',456.12)',"insert into tx1(a,b) values(2,'The number two'),(3,'The number three')");
+command number message
+insert into tx1 values(1,'The number one',456.12) 1 Affected rows
+insert into tx1(a,b) values(2,'The number two'),(3,'The number three') 2 Affected rows
+SELECT * FROM t2 WHERE command='update tx1 set c = 3.1416 where a = 2';
+command number message
+update tx1 set c = 3.1416 where a = 2 1 Affected rows
+SELECT * FROM t2 WHERE command='select * from tx1';
+command number message
+select * from tx1 3 Result set column number
+SELECT * FROM t2 WHERE command='delete from tx1 where a = 2';
+command number message
+delete from tx1 where a = 2 1 Affected rows
+SELECT * FROM connect.tx1;
+a b c
+1 The number one 456.12
+3 The number three NULL
+DROP TABLE t2;
+CREATE TABLE t1 ENGINE=CONNECT TABLE_TYPE=JDBC CATFUNC=tables CONNECTION='jdbc:mariadb://localhost:PORT/connect' option_list='User=root,Maxres=50';
+SELECT * FROM t1;
+Table_Cat Table_Schema Table_Name Table_Type Remark
+connect NULL tx1 BASE TABLE
+DROP TABLE t1;
+DROP TABLE connect.tx1;
+DROP DATABASE connect;
+SET GLOBAL connect_jvm_path=NULL;
+SET GLOBAL connect_class_path=NULL;
+SET GLOBAL time_zone = SYSTEM;
diff --git a/storage/connect/mysql-test/connect/r/jdbc_new.result b/storage/connect/mysql-test/connect/r/jdbc_new.result
new file mode 100644
index 00000000000..e5356edd5d8
--- /dev/null
+++ b/storage/connect/mysql-test/connect/r/jdbc_new.result
@@ -0,0 +1,216 @@
+CREATE TABLE t1 (a int, b char(10));
+INSERT INTO t1 VALUES (NULL,NULL),(0,'test00'),(1,'test01'),(2,'test02'),(3,'test03');
+SELECT * FROM t1;
+a b
+NULL NULL
+0 test00
+1 test01
+2 test02
+3 test03
+#
+# Testing errors
+#
+CREATE TABLE t1 ENGINE=CONNECT TABLE_TYPE=JDBC
+CONNECTION='jdbc:mysql://127.0.0.1:SLAVE_PORT/test?user=unknown';
+SELECT * FROM t1;
+ERROR HY000: Got error 174 'Connecting: java.sql.SQLException: Access denied for user 'unknown'@'localhost' (using password: NO) rc=-2' from CONNECT
+DROP TABLE t1;
+CREATE TABLE t1 ENGINE=CONNECT TABLE_TYPE=JDBC
+CONNECTION='jdbc:mysql://127.0.0.1:SLAVE_PORT/unknown?user=root';
+ERROR HY000: Connecting: java.sql.SQLSyntaxErrorException: Unknown database 'unknown' rc=-2
+CREATE TABLE t1 ENGINE=CONNECT TABLE_TYPE=JDBC TABNAME='unknown'
+ CONNECTION='jdbc:mysql://127.0.0.1:SLAVE_PORT/test?user=root';
+ERROR HY000: Cannot get columns from unknown
+SHOW CREATE TABLE t1;
+ERROR 42S02: Table 'test.t1' doesn't exist
+CREATE TABLE t1 (x int, y char(10)) ENGINE=CONNECT TABLE_TYPE=JDBC
+CONNECTION='jdbc:mysql://127.0.0.1:SLAVE_PORT/test?user=root';
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `x` int(11) DEFAULT NULL,
+ `y` char(10) DEFAULT NULL
+) ENGINE=CONNECT DEFAULT CHARSET=latin1 CONNECTION='jdbc:mysql://127.0.0.1:SLAVE_PORT/test?user=root' `TABLE_TYPE`=JDBC
+SELECT * FROM t1;
+ERROR HY000: Got error 174 'ExecuteQuery: java.sql.SQLSyntaxErrorException: Unknown column 'x' in 'field list'
+Query is : SELECT x, y FROM t1' from CONNECT
+DROP TABLE t1;
+CREATE TABLE t1 (a int, b char(10)) ENGINE=CONNECT TABLE_TYPE=JDBC
+CONNECTION='jdbc:mysql://127.0.0.1:SLAVE_PORT/test?user=root';
+ALTER TABLE t1 RENAME t1backup;
+SELECT * FROM t1;
+ERROR HY000: Got error 174 'ExecuteQuery: java.sql.SQLSyntaxErrorException: Table 'test.t1' doesn't exist
+Query is : SELECT a, b FROM t1' from CONNECT
+ALTER TABLE t1backup RENAME t1;
+DROP TABLE t1;
+#
+# Testing SELECT, etc.
+#
+CREATE TABLE t1 ENGINE=CONNECT TABLE_TYPE=JDBC
+CONNECTION='jdbc:mysql://127.0.0.1:SLAVE_PORT/test?user=root';
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `a` int(10) DEFAULT NULL,
+ `b` char(10) DEFAULT NULL
+) ENGINE=CONNECT DEFAULT CHARSET=latin1 CONNECTION='jdbc:mysql://127.0.0.1:SLAVE_PORT/test?user=root' `TABLE_TYPE`='JDBC'
+SELECT * FROM t1;
+a b
+0 NULL
+0 test00
+1 test01
+2 test02
+3 test03
+DROP TABLE t1;
+CREATE TABLE t1 (a int, b char(10)) ENGINE=CONNECT TABLE_TYPE=JDBC TABNAME='t1'
+ CONNECTION='jdbc:mysql://127.0.0.1:SLAVE_PORT/test?user=root';
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `a` int(11) DEFAULT NULL,
+ `b` char(10) DEFAULT NULL
+) ENGINE=CONNECT DEFAULT CHARSET=latin1 CONNECTION='jdbc:mysql://127.0.0.1:SLAVE_PORT/test?user=root' `TABLE_TYPE`=JDBC `TABNAME`='t1'
+SELECT * FROM t1;
+a b
+0 NULL
+0 test00
+1 test01
+2 test02
+3 test03
+DROP TABLE t1;
+CREATE TABLE t1 (a INT NOT NULL, b CHAR(10) NOT NULL) ENGINE=CONNECT TABLE_TYPE=JDBC
+CONNECTION='jdbc:mysql://127.0.0.1:SLAVE_PORT/test?user=root';
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `a` int(11) NOT NULL,
+ `b` char(10) NOT NULL
+) ENGINE=CONNECT DEFAULT CHARSET=latin1 CONNECTION='jdbc:mysql://127.0.0.1:SLAVE_PORT/test?user=root' `TABLE_TYPE`=JDBC
+SELECT * FROM t1;
+a b
+0
+0 test00
+1 test01
+2 test02
+3 test03
+DROP TABLE t1;
+CREATE TABLE t1 (a char(10), b int) ENGINE=CONNECT TABLE_TYPE=JDBC
+CONNECTION='jdbc:mysql://127.0.0.1:SLAVE_PORT/test?user=root';
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `a` char(10) DEFAULT NULL,
+ `b` int(11) DEFAULT NULL
+) ENGINE=CONNECT DEFAULT CHARSET=latin1 CONNECTION='jdbc:mysql://127.0.0.1:SLAVE_PORT/test?user=root' `TABLE_TYPE`=JDBC
+SELECT * FROM t1;
+a b
+0 NULL
+0 0
+1 0
+2 0
+3 0
+DROP TABLE t1;
+DROP TABLE t1;
+#
+# Testing numeric data types
+#
+CREATE TABLE t1 (a tinyint, b smallint, c mediumint, d int, e bigint, f float, g double, h decimal(20,5));
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `a` tinyint(4) DEFAULT NULL,
+ `b` smallint(6) DEFAULT NULL,
+ `c` mediumint(9) DEFAULT NULL,
+ `d` int(11) DEFAULT NULL,
+ `e` bigint(20) DEFAULT NULL,
+ `f` float DEFAULT NULL,
+ `g` double DEFAULT NULL,
+ `h` decimal(20,5) DEFAULT NULL
+) ENGINE=MyISAM DEFAULT CHARSET=latin1
+INSERT INTO t1 VALUES(100,3333,41235,1234567890,235000000000,3.14159265,3.14159265,3141.59265);
+CREATE TABLE t1 ENGINE=CONNECT TABLE_TYPE=JDBC
+CONNECTION='jdbc:mysql://127.0.0.1:SLAVE_PORT/test?user=root';
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `a` tinyint(3) DEFAULT NULL,
+ `b` smallint(5) DEFAULT NULL,
+ `c` int(7) DEFAULT NULL,
+ `d` int(10) DEFAULT NULL,
+ `e` bigint(19) DEFAULT NULL,
+ `f` double(14,0) DEFAULT NULL,
+ `g` double(24,0) DEFAULT NULL,
+ `h` decimal(27,5) DEFAULT NULL
+) ENGINE=CONNECT DEFAULT CHARSET=latin1 CONNECTION='jdbc:mysql://127.0.0.1:SLAVE_PORT/test?user=root' `TABLE_TYPE`='JDBC'
+SELECT * FROM t1;
+a b c d e f g h
+100 3333 41235 1234567890 235000000000 3 3 3141.59265
+DROP TABLE t1;
+DROP TABLE t1;
+#
+# Testing character data types
+#
+CREATE TABLE t1 (a char(12), b varchar(12));
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `a` char(12) DEFAULT NULL,
+ `b` varchar(12) DEFAULT NULL
+) ENGINE=MyISAM DEFAULT CHARSET=latin1
+INSERT INTO t1 VALUES('Welcome','Hello, World');
+SELECT * FROM t1;
+a b
+Welcome Hello, World
+CREATE TABLE t1 ENGINE=CONNECT TABLE_TYPE=JDBC
+CONNECTION='jdbc:mysql://127.0.0.1:SLAVE_PORT/test?user=root';
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `a` char(12) DEFAULT NULL,
+ `b` varchar(12) DEFAULT NULL
+) ENGINE=CONNECT DEFAULT CHARSET=latin1 CONNECTION='jdbc:mysql://127.0.0.1:SLAVE_PORT/test?user=root' `TABLE_TYPE`='JDBC'
+SELECT * FROM t1;
+a b
+Welcome Hello, World
+DROP TABLE t1;
+DROP TABLE t1;
+#
+# Testing temporal data types
+#
+CREATE TABLE t1 (a date, b datetime, c time, d timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP, e year);
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `a` date DEFAULT NULL,
+ `b` datetime DEFAULT NULL,
+ `c` time DEFAULT NULL,
+ `d` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP,
+ `e` year(4) DEFAULT NULL
+) ENGINE=MyISAM DEFAULT CHARSET=latin1
+INSERT INTO t1 VALUES('2003-05-27 10:45:23','2003-05-27 10:45:23','2003-05-27 10:45:23','2003-05-27 10:45:23','2003-05-27 10:45:23');
+Warnings:
+Note 1265 Data truncated for column 'a' at row 1
+Note 1265 Data truncated for column 'c' at row 1
+Warning 1265 Data truncated for column 'e' at row 1
+SELECT * FROM t1;
+a b c d e
+2003-05-27 2003-05-27 10:45:23 10:45:23 2003-05-27 10:45:23 2003
+CREATE TABLE t1 ENGINE=CONNECT TABLE_TYPE=JDBC
+CONNECTION='jdbc:mysql://127.0.0.1:SLAVE_PORT/test?user=root';
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `a` date DEFAULT NULL,
+ `b` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP,
+ `c` time DEFAULT NULL,
+ `d` timestamp NOT NULL DEFAULT '0000-00-00 00:00:00',
+ `e` date DEFAULT NULL
+) ENGINE=CONNECT DEFAULT CHARSET=latin1 CONNECTION='jdbc:mysql://127.0.0.1:SLAVE_PORT/test?user=root' `TABLE_TYPE`='JDBC'
+SELECT * FROM t1;
+a b c d e
+2003-05-27 2003-05-27 10:45:23 10:45:23 2003-05-27 10:45:23 1970-01-01
+DROP TABLE t1;
+DROP TABLE t1;
+SET GLOBAL connect_jvm_path=NULL;
+SET GLOBAL connect_class_path=NULL;
+SET GLOBAL time_zone = SYSTEM;
diff --git a/storage/connect/mysql-test/connect/r/jdbc_oracle.result b/storage/connect/mysql-test/connect/r/jdbc_oracle.result
new file mode 100644
index 00000000000..2e36891a037
--- /dev/null
+++ b/storage/connect/mysql-test/connect/r/jdbc_oracle.result
@@ -0,0 +1,70 @@
+CREATE TABLE t2 (
+command varchar(128) not null,
+number int(5) not null flag=1,
+message varchar(255) flag=2)
+ENGINE=CONNECT TABLE_TYPE=JDBC CONNECTION='jdbc:oracle:thin:@localhost:1521:xe'
+OPTION_LIST='User=system,Password=manager,Execsrc=1';
+SELECT * FROM t2 WHERE command = 'drop table employee';
+command number message
+drop table employee 0 Execute: java.sql.SQLSyntaxErrorException: ORA-00942: table or view does not exist
+
+SELECT * FROM t2 WHERE command = 'create table employee (id int not null, name varchar(32), title char(16), salary number(8,2))';
+command number message
+create table employee (id int not null, name varchar(32), title char(16), salary number(8,2)) 0 Affected rows
+SELECT * FROM t2 WHERE command = "insert into employee values(4567,'Johnson', 'Engineer', 12560.50)";
+command number message
+insert into employee values(4567,'Johnson', 'Engineer', 12560.50) 1 Affected rows
+CREATE TABLE t1 ENGINE=CONNECT TABLE_TYPE=JDBC CATFUNC=tables
+CONNECTION='jdbc:oracle:thin:@localhost:1521:xe'
+OPTION_LIST='User=system,Password=manager';
+SELECT * FROM t1 WHERE table_name='employee';
+Table_Cat Table_Schema Table_Name Table_Type Remark
+NULL SYSTEM EMPLOYEE TABLE NULL
+DROP TABLE t1;
+CREATE TABLE t1 ENGINE=CONNECT TABLE_TYPE=JDBC TABNAME='EMPLOYEE' CATFUNC=columns
+CONNECTION='jdbc:oracle:thin:@localhost:1521:xe'
+OPTION_LIST='User=system,Password=manager';
+SELECT * FROM t1;
+Table_Cat Table_Schema Table_Name Column_Name Data_Type Type_Name Column_Size Buffer_Length Decimal_Digits Radix Nullable Remarks
+NULL SYSTEM EMPLOYEE ID 3 NUMBER 38 0 0 10 0 NULL
+NULL SYSTEM EMPLOYEE NAME 12 VARCHAR2 32 0 0 10 1 NULL
+NULL SYSTEM EMPLOYEE TITLE 1 CHAR 16 0 0 10 1 NULL
+NULL SYSTEM EMPLOYEE SALARY 3 NUMBER 8 0 2 10 1 NULL
+DROP TABLE t1;
+CREATE SERVER 'oracle' FOREIGN DATA WRAPPER 'oracle.jdbc.driver.OracleDriver' OPTIONS (
+HOST 'jdbc:oracle:thin:@localhost:1521:xe',
+DATABASE 'SYSTEM',
+USER 'system',
+PASSWORD 'manager',
+PORT 0,
+SOCKET '',
+OWNER 'SYSTEM');
+CREATE TABLE t1 ENGINE=CONNECT TABLE_TYPE=JDBC CONNECTION='oracle' tabname='EMPLOYEE';
+SELECT * FROM t1;
+ID NAME TITLE SALARY
+4567 Johnson Engineer 12560.50
+INSERT INTO t1 VALUES(6214, 'Clinton', 'Retired', NULL);
+Warnings:
+Note 1105 EMPLOYEE: 1 affected rows
+UPDATE t1 set name='Trump' WHERE id = 4567;
+Warnings:
+Note 1105 EMPLOYEE: 1 affected rows
+SELECT * FROM t1;
+ID NAME TITLE SALARY
+4567 Trump Engineer 12560.50
+6214 Clinton Retired 0.00
+DELETE FROM t1 WHERE id = 6214;
+Warnings:
+Note 1105 EMPLOYEE: 1 affected rows
+SELECT * FROM t1;
+ID NAME TITLE SALARY
+4567 Trump Engineer 12560.50
+DROP TABLE t1;
+SELECT * FROM t2 WHERE command = 'drop table employee';
+command number message
+drop table employee 0 Affected rows
+DROP TABLE t2;
+DROP SERVER 'oracle';
+SET GLOBAL connect_jvm_path=NULL;
+SET GLOBAL connect_class_path=NULL;
+SET GLOBAL time_zone = SYSTEM;
diff --git a/storage/connect/mysql-test/connect/r/jdbc_postgresql.result b/storage/connect/mysql-test/connect/r/jdbc_postgresql.result
new file mode 100644
index 00000000000..6d77d79d5d3
--- /dev/null
+++ b/storage/connect/mysql-test/connect/r/jdbc_postgresql.result
@@ -0,0 +1,65 @@
+CREATE TABLE t2 (
+command varchar(128) not null,
+number int(5) not null flag=1,
+message varchar(255) flag=2)
+ENGINE=CONNECT TABLE_TYPE=JDBC CONNECTION='jdbc:postgresql://localhost/mtr'
+OPTION_LIST='User=mtr,Password=mtr,Schema=public,Execsrc=1';
+SELECT * FROM t2 WHERE command='drop table employee';
+command number message
+drop table employee 0 Execute: org.postgresql.util.PSQLException: ERREUR: la table « employee » n'existe pas
+SELECT * FROM t2 WHERE command = 'create table employee (id int not null, name varchar(32), title char(16), salary decimal(8,2))';
+command number message
+create table employee (id int not null, name varchar(32), title char(16), salary decimal(8,2)) 0 Affected rows
+SELECT * FROM t2 WHERE command = "insert into employee values(4567,'Johnson', 'Engineer', 12560.50)";
+command number message
+insert into employee values(4567,'Johnson', 'Engineer', 12560.50) 1 Affected rows
+CREATE TABLE t1 ENGINE=CONNECT TABLE_TYPE=JDBC CATFUNC=tables
+CONNECTION='jdbc:postgresql://localhost/mtr'
+OPTION_LIST='User=mtr,Password=mtr,Schema=public,Tabtype=TABLE,Maxres=10';
+SELECT * FROM t1;
+Table_Cat Table_Schema Table_Name Table_Type Remark
+ public employee TABLE NULL
+ public t1 TABLE NULL
+ public t2 TABLE NULL
+DROP TABLE t1;
+CREATE TABLE t1 ENGINE=CONNECT TABLE_TYPE=JDBC CATFUNC=columns
+CONNECTION='jdbc:postgresql://localhost/mtr' tabname=employee
+OPTION_LIST='User=mtr,Password=mtr,Maxres=10';
+SELECT * FROM t1;
+Table_Cat Table_Schema Table_Name Column_Name Data_Type Type_Name Column_Size Buffer_Length Decimal_Digits Radix Nullable Remarks
+NULL public employee id 4 int4 10 0 0 10 0 NULL
+NULL public employee name 12 varchar 32 0 0 10 1 NULL
+NULL public employee title 1 bpchar 16 0 0 10 1 NULL
+NULL public employee salary 2 numeric 8 0 2 10 1 NULL
+DROP TABLE t1;
+CREATE SERVER 'postgresql' FOREIGN DATA WRAPPER 'postgresql' OPTIONS (
+HOST 'localhost',
+DATABASE 'mtr',
+USER 'mtr',
+PASSWORD 'mtr',
+PORT 0,
+SOCKET '',
+OWNER 'root');
+CREATE TABLE t1 ENGINE=CONNECT TABLE_TYPE=JDBC CONNECTION='postgresql/public.employee';
+SELECT * FROM t1;
+id name title salary
+4567 Johnson Engineer 12560.50
+INSERT INTO t1 VALUES(3126,'Smith', 'Clerk', 5230.00);
+Warnings:
+Note 1105 public.employee: 1 affected rows
+UPDATE t1 SET salary = salary + 100.00;
+Warnings:
+Note 1105 public.employee: 2 affected rows
+SELECT * FROM t1;
+id name title salary
+4567 Johnson Engineer 12660.50
+3126 Smith Clerk 5330.00
+DROP TABLE t1;
+DROP SERVER 'postgresql';
+SELECT * FROM t2 WHERE command='drop table employee';
+command number message
+drop table employee 0 Affected rows
+DROP TABLE t2;
+SET GLOBAL connect_jvm_path=NULL;
+SET GLOBAL connect_class_path=NULL;
+SET GLOBAL time_zone = SYSTEM;
diff --git a/storage/connect/mysql-test/connect/std_data/girls.txt b/storage/connect/mysql-test/connect/std_data/girls.txt
new file mode 100644
index 00000000000..12ce8babbaf
--- /dev/null
+++ b/storage/connect/mysql-test/connect/std_data/girls.txt
@@ -0,0 +1,5 @@
+Mary Boston 25
+Nancy Palo Alto 23
+Susan Chicago 18
+Betty Chicago 32
+Anne Denver 23
diff --git a/storage/connect/mysql-test/connect/t/jdbc.test b/storage/connect/mysql-test/connect/t/jdbc.test
new file mode 100644
index 00000000000..9389747ad9c
--- /dev/null
+++ b/storage/connect/mysql-test/connect/t/jdbc.test
@@ -0,0 +1,143 @@
+-- source jdbconn.inc
+
+let $MYSQLD_DATADIR= `select @@datadir`;
+--copy_file $MTR_SUITE_DIR/std_data/girls.txt $MYSQLD_DATADIR/test/girls.txt
+
+let $PORT= `select @@port`;
+
+#
+# This test is run against a local MariaDB server
+#
+CREATE DATABASE connect;
+USE connect;
+CREATE TABLE t2 (
+ id bigint not null,
+ msg varchar(500),
+ tm time,
+ dt date,
+ dtm datetime,
+ ts timestamp);
+INSERT INTO t2 VALUES(455000000000, 'A very big number', '18:10:25', '2016-03-16', '1999-12-11 23:01:52', '2015-07-24 09:32:45');
+SELECT * FROM t2;
+
+--echo #
+--echo # Testing JDBC connection to MySQL driver
+--echo #
+USE test;
+--replace_result $PORT PORT
+--eval CREATE TABLE t1 ENGINE=CONNECT TABLE_TYPE=JDBC TABNAME=t2 CONNECTION='jdbc:mysql://localhost:$PORT/connect?user=root'
+SELECT * FROM t1;
+INSERT INTO t1 VALUES(786325481247, 'Hello!', '19:45:03', '1933-08-10', '1985-11-12 09:02:44', '2014-06-17 10:32:01');
+SELECT * FROM t1;
+DELETE FROM t1 WHERE msg = 'Hello!';
+SELECT * FROM t1;
+DROP TABLE t1;
+
+--echo #
+--echo # Testing JDBC view
+--echo #
+--replace_result $PORT PORT
+--eval CREATE TABLE t1 ENGINE=CONNECT TABLE_TYPE=JDBC SRCDEF='select id, msg, tm, dt from t2' CONNECTION='jdbc:mysql://localhost:$PORT/connect?user=root'
+SELECT * FROM t1;
+SELECT msg, dt FROM t1;
+DROP TABLE t1, connect.t2;
+
+--echo #
+--echo # Testing JDBC write operations
+--echo #
+USE connect;
+--copy_file $MTR_SUITE_DIR/std_data/boys.txt $MYSQLD_DATADIR/connect/boys.txt
+CREATE TABLE boys (
+ name CHAR(12) NOT NULL,
+ city CHAR(11),
+ birth DATE DATE_FORMAT='DD/MM/YYYY',
+ hired DATE DATE_FORMAT='DD/MM/YYYY' flag=36)
+ENGINE=CONNECT TABLE_TYPE=FIX FILE_NAME='boys.txt' ENDING=1;
+SELECT * FROM boys;
+
+USE test;
+CREATE TABLE t3 (
+ name CHAR(12) NOT NULL,
+ city CHAR(12),
+ birth DATE,
+ hired DATE);
+INSERT INTO t3 VALUES('Donald','Atlanta','1999-04-01','2016-03-31'),('Mick','New York','1980-01-20','2002-09-11');
+SELECT * FROM t3;
+
+--replace_result $PORT PORT
+--eval CREATE TABLE t1 ENGINE=CONNECT TABLE_TYPE=JDBC TABNAME=boys CONNECTION='jdbc:mysql://localhost:$PORT/connect?user=root' OPTION_LIST='scrollable=1'
+SELECT * FROM t1;
+UPDATE t1 SET city = 'Phoenix' WHERE name = 'Henry';
+INSERT INTO t1 SELECT * FROM t3;
+INSERT INTO t1 VALUES('Tom','Seatle','2002-03-15',NULL);
+SELECT * FROM t1;
+DROP TABLE t3;
+
+--echo #
+--echo # Testing JDBC join operations
+--echo #
+CREATE TABLE t3 (
+ name CHAR(9) NOT NULL,
+ city CHAR(12) NOT NULL,
+ age INT(2))
+engine=CONNECT table_type=FIX file_name='girls.txt';
+SELECT g.name, b.name, g.city FROM t3 g STRAIGHT_JOIN connect.boys b where g.city = b.city;
+SELECT g.name, b.name, g.city FROM t3 g STRAIGHT_JOIN t1 b where g.city = b.city;
+DROP TABLE t1, t3, connect.boys;
+
+--echo #
+--echo # Testing MariaDB JDBC driver
+--echo #
+USE connect;
+--copy_file $MTR_SUITE_DIR/std_data/employee.dat $MYSQLD_DATADIR/connect/employee.dat
+CREATE TABLE emp (
+ serialno CHAR(5) NOT NULL,
+ name VARCHAR(12) NOT NULL FLAG=6,
+ sex TINYINT(1) NOT NULL,
+ title VARCHAR(15) NOT NULL FLAG=20,
+ manager CHAR(5) NOT NULL,
+ department CHAR(4) NOT NULL FLAG=41,
+ secretary CHAR(5) NOT NULL FLAG=46,
+ salary DOUBLE(8,2) NOT NULL FLAG=52)
+ENGINE=connect TABLE_TYPE=fix FILE_NAME='employee.dat' ENDING=1;
+SELECT * FROM emp;
+
+USE test;
+--replace_result $PORT PORT
+--eval CREATE TABLE t1 ENGINE=CONNECT TABLE_TYPE=JDBC TABNAME=emp CONNECTION='jdbc:mariadb://localhost:$PORT/connect?user=root'
+--replace_result $PORT PORT
+--eval SHOW CREATE TABLE t1
+SELECT * FROM t1;
+SELECT name, title, salary FROM t1 WHERE sex = 1;
+
+DROP TABLE t1, connect.emp;
+
+#
+# Testing remote command execution
+#
+--replace_result $PORT PORT
+--eval CREATE TABLE t2 (command varchar(128) not null,number int(5) not null flag=1,message varchar(255) flag=2) ENGINE=CONNECT TABLE_TYPE=JDBC CONNECTION='jdbc:mariadb://localhost:$PORT/connect' OPTION_LIST='User=root,Execsrc=1'
+SELECT * FROM t2 WHERE command='drop table tx1';
+SELECT * FROM t2 WHERE command = 'create table tx1 (a int not null, b char(32), c double(8,2))';
+SELECT * FROM t2 WHERE command in ('insert into tx1 values(1,''The number one'',456.12)',"insert into tx1(a,b) values(2,'The number two'),(3,'The number three')");
+SELECT * FROM t2 WHERE command='update tx1 set c = 3.1416 where a = 2';
+SELECT * FROM t2 WHERE command='select * from tx1';
+SELECT * FROM t2 WHERE command='delete from tx1 where a = 2';
+SELECT * FROM connect.tx1;
+DROP TABLE t2;
+
+--replace_result $PORT PORT
+--eval CREATE TABLE t1 ENGINE=CONNECT TABLE_TYPE=JDBC CATFUNC=tables CONNECTION='jdbc:mariadb://localhost:$PORT/connect' option_list='User=root,Maxres=50'
+SELECT * FROM t1;
+DROP TABLE t1;
+DROP TABLE connect.tx1;
+
+#
+# Clean up
+#
+--remove_file $MYSQLD_DATADIR/connect/boys.txt
+--remove_file $MYSQLD_DATADIR/connect/employee.dat
+DROP DATABASE connect;
+--remove_file $MYSQLD_DATADIR/test/girls.txt
+
+-- source jdbconn_cleanup.inc
diff --git a/storage/connect/mysql-test/connect/t/jdbc_new.test b/storage/connect/mysql-test/connect/t/jdbc_new.test
new file mode 100644
index 00000000000..33ec1b343cc
--- /dev/null
+++ b/storage/connect/mysql-test/connect/t/jdbc_new.test
@@ -0,0 +1,179 @@
+#
+# This test is run against a remote MySQL server
+#
+connect (master,127.0.0.1,root,,test,$MASTER_MYPORT,);
+connect (slave,127.0.0.1,root,,test,$SLAVE_MYPORT,);
+connection master;
+
+-- source jdbconn.inc
+
+connection slave;
+CREATE TABLE t1 (a int, b char(10));
+INSERT INTO t1 VALUES (NULL,NULL),(0,'test00'),(1,'test01'),(2,'test02'),(3,'test03');
+SELECT * FROM t1;
+
+--echo #
+--echo # Testing errors
+--echo #
+connection master;
+
+# Bad user name
+# Suppress "mysql_real_connect failed:" (printed in _DEBUG build)
+--replace_result $SLAVE_MYPORT SLAVE_PORT "mysql_real_connect failed: " ""
+eval CREATE TABLE t1 ENGINE=CONNECT TABLE_TYPE=JDBC
+ CONNECTION='jdbc:mysql://127.0.0.1:$SLAVE_MYPORT/test?user=unknown';
+--error ER_GET_ERRMSG
+SELECT * FROM t1;
+DROP TABLE t1;
+
+# Bad database name
+--replace_result $SLAVE_MYPORT SLAVE_PORT "mysql_real_connect failed: " ""
+--error ER_UNKNOWN_ERROR
+eval CREATE TABLE t1 ENGINE=CONNECT TABLE_TYPE=JDBC
+ CONNECTION='jdbc:mysql://127.0.0.1:$SLAVE_MYPORT/unknown?user=root';
+
+# Bad table name
+--replace_result $SLAVE_MYPORT SLAVE_PORT
+--error ER_UNKNOWN_ERROR
+eval CREATE TABLE t1 ENGINE=CONNECT TABLE_TYPE=JDBC TABNAME='unknown'
+ CONNECTION='jdbc:mysql://127.0.0.1:$SLAVE_MYPORT/test?user=root';
+--error ER_NO_SUCH_TABLE
+SHOW CREATE TABLE t1;
+
+# Bad column name
+--replace_result $SLAVE_MYPORT SLAVE_PORT
+eval CREATE TABLE t1 (x int, y char(10)) ENGINE=CONNECT TABLE_TYPE=JDBC
+ CONNECTION='jdbc:mysql://127.0.0.1:$SLAVE_MYPORT/test?user=root';
+--replace_result $SLAVE_MYPORT SLAVE_PORT
+SHOW CREATE TABLE t1;
+--error ER_GET_ERRMSG
+SELECT * FROM t1;
+DROP TABLE t1;
+
+# The remote table disappeared
+--replace_result $SLAVE_MYPORT SLAVE_PORT
+eval CREATE TABLE t1 (a int, b char(10)) ENGINE=CONNECT TABLE_TYPE=JDBC
+ CONNECTION='jdbc:mysql://127.0.0.1:$SLAVE_MYPORT/test?user=root';
+
+connection slave;
+ALTER TABLE t1 RENAME t1backup;
+
+connection master;
+--error ER_GET_ERRMSG
+SELECT * FROM t1;
+
+connection slave;
+ALTER TABLE t1backup RENAME t1;
+
+connection master;
+DROP TABLE t1;
+
+--echo #
+--echo # Testing SELECT, etc.
+--echo #
+
+# Automatic table structure
+--replace_result $SLAVE_MYPORT SLAVE_PORT
+eval CREATE TABLE t1 ENGINE=CONNECT TABLE_TYPE=JDBC
+ CONNECTION='jdbc:mysql://127.0.0.1:$SLAVE_MYPORT/test?user=root';
+--replace_result $SLAVE_MYPORT SLAVE_PORT
+SHOW CREATE TABLE t1;
+SELECT * FROM t1;
+DROP TABLE t1;
+
+# Explicit table structure
+--replace_result $SLAVE_MYPORT SLAVE_PORT
+eval CREATE TABLE t1 (a int, b char(10)) ENGINE=CONNECT TABLE_TYPE=JDBC TABNAME='t1'
+ CONNECTION='jdbc:mysql://127.0.0.1:$SLAVE_MYPORT/test?user=root';
+--replace_result $SLAVE_MYPORT SLAVE_PORT
+SHOW CREATE TABLE t1;
+SELECT * FROM t1;
+DROP TABLE t1;
+
+# Explicit table structure: remote NULL, local NOT NULL
+--replace_result $SLAVE_MYPORT SLAVE_PORT
+eval CREATE TABLE t1 (a INT NOT NULL, b CHAR(10) NOT NULL) ENGINE=CONNECT TABLE_TYPE=JDBC
+ CONNECTION='jdbc:mysql://127.0.0.1:$SLAVE_MYPORT/test?user=root';
+--replace_result $SLAVE_MYPORT SLAVE_PORT
+SHOW CREATE TABLE t1;
+SELECT * FROM t1;
+DROP TABLE t1;
+
+# Explicit table structure with wrong column types
+--replace_result $SLAVE_MYPORT SLAVE_PORT
+eval CREATE TABLE t1 (a char(10), b int) ENGINE=CONNECT TABLE_TYPE=JDBC
+ CONNECTION='jdbc:mysql://127.0.0.1:$SLAVE_MYPORT/test?user=root';
+--replace_result $SLAVE_MYPORT SLAVE_PORT
+SHOW CREATE TABLE t1;
+SELECT * FROM t1;
+DROP TABLE t1;
+
+connection slave;
+DROP TABLE t1;
+
+--echo #
+--echo # Testing numeric data types
+--echo #
+
+# TODO: mediumint is converted to int, float is converted to double, decimal is converted to double
+CREATE TABLE t1 (a tinyint, b smallint, c mediumint, d int, e bigint, f float, g double, h decimal(20,5));
+SHOW CREATE TABLE t1;
+INSERT INTO t1 VALUES(100,3333,41235,1234567890,235000000000,3.14159265,3.14159265,3141.59265);
+
+connection master;
+--replace_result $SLAVE_MYPORT SLAVE_PORT
+eval CREATE TABLE t1 ENGINE=CONNECT TABLE_TYPE=JDBC
+ CONNECTION='jdbc:mysql://127.0.0.1:$SLAVE_MYPORT/test?user=root';
+--replace_result $SLAVE_MYPORT SLAVE_PORT
+SHOW CREATE TABLE t1;
+SELECT * FROM t1;
+DROP TABLE t1;
+
+connection slave;
+DROP TABLE t1;
+
+--echo #
+--echo # Testing character data types
+--echo #
+
+CREATE TABLE t1 (a char(12), b varchar(12));
+SHOW CREATE TABLE t1;
+INSERT INTO t1 VALUES('Welcome','Hello, World');
+SELECT * FROM t1;
+
+connection master;
+--replace_result $SLAVE_MYPORT SLAVE_PORT
+eval CREATE TABLE t1 ENGINE=CONNECT TABLE_TYPE=JDBC
+ CONNECTION='jdbc:mysql://127.0.0.1:$SLAVE_MYPORT/test?user=root';
+--replace_result $SLAVE_MYPORT SLAVE_PORT
+SHOW CREATE TABLE t1;
+SELECT * FROM t1;
+DROP TABLE t1;
+
+connection slave;
+DROP TABLE t1;
+
+--echo #
+--echo # Testing temporal data types
+--echo #
+
+CREATE TABLE t1 (a date, b datetime, c time, d timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP, e year);
+SHOW CREATE TABLE t1;
+INSERT INTO t1 VALUES('2003-05-27 10:45:23','2003-05-27 10:45:23','2003-05-27 10:45:23','2003-05-27 10:45:23','2003-05-27 10:45:23');
+SELECT * FROM t1;
+
+connection master;
+--replace_result $SLAVE_MYPORT SLAVE_PORT
+eval CREATE TABLE t1 ENGINE=CONNECT TABLE_TYPE=JDBC
+ CONNECTION='jdbc:mysql://127.0.0.1:$SLAVE_MYPORT/test?user=root';
+--replace_result $SLAVE_MYPORT SLAVE_PORT
+SHOW CREATE TABLE t1;
+SELECT * FROM t1;
+DROP TABLE t1;
+
+connection slave;
+DROP TABLE t1;
+
+connection master;
+-- source jdbconn_cleanup.inc
+
diff --git a/storage/connect/mysql-test/connect/t/jdbc_oracle.test b/storage/connect/mysql-test/connect/t/jdbc_oracle.test
new file mode 100644
index 00000000000..10cb7a7b77d
--- /dev/null
+++ b/storage/connect/mysql-test/connect/t/jdbc_oracle.test
@@ -0,0 +1,56 @@
+-- source jdbconn.inc
+
+#
+# This test is run against Oracle driver
+#
+CREATE TABLE t2 (
+ command varchar(128) not null,
+ number int(5) not null flag=1,
+ message varchar(255) flag=2)
+ENGINE=CONNECT TABLE_TYPE=JDBC CONNECTION='jdbc:oracle:thin:@localhost:1521:xe'
+OPTION_LIST='User=system,Password=manager,Execsrc=1';
+SELECT * FROM t2 WHERE command = 'drop table employee';
+SELECT * FROM t2 WHERE command = 'create table employee (id int not null, name varchar(32), title char(16), salary number(8,2))';
+SELECT * FROM t2 WHERE command = "insert into employee values(4567,'Johnson', 'Engineer', 12560.50)";
+
+CREATE TABLE t1 ENGINE=CONNECT TABLE_TYPE=JDBC CATFUNC=tables
+CONNECTION='jdbc:oracle:thin:@localhost:1521:xe'
+OPTION_LIST='User=system,Password=manager';
+SELECT * FROM t1 WHERE table_name='employee';
+DROP TABLE t1;
+
+CREATE TABLE t1 ENGINE=CONNECT TABLE_TYPE=JDBC TABNAME='EMPLOYEE' CATFUNC=columns
+CONNECTION='jdbc:oracle:thin:@localhost:1521:xe'
+OPTION_LIST='User=system,Password=manager';
+SELECT * FROM t1;
+DROP TABLE t1;
+
+#
+# Test connecting via a Federated server
+#
+CREATE SERVER 'oracle' FOREIGN DATA WRAPPER 'oracle.jdbc.driver.OracleDriver' OPTIONS (
+HOST 'jdbc:oracle:thin:@localhost:1521:xe',
+DATABASE 'SYSTEM',
+USER 'system',
+PASSWORD 'manager',
+PORT 0,
+SOCKET '',
+OWNER 'SYSTEM');
+
+CREATE TABLE t1 ENGINE=CONNECT TABLE_TYPE=JDBC CONNECTION='oracle' tabname='EMPLOYEE';
+SELECT * FROM t1;
+INSERT INTO t1 VALUES(6214, 'Clinton', 'Retired', NULL);
+UPDATE t1 set name='Trump' WHERE id = 4567;
+SELECT * FROM t1;
+DELETE FROM t1 WHERE id = 6214;
+SELECT * FROM t1;
+DROP TABLE t1;
+SELECT * FROM t2 WHERE command = 'drop table employee';
+DROP TABLE t2;
+DROP SERVER 'oracle';
+
+#
+# Clean up
+#
+
+-- source jdbconn_cleanup.inc
diff --git a/storage/connect/mysql-test/connect/t/jdbc_postgresql.test b/storage/connect/mysql-test/connect/t/jdbc_postgresql.test
new file mode 100644
index 00000000000..1041ef468d7
--- /dev/null
+++ b/storage/connect/mysql-test/connect/t/jdbc_postgresql.test
@@ -0,0 +1,53 @@
+-- source jdbconn.inc
+
+#
+# This test is run against Postgresql driver
+#
+CREATE TABLE t2 (
+ command varchar(128) not null,
+ number int(5) not null flag=1,
+ message varchar(255) flag=2)
+ENGINE=CONNECT TABLE_TYPE=JDBC CONNECTION='jdbc:postgresql://localhost/mtr'
+OPTION_LIST='User=mtr,Password=mtr,Schema=public,Execsrc=1';
+SELECT * FROM t2 WHERE command='drop table employee';
+SELECT * FROM t2 WHERE command = 'create table employee (id int not null, name varchar(32), title char(16), salary decimal(8,2))';
+SELECT * FROM t2 WHERE command = "insert into employee values(4567,'Johnson', 'Engineer', 12560.50)";
+
+CREATE TABLE t1 ENGINE=CONNECT TABLE_TYPE=JDBC CATFUNC=tables
+CONNECTION='jdbc:postgresql://localhost/mtr'
+OPTION_LIST='User=mtr,Password=mtr,Schema=public,Tabtype=TABLE,Maxres=10';
+SELECT * FROM t1;
+DROP TABLE t1;
+
+CREATE TABLE t1 ENGINE=CONNECT TABLE_TYPE=JDBC CATFUNC=columns
+CONNECTION='jdbc:postgresql://localhost/mtr' tabname=employee
+OPTION_LIST='User=mtr,Password=mtr,Maxres=10';
+SELECT * FROM t1;
+DROP TABLE t1;
+
+#
+# Test connecting via a Federated server
+#
+CREATE SERVER 'postgresql' FOREIGN DATA WRAPPER 'postgresql' OPTIONS (
+HOST 'localhost',
+DATABASE 'mtr',
+USER 'mtr',
+PASSWORD 'mtr',
+PORT 0,
+SOCKET '',
+OWNER 'root');
+
+CREATE TABLE t1 ENGINE=CONNECT TABLE_TYPE=JDBC CONNECTION='postgresql/public.employee';
+SELECT * FROM t1;
+INSERT INTO t1 VALUES(3126,'Smith', 'Clerk', 5230.00);
+UPDATE t1 SET salary = salary + 100.00;
+SELECT * FROM t1;
+DROP TABLE t1;
+DROP SERVER 'postgresql';
+SELECT * FROM t2 WHERE command='drop table employee';
+DROP TABLE t2;
+
+#
+# Clean up
+#
+-- source jdbconn_cleanup.inc
diff --git a/storage/connect/mysql-test/connect/t/jdbconn.inc b/storage/connect/mysql-test/connect/t/jdbconn.inc
new file mode 100644
index 00000000000..0bac0b35fc4
--- /dev/null
+++ b/storage/connect/mysql-test/connect/t/jdbconn.inc
@@ -0,0 +1,31 @@
+--source include/not_embedded.inc
+
+--disable_query_log
+--error 0,ER_UNKNOWN_ERROR
+CREATE TABLE t1 ENGINE=CONNECT TABLE_TYPE=JDBC CATFUNC=drivers;
+if (!`SELECT count(*) FROM INFORMATION_SCHEMA.TABLES
+ WHERE TABLE_SCHEMA='test' AND TABLE_NAME='t1'
+ AND ENGINE='CONNECT'
+ AND (CREATE_OPTIONS LIKE "%`table_type`='JDBC'%" OR CREATE_OPTIONS LIKE '%`table_type`=JDBC%')`)
+{
+ Skip Need Java support;
+}
+DROP TABLE t1;
+
+# These settings are machine-specific, which is why this test is disabled by default.
+# Edit this file to reflect the required file locations on your machine.
+# This is the path to the JVM library (dll or so)
+SET GLOBAL connect_jvm_path='C:\\Program Files\\Java\\jdk1.8.0_77\\jre\\bin\\client';
+
+# The complete class path sent when creating the Java Virtual Machine is, in this order:
+# 1 - The current directory.
+# 2 - The paths of the connect_class_path global variable.
+# 3 - The paths of the CLASSPATH environment variable.
+# These are the paths to the needed classes or jar files. The Apache ones are only for the JdbcApacheInterface wrapper.
+SET GLOBAL connect_class_path='E:\\MariaDB-10.1\\Connect\\storage\\connect;E:\\MariaDB-10.1\\Connect\\sql\\data\\postgresql-9.4.1208.jar;E:\\Oracle\\ojdbc6.jar;E:\\Apache\\commons-dbcp2-2.1.1\\commons-dbcp2-2.1.1.jar;E:\\Apache\\commons-pool2-2.4.2\\commons-pool2-2.4.2.jar;E:\\Apache\\commons-logging-1.2\\commons-logging-1.2.jar';
+
+# On my machine, paths to the JDK classes and to the MySQL and MariaDB drivers are defined in the CLASSPATH environment variable.
+#CREATE FUNCTION envar RETURNS STRING SONAME 'ha_connect.dll';
+#SELECT envar('CLASSPATH');
+
+--enable_query_log
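The three class path sources listed above are concatenated into the single -Djava.class.path option passed when the JVM is created. A self-contained JNI sketch of that assembly, not taken from the wrapper code; the function name and the hard-coded ';' separator (':' on Unix) are assumptions:

#include <jni.h>
#include <string>
#include <cstdlib>

static JavaVM *jvm;
static JNIEnv *jenv;

// Build "-Djava.class.path=." + connect_class_path + CLASSPATH, in the
// documented order, then create the JVM. Error handling is minimal.
bool start_jvm(const char *connect_class_path)
{
  std::string cp = "-Djava.class.path=.";          // 1 - current directory
  if (connect_class_path && *connect_class_path)
    cp += std::string(";") + connect_class_path;   // 2 - the global variable
  if (const char *envcp = getenv("CLASSPATH"))
    cp += std::string(";") + envcp;                // 3 - the environment

  JavaVMOption opt;
  opt.optionString = const_cast<char*>(cp.c_str());

  JavaVMInitArgs args;
  args.version = JNI_VERSION_1_6;
  args.nOptions = 1;
  args.options = &opt;
  args.ignoreUnrecognized = JNI_FALSE;

  return JNI_CreateJavaVM(&jvm, (void**)&jenv, &args) == JNI_OK;
}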
diff --git a/storage/connect/mysql-test/connect/t/jdbconn_cleanup.inc b/storage/connect/mysql-test/connect/t/jdbconn_cleanup.inc
new file mode 100644
index 00000000000..48e321495ad
--- /dev/null
+++ b/storage/connect/mysql-test/connect/t/jdbconn_cleanup.inc
@@ -0,0 +1,6 @@
+--disable_warnings
+#DROP FUNCTION envar;
+SET GLOBAL connect_jvm_path=NULL;
+SET GLOBAL connect_class_path=NULL;
+SET GLOBAL time_zone = SYSTEM;
+--enable_warnings
diff --git a/storage/connect/plgdbutl.cpp b/storage/connect/plgdbutl.cpp
index d70eeb32a04..13c0dfd1e18 100644
--- a/storage/connect/plgdbutl.cpp
+++ b/storage/connect/plgdbutl.cpp
@@ -5,7 +5,7 @@
/* */
/* COPYRIGHT: */
/* ---------- */
-/* (C) Copyright to the author Olivier BERTRAND 1998-2015 */
+/* (C) Copyright to the author Olivier BERTRAND 1998-2016 */
/* */
/* WHAT THIS PROGRAM DOES: */
/* ----------------------- */
@@ -46,9 +46,9 @@
#else // !__WIN__
#include <unistd.h>
#include <fcntl.h>
-#if defined(THREAD)
+//#if defined(THREAD)
#include <pthread.h>
-#endif // THREAD
+//#endif // THREAD
#include <stdarg.h>
#define BIGMEM 2147483647 // Max int value
#endif // !__WIN__
@@ -70,17 +70,6 @@
#include "rcmsg.h"
/***********************************************************************/
-/* Macro or external routine definition */
-/***********************************************************************/
-#if defined(THREAD)
-#if defined(__WIN__)
-extern CRITICAL_SECTION parsec; // Used calling the Flex parser
-#else // !__WIN__
-extern pthread_mutex_t parmut;
-#endif // !__WIN__
-#endif // THREAD
-
-/***********************************************************************/
/* DB static variables. */
/***********************************************************************/
bool Initdone = false;
@@ -90,6 +79,12 @@ extern "C" {
extern char version[];
} // extern "C"
+#if defined(__WIN__)
+extern CRITICAL_SECTION parsec; // Used calling the Flex parser
+#else // !__WIN__
+extern pthread_mutex_t parmut;
+#endif // !__WIN__
+
// The debug trace used by the main thread
FILE *pfile = NULL;
@@ -702,21 +697,21 @@ PDTP MakeDateFormat(PGLOBAL g, PSZ dfmt, bool in, bool out, int flag)
/* Call the FLEX generated parser. In multi-threading mode the next */
/* instruction is included in an Enter/LeaveCriticalSection bracket. */
/*********************************************************************/
-#if defined(THREAD)
+ //#if defined(THREAD)
#if defined(__WIN__)
EnterCriticalSection((LPCRITICAL_SECTION)&parsec);
#else // !__WIN__
pthread_mutex_lock(&parmut);
#endif // !__WIN__
-#endif // THREAD
+//#endif // THREAD
rc = fmdflex(pdp);
-#if defined(THREAD)
+//#if defined(THREAD)
#if defined(__WIN__)
LeaveCriticalSection((LPCRITICAL_SECTION)&parsec);
#else // !__WIN__
pthread_mutex_unlock(&parmut);
#endif // !__WIN__
-#endif // THREAD
+//#endif // THREAD
if (trace)
htrc("Done: in=%s out=%s rc=%d\n", SVP(pdp->InFmt), SVP(pdp->OutFmt), rc);
diff --git a/storage/connect/plugutil.c b/storage/connect/plugutil.c
index 38e28a171b2..2551b603349 100644
--- a/storage/connect/plugutil.c
+++ b/storage/connect/plugutil.c
@@ -516,7 +516,9 @@ void *PlugSubAlloc(PGLOBAL g, void *memp, size_t size)
if (trace)
htrc("PlugSubAlloc: %s\n", g->Message);
- longjmp(g->jumper[g->jump_level], 1);
+ /* Nothing we can do if longjmp is not initialized. */
+ assert(g->jump_level >= 0);
+ longjmp(g->jumper[g->jump_level], 1);
} /* endif size OS32 code */
/*********************************************************************/
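The added assert documents PlugSubAlloc's error protocol: allocation failures do not return, they longjmp to the handler the caller pushed in g->jumper, so calling it with g->jump_level < 0 is a programming error. A minimal sketch of the caller's side of that protocol; the field names follow the diff, the recovery body is an assumption:

#include <csetjmp>

// Caller-side pattern for PlugSubAlloc's longjmp-based error handling.
// PGLOBAL g carries jumper[] and jump_level, as in the hunk above.
void *alloc_or_recover(PGLOBAL g, size_t size)
{
  void *memp;

  if (setjmp(g->jumper[++g->jump_level]) != 0) {
    // We arrive here via longjmp(g->jumper[g->jump_level], 1): the
    // allocation failed and g->Message holds the reason.
    memp = NULL;
  } else
    memp = PlugSubAlloc(g, NULL, size);

  g->jump_level--;                 // pop our handler on both paths
  return memp;
}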
diff --git a/storage/connect/reldef.cpp b/storage/connect/reldef.cpp
index 4b9d99bd8c9..2c8ada52e6f 100644
--- a/storage/connect/reldef.cpp
+++ b/storage/connect/reldef.cpp
@@ -514,10 +514,11 @@ PTABDEF OEMDEF::GetXdef(PGLOBAL g)
} // endif getdef
#else // !__WIN__
const char *error = NULL;
- Dl_info dl_info;
#if 0 // Don't know what all this stuff does
- // The OEM lib must retrieve exported CONNECT variables
+ Dl_info dl_info;
+
+ // The OEM lib must retrieve exported CONNECT variables
if (dladdr(&connect_hton, &dl_info)) {
if (dlopen(dl_info.dli_fname, RTLD_NOLOAD | RTLD_NOW | RTLD_GLOBAL) == 0) {
error = dlerror();
diff --git a/storage/connect/tabcol.cpp b/storage/connect/tabcol.cpp
index 662c0b514cf..fde1baa6317 100644
--- a/storage/connect/tabcol.cpp
+++ b/storage/connect/tabcol.cpp
@@ -50,7 +50,7 @@ XTAB::XTAB(PTABLE tp) : Name(tp->Name)
Qualifier = tp->Qualifier;
if (trace)
- htrc(" making copy TABLE %s %s\n", Name, Srcdef);
+ htrc(" making copy TABLE %s %s\n", Name, SVP(Srcdef));
} // end of XTAB constructor
diff --git a/storage/connect/tabtbl.cpp b/storage/connect/tabtbl.cpp
index 36849146746..e3baf7c3da5 100644
--- a/storage/connect/tabtbl.cpp
+++ b/storage/connect/tabtbl.cpp
@@ -569,6 +569,9 @@ pthread_handler_t ThreadOpen(void *p)
if (!my_thread_init()) {
set_current_thd(cmp->Thd);
+ if (trace)
+ htrc("ThreadOpen: Thd=%d\n", cmp->Thd);
+
// Try to open the connection
if (!cmp->Tap->GetTo_Tdb()->OpenDB(cmp->G)) {
cmp->Ready = true;
@@ -604,9 +607,14 @@ void TDBTBM::ResetDB(void)
if (colp->GetAmType() == TYPE_AM_TABID)
colp->COLBLK::Reset();
+ // Local tables
for (PTABLE tabp = Tablist; tabp; tabp = tabp->GetNext())
((PTDBASE)tabp->GetTo_Tdb())->ResetDB();
+ // Remote tables
+ for (PTBMT tp = Tmp; tp; tp = tp->Next)
+ ((PTDBASE)tp->Tap->GetTo_Tdb())->ResetDB();
+
Tdbp = (Tablist) ? (PTDBASE)Tablist->GetTo_Tdb() : NULL;
Crp = 0;
} // end of ResetDB
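ThreadOpen above shows the bootstrap every auxiliary server thread needs before touching THD-bound code: my_thread_init first, then adopting the spawner's THD with set_current_thd. A skeletal version of the pattern; open_remote and the my_thread_end placement are assumptions, only Thd and Ready come from the diff:

// Skeleton of the worker-thread bootstrap used by ThreadOpen above.
pthread_handler_t worker(void *p)
{
  PTBMT cmp = (PTBMT)p;

  if (!my_thread_init()) {           // per-thread mysys initialization
    set_current_thd(cmp->Thd);       // adopt the THD of the spawning thread

    if (!open_remote(cmp))           // assumption: the real payload
      cmp->Ready = true;             // signal the spawner, as above

    my_thread_end();                 // must balance my_thread_init()
  }

  return 0;
}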
diff --git a/storage/innobase/btr/btr0btr.cc b/storage/innobase/btr/btr0btr.cc
index be5b04787dd..33ca57c9654 100644
--- a/storage/innobase/btr/btr0btr.cc
+++ b/storage/innobase/btr/btr0btr.cc
@@ -1,6 +1,6 @@
/*****************************************************************************
-Copyright (c) 1994, 2015, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 1994, 2016, Oracle and/or its affiliates. All Rights Reserved.
Copyright (c) 2012, Facebook Inc.
Copyright (c) 2014, 2015, MariaDB Corporation
@@ -1126,7 +1126,7 @@ that the caller has made the reservation for free extents!
@retval block, rw_lock_x_lock_count(&block->lock) == 1 if allocation succeeded
(init_mtr == mtr, or the page was not previously freed in mtr)
@retval block (not allocated or initialized) otherwise */
-static __attribute__((nonnull, warn_unused_result))
+static MY_ATTRIBUTE((nonnull, warn_unused_result))
buf_block_t*
btr_page_alloc_low(
/*===============*/
@@ -2181,7 +2181,8 @@ btr_parse_page_reorganize(
{
ulint level = page_zip_level;
- ut_ad(ptr && end_ptr);
+ ut_ad(ptr != NULL);
+ ut_ad(end_ptr != NULL);
/* If dealing with a compressed page the record has the
compression level used during original compression written in
@@ -2653,7 +2654,7 @@ func_exit:
Returns TRUE if the insert fits on the appropriate half-page with the
chosen split_rec.
@return true if fits */
-static __attribute__((nonnull(1,3,4,6), warn_unused_result))
+static MY_ATTRIBUTE((nonnull(1,3,4,6), warn_unused_result))
bool
btr_page_insert_fits(
/*=================*/
@@ -2796,7 +2797,7 @@ btr_insert_on_non_leaf_level_func(
/**************************************************************//**
Attaches the halves of an index page on the appropriate level in an
index tree. */
-static __attribute__((nonnull))
+static MY_ATTRIBUTE((nonnull))
void
btr_attach_half_pages(
/*==================*/
@@ -2932,7 +2933,7 @@ btr_attach_half_pages(
/*************************************************************//**
Determine if a tuple is smaller than any record on the page.
@return TRUE if smaller */
-static __attribute__((nonnull, warn_unused_result))
+static MY_ATTRIBUTE((nonnull, warn_unused_result))
bool
btr_page_tuple_smaller(
/*===================*/
@@ -3524,7 +3525,8 @@ btr_level_list_remove_func(
ulint prev_page_no;
ulint next_page_no;
- ut_ad(page && mtr);
+ ut_ad(page != NULL);
+ ut_ad(mtr != NULL);
ut_ad(mtr_memo_contains_page(mtr, page, MTR_MEMO_PAGE_X_FIX));
ut_ad(space == page_get_space_id(page));
/* Get the previous and next page numbers of page */
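Most InnoDB changes in this merge, here and in the files that follow, replace GCC-specific __attribute__ annotations with the MY_ATTRIBUTE wrapper so that other compilers see the same declarations without the annotation. Its definition is, in essence (a paraphrase, not a verbatim copy of my_global.h):

// Approximate definition of the portability macro used in these hunks.
#if defined(__GNUC__)
#define MY_ATTRIBUTE(A) __attribute__(A)
#else
#define MY_ATTRIBUTE(A)
#endif

// So  static MY_ATTRIBUTE((nonnull, warn_unused_result)) buf_block_t* f(...);
// expands to the original __attribute__ form under GCC/clang and to a plain
// declaration elsewhere.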
diff --git a/storage/innobase/btr/btr0cur.cc b/storage/innobase/btr/btr0cur.cc
index a6bb258344f..eca232d81b4 100644
--- a/storage/innobase/btr/btr0cur.cc
+++ b/storage/innobase/btr/btr0cur.cc
@@ -1,6 +1,6 @@
/*****************************************************************************
-Copyright (c) 1994, 2015, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 1994, 2016, Oracle and/or its affiliates. All Rights Reserved.
Copyright (c) 2008, Google Inc.
Copyright (c) 2012, Facebook Inc.
Copyright (c) 2015, MariaDB Corporation.
@@ -1139,7 +1139,7 @@ This has to be done either within the same mini-transaction,
or by invoking ibuf_reset_free_bits() before mtr_commit().
@return pointer to inserted record if succeed, else NULL */
-static __attribute__((nonnull, warn_unused_result))
+static MY_ATTRIBUTE((nonnull, warn_unused_result))
rec_t*
btr_cur_insert_if_possible(
/*=======================*/
@@ -1182,7 +1182,7 @@ btr_cur_insert_if_possible(
/*************************************************************//**
For an insert, checks the locks and does the undo logging if desired.
@return DB_SUCCESS, DB_WAIT_LOCK, DB_FAIL, or error number */
-UNIV_INLINE __attribute__((warn_unused_result, nonnull(2,3,5,6)))
+UNIV_INLINE MY_ATTRIBUTE((warn_unused_result, nonnull(2,3,5,6)))
dberr_t
btr_cur_ins_lock_and_undo(
/*======================*/
@@ -1717,7 +1717,7 @@ btr_cur_pessimistic_insert(
/*************************************************************//**
For an update, checks the locks and does the undo logging.
@return DB_SUCCESS, DB_WAIT_LOCK, or error number */
-UNIV_INLINE __attribute__((warn_unused_result, nonnull(2,3,6,7)))
+UNIV_INLINE MY_ATTRIBUTE((warn_unused_result, nonnull(2,3,6,7)))
dberr_t
btr_cur_upd_lock_and_undo(
/*======================*/
@@ -1736,7 +1736,7 @@ btr_cur_upd_lock_and_undo(
const rec_t* rec;
dberr_t err;
- ut_ad(thr || (flags & BTR_NO_LOCKING_FLAG));
+ ut_ad((thr != NULL) || (flags & BTR_NO_LOCKING_FLAG));
rec = btr_cur_get_rec(cursor);
index = cursor->index;
@@ -3035,7 +3035,7 @@ btr_cur_del_mark_set_clust_rec(
ut_ad(page_is_leaf(page_align(rec)));
#ifdef UNIV_DEBUG
- if (btr_cur_print_record_ops && thr) {
+ if (btr_cur_print_record_ops && (thr != NULL)) {
btr_cur_trx_report(thr_get_trx(thr)->id, index, "del mark ");
rec_print_new(stderr, rec, offsets);
}
@@ -3183,7 +3183,7 @@ btr_cur_del_mark_set_sec_rec(
rec = btr_cur_get_rec(cursor);
#ifdef UNIV_DEBUG
- if (btr_cur_print_record_ops && thr) {
+ if (btr_cur_print_record_ops && (thr != NULL)) {
btr_cur_trx_report(thr_get_trx(thr)->id, cursor->index,
"del mark ");
rec_print(stderr, rec, cursor->index);
@@ -5134,7 +5134,7 @@ btr_free_externally_stored_field(
ulint i, /*!< in: field number of field_ref;
ignored if rec == NULL */
enum trx_rb_ctx rb_ctx, /*!< in: rollback context */
- mtr_t* local_mtr __attribute__((unused))) /*!< in: mtr
+ mtr_t* local_mtr MY_ATTRIBUTE((unused))) /*!< in: mtr
containing the latch to data and an
X-latch to the index tree */
{
diff --git a/storage/innobase/btr/btr0sea.cc b/storage/innobase/btr/btr0sea.cc
index dcb508a7f29..712e15a8171 100644
--- a/storage/innobase/btr/btr0sea.cc
+++ b/storage/innobase/btr/btr0sea.cc
@@ -1,6 +1,6 @@
/*****************************************************************************
-Copyright (c) 1996, 2012, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 1996, 2016, Oracle and/or its affiliates. All Rights Reserved.
Copyright (c) 2008, Google Inc.
Portions of this file contain modifications contributed and copyrighted by
@@ -473,7 +473,7 @@ btr_search_update_block_hash_info(
/*==============================*/
btr_search_t* info, /*!< in: search info */
buf_block_t* block, /*!< in: buffer block */
- btr_cur_t* cursor __attribute__((unused)))
+ btr_cur_t* cursor MY_ATTRIBUTE((unused)))
/*!< in: cursor */
{
#ifdef UNIV_SYNC_DEBUG
diff --git a/storage/innobase/buf/buf0buddy.cc b/storage/innobase/buf/buf0buddy.cc
index 958b3b5cfad..f2ab73217e0 100644
--- a/storage/innobase/buf/buf0buddy.cc
+++ b/storage/innobase/buf/buf0buddy.cc
@@ -1,6 +1,6 @@
/*****************************************************************************
-Copyright (c) 2006, 2013, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 2006, 2016, Oracle and/or its affiliates. All Rights Reserved.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -112,7 +112,7 @@ buf_buddy_mem_invalid(
/**********************************************************************//**
Check if a buddy is stamped free.
@return whether the buddy is free */
-UNIV_INLINE __attribute__((warn_unused_result))
+UNIV_INLINE MY_ATTRIBUTE((warn_unused_result))
bool
buf_buddy_stamp_is_free(
/*====================*/
@@ -225,7 +225,7 @@ Checks if a buf is free i.e.: in the zip_free[].
@retval BUF_BUDDY_STATE_FREE if fully free
@retval BUF_BUDDY_STATE_USED if currently in use
@retval BUF_BUDDY_STATE_PARTIALLY_USED if partially in use. */
-static __attribute__((warn_unused_result))
+static MY_ATTRIBUTE((warn_unused_result))
buf_buddy_state_t
buf_buddy_is_free(
/*==============*/
diff --git a/storage/innobase/buf/buf0buf.cc b/storage/innobase/buf/buf0buf.cc
index bd424b7b598..7b0cf339ef1 100644
--- a/storage/innobase/buf/buf0buf.cc
+++ b/storage/innobase/buf/buf0buf.cc
@@ -1,6 +1,6 @@
/*****************************************************************************
-Copyright (c) 1995, 2015, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 1995, 2016, Oracle and/or its affiliates. All Rights Reserved.
Copyright (c) 2008, Google Inc.
Copyright (c) 2013, 2016, MariaDB Corporation. All Rights Reserved.
@@ -3740,7 +3740,7 @@ buf_page_init_low(
/********************************************************************//**
Inits a page to the buffer buf_pool. */
-static __attribute__((nonnull))
+static MY_ATTRIBUTE((nonnull))
void
buf_page_init(
/*==========*/
diff --git a/storage/innobase/buf/buf0dump.cc b/storage/innobase/buf/buf0dump.cc
index 6f3aca0e1fb..0abf7118b4f 100644
--- a/storage/innobase/buf/buf0dump.cc
+++ b/storage/innobase/buf/buf0dump.cc
@@ -1,6 +1,6 @@
/*****************************************************************************
-Copyright (c) 2011, 2015, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 2011, 2016, Oracle and/or its affiliates. All Rights Reserved.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -105,7 +105,7 @@ SELECT variable_value FROM information_schema.global_status WHERE
variable_name = 'INNODB_BUFFER_POOL_DUMP_STATUS';
or by:
SHOW STATUS LIKE 'innodb_buffer_pool_dump_status'; */
-static __attribute__((nonnull, format(printf, 2, 3)))
+static MY_ATTRIBUTE((nonnull, format(printf, 2, 3)))
void
buf_dump_status(
/*============*/
@@ -137,7 +137,7 @@ SELECT variable_value FROM information_schema.global_status WHERE
variable_name = 'INNODB_BUFFER_POOL_LOAD_STATUS';
or by:
SHOW STATUS LIKE 'innodb_buffer_pool_load_status'; */
-static __attribute__((nonnull, format(printf, 2, 3)))
+static MY_ATTRIBUTE((nonnull, format(printf, 2, 3)))
void
buf_load_status(
/*============*/
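
The format(printf, 2, 3) part of these attributes tells the compiler
that parameter 2 is a printf-style format string whose variadic
arguments begin at parameter 3 (the counts are 2/3 here because the
first parameter of these functions is not the format string). With the
attribute in place, a mismatched conversion specifier at any call site
becomes a compile-time warning. A self-contained sketch of the same
pattern, using a hypothetical status printer:

    #include <stdarg.h>
    #include <stdio.h>

    /* Parameter 1 is the format string; variadic args start at 2. */
    static void status_printf(const char *fmt, ...)
            __attribute__((format(printf, 1, 2)));

    static void status_printf(const char *fmt, ...)
    {
            va_list ap;
            va_start(ap, fmt);
            vfprintf(stderr, fmt, ap);
            va_end(ap);
    }

    int main(void)
    {
            status_printf("dumped %d pages\n", 42);
            /* status_printf("dumped %d pages\n", "42"); <- would warn */
            return 0;
    }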
@@ -684,7 +684,7 @@ extern "C" UNIV_INTERN
os_thread_ret_t
DECLARE_THREAD(buf_dump_thread)(
/*============================*/
- void* arg __attribute__((unused))) /*!< in: a dummy parameter
+ void* arg MY_ATTRIBUTE((unused))) /*!< in: a dummy parameter
required by os_thread_create */
{
ut_ad(!srv_read_only_mode);
diff --git a/storage/innobase/buf/buf0flu.cc b/storage/innobase/buf/buf0flu.cc
index 780230b38ba..2f84ebae301 100644
--- a/storage/innobase/buf/buf0flu.cc
+++ b/storage/innobase/buf/buf0flu.cc
@@ -1,7 +1,7 @@
/*****************************************************************************
-Copyright (c) 1995, 2015, Oracle and/or its affiliates
-Copyright (c) 2013, 2015, MariaDB Corporation
+Copyright (c) 1995, 2016, Oracle and/or its affiliates
+Copyright (c) 2013, 2016, MariaDB Corporation
Copyright (c) 2013, 2014, Fusion-io
This program is free software; you can redistribute it and/or modify it under
@@ -2285,7 +2285,7 @@ extern "C" UNIV_INTERN
os_thread_ret_t
DECLARE_THREAD(buf_flush_page_cleaner_thread)(
/*==========================================*/
- void* arg __attribute__((unused)))
+ void* arg MY_ATTRIBUTE((unused)))
/*!< in: a dummy parameter required by
os_thread_create */
{
diff --git a/storage/innobase/buf/buf0lru.cc b/storage/innobase/buf/buf0lru.cc
index 7591e4476d6..30b991d24cf 100644
--- a/storage/innobase/buf/buf0lru.cc
+++ b/storage/innobase/buf/buf0lru.cc
@@ -1,6 +1,6 @@
/*****************************************************************************
-Copyright (c) 1995, 2013, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 1995, 2016, Oracle and/or its affiliates. All Rights Reserved.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -146,7 +146,7 @@ If a compressed page is freed other compressed pages may be relocated.
caller needs to free the page to the free list
@retval false if BUF_BLOCK_ZIP_PAGE was removed from page_hash. In
this case the block is already returned to the buddy allocator. */
-static __attribute__((nonnull, warn_unused_result))
+static MY_ATTRIBUTE((nonnull, warn_unused_result))
bool
buf_LRU_block_remove_hashed(
/*========================*/
@@ -370,7 +370,7 @@ want to hog the CPU and resources. Release the buffer pool and block
mutex and try to force a context switch. Then reacquire the same mutexes.
The current page is "fixed" before the release of the mutexes and then
"unfixed" again once we have reacquired the mutexes. */
-static __attribute__((nonnull))
+static MY_ATTRIBUTE((nonnull))
void
buf_flush_yield(
/*============*/
@@ -411,7 +411,7 @@ If we have hogged the resources for too long then release the buffer
pool and flush list mutex and do a thread yield. Set the current page
to "sticky" so that it is not relocated during the yield.
@return true if yielded */
-static __attribute__((nonnull(1), warn_unused_result))
+static MY_ATTRIBUTE((nonnull(1), warn_unused_result))
bool
buf_flush_try_yield(
/*================*/
@@ -454,7 +454,7 @@ buf_flush_try_yield(
Removes a single page from a given tablespace inside a specific
buffer pool instance.
@return true if page was removed. */
-static __attribute__((nonnull, warn_unused_result))
+static MY_ATTRIBUTE((nonnull, warn_unused_result))
bool
buf_flush_or_remove_page(
/*=====================*/
@@ -535,7 +535,7 @@ the list as they age towards the tail of the LRU.
@retval DB_SUCCESS if all freed
@retval DB_FAIL if not all freed
@retval DB_INTERRUPTED if the transaction was interrupted */
-static __attribute__((nonnull(1), warn_unused_result))
+static MY_ATTRIBUTE((nonnull(1), warn_unused_result))
dberr_t
buf_flush_or_remove_pages(
/*======================*/
@@ -641,7 +641,7 @@ Remove or flush all the dirty pages that belong to a given tablespace
inside a specific buffer pool instance. The pages will remain in the LRU
list and will be evicted from the LRU list as they age and move towards
the tail of the LRU list. */
-static __attribute__((nonnull(1)))
+static MY_ATTRIBUTE((nonnull(1)))
void
buf_flush_dirty_pages(
/*==================*/
@@ -681,7 +681,7 @@ buf_flush_dirty_pages(
/******************************************************************//**
Remove all pages that belong to a given tablespace inside a specific
buffer pool instance when we are DISCARDing the tablespace. */
-static __attribute__((nonnull))
+static MY_ATTRIBUTE((nonnull))
void
buf_LRU_remove_all_pages(
/*=====================*/
@@ -829,7 +829,7 @@ buffer pool instance when we are deleting the data file(s) of that
tablespace. The pages still remain a part of LRU and are evicted from
the list as they age towards the tail of the LRU only if buf_remove
is BUF_REMOVE_FLUSH_NO_WRITE. */
-static __attribute__((nonnull(1)))
+static MY_ATTRIBUTE((nonnull(1)))
void
buf_LRU_remove_pages(
/*=================*/
diff --git a/storage/innobase/data/data0data.cc b/storage/innobase/data/data0data.cc
index 179de79b69f..593af089b00 100644
--- a/storage/innobase/data/data0data.cc
+++ b/storage/innobase/data/data0data.cc
@@ -1,6 +1,6 @@
/*****************************************************************************
-Copyright (c) 1994, 2012, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 1994, 2016, Oracle and/or its affiliates. All Rights Reserved.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -67,7 +67,8 @@ dtuple_coll_cmp(
ulint n_fields;
ulint i;
- ut_ad(tuple1 && tuple2);
+ ut_ad(tuple1 != NULL);
+ ut_ad(tuple2 != NULL);
ut_ad(tuple1->magic_n == DATA_TUPLE_MAGIC_N);
ut_ad(tuple2->magic_n == DATA_TUPLE_MAGIC_N);
ut_ad(dtuple_check_typed(tuple1));
@@ -715,7 +716,7 @@ UNIV_INTERN
void
dtuple_convert_back_big_rec(
/*========================*/
- dict_index_t* index __attribute__((unused)), /*!< in: index */
+ dict_index_t* index MY_ATTRIBUTE((unused)), /*!< in: index */
dtuple_t* entry, /*!< in: entry whose data was put to vector */
big_rec_t* vector) /*!< in, own: big rec vector; it is
freed in this function */
diff --git a/storage/innobase/dict/dict0crea.cc b/storage/innobase/dict/dict0crea.cc
index a5af8208135..d423f16f61c 100644
--- a/storage/innobase/dict/dict0crea.cc
+++ b/storage/innobase/dict/dict0crea.cc
@@ -246,7 +246,7 @@ dict_create_sys_columns_tuple(
/***************************************************************//**
Builds a table definition to insert.
@return DB_SUCCESS or error code */
-static __attribute__((nonnull, warn_unused_result))
+static MY_ATTRIBUTE((nonnull, warn_unused_result))
dberr_t
dict_build_table_def_step(
/*======================*/
@@ -574,7 +574,7 @@ dict_create_search_tuple(
/***************************************************************//**
Builds an index definition row to insert.
@return DB_SUCCESS or error code */
-static __attribute__((nonnull, warn_unused_result))
+static MY_ATTRIBUTE((nonnull, warn_unused_result))
dberr_t
dict_build_index_def_step(
/*======================*/
@@ -649,7 +649,7 @@ dict_build_field_def_step(
/***************************************************************//**
Creates an index tree for the index if it is not a member of a cluster.
@return DB_SUCCESS or DB_OUT_OF_FILE_SPACE */
-static __attribute__((nonnull, warn_unused_result))
+static MY_ATTRIBUTE((nonnull, warn_unused_result))
dberr_t
dict_create_index_tree_step(
/*========================*/
@@ -1369,7 +1369,7 @@ dict_create_or_check_foreign_constraint_tables(void)
ib_logf(IB_LOG_LEVEL_WARN,
"Dropping incompletely created "
"SYS_FOREIGN table.");
- row_drop_table_for_mysql("SYS_FOREIGN", trx, TRUE);
+ row_drop_table_for_mysql("SYS_FOREIGN", trx, TRUE, TRUE);
}
if (sys_foreign_cols_err == DB_CORRUPTION) {
@@ -1377,7 +1377,7 @@ dict_create_or_check_foreign_constraint_tables(void)
"Dropping incompletely created "
"SYS_FOREIGN_COLS table.");
- row_drop_table_for_mysql("SYS_FOREIGN_COLS", trx, TRUE);
+ row_drop_table_for_mysql("SYS_FOREIGN_COLS", trx, TRUE, TRUE);
}
ib_logf(IB_LOG_LEVEL_WARN,
@@ -1431,8 +1431,8 @@ dict_create_or_check_foreign_constraint_tables(void)
ut_ad(err == DB_OUT_OF_FILE_SPACE
|| err == DB_TOO_MANY_CONCURRENT_TRXS);
- row_drop_table_for_mysql("SYS_FOREIGN", trx, TRUE);
- row_drop_table_for_mysql("SYS_FOREIGN_COLS", trx, TRUE);
+ row_drop_table_for_mysql("SYS_FOREIGN", trx, TRUE, TRUE);
+ row_drop_table_for_mysql("SYS_FOREIGN_COLS", trx, TRUE, TRUE);
if (err == DB_OUT_OF_FILE_SPACE) {
err = DB_MUST_GET_MORE_FILE_SPACE;
@@ -1468,7 +1468,7 @@ dict_create_or_check_foreign_constraint_tables(void)
/****************************************************************//**
Evaluate the given foreign key SQL statement.
@return error code or DB_SUCCESS */
-static __attribute__((nonnull, warn_unused_result))
+static MY_ATTRIBUTE((nonnull, warn_unused_result))
dberr_t
dict_foreign_eval_sql(
/*==================*/
@@ -1534,7 +1534,7 @@ dict_foreign_eval_sql(
Add a single foreign key field definition to the data dictionary tables in
the database.
@return error code or DB_SUCCESS */
-static __attribute__((nonnull, warn_unused_result))
+static MY_ATTRIBUTE((nonnull, warn_unused_result))
dberr_t
dict_create_add_foreign_field_to_dictionary(
/*========================================*/
@@ -1857,7 +1857,7 @@ dict_create_or_check_sys_tablespace(void)
ib_logf(IB_LOG_LEVEL_WARN,
"Dropping incompletely created "
"SYS_TABLESPACES table.");
- row_drop_table_for_mysql("SYS_TABLESPACES", trx, TRUE);
+ row_drop_table_for_mysql("SYS_TABLESPACES", trx, TRUE, TRUE);
}
if (sys_datafiles_err == DB_CORRUPTION) {
@@ -1865,7 +1865,7 @@ dict_create_or_check_sys_tablespace(void)
"Dropping incompletely created "
"SYS_DATAFILES table.");
- row_drop_table_for_mysql("SYS_DATAFILES", trx, TRUE);
+ row_drop_table_for_mysql("SYS_DATAFILES", trx, TRUE, TRUE);
}
ib_logf(IB_LOG_LEVEL_INFO,
@@ -1901,8 +1901,8 @@ dict_create_or_check_sys_tablespace(void)
ut_a(err == DB_OUT_OF_FILE_SPACE
|| err == DB_TOO_MANY_CONCURRENT_TRXS);
- row_drop_table_for_mysql("SYS_TABLESPACES", trx, TRUE);
- row_drop_table_for_mysql("SYS_DATAFILES", trx, TRUE);
+ row_drop_table_for_mysql("SYS_TABLESPACES", trx, TRUE, TRUE);
+ row_drop_table_for_mysql("SYS_DATAFILES", trx, TRUE, TRUE);
if (err == DB_OUT_OF_FILE_SPACE) {
err = DB_MUST_GET_MORE_FILE_SPACE;
diff --git a/storage/innobase/dict/dict0dict.cc b/storage/innobase/dict/dict0dict.cc
index c51deb0a048..0310e5e1d66 100644
--- a/storage/innobase/dict/dict0dict.cc
+++ b/storage/innobase/dict/dict0dict.cc
@@ -1,6 +1,6 @@
/*****************************************************************************
-Copyright (c) 1996, 2015, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 1996, 2016, Oracle and/or its affiliates. All Rights Reserved.
Copyright (c) 2012, Facebook Inc.
Copyright (c) 2013, 2015, MariaDB Corporation.
@@ -6309,7 +6309,7 @@ dict_set_corrupted_index_cache_only(
dict_index_t* index, /*!< in/out: index */
dict_table_t* table) /*!< in/out: table */
{
- ut_ad(index);
+ ut_ad(index != NULL);
ut_ad(mutex_own(&dict_sys->mutex));
ut_ad(!dict_table_is_comp(dict_sys->sys_tables));
ut_ad(!dict_table_is_comp(dict_sys->sys_indexes));
@@ -6319,8 +6319,9 @@ dict_set_corrupted_index_cache_only(
if (dict_index_is_clust(index)) {
dict_table_t* corrupt_table;
- corrupt_table = table ? table : index->table;
- ut_ad(!index->table || !table || index->table == table);
+ corrupt_table = (table != NULL) ? table : index->table;
+ ut_ad((index->table != NULL) || (table != NULL)
+ || index->table == table);
if (corrupt_table) {
corrupt_table->corrupted = TRUE;
@@ -6400,11 +6401,6 @@ dict_table_get_index_on_name(
{
dict_index_t* index;
- /* If name is NULL, just return */
- if (!name) {
- return(NULL);
- }
-
index = dict_table_get_first_index(table);
while (index != NULL) {
diff --git a/storage/innobase/dict/dict0load.cc b/storage/innobase/dict/dict0load.cc
index ce5b10a623c..3c72efdb6f3 100644
--- a/storage/innobase/dict/dict0load.cc
+++ b/storage/innobase/dict/dict0load.cc
@@ -1,6 +1,6 @@
/*****************************************************************************
-Copyright (c) 1996, 2013, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 1996, 2016, Oracle and/or its affiliates. All Rights Reserved.
Copyright (c) 2016, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify it under
@@ -1751,7 +1751,7 @@ err_len:
goto err_len;
}
type = mach_read_from_4(field);
- if (type & (~0 << DICT_IT_BITS)) {
+ if (type & (~0U << DICT_IT_BITS)) {
return("unknown SYS_INDEXES.TYPE bits");
}
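
The ~0 to ~0U change is an undefined-behavior fix: ~0 is a signed int
with value -1, and left-shifting a negative value is undefined in C and
C++, whereas ~0U shifts an unsigned value and yields the intended mask
of the bits at and above DICT_IT_BITS. A quick illustration (the
constant below is a stand-in, not the real DICT_IT_BITS value):

    #include <stdio.h>

    #define DICT_IT_BITS 6 /* stand-in value for illustration */

    int main(void)
    {
            /* Well defined: unsigned shift building the mask of the
               "unknown type" bits above the low DICT_IT_BITS. */
            unsigned int mask = ~0U << DICT_IT_BITS;
            printf("mask = %#x\n", mask);

            /* '~0 << DICT_IT_BITS' would left-shift the signed value
               -1, which is undefined behavior even when it happens to
               produce the same bit pattern on a given compiler. */
            return 0;
    }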
@@ -1790,7 +1790,7 @@ Loads definitions for table indexes. Adds them to the data dictionary
cache.
@return DB_SUCCESS if ok, DB_CORRUPTION if corruption of dictionary
table or DB_UNSUPPORTED if table has unknown index type */
-static __attribute__((nonnull))
+static MY_ATTRIBUTE((nonnull))
dberr_t
dict_load_indexes(
/*==============*/
@@ -2538,6 +2538,7 @@ func_exit:
/* the table->fts could be created in dict_load_column
when a user defined FTS_DOC_ID is present, but no
FTS */
+ fts_optimize_remove_table(table);
fts_free(table);
} else {
fts_optimize_add_table(table);
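
The added fts_optimize_remove_table() call fixes a teardown-ordering
hazard: the table must leave the background optimize queue before
fts_free() releases its FTS data, otherwise the optimize thread could
still pick up a pointer into freed memory. A toy sketch of the
invariant (hypothetical names, not InnoDB APIs):

    #include <stdlib.h>
    #include <string.h>

    /* Stand-in for the background optimize queue: raw pointers that a
       worker thread may dereference at any time. */
    static void *optimize_queue[8];

    static void queue_remove(void *obj)
    {
            for (int i = 0; i < 8; i++)
                    if (optimize_queue[i] == obj)
                            optimize_queue[i] = NULL;
    }

    int main(void)
    {
            void *fts_data = malloc(64);
            memset(fts_data, 0, 64);
            optimize_queue[0] = fts_data;

            /* Unregister first (cf. fts_optimize_remove_table)... */
            queue_remove(fts_data);
            /* ...and only then free (cf. fts_free), so the worker can
               never observe a dangling pointer. */
            free(fts_data);
            return 0;
    }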
@@ -2603,14 +2604,13 @@ dict_load_table_on_id(
btr_pcur_open_on_user_rec(sys_table_ids, tuple, PAGE_CUR_GE,
BTR_SEARCH_LEAF, &pcur, &mtr);
-check_rec:
rec = btr_pcur_get_rec(&pcur);
if (page_rec_is_user_rec(rec)) {
/*---------------------------------------------------*/
/* Now we have the record in the secondary index
containing the table ID and NAME */
-
+check_rec:
field = rec_get_nth_field_old(
rec, DICT_FLD__SYS_TABLE_IDS__ID, &len);
ut_ad(len == 8);
@@ -2620,12 +2620,14 @@ check_rec:
if (rec_get_deleted_flag(rec, 0)) {
/* Until purge has completed, there
may be delete-marked duplicate records
- for the same SYS_TABLES.ID.
- Due to Bug #60049, some delete-marked
- records may survive the purge forever. */
- if (btr_pcur_move_to_next(&pcur, &mtr)) {
-
- goto check_rec;
+ for the same SYS_TABLES.ID, but different
+ SYS_TABLES.NAME. */
+ while (btr_pcur_move_to_next(&pcur, &mtr)) {
+ rec = btr_pcur_get_rec(&pcur);
+
+ if (page_rec_is_user_rec(rec)) {
+ goto check_rec;
+ }
}
} else {
/* Now we get the table name from the record */
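
The reshuffled check_rec logic changes how delete-marked duplicates of
a SYS_TABLES.ID are skipped: instead of jumping back unconditionally
after one cursor step, the code now advances in a loop and only re-runs
the ID check once the cursor is positioned on a user record again, so
boundary (infimum/supremum) records crossed at page edges no longer end
the scan early. A toy model of the revised control flow (plain C, not
InnoDB code):

    #include <stdbool.h>
    #include <stdio.h>

    struct rec { bool user; bool deleted; unsigned id; };

    /* A delete-marked duplicate, a page-boundary record, then the
       live record for the same ID. */
    static const struct rec recs[] = {
            { true,  true,  7 },
            { false, false, 0 },
            { true,  false, 7 },
    };

    int main(void)
    {
            unsigned i = 0;
    check_rec:
            if (recs[i].id == 7) {
                    if (recs[i].deleted) {
                            /* Step past non-user records before
                               re-checking, as in the new loop. */
                            while (++i < 3) {
                                    if (recs[i].user)
                                            goto check_rec;
                            }
                    } else {
                            printf("live record at slot %u\n", i);
                    }
            }
            return 0;
    }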
@@ -2887,7 +2889,7 @@ dict_load_foreign_cols(
/***********************************************************************//**
Loads a foreign key constraint to the dictionary cache.
@return DB_SUCCESS or error code */
-static __attribute__((nonnull(1), warn_unused_result))
+static MY_ATTRIBUTE((nonnull(1), warn_unused_result))
dberr_t
dict_load_foreign(
/*==============*/
diff --git a/storage/innobase/dict/dict0mem.cc b/storage/innobase/dict/dict0mem.cc
index 1724ac024fa..f8ea0005665 100644
--- a/storage/innobase/dict/dict0mem.cc
+++ b/storage/innobase/dict/dict0mem.cc
@@ -1,6 +1,6 @@
/*****************************************************************************
-Copyright (c) 1996, 2015, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 1996, 2016, Oracle and/or its affiliates. All Rights Reserved.
Copyright (c) 2012, Facebook Inc.
This program is free software; you can redistribute it and/or modify it under
@@ -303,7 +303,7 @@ dict_mem_table_add_col(
/**********************************************************************//**
Renames a column of a table in the data dictionary cache. */
-static __attribute__((nonnull))
+static MY_ATTRIBUTE((nonnull))
void
dict_mem_table_col_rename_low(
/*==========================*/
diff --git a/storage/innobase/dict/dict0stats_bg.cc b/storage/innobase/dict/dict0stats_bg.cc
index 076ceb79613..7aefa6a1d4d 100644
--- a/storage/innobase/dict/dict0stats_bg.cc
+++ b/storage/innobase/dict/dict0stats_bg.cc
@@ -1,6 +1,6 @@
/*****************************************************************************
-Copyright (c) 2012, 2013, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 2012, 2016, Oracle and/or its affiliates. All Rights Reserved.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -527,7 +527,7 @@ extern "C" UNIV_INTERN
os_thread_ret_t
DECLARE_THREAD(dict_stats_thread)(
/*==============================*/
- void* arg __attribute__((unused))) /*!< in: a dummy parameter
+ void* arg MY_ATTRIBUTE((unused))) /*!< in: a dummy parameter
required by os_thread_create */
{
ut_a(!srv_read_only_mode);
diff --git a/storage/innobase/fil/fil0fil.cc b/storage/innobase/fil/fil0fil.cc
index 1bf15e7b639..e9e164e5e1a 100644
--- a/storage/innobase/fil/fil0fil.cc
+++ b/storage/innobase/fil/fil0fil.cc
@@ -1809,7 +1809,7 @@ fil_set_max_space_id_if_bigger(
Writes the flushed lsn and the latest archived log number to the page header
of the first page of a data file of the system tablespace (space 0),
which is uncompressed. */
-static __attribute__((warn_unused_result))
+static MY_ATTRIBUTE((warn_unused_result))
dberr_t
fil_write_lsn_and_arch_no_to_file(
/*==============================*/
@@ -1817,7 +1817,7 @@ fil_write_lsn_and_arch_no_to_file(
ulint sum_of_sizes, /*!< in: combined size of previous files
in space, in database pages */
lsn_t lsn, /*!< in: lsn to write */
- ulint arch_log_no __attribute__((unused)))
+ ulint arch_log_no MY_ATTRIBUTE((unused)))
/*!< in: archived log number to write */
{
byte* buf1;
@@ -1905,7 +1905,7 @@ Checks the consistency of the first data page of a tablespace
at database startup.
@retval NULL on success, or if innodb_force_recovery is set
@return pointer to an error message string */
-static __attribute__((warn_unused_result))
+static MY_ATTRIBUTE((warn_unused_result))
const char*
fil_check_first_page(
/*=================*/
diff --git a/storage/innobase/fsp/fsp0fsp.cc b/storage/innobase/fsp/fsp0fsp.cc
index bbd7ee1b650..87aa5f7db5c 100644
--- a/storage/innobase/fsp/fsp0fsp.cc
+++ b/storage/innobase/fsp/fsp0fsp.cc
@@ -1,6 +1,6 @@
/*****************************************************************************
-Copyright (c) 1995, 2015, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 1995, 2016, Oracle and/or its affiliates. All Rights Reserved.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -94,7 +94,7 @@ fseg_n_reserved_pages_low(
/********************************************************************//**
Marks a page used. The page must reside within the extents of the given
segment. */
-static __attribute__((nonnull))
+static MY_ATTRIBUTE((nonnull))
void
fseg_mark_page_used(
/*================*/
@@ -133,7 +133,7 @@ fsp_fill_free_list(
ulint space, /*!< in: space */
fsp_header_t* header, /*!< in/out: space header */
mtr_t* mtr) /*!< in/out: mini-transaction */
- UNIV_COLD __attribute__((nonnull));
+ UNIV_COLD MY_ATTRIBUTE((nonnull));
/**********************************************************************//**
Allocates a single free page from a segment. This function implements
the intelligent allocation strategy which tries to minimize file space
@@ -162,7 +162,7 @@ fseg_alloc_free_page_low(
in which the page should be initialized.
If init_mtr!=mtr, but the page is already
latched in mtr, do not initialize the page. */
- __attribute__((warn_unused_result, nonnull));
+ MY_ATTRIBUTE((warn_unused_result, nonnull));
#endif /* !UNIV_HOTBACKUP */
/**********************************************************************//**
@@ -426,7 +426,7 @@ descriptor resides is x-locked. This function no longer extends the data
file.
@return pointer to the extent descriptor, NULL if the page does not
exist in the space or if the offset is >= the free limit */
-UNIV_INLINE __attribute__((nonnull, warn_unused_result))
+UNIV_INLINE MY_ATTRIBUTE((nonnull, warn_unused_result))
xdes_t*
xdes_get_descriptor_with_space_hdr(
/*===============================*/
@@ -488,7 +488,7 @@ is necessary to make the descriptor defined, as they are uninitialized
above the free limit.
@return pointer to the extent descriptor, NULL if the page does not
exist in the space or if the offset exceeds the free limit */
-static __attribute__((nonnull, warn_unused_result))
+static MY_ATTRIBUTE((nonnull, warn_unused_result))
xdes_t*
xdes_get_descriptor(
/*================*/
@@ -615,7 +615,7 @@ byte*
fsp_parse_init_file_page(
/*=====================*/
byte* ptr, /*!< in: buffer */
- byte* end_ptr __attribute__((unused)), /*!< in: buffer end */
+ byte* end_ptr MY_ATTRIBUTE((unused)), /*!< in: buffer end */
buf_block_t* block) /*!< in: block or NULL */
{
ut_ad(ptr && end_ptr);
@@ -856,7 +856,7 @@ fsp_header_get_tablespace_size(void)
Tries to extend a single-table tablespace so that a page would fit in the
data file.
@return TRUE if success */
-static UNIV_COLD __attribute__((nonnull, warn_unused_result))
+static UNIV_COLD MY_ATTRIBUTE((nonnull, warn_unused_result))
ibool
fsp_try_extend_data_file_with_pages(
/*================================*/
@@ -888,7 +888,7 @@ fsp_try_extend_data_file_with_pages(
/***********************************************************************//**
Tries to extend the last data file of a tablespace if it is auto-extending.
@return FALSE if not auto-extending */
-static UNIV_COLD __attribute__((nonnull))
+static UNIV_COLD MY_ATTRIBUTE((nonnull))
ibool
fsp_try_extend_data_file(
/*=====================*/
@@ -1070,7 +1070,8 @@ fsp_fill_free_list(
ulint i;
mtr_t ibuf_mtr;
- ut_ad(header && mtr);
+ ut_ad(header != NULL);
+ ut_ad(mtr != NULL);
ut_ad(page_offset(header) == FSP_HEADER_OFFSET);
/* Check if we can fill free list from above the free list limit */
@@ -1242,7 +1243,7 @@ fsp_alloc_free_extent(
/**********************************************************************//**
Allocates a single free page from a space. */
-static __attribute__((nonnull))
+static MY_ATTRIBUTE((nonnull))
void
fsp_alloc_from_free_frag(
/*=====================*/
@@ -1333,7 +1334,7 @@ Allocates a single free page from a space. The page is marked as used.
@retval block, rw_lock_x_lock_count(&block->lock) == 1 if allocation succeeded
(init_mtr == mtr, or the page was not previously freed in mtr)
@retval block (not allocated or initialized) otherwise */
-static __attribute__((nonnull, warn_unused_result))
+static MY_ATTRIBUTE((nonnull, warn_unused_result))
buf_block_t*
fsp_alloc_free_page(
/*================*/
@@ -1582,9 +1583,9 @@ fsp_seg_inode_page_get_nth_inode(
/*=============================*/
page_t* page, /*!< in: segment inode page */
ulint i, /*!< in: inode index on page */
- ulint zip_size __attribute__((unused)),
+ ulint zip_size MY_ATTRIBUTE((unused)),
/*!< in: compressed page size, or 0 */
- mtr_t* mtr __attribute__((unused)))
+ mtr_t* mtr MY_ATTRIBUTE((unused)))
/*!< in/out: mini-transaction */
{
ut_ad(i < FSP_SEG_INODES_PER_PAGE(zip_size));
@@ -1883,7 +1884,7 @@ fseg_get_nth_frag_page_no(
/*======================*/
fseg_inode_t* inode, /*!< in: segment inode */
ulint n, /*!< in: slot index */
- mtr_t* mtr __attribute__((unused)))
+ mtr_t* mtr MY_ATTRIBUTE((unused)))
/*!< in/out: mini-transaction */
{
ut_ad(inode && mtr);
@@ -2978,7 +2979,7 @@ fsp_get_available_space_in_free_extents(
/********************************************************************//**
Marks a page used. The page must reside within the extents of the given
segment. */
-static __attribute__((nonnull))
+static MY_ATTRIBUTE((nonnull))
void
fseg_mark_page_used(
/*================*/
@@ -3050,7 +3051,8 @@ fseg_free_page_low(
ib_id_t seg_id;
ulint i;
- ut_ad(seg_inode && mtr);
+ ut_ad(seg_inode != NULL);
+ ut_ad(mtr != NULL);
ut_ad(mach_read_from_4(seg_inode + FSEG_MAGIC_N)
== FSEG_MAGIC_N_VALUE);
ut_ad(!((page_offset(seg_inode) - FSEG_ARR_OFFSET) % FSEG_INODE_SIZE));
@@ -3259,7 +3261,8 @@ fseg_free_extent(
ulint descr_n_used;
ulint i;
- ut_ad(seg_inode && mtr);
+ ut_ad(seg_inode != NULL);
+ ut_ad(mtr != NULL);
descr = xdes_get_descriptor(space, zip_size, page, mtr);
diff --git a/storage/innobase/fts/fts0blex.cc b/storage/innobase/fts/fts0blex.cc
index 7d0acb00a3b..2d71934fa0e 100644
--- a/storage/innobase/fts/fts0blex.cc
+++ b/storage/innobase/fts/fts0blex.cc
@@ -305,9 +305,9 @@ YY_BUFFER_STATE fts0b_scan_buffer (char *base,yy_size_t size ,yyscan_t yyscanner
YY_BUFFER_STATE fts0b_scan_string (yyconst char *yy_str ,yyscan_t yyscanner );
YY_BUFFER_STATE fts0b_scan_bytes (yyconst char *bytes,int len ,yyscan_t yyscanner );
-void *fts0balloc (yy_size_t , yyscan_t yyscanner __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) );
-void *fts0brealloc (void *,yy_size_t , yyscan_t yyscanner __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) );
-void fts0bfree (void * , yyscan_t yyscanner __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) );
+void *fts0balloc (yy_size_t , yyscan_t yyscanner MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) );
+void *fts0brealloc (void *,yy_size_t , yyscan_t yyscanner MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) );
+void fts0bfree (void * , yyscan_t yyscanner MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) );
#define yy_new_buffer fts0b_create_buffer
@@ -347,7 +347,7 @@ typedef int yy_state_type;
static yy_state_type yy_get_previous_state (yyscan_t yyscanner );
static yy_state_type yy_try_NUL_trans (yy_state_type current_state ,yyscan_t yyscanner);
static int yy_get_next_buffer (yyscan_t yyscanner );
-static void yy_fatal_error (yyconst char msg[] , yyscan_t yyscanner __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) );
+static void yy_fatal_error (yyconst char msg[] , yyscan_t yyscanner MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) );
/* Done after the current pattern has been matched and before the
* corresponding action - sets up yytext.
@@ -451,7 +451,7 @@ static yyconst flex_int16_t yy_chk[32] =
#line 1 "fts0blex.l"
/*****************************************************************************
-Copyright (c) 2007, 2014, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 2007, 2016, Oracle and/or its affiliates. All Rights Reserved.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -579,11 +579,11 @@ extern int fts0bwrap (yyscan_t yyscanner );
#endif
#ifndef yytext_ptr
-static void yy_flex_strncpy (char *,yyconst char *,int , yyscan_t yyscanner __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)));
+static void yy_flex_strncpy (char *,yyconst char *,int , yyscan_t yyscanner MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)));
#endif
#ifdef YY_NEED_STRLEN
-static int yy_flex_strlen (yyconst char * , yyscan_t yyscanner __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)));
+static int yy_flex_strlen (yyconst char * , yyscan_t yyscanner MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)));
#endif
#ifndef YY_NO_INPUT
@@ -1609,7 +1609,7 @@ YY_BUFFER_STATE fts0b_scan_bytes (yyconst char * yybytes, int _yybytes_len , y
#define YY_EXIT_FAILURE 2
#endif
-static void yy_fatal_error (yyconst char* msg , yyscan_t yyscanner __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)))
+static void yy_fatal_error (yyconst char* msg , yyscan_t yyscanner MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)))
{
(void) fprintf( stderr, "%s\n", msg );
exit( YY_EXIT_FAILURE );
@@ -1910,7 +1910,7 @@ int fts0blex_destroy (yyscan_t yyscanner)
*/
#ifndef yytext_ptr
-static void yy_flex_strncpy (char* s1, yyconst char * s2, int n , yyscan_t yyscanner __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)))
+static void yy_flex_strncpy (char* s1, yyconst char * s2, int n , yyscan_t yyscanner MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)))
{
register int i;
for ( i = 0; i < n; ++i )
@@ -1919,7 +1919,7 @@ static void yy_flex_strncpy (char* s1, yyconst char * s2, int n , yys
#endif
#ifdef YY_NEED_STRLEN
-static int yy_flex_strlen (yyconst char * s , yyscan_t yyscanner __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)))
+static int yy_flex_strlen (yyconst char * s , yyscan_t yyscanner MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)))
{
register int n;
for ( n = 0; s[n]; ++n )
@@ -1929,12 +1929,12 @@ static int yy_flex_strlen (yyconst char * s , yyscan_t yyscanner __at
}
#endif
-void *fts0balloc (yy_size_t size , yyscan_t yyscanner __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)))
+void *fts0balloc (yy_size_t size , yyscan_t yyscanner MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)))
{
return (void *) malloc( size );
}
-void *fts0brealloc (void * ptr, yy_size_t size , yyscan_t yyscanner __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)))
+void *fts0brealloc (void * ptr, yy_size_t size , yyscan_t yyscanner MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)))
{
/* The cast to (char *) in the following accommodates both
* implementations that use char* generic pointers, and those
@@ -1946,7 +1946,7 @@ void *fts0brealloc (void * ptr, yy_size_t size , yyscan_t yyscanner
return (void *) realloc( (char *) ptr, size );
}
-void fts0bfree (void * ptr , yyscan_t yyscanner __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)))
+void fts0bfree (void * ptr , yyscan_t yyscanner MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)))
{
free( (char *) ptr ); /* see fts0brealloc() for (char *) cast */
}
diff --git a/storage/innobase/fts/fts0fts.cc b/storage/innobase/fts/fts0fts.cc
index 95740da78d5..0337cf6dfe7 100644
--- a/storage/innobase/fts/fts0fts.cc
+++ b/storage/innobase/fts/fts0fts.cc
@@ -280,7 +280,7 @@ void
fts_words_free(
/*===========*/
ib_rbt_t* words) /*!< in: rb tree of words */
- __attribute__((nonnull));
+ MY_ATTRIBUTE((nonnull));
#ifdef FTS_CACHE_SIZE_DEBUG
/****************************************************************//**
Read the max cache size parameter from the config table. */
@@ -302,7 +302,7 @@ fts_add_doc_by_id(
/*==============*/
fts_trx_table_t*ftt, /*!< in: FTS trx table */
doc_id_t doc_id, /*!< in: doc id */
- ib_vector_t* fts_indexes __attribute__((unused)));
+ ib_vector_t* fts_indexes MY_ATTRIBUTE((unused)));
/*!< in: affected fts indexes */
#ifdef FTS_DOC_STATS_DEBUG
/****************************************************************//**
@@ -317,7 +317,7 @@ fts_is_word_in_index(
fts_table_t* fts_table, /*!< in: table instance */
const fts_string_t* word, /*!< in: the word to check */
ibool* found) /*!< out: TRUE if exists */
- __attribute__((nonnull, warn_unused_result));
+ MY_ATTRIBUTE((nonnull, warn_unused_result));
#endif /* FTS_DOC_STATS_DEBUG */
/******************************************************************//**
@@ -332,7 +332,7 @@ fts_update_sync_doc_id(
const char* table_name, /*!< in: table name, or NULL */
doc_id_t doc_id, /*!< in: last document id */
trx_t* trx) /*!< in: update trx, or NULL */
- __attribute__((nonnull(1)));
+ MY_ATTRIBUTE((nonnull(1)));
/****************************************************************//**
This function loads the default InnoDB stopword list */
@@ -1075,13 +1075,12 @@ fts_words_free(
}
}
-/*********************************************************************//**
-Clear cache. */
+/** Clear cache.
+@param[in,out] cache fts cache */
UNIV_INTERN
void
fts_cache_clear(
-/*============*/
- fts_cache_t* cache) /*!< in: cache */
+ fts_cache_t* cache)
{
ulint i;
@@ -1477,7 +1476,7 @@ fts_cache_add_doc(
/****************************************************************//**
Drops a table. If the table can't be found we return a SUCCESS code.
@return DB_SUCCESS or error code */
-static __attribute__((nonnull, warn_unused_result))
+static MY_ATTRIBUTE((nonnull, warn_unused_result))
dberr_t
fts_drop_table(
/*===========*/
@@ -1519,7 +1518,7 @@ fts_drop_table(
/****************************************************************//**
Rename a single auxiliary table due to database name change.
@return DB_SUCCESS or error code */
-static __attribute__((nonnull, warn_unused_result))
+static MY_ATTRIBUTE((nonnull, warn_unused_result))
dberr_t
fts_rename_one_aux_table(
/*=====================*/
@@ -1628,7 +1627,7 @@ Drops the common ancillary tables needed for supporting an FTS index
on the given table. row_mysql_lock_data_dictionary must have been called
before this.
@return DB_SUCCESS or error code */
-static __attribute__((nonnull, warn_unused_result))
+static MY_ATTRIBUTE((nonnull, warn_unused_result))
dberr_t
fts_drop_common_tables(
/*===================*/
@@ -1755,7 +1754,7 @@ Drops FTS ancillary tables needed for supporting an FTS index
on the given table. row_mysql_lock_data_dictionary must have been called
before this.
@return DB_SUCCESS or error code */
-static __attribute__((nonnull, warn_unused_result))
+static MY_ATTRIBUTE((nonnull, warn_unused_result))
dberr_t
fts_drop_all_index_tables(
/*======================*/
@@ -1919,7 +1918,7 @@ func_exit:
trx_rollback_to_savepoint(trx, NULL);
- row_drop_table_for_mysql(table->name, trx, FALSE);
+ row_drop_table_for_mysql(table->name, trx, FALSE, TRUE);
trx->error_state = DB_SUCCESS;
}
@@ -2071,7 +2070,7 @@ fts_create_index_tables_low(
trx_rollback_to_savepoint(trx, NULL);
- row_drop_table_for_mysql(table_name, trx, FALSE);
+ row_drop_table_for_mysql(table_name, trx, FALSE, TRUE);
trx->error_state = DB_SUCCESS;
}
@@ -2663,7 +2662,7 @@ fts_get_next_doc_id(
This function fetches the Doc ID from the CONFIG table and compares it
with the Doc ID supplied, storing the larger one back to the CONFIG table.
@return DB_SUCCESS if OK */
-static __attribute__((nonnull))
+static MY_ATTRIBUTE((nonnull))
dberr_t
fts_cmp_set_sync_doc_id(
/*====================*/
@@ -2917,7 +2916,7 @@ fts_add(
/*********************************************************************//**
Do commit-phase steps necessary for the deletion of a row.
@return DB_SUCCESS or error code */
-static __attribute__((nonnull, warn_unused_result))
+static MY_ATTRIBUTE((nonnull, warn_unused_result))
dberr_t
fts_delete(
/*=======*/
@@ -3008,7 +3007,7 @@ fts_delete(
/*********************************************************************//**
Do commit-phase steps necessary for the modification of a row.
@return DB_SUCCESS or error code */
-static __attribute__((nonnull, warn_unused_result))
+static MY_ATTRIBUTE((nonnull, warn_unused_result))
dberr_t
fts_modify(
/*=======*/
@@ -3079,7 +3078,7 @@ fts_create_doc_id(
The given transaction is about to be committed; do whatever is necessary
from the FTS system's POV.
@return DB_SUCCESS or error code */
-static __attribute__((nonnull, warn_unused_result))
+static MY_ATTRIBUTE((nonnull, warn_unused_result))
dberr_t
fts_commit_table(
/*=============*/
@@ -3413,7 +3412,7 @@ fts_add_doc_by_id(
/*==============*/
fts_trx_table_t*ftt, /*!< in: FTS trx table */
doc_id_t doc_id, /*!< in: doc id */
- ib_vector_t* fts_indexes __attribute__((unused)))
+ ib_vector_t* fts_indexes MY_ATTRIBUTE((unused)))
/*!< in: affected fts indexes */
{
mtr_t mtr;
@@ -3533,7 +3532,7 @@ fts_add_doc_by_id(
get_doc, clust_index, doc_pcur, offsets, &doc);
if (doc.found) {
- ibool success __attribute__((unused));
+ ibool success MY_ATTRIBUTE((unused));
btr_pcur_store_position(doc_pcur, &mtr);
mtr_commit(&mtr);
@@ -3642,7 +3641,7 @@ fts_get_max_doc_id(
dict_table_t* table) /*!< in: user table */
{
dict_index_t* index;
- dict_field_t* dfield __attribute__((unused)) = NULL;
+ dict_field_t* dfield MY_ATTRIBUTE((unused)) = NULL;
doc_id_t doc_id = 0;
mtr_t mtr;
btr_pcur_t pcur;
@@ -3900,7 +3899,7 @@ fts_write_node(
/*********************************************************************//**
Add rows to the DELETED_CACHE table.
@return DB_SUCCESS if all went well else error code*/
-static __attribute__((nonnull, warn_unused_result))
+static MY_ATTRIBUTE((nonnull, warn_unused_result))
dberr_t
fts_sync_add_deleted_cache(
/*=======================*/
@@ -3954,7 +3953,7 @@ fts_sync_add_deleted_cache(
@param[in] index_cache index cache
@param[in] unlock_cache whether unlock cache when write node
@return DB_SUCCESS if all went well else error code */
-static __attribute__((nonnull, warn_unused_result))
+static MY_ATTRIBUTE((nonnull, warn_unused_result))
dberr_t
fts_sync_write_words(
trx_t* trx,
@@ -4090,7 +4089,7 @@ fts_sync_write_words(
/*********************************************************************//**
Write a single documents statistics to disk.
@return DB_SUCCESS if all went well else error code */
-static __attribute__((nonnull, warn_unused_result))
+static MY_ATTRIBUTE((nonnull, warn_unused_result))
dberr_t
fts_sync_write_doc_stat(
/*====================*/
@@ -4344,7 +4343,7 @@ fts_sync_begin(
Run SYNC on the table, i.e., write out data from the index specific
cache to the FTS aux INDEX table and FTS aux doc id stats table.
@return DB_SUCCESS if all OK */
-static __attribute__((nonnull, warn_unused_result))
+static MY_ATTRIBUTE((nonnull, warn_unused_result))
dberr_t
fts_sync_index(
/*===========*/
@@ -4412,7 +4411,7 @@ fts_sync_index_check(
/*********************************************************************//**
Commit the SYNC, change state of processed doc ids etc.
@return DB_SUCCESS if all OK */
-static __attribute__((nonnull, warn_unused_result))
+static MY_ATTRIBUTE((nonnull, warn_unused_result))
dberr_t
fts_sync_commit(
/*============*/
@@ -4474,13 +4473,12 @@ fts_sync_commit(
return(error);
}
-/*********************************************************************//**
-Rollback a sync operation */
+/** Rollback a sync operation
+@param[in,out] sync sync state */
static
void
fts_sync_rollback(
-/*==============*/
- fts_sync_t* sync) /*!< in: sync state */
+ fts_sync_t* sync)
{
trx_t* trx = sync->trx;
fts_cache_t* cache = sync->table->fts->cache;
@@ -6170,7 +6168,7 @@ fts_update_hex_format_flag(
/*********************************************************************//**
Rename an aux table to HEX format. It's called when "%016llu" is used
to format an object id in a table name, which only happens on Windows.
-static __attribute__((nonnull, warn_unused_result))
+static MY_ATTRIBUTE((nonnull, warn_unused_result))
dberr_t
fts_rename_one_aux_table_to_hex_format(
/*===================================*/
@@ -6261,7 +6259,7 @@ Note the ids in tables are correct but the names are old ambiguous ones.
This function should make sure that either the parent table and all its
aux tables have DICT_TF2_FTS_AUX_HEX_NAME set in flags2, or none of them do
-static __attribute__((nonnull, warn_unused_result))
+static MY_ATTRIBUTE((nonnull, warn_unused_result))
dberr_t
fts_rename_aux_tables_to_hex_format_low(
/*====================================*/
@@ -6415,7 +6413,7 @@ fts_fake_hex_to_dec(
{
ib_id_t dec_id = 0;
char tmp_id[FTS_AUX_MIN_TABLE_ID_LENGTH];
- int ret __attribute__((unused));
+ int ret MY_ATTRIBUTE((unused));
ret = sprintf(tmp_id, UINT64PFx, id);
ut_ad(ret == 16);
@@ -6737,7 +6735,7 @@ fts_drop_aux_table_from_vector(
Check and drop all orphaned FTS auxiliary tables, those that don't have
a parent table or FTS index defined on them.
@return DB_SUCCESS or error code */
-static __attribute__((nonnull))
+static MY_ATTRIBUTE((nonnull))
void
fts_check_and_drop_orphaned_tables(
/*===============================*/
diff --git a/storage/innobase/fts/fts0opt.cc b/storage/innobase/fts/fts0opt.cc
index ccb7090c61d..d9f2532578e 100644
--- a/storage/innobase/fts/fts0opt.cc
+++ b/storage/innobase/fts/fts0opt.cc
@@ -797,7 +797,7 @@ fts_zip_deflate_end(
Read the words from the FTS INDEX.
@return DB_SUCCESS if all OK, DB_TABLE_NOT_FOUND if no more indexes
to search else error code */
-static __attribute__((nonnull, warn_unused_result))
+static MY_ATTRIBUTE((nonnull, warn_unused_result))
dberr_t
fts_index_fetch_words(
/*==================*/
@@ -1131,7 +1131,7 @@ fts_optimize_lookup(
/**********************************************************************//**
Encode the word pos list into the node
@return DB_SUCCESS or error code*/
-static __attribute__((nonnull))
+static MY_ATTRIBUTE((nonnull))
dberr_t
fts_optimize_encode_node(
/*=====================*/
@@ -1220,7 +1220,7 @@ fts_optimize_encode_node(
/**********************************************************************//**
Optimize the data contained in a node.
@return DB_SUCCESS or error code*/
-static __attribute__((nonnull))
+static MY_ATTRIBUTE((nonnull))
dberr_t
fts_optimize_node(
/*==============*/
@@ -1318,7 +1318,7 @@ test_again:
/**********************************************************************//**
Determine the starting pos within the deleted doc id vector for a word.
@return delete position */
-static __attribute__((nonnull, warn_unused_result))
+static MY_ATTRIBUTE((nonnull, warn_unused_result))
int
fts_optimize_deleted_pos(
/*=====================*/
@@ -1447,7 +1447,7 @@ fts_optimize_word(
/**********************************************************************//**
Update the FTS index table. This is a delete followed by an insert.
@return DB_SUCCESS or error code */
-static __attribute__((nonnull, warn_unused_result))
+static MY_ATTRIBUTE((nonnull, warn_unused_result))
dberr_t
fts_optimize_write_word(
/*====================*/
@@ -1550,7 +1550,7 @@ fts_word_free(
/**********************************************************************//**
Optimize the word ilist and rewrite data to the FTS index.
@return status one of RESTART, EXIT, ERROR */
-static __attribute__((nonnull, warn_unused_result))
+static MY_ATTRIBUTE((nonnull, warn_unused_result))
dberr_t
fts_optimize_compact(
/*=================*/
@@ -1645,7 +1645,7 @@ fts_optimize_create(
/**********************************************************************//**
Get optimize start time of an FTS index.
@return DB_SUCCESS if all OK else error code */
-static __attribute__((nonnull, warn_unused_result))
+static MY_ATTRIBUTE((nonnull, warn_unused_result))
dberr_t
fts_optimize_get_index_start_time(
/*==============================*/
@@ -1661,7 +1661,7 @@ fts_optimize_get_index_start_time(
/**********************************************************************//**
Set the optimize start time of an FTS index.
@return DB_SUCCESS if all OK else error code */
-static __attribute__((nonnull, warn_unused_result))
+static MY_ATTRIBUTE((nonnull, warn_unused_result))
dberr_t
fts_optimize_set_index_start_time(
/*==============================*/
@@ -1677,7 +1677,7 @@ fts_optimize_set_index_start_time(
/**********************************************************************//**
Get optimize end time of an FTS index.
@return DB_SUCCESS if all OK else error code */
-static __attribute__((nonnull, warn_unused_result))
+static MY_ATTRIBUTE((nonnull, warn_unused_result))
dberr_t
fts_optimize_get_index_end_time(
/*============================*/
@@ -1692,7 +1692,7 @@ fts_optimize_get_index_end_time(
/**********************************************************************//**
Set the optimize end time of an FTS index.
@return DB_SUCCESS if all OK else error code */
-static __attribute__((nonnull, warn_unused_result))
+static MY_ATTRIBUTE((nonnull, warn_unused_result))
dberr_t
fts_optimize_set_index_end_time(
/*============================*/
@@ -1912,7 +1912,7 @@ fts_optimize_set_next_word(
Optimize is complete. Set the completion time, and reset the optimize
start string for this FTS index to "".
@return DB_SUCCESS if all OK */
-static __attribute__((nonnull, warn_unused_result))
+static MY_ATTRIBUTE((nonnull, warn_unused_result))
dberr_t
fts_optimize_index_completed(
/*=========================*/
@@ -1952,7 +1952,7 @@ fts_optimize_index_completed(
Read the list of words from the FTS auxiliary index that will be
optimized in this pass.
@return DB_SUCCESS if all OK */
-static __attribute__((nonnull, warn_unused_result))
+static MY_ATTRIBUTE((nonnull, warn_unused_result))
dberr_t
fts_optimize_index_read_words(
/*==========================*/
@@ -2009,7 +2009,7 @@ fts_optimize_index_read_words(
Run OPTIMIZE on the given FTS index. Note: this can take a very long
time (hours).
@return DB_SUCCESS if all OK */
-static __attribute__((nonnull, warn_unused_result))
+static MY_ATTRIBUTE((nonnull, warn_unused_result))
dberr_t
fts_optimize_index(
/*===============*/
@@ -2080,7 +2080,7 @@ fts_optimize_index(
/**********************************************************************//**
Delete the document ids in the delete, and delete cache tables.
@return DB_SUCCESS if all OK */
-static __attribute__((nonnull, warn_unused_result))
+static MY_ATTRIBUTE((nonnull, warn_unused_result))
dberr_t
fts_optimize_purge_deleted_doc_ids(
/*===============================*/
@@ -2149,7 +2149,7 @@ fts_optimize_purge_deleted_doc_ids(
/**********************************************************************//**
Delete the document ids in the pending delete, and delete tables.
@return DB_SUCCESS if all OK */
-static __attribute__((nonnull, warn_unused_result))
+static MY_ATTRIBUTE((nonnull, warn_unused_result))
dberr_t
fts_optimize_purge_deleted_doc_id_snapshot(
/*=======================================*/
@@ -2199,7 +2199,7 @@ Copy the deleted doc ids that will be purged during this optimize run
to the BEING_DELETED FTS auxiliary tables. The transaction is committed
upon successful copy and rolled back on DB_DUPLICATE_KEY error.
@return DB_SUCCESS if all OK */
-static __attribute__((nonnull, warn_unused_result))
+static MY_ATTRIBUTE((nonnull, warn_unused_result))
dberr_t
fts_optimize_create_deleted_doc_id_snapshot(
/*========================================*/
@@ -2237,7 +2237,7 @@ fts_optimize_create_deleted_doc_id_snapshot(
Read in the document ids that are to be purged during optimize. The
transaction is committed upon a successful read.
@return DB_SUCCESS if all OK */
-static __attribute__((nonnull, warn_unused_result))
+static MY_ATTRIBUTE((nonnull, warn_unused_result))
dberr_t
fts_optimize_read_deleted_doc_id_snapshot(
/*======================================*/
Optimize all the FTS indexes, skipping those that have already been
optimized, since the FTS auxiliary indexes are not guaranteed to be
of the same cardinality.
@return DB_SUCCESS if all OK */
-static __attribute__((nonnull, warn_unused_result))
+static MY_ATTRIBUTE((nonnull, warn_unused_result))
dberr_t
fts_optimize_indexes(
/*=================*/
@@ -2344,7 +2344,7 @@ fts_optimize_indexes(
/*********************************************************************//**
Cleanup the snapshot tables and the master deleted table.
@return DB_SUCCESS if all OK */
-static __attribute__((nonnull, warn_unused_result))
+static MY_ATTRIBUTE((nonnull, warn_unused_result))
dberr_t
fts_optimize_purge_snapshot(
/*========================*/
@@ -2373,7 +2373,7 @@ fts_optimize_purge_snapshot(
/*********************************************************************//**
Reset the start time to 0 so that a new optimize can be started.
@return DB_SUCCESS if all OK */
-static __attribute__((nonnull, warn_unused_result))
+static MY_ATTRIBUTE((nonnull, warn_unused_result))
dberr_t
fts_optimize_reset_start_time(
/*==========================*/
@@ -2412,7 +2412,7 @@ fts_optimize_reset_start_time(
/*********************************************************************//**
Run OPTIMIZE on the given table by a background thread.
@return DB_SUCCESS if all OK */
-static __attribute__((nonnull))
+static MY_ATTRIBUTE((nonnull))
dberr_t
fts_optimize_table_bk(
/*==================*/
@@ -2757,6 +2757,7 @@ fts_optimize_new_table(
empty_slot = i;
} else if (slot->table->id == table->id) {
/* Already exists in our optimize queue. */
+ ut_ad(slot->table_id == table->id);
return(FALSE);
}
}
@@ -2974,6 +2975,13 @@ fts_optimize_sync_table(
{
dict_table_t* table = NULL;
+ /* Prevent DROP INDEX etc. from running when we are syncing
+ cache in background. */
+ if (!rw_lock_s_lock_nowait(&dict_operation_lock, __FILE__, __LINE__)) {
+ /* Exit when fail to get dict operation lock. */
+ return;
+ }
+
table = dict_table_open_on_id(table_id, FALSE, DICT_TABLE_OP_NORMAL);
if (table) {
@@ -2983,6 +2991,8 @@ fts_optimize_sync_table(
dict_table_close(table, FALSE, FALSE);
}
+
+ rw_lock_s_unlock(&dict_operation_lock);
}
/**********************************************************************//**
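The fts_optimize_sync_table() hunk above takes dict_operation_lock in shared mode with a non-blocking call and skips the sync entirely when the latch is contended, so the background sync thread can never block behind (or deadlock with) DDL such as DROP INDEX that holds the latch exclusively. A generic sketch of that try-lock-or-skip pattern in standard C++ (hypothetical names, not the InnoDB rw_lock API):

    #include <shared_mutex>

    std::shared_mutex dict_operation_latch;  // stand-in for the dictionary latch

    void background_sync()
    {
        // Skip this round instead of waiting behind an exclusive DDL holder.
        if (!dict_operation_latch.try_lock_shared()) {
            return;
        }
        // ... sync the cache under the shared latch ...
        dict_operation_latch.unlock_shared();
    }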
diff --git a/storage/innobase/fts/fts0que.cc b/storage/innobase/fts/fts0que.cc
index 9fa38cde561..26bd0378aed 100644
--- a/storage/innobase/fts/fts0que.cc
+++ b/storage/innobase/fts/fts0que.cc
@@ -1,6 +1,6 @@
/*****************************************************************************
-Copyright (c) 2007, 2015, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 2007, 2016, Oracle and/or its affiliates. All Rights Reserved.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -287,7 +287,7 @@ fts_expand_query(
dict_index_t* index, /*!< in: FTS index to search */
fts_query_t* query) /*!< in: query result, to be freed
by the client */
- __attribute__((nonnull, warn_unused_result));
+ MY_ATTRIBUTE((nonnull, warn_unused_result));
/*************************************************************//**
This function finds documents that contain all words in a
phrase or proximity search. And if proximity search, verify
@@ -1128,7 +1128,7 @@ cont_search:
/*****************************************************************//**
Set difference.
@return DB_SUCCESS if all go well */
-static __attribute__((nonnull, warn_unused_result))
+static MY_ATTRIBUTE((nonnull, warn_unused_result))
dberr_t
fts_query_difference(
/*=================*/
@@ -1220,7 +1220,7 @@ fts_query_difference(
/*****************************************************************//**
Intersect the token doc ids with the current set.
@return DB_SUCCESS if all go well */
-static __attribute__((nonnull, warn_unused_result))
+static MY_ATTRIBUTE((nonnull, warn_unused_result))
dberr_t
fts_query_intersect(
/*================*/
@@ -1398,7 +1398,7 @@ fts_query_cache(
/*****************************************************************//**
Set union.
@return DB_SUCCESS if all go well */
-static __attribute__((nonnull, warn_unused_result))
+static MY_ATTRIBUTE((nonnull, warn_unused_result))
dberr_t
fts_query_union(
/*============*/
@@ -2015,7 +2015,7 @@ fts_query_select(
Read the rows from the FTS index, that match word and where the
doc id is between first and last doc id.
@return DB_SUCCESS if all go well else error code */
-static __attribute__((nonnull, warn_unused_result))
+static MY_ATTRIBUTE((nonnull, warn_unused_result))
dberr_t
fts_query_find_term(
/*================*/
@@ -2155,7 +2155,7 @@ fts_query_sum(
/********************************************************************
Calculate the total documents that contain a particular word (term).
@return DB_SUCCESS if all go well else error code */
-static __attribute__((nonnull, warn_unused_result))
+static MY_ATTRIBUTE((nonnull, warn_unused_result))
dberr_t
fts_query_total_docs_containing_term(
/*=================================*/
@@ -2234,7 +2234,7 @@ fts_query_total_docs_containing_term(
/********************************************************************
Get the total number of words in a document.
@return DB_SUCCESS if all go well else error code */
-static __attribute__((nonnull, warn_unused_result))
+static MY_ATTRIBUTE((nonnull, warn_unused_result))
dberr_t
fts_query_terms_in_document(
/*========================*/
@@ -2315,7 +2315,7 @@ fts_query_terms_in_document(
/*****************************************************************//**
Retrieve the document and match the phrase tokens.
@return DB_SUCCESS or error code */
-static __attribute__((nonnull, warn_unused_result))
+static MY_ATTRIBUTE((nonnull, warn_unused_result))
dberr_t
fts_query_match_document(
/*=====================*/
@@ -2361,7 +2361,7 @@ fts_query_match_document(
This function fetches the original documents and counts the
words between matching words to check that they are within the specified distance
@return true if the words are within the specified proximity distance */
-static __attribute__((nonnull, warn_unused_result))
+static MY_ATTRIBUTE((nonnull, warn_unused_result))
bool
fts_query_is_in_proximity_range(
/*============================*/
@@ -2416,7 +2416,7 @@ fts_query_is_in_proximity_range(
Iterate over the matched document ids and search for the
actual phrase in the text.
@return DB_SUCCESS if all OK */
-static __attribute__((nonnull, warn_unused_result))
+static MY_ATTRIBUTE((nonnull, warn_unused_result))
dberr_t
fts_query_search_phrase(
/*====================*/
@@ -2504,7 +2504,7 @@ func_exit:
/*****************************************************************//**
Text/Phrase search.
@return DB_SUCCESS or error code */
-static __attribute__((nonnull, warn_unused_result))
+static MY_ATTRIBUTE((nonnull, warn_unused_result))
dberr_t
fts_query_phrase_search(
/*====================*/
@@ -2755,7 +2755,7 @@ func_exit:
/*****************************************************************//**
Find the word and evaluate.
@return DB_SUCCESS if all go well */
-static __attribute__((nonnull, warn_unused_result))
+static MY_ATTRIBUTE((nonnull, warn_unused_result))
dberr_t
fts_query_execute(
/*==============*/
@@ -4126,7 +4126,7 @@ words in documents found in the first search pass will be used as
search arguments to search the document again, thus "expand"
the search result set.
@return DB_SUCCESS if success, otherwise the error code */
-static __attribute__((nonnull, warn_unused_result))
+static MY_ATTRIBUTE((nonnull, warn_unused_result))
dberr_t
fts_expand_query(
/*=============*/
diff --git a/storage/innobase/fts/fts0tlex.cc b/storage/innobase/fts/fts0tlex.cc
index b744fbf0763..d4d9b4c48d1 100644
--- a/storage/innobase/fts/fts0tlex.cc
+++ b/storage/innobase/fts/fts0tlex.cc
@@ -305,9 +305,9 @@ YY_BUFFER_STATE fts0t_scan_buffer (char *base,yy_size_t size ,yyscan_t yyscanner
YY_BUFFER_STATE fts0t_scan_string (yyconst char *yy_str ,yyscan_t yyscanner );
YY_BUFFER_STATE fts0t_scan_bytes (yyconst char *bytes,int len ,yyscan_t yyscanner );
-void *fts0talloc (yy_size_t , yyscan_t yyscanner __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) );
-void *fts0trealloc (void *,yy_size_t , yyscan_t yyscanner __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) );
-void fts0tfree (void * , yyscan_t yyscanner __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) );
+void *fts0talloc (yy_size_t , yyscan_t yyscanner MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) );
+void *fts0trealloc (void *,yy_size_t , yyscan_t yyscanner MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) );
+void fts0tfree (void * , yyscan_t yyscanner MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) );
#define yy_new_buffer fts0t_create_buffer
@@ -347,7 +347,7 @@ typedef int yy_state_type;
static yy_state_type yy_get_previous_state (yyscan_t yyscanner );
static yy_state_type yy_try_NUL_trans (yy_state_type current_state ,yyscan_t yyscanner);
static int yy_get_next_buffer (yyscan_t yyscanner );
-static void yy_fatal_error (yyconst char msg[] , yyscan_t yyscanner __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) );
+static void yy_fatal_error (yyconst char msg[] , yyscan_t yyscanner MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) );
/* Done after the current pattern has been matched and before the
* corresponding action - sets up yytext.
@@ -447,7 +447,7 @@ static yyconst flex_int16_t yy_chk[29] =
#line 1 "fts0tlex.l"
/*****************************************************************************
-Copyright (c) 2007, 2014, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 2007, 2016, Oracle and/or its affiliates. All Rights Reserved.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -575,11 +575,11 @@ extern int fts0twrap (yyscan_t yyscanner );
#endif
#ifndef yytext_ptr
-static void yy_flex_strncpy (char *,yyconst char *,int , yyscan_t yyscanner __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)));
+static void yy_flex_strncpy (char *,yyconst char *,int , yyscan_t yyscanner MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)));
#endif
#ifdef YY_NEED_STRLEN
-static int yy_flex_strlen (yyconst char * , yyscan_t yyscanner __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)));
+static int yy_flex_strlen (yyconst char * , yyscan_t yyscanner MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)));
#endif
#ifndef YY_NO_INPUT
@@ -1602,7 +1602,7 @@ YY_BUFFER_STATE fts0t_scan_bytes (yyconst char * yybytes, int _yybytes_len , y
#define YY_EXIT_FAILURE 2
#endif
-static void yy_fatal_error (yyconst char* msg , yyscan_t yyscanner __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)))
+static void yy_fatal_error (yyconst char* msg , yyscan_t yyscanner MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)))
{
(void) fprintf( stderr, "%s\n", msg );
exit( YY_EXIT_FAILURE );
@@ -1903,7 +1903,7 @@ int fts0tlex_destroy (yyscan_t yyscanner)
*/
#ifndef yytext_ptr
-static void yy_flex_strncpy (char* s1, yyconst char * s2, int n , yyscan_t yyscanner __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)))
+static void yy_flex_strncpy (char* s1, yyconst char * s2, int n , yyscan_t yyscanner MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)))
{
register int i;
for ( i = 0; i < n; ++i )
@@ -1912,7 +1912,7 @@ static void yy_flex_strncpy (char* s1, yyconst char * s2, int n , yys
#endif
#ifdef YY_NEED_STRLEN
-static int yy_flex_strlen (yyconst char * s , yyscan_t yyscanner __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)))
+static int yy_flex_strlen (yyconst char * s , yyscan_t yyscanner MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)))
{
register int n;
for ( n = 0; s[n]; ++n )
@@ -1922,12 +1922,12 @@ static int yy_flex_strlen (yyconst char * s , yyscan_t yyscanner __at
}
#endif
-void *fts0talloc (yy_size_t size , yyscan_t yyscanner __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)))
+void *fts0talloc (yy_size_t size , yyscan_t yyscanner MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)))
{
return (void *) malloc( size );
}
-void *fts0trealloc (void * ptr, yy_size_t size , yyscan_t yyscanner __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)))
+void *fts0trealloc (void * ptr, yy_size_t size , yyscan_t yyscanner MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)))
{
/* The cast to (char *) in the following accommodates both
* implementations that use char* generic pointers, and those
@@ -1939,7 +1939,7 @@ void *fts0trealloc (void * ptr, yy_size_t size , yyscan_t yyscanner
return (void *) realloc( (char *) ptr, size );
}
-void fts0tfree (void * ptr , yyscan_t yyscanner __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)))
+void fts0tfree (void * ptr , yyscan_t yyscanner MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)))
{
free( (char *) ptr ); /* see fts0trealloc() for (char *) cast */
}
diff --git a/storage/innobase/fts/make_parser.sh b/storage/innobase/fts/make_parser.sh
index 2c072914c8b..52b63eff674 100755
--- a/storage/innobase/fts/make_parser.sh
+++ b/storage/innobase/fts/make_parser.sh
@@ -1,6 +1,6 @@
#!/bin/sh
#
-# Copyright (c) 2007, 2011, Oracle and/or its affiliates. All Rights Reserved.
+# Copyright (c) 2007, 2016, Oracle and/or its affiliates. All Rights Reserved.
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
@@ -22,15 +22,15 @@ make -f Makefile.query
echo '#include "univ.i"' > $TMPF
# This is to avoid compiler warning about unused parameters.
-# FIXME: gcc extension "__attribute__" causing compilation errors on windows
+# FIXME: "MY_ATTRIBUTE" (the gcc __attribute__ extension) causing compilation errors on windows
# platform. Quote them out for now.
sed -e '
-s/^\(static.*void.*yy_fatal_error.*msg.*,\)\(.*yyscanner\)/\1 \2 __attribute__((unused))/;
-s/^\(static.*void.*yy_flex_strncpy.*n.*,\)\(.*yyscanner\)/\1 \2 __attribute__((unused))/;
-s/^\(static.*int.*yy_flex_strlen.*s.*,\)\(.*yyscanner\)/\1 \2 __attribute__((unused))/;
-s/^\(\(static\|void\).*fts0[bt]alloc.*,\)\(.*yyscanner\)/\1 \3 __attribute__((unused))/;
-s/^\(\(static\|void\).*fts0[bt]realloc.*,\)\(.*yyscanner\)/\1 \3 __attribute__((unused))/;
-s/^\(\(static\|void\).*fts0[bt]free.*,\)\(.*yyscanner\)/\1 \3 __attribute__((unused))/;
+s/^\(static.*void.*yy_fatal_error.*msg.*,\)\(.*yyscanner\)/\1 \2 MY_ATTRIBUTE((unused))/;
+s/^\(static.*void.*yy_flex_strncpy.*n.*,\)\(.*yyscanner\)/\1 \2 MY_ATTRIBUTE((unused))/;
+s/^\(static.*int.*yy_flex_strlen.*s.*,\)\(.*yyscanner\)/\1 \2 MY_ATTRIBUTE((unused))/;
+s/^\(\(static\|void\).*fts0[bt]alloc.*,\)\(.*yyscanner\)/\1 \3 MY_ATTRIBUTE((unused))/;
+s/^\(\(static\|void\).*fts0[bt]realloc.*,\)\(.*yyscanner\)/\1 \3 MY_ATTRIBUTE((unused))/;
+s/^\(\(static\|void\).*fts0[bt]free.*,\)\(.*yyscanner\)/\1 \3 MY_ATTRIBUTE((unused))/;
' < fts0blex.cc >> $TMPF
mv $TMPF fts0blex.cc
@@ -38,12 +38,12 @@ mv $TMPF fts0blex.cc
echo '#include "univ.i"' > $TMPF
sed -e '
-s/^\(static.*void.*yy_fatal_error.*msg.*,\)\(.*yyscanner\)/\1 \2 __attribute__((unused))/;
-s/^\(static.*void.*yy_flex_strncpy.*n.*,\)\(.*yyscanner\)/\1 \2 __attribute__((unused))/;
-s/^\(static.*int.*yy_flex_strlen.*s.*,\)\(.*yyscanner\)/\1 \2 __attribute__((unused))/;
-s/^\(\(static\|void\).*fts0[bt]alloc.*,\)\(.*yyscanner\)/\1 \3 __attribute__((unused))/;
-s/^\(\(static\|void\).*fts0[bt]realloc.*,\)\(.*yyscanner\)/\1 \3 __attribute__((unused))/;
-s/^\(\(static\|void\).*fts0[bt]free.*,\)\(.*yyscanner\)/\1 \3 __attribute__((unused))/;
+s/^\(static.*void.*yy_fatal_error.*msg.*,\)\(.*yyscanner\)/\1 \2 MY_ATTRIBUTE((unused))/;
+s/^\(static.*void.*yy_flex_strncpy.*n.*,\)\(.*yyscanner\)/\1 \2 MY_ATTRIBUTE((unused))/;
+s/^\(static.*int.*yy_flex_strlen.*s.*,\)\(.*yyscanner\)/\1 \2 MY_ATTRIBUTE((unused))/;
+s/^\(\(static\|void\).*fts0[bt]alloc.*,\)\(.*yyscanner\)/\1 \3 MY_ATTRIBUTE((unused))/;
+s/^\(\(static\|void\).*fts0[bt]realloc.*,\)\(.*yyscanner\)/\1 \3 MY_ATTRIBUTE((unused))/;
+s/^\(\(static\|void\).*fts0[bt]free.*,\)\(.*yyscanner\)/\1 \3 MY_ATTRIBUTE((unused))/;
' < fts0tlex.cc >> $TMPF
mv $TMPF fts0tlex.cc
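Each sed pass appends one annotation to the yyscanner parameter of the flex-generated helpers; the long runs of repeated MY_ATTRIBUTE((unused)) visible in fts0blex.cc and fts0tlex.cc above are apparently the cumulative result of running the script over already-processed output. Before and after a single pass (declaration taken from the generated scanner):

    /* flex output, before make_parser.sh: */
    static void yy_fatal_error (yyconst char msg[] , yyscan_t yyscanner );
    /* after one sed pass: */
    static void yy_fatal_error (yyconst char msg[] , yyscan_t yyscanner MY_ATTRIBUTE((unused)) );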
diff --git a/storage/innobase/handler/ha_innodb.cc b/storage/innobase/handler/ha_innodb.cc
index a60462bc9d6..27a92db00e4 100644
--- a/storage/innobase/handler/ha_innodb.cc
+++ b/storage/innobase/handler/ha_innodb.cc
@@ -1762,7 +1762,7 @@ thd_set_lock_wait_time(
/********************************************************************//**
Obtain the InnoDB transaction of a MySQL thread.
@return reference to transaction pointer */
-__attribute__((warn_unused_result, nonnull))
+MY_ATTRIBUTE((warn_unused_result, nonnull))
static inline
trx_t*&
thd_to_trx(
@@ -3993,7 +3993,7 @@ int
innobase_end(
/*=========*/
handlerton* hton, /*!< in/out: InnoDB handlerton */
- ha_panic_function type __attribute__((unused)))
+ ha_panic_function type MY_ATTRIBUTE((unused)))
/*!< in: ha_panic() parameter */
{
int err= 0;
@@ -10603,7 +10603,7 @@ create_table_check_doc_id_col(
/*****************************************************************//**
Creates a table definition in an InnoDB database. */
-static __attribute__((nonnull, warn_unused_result))
+static MY_ATTRIBUTE((nonnull, warn_unused_result))
int
create_table_def(
/*=============*/
@@ -12277,6 +12277,25 @@ ha_innobase::discard_or_import_tablespace(
/* Commit the transaction in order to release the table lock. */
trx_commit_for_mysql(prebuilt->trx);
+ if (err == DB_SUCCESS && !discard
+ && dict_stats_is_persistent_enabled(dict_table)) {
+ dberr_t ret;
+
+ /* Adjust the persistent statistics. */
+ ret = dict_stats_update(dict_table,
+ DICT_STATS_RECALC_PERSISTENT);
+
+ if (ret != DB_SUCCESS) {
+ push_warning_printf(
+ ha_thd(),
+ Sql_condition::WARN_LEVEL_WARN,
+ ER_ALTER_INFO,
+ "Error updating stats for table '%s'"
+ " after table rebuild: %s",
+ dict_table->name, ut_strerr(ret));
+ }
+ }
+
DBUG_RETURN(convert_error_code_to_mysql(err, dict_table->flags, NULL));
}
@@ -12398,7 +12417,8 @@ ha_innobase::delete_table(
/* Drop the table in InnoDB */
err = row_drop_table_for_mysql(
- norm_name, trx, thd_sql_command(thd) == SQLCOM_DROP_DB);
+ norm_name, trx, thd_sql_command(thd) == SQLCOM_DROP_DB,
+ FALSE);
if (err == DB_TABLE_NOT_FOUND
@@ -12429,7 +12449,8 @@ ha_innobase::delete_table(
#endif
err = row_drop_table_for_mysql(
par_case_name, trx,
- thd_sql_command(thd) == SQLCOM_DROP_DB);
+ thd_sql_command(thd) == SQLCOM_DROP_DB,
+ FALSE);
}
}
@@ -12621,7 +12642,7 @@ innobase_drop_database(
/*********************************************************************//**
Renames an InnoDB table.
@return DB_SUCCESS or error code */
-static __attribute__((nonnull, warn_unused_result))
+static MY_ATTRIBUTE((nonnull, warn_unused_result))
dberr_t
innobase_rename_table(
/*==================*/
@@ -13783,6 +13804,35 @@ ha_innobase::check(
DBUG_RETURN(HA_ADMIN_CORRUPT);
}
+ if (prebuilt->table->corrupted) {
+ char index_name[MAX_FULL_NAME_LEN + 1];
+ /* If some previous operation has marked the table as
+ corrupted in memory but has not propagated that flag to
+ the clustered index, do so here. */
+ index = dict_table_get_first_index(prebuilt->table);
+
+ if (!dict_index_is_corrupted(index)) {
+ row_mysql_lock_data_dictionary(prebuilt->trx);
+ dict_set_corrupted(index, prebuilt->trx, "CHECK TABLE");
+ row_mysql_unlock_data_dictionary(prebuilt->trx);
+ }
+
+ innobase_format_name(index_name, sizeof index_name,
+ index->name, TRUE);
+
+ push_warning_printf(thd,
+ Sql_condition::WARN_LEVEL_WARN,
+ HA_ERR_INDEX_CORRUPT,
+ "InnoDB: Index %s is marked as"
+ " corrupted", index_name);
+
+ /* Now that the table is already marked as corrupted,
+ there is no need to check any index of this table */
+ prebuilt->trx->op_info = "";
+
+ DBUG_RETURN(HA_ADMIN_CORRUPT);
+ }
+
prebuilt->trx->op_info = "checking table";
old_isolation_level = prebuilt->trx->isolation_level;
@@ -13866,6 +13916,15 @@ ha_innobase::check(
prebuilt->index_usable = row_merge_is_index_usable(
prebuilt->trx, prebuilt->index);
+ DBUG_EXECUTE_IF(
+ "dict_set_index_corrupted",
+ if (!dict_index_is_clust(index)) {
+ prebuilt->index_usable = FALSE;
+ row_mysql_lock_data_dictionary(prebuilt->trx);
+ dict_set_corrupted(index, prebuilt->trx, "dict_set_index_corrupted");
+ row_mysql_unlock_data_dictionary(prebuilt->trx);
+ });
+
if (UNIV_UNLIKELY(!prebuilt->index_usable)) {
innobase_format_name(
index_name, sizeof index_name,
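DBUG_EXECUTE_IF expands to nothing in release builds and, in debug builds, runs its body only when the named keyword is enabled (in MariaDB, for example, via SET debug_dbug='+d,dict_set_index_corrupted'), which is how the test suite reaches this corruption path. A minimal sketch of the pattern with a hypothetical keyword:

    /* Sketch only; "simulate_read_error" is a hypothetical keyword. */
    dberr_t err = DB_SUCCESS;
    DBUG_EXECUTE_IF("simulate_read_error",
                    err = DB_ERROR;);  /* forced failure under test */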
@@ -17585,7 +17644,7 @@ static char* srv_buffer_pool_evict;
Evict all uncompressed pages of compressed tables from the buffer pool.
Keep the compressed pages in the buffer pool.
@return whether all uncompressed pages were evicted */
-static __attribute__((warn_unused_result))
+static MY_ATTRIBUTE((warn_unused_result))
bool
innodb_buffer_pool_evict_uncompressed(void)
/*=======================================*/
@@ -17930,13 +17989,13 @@ void
purge_run_now_set(
/*==============*/
THD* thd /*!< in: thread handle */
- __attribute__((unused)),
+ MY_ATTRIBUTE((unused)),
struct st_mysql_sys_var* var /*!< in: pointer to system
variable */
- __attribute__((unused)),
+ MY_ATTRIBUTE((unused)),
void* var_ptr /*!< out: where the formal
string goes */
- __attribute__((unused)),
+ MY_ATTRIBUTE((unused)),
const void* save) /*!< in: immediate result from
check function */
{
@@ -17953,13 +18012,13 @@ void
purge_stop_now_set(
/*===============*/
THD* thd /*!< in: thread handle */
- __attribute__((unused)),
+ MY_ATTRIBUTE((unused)),
struct st_mysql_sys_var* var /*!< in: pointer to system
variable */
- __attribute__((unused)),
+ MY_ATTRIBUTE((unused)),
void* var_ptr /*!< out: where the formal
string goes */
- __attribute__((unused)),
+ MY_ATTRIBUTE((unused)),
const void* save) /*!< in: immediate result from
check function */
{
@@ -17975,13 +18034,13 @@ void
checkpoint_now_set(
/*===============*/
THD* thd /*!< in: thread handle */
- __attribute__((unused)),
+ MY_ATTRIBUTE((unused)),
struct st_mysql_sys_var* var /*!< in: pointer to system
variable */
- __attribute__((unused)),
+ MY_ATTRIBUTE((unused)),
void* var_ptr /*!< out: where the formal
string goes */
- __attribute__((unused)),
+ MY_ATTRIBUTE((unused)),
const void* save) /*!< in: immediate result from
check function */
{
@@ -18002,13 +18061,13 @@ void
buf_flush_list_now_set(
/*===================*/
THD* thd /*!< in: thread handle */
- __attribute__((unused)),
+ MY_ATTRIBUTE((unused)),
struct st_mysql_sys_var* var /*!< in: pointer to system
variable */
- __attribute__((unused)),
+ MY_ATTRIBUTE((unused)),
void* var_ptr /*!< out: where the formal
string goes */
- __attribute__((unused)),
+ MY_ATTRIBUTE((unused)),
const void* save) /*!< in: immediate result from
check function */
{
@@ -18105,13 +18164,13 @@ void
buffer_pool_dump_now(
/*=================*/
THD* thd /*!< in: thread handle */
- __attribute__((unused)),
+ MY_ATTRIBUTE((unused)),
struct st_mysql_sys_var* var /*!< in: pointer to system
variable */
- __attribute__((unused)),
+ MY_ATTRIBUTE((unused)),
void* var_ptr /*!< out: where the formal
string goes */
- __attribute__((unused)),
+ MY_ATTRIBUTE((unused)),
const void* save) /*!< in: immediate result from
check function */
{
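The *_now_set callbacks above all share the plugin sysvar update-hook signature: the server passes thd, var, and var_ptr whether or not the handler needs them, so one-shot triggers such as innodb_purge_run_now consume only `save` and mark the rest unused. A sketch of the shape (illustrative names):

    static void
    example_now_set(
        THD*                      thd     MY_ATTRIBUTE((unused)),
        struct st_mysql_sys_var*  var     MY_ATTRIBUTE((unused)),
        void*                     var_ptr MY_ATTRIBUTE((unused)),
        const void*               save)   /* immediate result of the check function */
    {
        if (*static_cast<const my_bool*>(save)) {
            /* perform the one-shot action here */
        }
    }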
diff --git a/storage/innobase/handler/ha_innodb.h b/storage/innobase/handler/ha_innodb.h
index 107e90b1080..478187e0b23 100644
--- a/storage/innobase/handler/ha_innodb.h
+++ b/storage/innobase/handler/ha_innodb.h
@@ -1,6 +1,6 @@
/*****************************************************************************
-Copyright (c) 2000, 2012, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 2000, 2016, Oracle and/or its affiliates. All Rights Reserved.
Copyright (c) 2013, 2016, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify it under
@@ -462,7 +462,7 @@ enum durability_properties thd_get_durability_property(const MYSQL_THD thd);
@return True if sql_mode has strict mode (all or trans), false otherwise.
*/
bool thd_is_strict_mode(const MYSQL_THD thd)
-__attribute__((nonnull));
+MY_ATTRIBUTE((nonnull));
} /* extern "C" */
/** Get the file name and position of the MySQL binlog corresponding to the
@@ -507,7 +507,7 @@ innobase_index_name_is_reserved(
const KEY* key_info, /*!< in: Indexes to be created */
ulint num_of_keys) /*!< in: Number of indexes to
be created. */
- __attribute__((nonnull, warn_unused_result));
+ MY_ATTRIBUTE((nonnull, warn_unused_result));
/*****************************************************************//**
#ifdef WITH_WSREP
@@ -527,7 +527,7 @@ innobase_table_flags(
outside system tablespace */
ulint* flags, /*!< out: DICT_TF flags */
ulint* flags2) /*!< out: DICT_TF2 flags */
- __attribute__((nonnull, warn_unused_result));
+ MY_ATTRIBUTE((nonnull, warn_unused_result));
/*****************************************************************//**
Validates the create options. We may build on this function
@@ -544,7 +544,7 @@ create_options_are_invalid(
columns and indexes */
HA_CREATE_INFO* create_info, /*!< in: create info. */
bool use_tablespace) /*!< in: srv_file_per_table */
- __attribute__((nonnull, warn_unused_result));
+ MY_ATTRIBUTE((nonnull, warn_unused_result));
/*********************************************************************//**
Retrieve the FTS Relevance Ranking result for doc with doc_id
@@ -574,7 +574,7 @@ void
innobase_fts_close_ranking(
/*=======================*/
FT_INFO* fts_hdl) /*!< in: FTS handler */
- __attribute__((nonnull));
+ MY_ATTRIBUTE((nonnull));
/*****************************************************************//**
Initialize the table FTS stopword list
@return TRUE if success */
@@ -585,7 +585,7 @@ innobase_fts_load_stopword(
dict_table_t* table, /*!< in: Table has the FTS */
trx_t* trx, /*!< in: transaction */
THD* thd) /*!< in: current thread */
- __attribute__((nonnull(1,3), warn_unused_result));
+ MY_ATTRIBUTE((nonnull(1,3), warn_unused_result));
/** Some defines for innobase_fts_check_doc_id_index() return value */
enum fts_doc_id_index_enum {
@@ -607,7 +607,7 @@ innobase_fts_check_doc_id_index(
that is being altered */
ulint* fts_doc_col_no) /*!< out: The column number for
Doc ID */
- __attribute__((warn_unused_result));
+ MY_ATTRIBUTE((warn_unused_result));
/*******************************************************************//**
Check whether the table has a unique index with FTS_DOC_ID_INDEX_NAME
@@ -620,7 +620,7 @@ innobase_fts_check_doc_id_index_in_def(
/*===================================*/
ulint n_key, /*!< in: Number of keys */
const KEY* key_info) /*!< in: Key definitions */
- __attribute__((nonnull, warn_unused_result));
+ MY_ATTRIBUTE((nonnull, warn_unused_result));
/***********************************************************************
@return version of the extended FTS API */
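Note the argument-index form in innobase_fts_load_stopword() above: MY_ATTRIBUTE((nonnull(1,3))) promises only that arguments 1 and 3 (the table and the THD) are non-NULL, leaving the trx argument free to be NULL. A small illustration of the indexed form on a hypothetical function:

    /* Only arguments 1 and 3 are promised non-NULL; arg 2 may be NULL. */
    int join_path(const char* dir, const char* opt_sep, char* out)
        MY_ATTRIBUTE((nonnull(1,3), warn_unused_result));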
diff --git a/storage/innobase/handler/handler0alter.cc b/storage/innobase/handler/handler0alter.cc
index f5db3775909..fc56cdf9456 100644
--- a/storage/innobase/handler/handler0alter.cc
+++ b/storage/innobase/handler/handler0alter.cc
@@ -1,7 +1,7 @@
/*****************************************************************************
-Copyright (c) 2005, 2015, Oracle and/or its affiliates
-Copyright (c) 2013, 2015, MariaDB Corporation. All Rights Reserved.
+Copyright (c) 2005, 2016, Oracle and/or its affiliates
+Copyright (c) 2013, 2016, MariaDB Corporation. All Rights Reserved.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -101,7 +101,7 @@ static const Alter_inplace_info::HA_ALTER_FLAGS INNOBASE_ALTER_NOREBUILD
| Alter_inplace_info::ALTER_COLUMN_NAME;
/* Report an InnoDB error to the client by invoking my_error(). */
-static UNIV_COLD __attribute__((nonnull))
+static UNIV_COLD MY_ATTRIBUTE((nonnull))
void
my_error_innodb(
/*============*/
@@ -203,7 +203,7 @@ Determine if ALTER TABLE needs to rebuild the table.
@param ha_alter_info the DDL operation
@param altered_table MySQL original table
@return whether it is necessary to rebuild the table */
-static __attribute__((nonnull, warn_unused_result))
+static MY_ATTRIBUTE((nonnull, warn_unused_result))
bool
innobase_need_rebuild(
/*==================*/
@@ -646,7 +646,7 @@ ha_innobase::check_if_supported_inplace_alter(
/*************************************************************//**
Initialize the dict_foreign_t structure with supplied info
@return true if added, false if duplicate foreign->id */
-static __attribute__((nonnull(1,3,5,7)))
+static MY_ATTRIBUTE((nonnull(1,3,5,7)))
bool
innobase_init_foreign(
/*==================*/
@@ -735,7 +735,7 @@ innobase_init_foreign(
/*************************************************************//**
Check whether the foreign key options are legit
@return true if it is */
-static __attribute__((nonnull, warn_unused_result))
+static MY_ATTRIBUTE((nonnull, warn_unused_result))
bool
innobase_check_fk_option(
/*=====================*/
@@ -767,7 +767,7 @@ innobase_check_fk_option(
/*************************************************************//**
Set foreign key options
@return true if successfully set */
-static __attribute__((nonnull, warn_unused_result))
+static MY_ATTRIBUTE((nonnull, warn_unused_result))
bool
innobase_set_foreign_key_option(
/*============================*/
@@ -812,7 +812,7 @@ innobase_set_foreign_key_option(
Check if a foreign key constraint can make use of an index
that is being created.
@return useable index, or NULL if none found */
-static __attribute__((nonnull, warn_unused_result))
+static MY_ATTRIBUTE((nonnull, warn_unused_result))
const KEY*
innobase_find_equiv_index(
/*======================*/
@@ -868,7 +868,7 @@ no_match:
Find an index whose first fields are the columns in the array
in the same order and is not marked for deletion
@return matching index, NULL if not found */
-static __attribute__((nonnull(1,2,6), warn_unused_result))
+static MY_ATTRIBUTE((nonnull(1,2,6), warn_unused_result))
dict_index_t*
innobase_find_fk_index(
/*===================*/
@@ -916,7 +916,7 @@ next_rec:
Create InnoDB foreign key structure from MySQL alter_info
@retval true if successful
@retval false on error (will call my_error()) */
-static __attribute__((nonnull(1,2,3,7,8), warn_unused_result))
+static MY_ATTRIBUTE((nonnull(1,2,3,7,8), warn_unused_result))
bool
innobase_get_foreign_key_info(
/*==========================*/
@@ -1416,7 +1416,7 @@ innobase_rec_reset(
/*******************************************************************//**
This function checks that index keys are sensible.
@return 0 or error number */
-static __attribute__((nonnull, warn_unused_result))
+static MY_ATTRIBUTE((nonnull, warn_unused_result))
int
innobase_check_index_keys(
/*======================*/
@@ -1537,7 +1537,7 @@ name_ok:
/*******************************************************************//**
Create index field definition for key part */
-static __attribute__((nonnull(2,3)))
+static MY_ATTRIBUTE((nonnull(2,3)))
void
innobase_create_index_field_def(
/*============================*/
@@ -1586,7 +1586,7 @@ innobase_create_index_field_def(
/*******************************************************************//**
Create index definition for key */
-static __attribute__((nonnull))
+static MY_ATTRIBUTE((nonnull))
void
innobase_create_index_def(
/*======================*/
@@ -1876,7 +1876,7 @@ ELSE
ENDIF
@return key definitions */
-static __attribute__((nonnull, warn_unused_result, malloc))
+static MY_ATTRIBUTE((nonnull, warn_unused_result, malloc))
index_def_t*
innobase_create_key_defs(
/*=====================*/
@@ -2100,7 +2100,7 @@ created_clustered:
/*******************************************************************//**
Check each index column size, make sure they do not exceed the max limit
@return true if index column size exceeds limit */
-static __attribute__((nonnull, warn_unused_result))
+static MY_ATTRIBUTE((nonnull, warn_unused_result))
bool
innobase_check_column_length(
/*=========================*/
@@ -2250,7 +2250,7 @@ online_retry_drop_indexes_low(
/********************************************************************//**
Drop any indexes that we were not able to free previously due to
open table handles. */
-static __attribute__((nonnull))
+static MY_ATTRIBUTE((nonnull))
void
online_retry_drop_indexes(
/*======================*/
@@ -2280,7 +2280,7 @@ online_retry_drop_indexes(
/********************************************************************//**
Commit a dictionary transaction and drop any indexes that we were not
able to free previously due to open table handles. */
-static __attribute__((nonnull))
+static MY_ATTRIBUTE((nonnull))
void
online_retry_drop_indexes_with_trx(
/*===============================*/
@@ -2309,7 +2309,7 @@ online_retry_drop_indexes_with_trx(
@param drop_fk constraints being dropped
@param n_drop_fk number of constraints that are being dropped
@return whether the constraint is being dropped */
-inline __attribute__((pure, nonnull, warn_unused_result))
+inline MY_ATTRIBUTE((pure, nonnull, warn_unused_result))
bool
innobase_dropping_foreign(
/*======================*/
@@ -2336,7 +2336,7 @@ column that is being dropped or modified to NOT NULL.
@retval true Not allowed (will call my_error())
@retval false Allowed
*/
-static __attribute__((pure, nonnull, warn_unused_result))
+static MY_ATTRIBUTE((pure, nonnull, warn_unused_result))
bool
innobase_check_foreigns_low(
/*========================*/
@@ -2436,7 +2436,7 @@ column that is being dropped or modified to NOT NULL.
@retval true Not allowed (will call my_error())
@retval false Allowed
*/
-static __attribute__((pure, nonnull, warn_unused_result))
+static MY_ATTRIBUTE((pure, nonnull, warn_unused_result))
bool
innobase_check_foreigns(
/*====================*/
@@ -2481,7 +2481,7 @@ innobase_check_foreigns(
@param dfield InnoDB data field to copy to
@param field MySQL value for the column
@param comp nonzero if in compact format */
-static __attribute__((nonnull))
+static MY_ATTRIBUTE((nonnull))
void
innobase_build_col_map_add(
/*=======================*/
@@ -2515,7 +2515,7 @@ adding columns.
@param heap Memory heap where allocated
@return array of integers, mapping column numbers in the table
to column numbers in altered_table */
-static __attribute__((nonnull(1,2,3,4,5,7), warn_unused_result))
+static MY_ATTRIBUTE((nonnull(1,2,3,4,5,7), warn_unused_result))
const ulint*
innobase_build_col_map(
/*===================*/
@@ -2665,7 +2665,7 @@ innobase_drop_fts_index_table(
@param user_table InnoDB table as it is before the ALTER operation
@param heap Memory heap for the allocation
@return array of new column names in rebuilt_table, or NULL if not renamed */
-static __attribute__((nonnull, warn_unused_result))
+static MY_ATTRIBUTE((nonnull, warn_unused_result))
const char**
innobase_get_col_names(
Alter_inplace_info* ha_alter_info,
@@ -2728,7 +2728,7 @@ while preparing ALTER TABLE.
@retval true Failure
@retval false Success
*/
-static __attribute__((warn_unused_result, nonnull(1,2,3,4)))
+static MY_ATTRIBUTE((warn_unused_result, nonnull(1,2,3,4)))
bool
prepare_inplace_alter_table_dict(
/*=============================*/
@@ -3382,7 +3382,7 @@ err_exit:
/* Check whether an index is needed for the foreign key constraint.
If so, and if it is being dropped, check whether an equivalent index can play its role.
@return true if the index is needed and can't be dropped */
-static __attribute__((nonnull(1,2,3,5), warn_unused_result))
+static MY_ATTRIBUTE((nonnull(1,2,3,5), warn_unused_result))
bool
innobase_check_foreign_key_index(
/*=============================*/
@@ -3926,6 +3926,24 @@ check_if_can_drop_indexes:
drop_index = NULL;
}
+ /* Check if any of the existing indexes is marked as corrupted,
+ and if so, refuse adding more indexes. */
+ if (ha_alter_info->handler_flags & Alter_inplace_info::ADD_INDEX) {
+ for (dict_index_t* index = dict_table_get_first_index(indexed_table);
+ index != NULL; index = dict_table_get_next_index(index)) {
+
+ if (!index->to_be_dropped && dict_index_is_corrupted(index)) {
+ char index_name[MAX_FULL_NAME_LEN + 1];
+
+ innobase_format_name(index_name, sizeof index_name,
+ index->name, TRUE);
+
+ my_error(ER_INDEX_CORRUPT, MYF(0), index_name);
+ DBUG_RETURN(true);
+ }
+ }
+ }
+
n_add_fk = 0;
if (ha_alter_info->handler_flags
@@ -4296,7 +4314,7 @@ temporary index prefix
@param locked TRUE=table locked, FALSE=may need to do a lazy drop
@param trx the transaction
*/
-static __attribute__((nonnull))
+static MY_ATTRIBUTE((nonnull))
void
innobase_rollback_sec_index(
/*========================*/
@@ -4330,7 +4348,7 @@ during prepare, but might not be during commit).
@retval true Failure
@retval false Success
*/
-inline __attribute__((nonnull, warn_unused_result))
+inline MY_ATTRIBUTE((nonnull, warn_unused_result))
bool
rollback_inplace_alter_table(
/*=========================*/
@@ -4462,7 +4480,7 @@ func_exit:
@param foreign_id Foreign key constraint identifier
@retval true Failure
@retval false Success */
-static __attribute__((nonnull, warn_unused_result))
+static MY_ATTRIBUTE((nonnull, warn_unused_result))
bool
innobase_drop_foreign_try(
/*======================*/
@@ -4519,7 +4537,7 @@ innobase_drop_foreign_try(
@param new_clustered whether the table has been rebuilt
@retval true Failure
@retval false Success */
-static __attribute__((nonnull, warn_unused_result))
+static MY_ATTRIBUTE((nonnull, warn_unused_result))
bool
innobase_rename_column_try(
/*=======================*/
@@ -4728,7 +4746,7 @@ rename_foreign:
@param table_name Table name in MySQL
@retval true Failure
@retval false Success */
-static __attribute__((nonnull, warn_unused_result))
+static MY_ATTRIBUTE((nonnull, warn_unused_result))
bool
innobase_rename_columns_try(
/*========================*/
@@ -4778,7 +4796,7 @@ as part of commit_cache_norebuild().
@param ha_alter_info Data used during in-place alter.
@param table the TABLE
@param user_table InnoDB table that was being altered */
-static __attribute__((nonnull))
+static MY_ATTRIBUTE((nonnull))
void
innobase_rename_columns_cache(
/*==========================*/
@@ -4822,7 +4840,7 @@ processed_field:
@param altered_table MySQL table that is being altered
@param old_table MySQL table as it is before the ALTER operation
@return the next auto-increment value (0 if not present) */
-static __attribute__((nonnull, warn_unused_result))
+static MY_ATTRIBUTE((nonnull, warn_unused_result))
ulonglong
commit_get_autoinc(
/*===============*/
@@ -4906,7 +4924,7 @@ but do not touch the data dictionary cache.
@retval true Failure
@retval false Success
*/
-static __attribute__((nonnull, warn_unused_result))
+static MY_ATTRIBUTE((nonnull, warn_unused_result))
bool
innobase_update_foreign_try(
/*========================*/
@@ -4990,7 +5008,7 @@ after the changes to data dictionary tables were committed.
@param ctx In-place ALTER TABLE context
@param user_thd MySQL connection
@return InnoDB error code (should always be DB_SUCCESS) */
-static __attribute__((nonnull, warn_unused_result))
+static MY_ATTRIBUTE((nonnull, warn_unused_result))
dberr_t
innobase_update_foreign_cache(
/*==========================*/
@@ -5075,7 +5093,7 @@ when rebuilding the table.
@retval true Failure
@retval false Success
*/
-inline __attribute__((nonnull, warn_unused_result))
+inline MY_ATTRIBUTE((nonnull, warn_unused_result))
bool
commit_try_rebuild(
/*===============*/
@@ -5237,7 +5255,7 @@ commit_try_rebuild(
/** Apply the changes made during commit_try_rebuild(),
to the data dictionary cache and the file system.
@param ctx In-place ALTER TABLE context */
-inline __attribute__((nonnull))
+inline MY_ATTRIBUTE((nonnull))
void
commit_cache_rebuild(
/*=================*/
@@ -5332,7 +5350,7 @@ when not rebuilding the table.
@retval true Failure
@retval false Success
*/
-inline __attribute__((nonnull, warn_unused_result))
+inline MY_ATTRIBUTE((nonnull, warn_unused_result))
bool
commit_try_norebuild(
/*=================*/
@@ -5442,7 +5460,7 @@ after a successful commit_try_norebuild() call.
@param trx Data dictionary transaction object
(will be started and committed)
@return whether all replacements were found for dropped indexes */
-inline __attribute__((nonnull, warn_unused_result))
+inline MY_ATTRIBUTE((nonnull, warn_unused_result))
bool
commit_cache_norebuild(
/*===================*/
diff --git a/storage/innobase/ibuf/ibuf0ibuf.cc b/storage/innobase/ibuf/ibuf0ibuf.cc
index 9e8d8659739..0a2140c4a29 100644
--- a/storage/innobase/ibuf/ibuf0ibuf.cc
+++ b/storage/innobase/ibuf/ibuf0ibuf.cc
@@ -1,6 +1,6 @@
/*****************************************************************************
-Copyright (c) 1997, 2014, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 1997, 2016, Oracle and/or its affiliates. All Rights Reserved.
Copyright (c) 2016, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify it under
@@ -670,7 +670,7 @@ byte*
ibuf_parse_bitmap_init(
/*===================*/
byte* ptr, /*!< in: buffer */
- byte* end_ptr __attribute__((unused)), /*!< in: buffer end */
+ byte* end_ptr MY_ATTRIBUTE((unused)), /*!< in: buffer end */
buf_block_t* block, /*!< in: block or NULL */
mtr_t* mtr) /*!< in: mtr or NULL */
{
@@ -2513,7 +2513,7 @@ ibuf_get_merge_page_nos_func(
/*******************************************************************//**
Get the matching records for space id.
@return current rec or NULL */
-static __attribute__((nonnull, warn_unused_result))
+static MY_ATTRIBUTE((nonnull, warn_unused_result))
const rec_t*
ibuf_get_user_rec(
/*===============*/
@@ -2535,7 +2535,7 @@ ibuf_get_user_rec(
Reads page numbers for a space id from an ibuf tree.
@return a lower limit for the combined volume of records which will be
merged */
-static __attribute__((nonnull, warn_unused_result))
+static MY_ATTRIBUTE((nonnull, warn_unused_result))
ulint
ibuf_get_merge_pages(
/*=================*/
@@ -2643,40 +2643,22 @@ ibuf_merge_pages(
}
/*********************************************************************//**
-Get the table instance from the table id.
-@return table instance */
-static __attribute__((warn_unused_result))
-dict_table_t*
-ibuf_get_table(
-/*===========*/
- table_id_t table_id) /*!< in: valid table id */
-{
- rw_lock_s_lock_func(&dict_operation_lock, 0, __FILE__, __LINE__);
-
- dict_table_t* table = dict_table_open_on_id(
- table_id, FALSE, DICT_TABLE_OP_NORMAL);
-
- rw_lock_s_unlock_gen(&dict_operation_lock, 0);
-
- return(table);
-}
-
-/*********************************************************************//**
-Contracts insert buffer trees by reading pages to the buffer pool.
-@return a lower limit for the combined size in bytes of entries which
-will be merged from ibuf trees to the pages read, 0 if ibuf is
-empty */
-static
+Contracts insert buffer trees by reading pages referring to space_id
+to the buffer pool.
+@return number of pages merged. */
+UNIV_INTERN
ulint
ibuf_merge_space(
/*=============*/
- ulint space, /*!< in: tablespace id to merge */
- ulint* n_pages)/*!< out: number of pages to which merged */
+ ulint space) /*!< in: tablespace id to merge */
{
mtr_t mtr;
btr_pcur_t pcur;
mem_heap_t* heap = mem_heap_create(512);
dtuple_t* tuple = ibuf_search_tuple_build(space, 0, heap);
+ ulint n_pages = 0;
+
+ ut_ad(space < SRV_LOG_SPACE_FIRST_ID);
ibuf_mtr_start(&mtr);
@@ -2708,50 +2690,47 @@ ibuf_merge_space(
} else {
sum_sizes = ibuf_get_merge_pages(
- &pcur, space, IBUF_MAX_N_PAGES_MERGED,
- &pages[0], &spaces[0], &versions[0], n_pages,
- &mtr);
+ &pcur, space, IBUF_MAX_N_PAGES_MERGED,
+ &pages[0], &spaces[0], &versions[0], &n_pages,
+ &mtr);
+ ib_logf(IB_LOG_LEVEL_INFO,
+ "Size of pages merged %lu", sum_sizes);
- ++sum_sizes;
}
ibuf_mtr_commit(&mtr);
btr_pcur_close(&pcur);
- if (sum_sizes > 0) {
-
- ut_a(*n_pages > 0 || sum_sizes == 1);
+ if (n_pages > 0) {
#ifdef UNIV_DEBUG
- ut_ad(*n_pages <= UT_ARR_SIZE(pages));
+ ut_ad(n_pages <= UT_ARR_SIZE(pages));
- for (ulint i = 0; i < *n_pages; ++i) {
+ for (ulint i = 0; i < n_pages; ++i) {
ut_ad(spaces[i] == space);
ut_ad(i == 0 || versions[i] == versions[i - 1]);
}
#endif /* UNIV_DEBUG */
buf_read_ibuf_merge_pages(
- true, spaces, versions, pages, *n_pages);
+ true, spaces, versions, pages, n_pages);
}
- return(sum_sizes);
+ return(n_pages);
}
-/*********************************************************************//**
-Contracts insert buffer trees by reading pages to the buffer pool.
+/** Contract the change buffer by reading pages to the buffer pool.
+@param[out] n_pages number of pages merged
+@param[in] sync whether the caller waits for
+the issued reads to complete
@return a lower limit for the combined size in bytes of entries which
will be merged from ibuf trees to the pages read, 0 if ibuf is
empty */
-static __attribute__((nonnull, warn_unused_result))
+static MY_ATTRIBUTE((nonnull, warn_unused_result))
ulint
ibuf_merge(
/*=======*/
- table_id_t table_id, /*!< in: if merge should be
- done only for a specific
- table, for all tables this
- should be 0 */
ulint* n_pages, /*!< out: number of pages to
which merged */
bool sync) /*!< in: TRUE if the caller
@@ -2759,8 +2738,6 @@ ibuf_merge(
read with the highest
tablespace address to complete */
{
- dict_table_t* table;
-
*n_pages = 0;
/* We perform a dirty read of ibuf->empty, without latching
@@ -2774,55 +2751,45 @@ ibuf_merge(
} else if (ibuf_debug) {
return(0);
#endif /* UNIV_DEBUG || UNIV_IBUF_DEBUG */
- } else if (table_id == 0) {
+ } else {
return(ibuf_merge_pages(n_pages, sync));
- } else if ((table = ibuf_get_table(table_id)) == 0) {
- /* Table has been dropped. */
- return(0);
}
-
- ulint volume = ibuf_merge_space(table->space, n_pages);
-
- dict_table_close(table, FALSE, FALSE);
-
- return(volume);
}
-/*********************************************************************//**
-Contracts insert buffer trees by reading pages to the buffer pool.
+/** Contract the change buffer by reading pages to the buffer pool.
+@param[in] sync whether the caller waits for
+the issued reads to complete
@return a lower limit for the combined size in bytes of entries which
-will be merged from ibuf trees to the pages read, 0 if ibuf is
-empty */
+will be merged from ibuf trees to the pages read, 0 if ibuf is empty */
static
ulint
ibuf_contract(
/*==========*/
- ibool sync) /*!< in: TRUE if the caller wants to wait for the
+ bool sync) /*!< in: TRUE if the caller wants to wait for the
issued read with the highest tablespace address
to complete */
{
ulint n_pages;
- return(ibuf_merge(0, &n_pages, sync));
+ return(ibuf_merge_pages(&n_pages, sync));
}
-/*********************************************************************//**
-Contracts insert buffer trees by reading pages to the buffer pool.
+/** Contract the change buffer by reading pages to the buffer pool.
+@param[in] full If true, do a full contraction based
+on PCT_IO(100). If false, the size of contract batch is determined
+based on the current size of the change buffer.
@return a lower limit for the combined size in bytes of entries which
will be merged from ibuf trees to the pages read, 0 if ibuf is
empty */
UNIV_INTERN
ulint
-ibuf_contract_in_background(
-/*========================*/
- table_id_t table_id, /*!< in: if merge should be done only
- for a specific table, for all tables
- this should be 0 */
- ibool full) /*!< in: TRUE if the caller wants to
- do a full contract based on PCT_IO(100).
- If FALSE then the size of contract
- batch is determined based on the
- current size of the ibuf tree. */
+ibuf_merge_in_background(
+/*=====================*/
+ bool full) /*!< in: TRUE if the caller wants to
+ do a full contract based on PCT_IO(100).
+ If FALSE then the size of contract
+ batch is determined based on the
+ current size of the ibuf tree. */
{
ulint sum_bytes = 0;
ulint sum_pages = 0;
@@ -2830,7 +2797,7 @@ ibuf_contract_in_background(
ulint n_pages;
#if defined UNIV_DEBUG || defined UNIV_IBUF_DEBUG
- if (srv_ibuf_disable_background_merge && table_id == 0) {
+ if (srv_ibuf_disable_background_merge) {
return(0);
}
#endif /* UNIV_DEBUG || UNIV_IBUF_DEBUG */
@@ -2867,7 +2834,7 @@ ibuf_contract_in_background(
while (sum_pages < n_pages) {
ulint n_bytes;
- n_bytes = ibuf_merge(table_id, &n_pag2, FALSE);
+ n_bytes = ibuf_merge(&n_pag2, false);
if (n_bytes == 0) {
return(sum_bytes);
@@ -3471,7 +3438,7 @@ ibuf_get_entry_counter_func(
Buffer an operation in the insert/delete buffer, instead of doing it
directly to the disk page, if this is possible.
@return DB_SUCCESS, DB_STRONG_FAIL or other error */
-static __attribute__((nonnull, warn_unused_result))
+static MY_ATTRIBUTE((nonnull, warn_unused_result))
dberr_t
ibuf_insert_low(
/*============*/
@@ -3538,8 +3505,7 @@ ibuf_insert_low(
#ifdef UNIV_IBUF_DEBUG
fputs("Ibuf too big\n", stderr);
#endif
- /* Use synchronous contract (== TRUE) */
- ibuf_contract(TRUE);
+ ibuf_contract(true);
return(DB_STRONG_FAIL);
}
@@ -3962,7 +3928,7 @@ skip_watch:
During merge, inserts to an index page a secondary index entry extracted
from the insert buffer.
@return newly inserted record */
-static __attribute__((nonnull))
+static MY_ATTRIBUTE((nonnull))
rec_t*
ibuf_insert_to_index_page_low(
/*==========================*/
@@ -4393,7 +4359,7 @@ ibuf_delete(
/*********************************************************************//**
Restores insert buffer tree cursor position
@return TRUE if the position was restored; FALSE if not */
-static __attribute__((nonnull))
+static MY_ATTRIBUTE((nonnull))
ibool
ibuf_restore_pos(
/*=============*/
@@ -4448,7 +4414,7 @@ Deletes from ibuf the record on which pcur is positioned. If we have to
resort to a pessimistic delete, this function commits mtr and closes
the cursor.
@return TRUE if mtr was committed and pcur closed in this operation */
-static __attribute__((warn_unused_result))
+static MY_ATTRIBUTE((warn_unused_result))
ibool
ibuf_delete_rec(
/*============*/
diff --git a/storage/innobase/include/api0api.h b/storage/innobase/include/api0api.h
index e4c9c941de5..500bf4fe3b2 100644
--- a/storage/innobase/include/api0api.h
+++ b/storage/innobase/include/api0api.h
@@ -1,6 +1,6 @@
/*****************************************************************************
-Copyright (c) 2011, 2015, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 2011, 2016, Oracle and/or its affiliates. All Rights Reserved.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -36,7 +36,7 @@ InnoDB Native API
#endif
#if defined(__GNUC__) && (__GNUC__ > 2) && ! defined(__INTEL_COMPILER)
-#define UNIV_NO_IGNORE __attribute__ ((warn_unused_result))
+#define UNIV_NO_IGNORE MY_ATTRIBUTE ((warn_unused_result))
#else
#define UNIV_NO_IGNORE
#endif /* __GNUC__ && __GNUC__ > 2 && !__INTEL_COMPILER */
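One observation on this hunk: since MY_ATTRIBUTE already expands to nothing on compilers without __attribute__ support, the surrounding __GNUC__ guard is arguably redundant, and the definition could collapse to a single unconditional line (a hypothetical simplification, not what the patch does):

    #define UNIV_NO_IGNORE MY_ATTRIBUTE((warn_unused_result))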
diff --git a/storage/innobase/include/btr0btr.h b/storage/innobase/include/btr0btr.h
index 95ce104e7ea..bf3f4a76301 100644
--- a/storage/innobase/include/btr0btr.h
+++ b/storage/innobase/include/btr0btr.h
@@ -1,6 +1,6 @@
/*****************************************************************************
-Copyright (c) 1994, 2013, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 1994, 2016, Oracle and/or its affiliates. All Rights Reserved.
Copyright (c) 2012, Facebook Inc.
Copyright (c) 2014, 2015, MariaDB Corporation. All Rights Reserved.
@@ -115,7 +115,7 @@ btr_corruption_report(
/*==================*/
const buf_block_t* block, /*!< in: corrupted block */
const dict_index_t* index) /*!< in: index tree */
- UNIV_COLD __attribute__((nonnull));
+ UNIV_COLD MY_ATTRIBUTE((nonnull));
/** Assert that a B-tree page is not corrupted.
@param block buffer block containing a B-tree page
@@ -157,7 +157,7 @@ btr_blob_dbg_add_blob(
ulint page_no, /*!< in: start page of the column */
dict_index_t* index, /*!< in/out: index tree */
const char* ctx) /*!< in: context (for logging) */
- __attribute__((nonnull));
+ MY_ATTRIBUTE((nonnull));
/**************************************************************//**
Display the references to off-page columns.
This function is to be called from a debugger,
@@ -167,7 +167,7 @@ void
btr_blob_dbg_print(
/*===============*/
const dict_index_t* index) /*!< in: index tree */
- __attribute__((nonnull));
+ MY_ATTRIBUTE((nonnull));
/**************************************************************//**
Check that there are no references to off-page columns from or to
the given page. Invoked when freeing or clearing a page.
@@ -178,7 +178,7 @@ btr_blob_dbg_is_empty(
/*==================*/
dict_index_t* index, /*!< in: index */
ulint page_no) /*!< in: page number */
- __attribute__((nonnull, warn_unused_result));
+ MY_ATTRIBUTE((nonnull, warn_unused_result));
/**************************************************************//**
Modify the 'deleted' flag of a record. */
@@ -190,7 +190,7 @@ btr_blob_dbg_set_deleted_flag(
dict_index_t* index, /*!< in/out: index */
const ulint* offsets,/*!< in: rec_get_offs(rec, index) */
ibool del) /*!< in: TRUE=deleted, FALSE=exists */
- __attribute__((nonnull));
+ MY_ATTRIBUTE((nonnull));
/**************************************************************//**
Change the ownership of an off-page column. */
UNIV_INTERN
@@ -202,7 +202,7 @@ btr_blob_dbg_owner(
const ulint* offsets,/*!< in: rec_get_offs(rec, index) */
ulint i, /*!< in: ith field in rec */
ibool own) /*!< in: TRUE=owned, FALSE=disowned */
- __attribute__((nonnull));
+ MY_ATTRIBUTE((nonnull));
/** Assert that there are no BLOB references to or from the given page. */
# define btr_blob_dbg_assert_empty(index, page_no) \
ut_a(btr_blob_dbg_is_empty(index, page_no))
@@ -222,7 +222,7 @@ btr_root_get(
/*=========*/
const dict_index_t* index, /*!< in: index tree */
mtr_t* mtr) /*!< in: mtr */
- __attribute__((nonnull));
+ MY_ATTRIBUTE((nonnull));
/**************************************************************//**
Checks and adjusts the root node of a tree during IMPORT TABLESPACE.
@@ -232,7 +232,7 @@ dberr_t
btr_root_adjust_on_import(
/*======================*/
const dict_index_t* index) /*!< in: index tree */
- __attribute__((nonnull, warn_unused_result));
+ MY_ATTRIBUTE((nonnull, warn_unused_result));
/**************************************************************//**
Gets the height of the B-tree (the level of the root, when the leaf
@@ -245,7 +245,7 @@ btr_height_get(
/*===========*/
dict_index_t* index, /*!< in: index tree */
mtr_t* mtr) /*!< in/out: mini-transaction */
- __attribute__((nonnull, warn_unused_result));
+ MY_ATTRIBUTE((nonnull, warn_unused_result));
/**************************************************************//**
Gets a buffer page and declares its latching order level. */
UNIV_INLINE
@@ -307,7 +307,7 @@ index_id_t
btr_page_get_index_id(
/*==================*/
const page_t* page) /*!< in: index page */
- __attribute__((nonnull, pure, warn_unused_result));
+ MY_ATTRIBUTE((nonnull, pure, warn_unused_result));
#ifndef UNIV_HOTBACKUP
/********************************************************//**
Gets the node level field in an index page.
@@ -317,7 +317,7 @@ ulint
btr_page_get_level_low(
/*===================*/
const page_t* page) /*!< in: index page */
- __attribute__((nonnull, pure, warn_unused_result));
+ MY_ATTRIBUTE((nonnull, pure, warn_unused_result));
#define btr_page_get_level(page, mtr) btr_page_get_level_low(page)
/********************************************************//**
Gets the next index page number.
@@ -328,7 +328,7 @@ btr_page_get_next(
/*==============*/
const page_t* page, /*!< in: index page */
mtr_t* mtr) /*!< in: mini-transaction handle */
- __attribute__((nonnull, warn_unused_result));
+ MY_ATTRIBUTE((nonnull, warn_unused_result));
/********************************************************//**
Gets the previous index page number.
@return prev page number */
@@ -338,7 +338,7 @@ btr_page_get_prev(
/*==============*/
const page_t* page, /*!< in: index page */
mtr_t* mtr) /*!< in: mini-transaction handle */
- __attribute__((nonnull, warn_unused_result));
+ MY_ATTRIBUTE((nonnull, warn_unused_result));
/*************************************************************//**
Gets pointer to the previous user record in the tree. It is assumed
that the caller has appropriate latches on the page and its neighbor.
@@ -350,7 +350,7 @@ btr_get_prev_user_rec(
rec_t* rec, /*!< in: record on leaf level */
mtr_t* mtr) /*!< in: mtr holding a latch on the page, and if
needed, also to the previous page */
- __attribute__((nonnull, warn_unused_result));
+ MY_ATTRIBUTE((nonnull, warn_unused_result));
/*************************************************************//**
Gets pointer to the next user record in the tree. It is assumed
that the caller has appropriate latches on the page and its neighbor.
@@ -362,7 +362,7 @@ btr_get_next_user_rec(
rec_t* rec, /*!< in: record on leaf level */
mtr_t* mtr) /*!< in: mtr holding a latch on the page, and if
needed, also to the next page */
- __attribute__((nonnull, warn_unused_result));
+ MY_ATTRIBUTE((nonnull, warn_unused_result));
/**************************************************************//**
Releases the latch on a leaf page and bufferunfixes it. */
UNIV_INLINE
@@ -373,7 +373,7 @@ btr_leaf_page_release(
ulint latch_mode, /*!< in: BTR_SEARCH_LEAF or
BTR_MODIFY_LEAF */
mtr_t* mtr) /*!< in: mtr */
- __attribute__((nonnull));
+ MY_ATTRIBUTE((nonnull));
/**************************************************************//**
Gets the child node file address in a node pointer.
NOTE: the offsets array must contain all offsets for the record since
@@ -387,7 +387,7 @@ btr_node_ptr_get_child_page_no(
/*===========================*/
const rec_t* rec, /*!< in: node pointer record */
const ulint* offsets)/*!< in: array returned by rec_get_offsets() */
- __attribute__((nonnull, pure, warn_unused_result));
+ MY_ATTRIBUTE((nonnull, pure, warn_unused_result));
/************************************************************//**
Creates the root node for a new index tree.
@return page number of the created root, FIL_NULL if did not succeed */
@@ -402,7 +402,7 @@ btr_create(
index_id_t index_id,/*!< in: index id */
dict_index_t* index, /*!< in: index */
mtr_t* mtr) /*!< in: mini-transaction handle */
- __attribute__((nonnull));
+ MY_ATTRIBUTE((nonnull));
/************************************************************//**
Frees a B-tree except the root page, which MUST be freed after this
by calling btr_free_root. */
@@ -425,7 +425,7 @@ btr_free_root(
or 0 for uncompressed pages */
ulint root_page_no, /*!< in: root page number */
mtr_t* mtr) /*!< in/out: mini-transaction */
- __attribute__((nonnull));
+ MY_ATTRIBUTE((nonnull));
/*************************************************************//**
Makes tree one level higher by splitting the root, and inserts
the tuple. It is assumed that mtr contains an x-latch on the tree.
@@ -474,7 +474,7 @@ btr_page_reorganize_low(
page_cur_t* cursor, /*!< in/out: page cursor */
dict_index_t* index, /*!< in: the index tree of the page */
mtr_t* mtr) /*!< in/out: mini-transaction */
- __attribute__((nonnull, warn_unused_result));
+ MY_ATTRIBUTE((nonnull, warn_unused_result));
/*************************************************************//**
Reorganizes an index page.
@@ -493,7 +493,7 @@ btr_page_reorganize(
page_cur_t* cursor, /*!< in/out: page cursor */
dict_index_t* index, /*!< in: the index tree of the page */
mtr_t* mtr) /*!< in/out: mini-transaction */
- __attribute__((nonnull));
+ MY_ATTRIBUTE((nonnull));
/*************************************************************//**
Decides if the page should be split at the convergence point of
inserts converging to left.
@@ -506,7 +506,7 @@ btr_page_get_split_rec_to_left(
rec_t** split_rec)/*!< out: if split recommended,
the first record on upper half page,
or NULL if tuple should be first */
- __attribute__((nonnull, warn_unused_result));
+ MY_ATTRIBUTE((nonnull, warn_unused_result));
/*************************************************************//**
Decides if the page should be split at the convergence point of
inserts converging to right.
@@ -519,7 +519,7 @@ btr_page_get_split_rec_to_right(
rec_t** split_rec)/*!< out: if split recommended,
the first record on upper half page,
or NULL if tuple should be first */
- __attribute__((nonnull, warn_unused_result));
+ MY_ATTRIBUTE((nonnull, warn_unused_result));
/*************************************************************//**
Splits an index page to halves and inserts the tuple. It is assumed
that mtr holds an x-latch to the index tree. NOTE: the tree x-latch is
@@ -558,7 +558,7 @@ btr_insert_on_non_leaf_level_func(
const char* file, /*!< in: file name */
ulint line, /*!< in: line where called */
mtr_t* mtr) /*!< in: mtr */
- __attribute__((nonnull));
+ MY_ATTRIBUTE((nonnull));
# define btr_insert_on_non_leaf_level(f,i,l,t,m) \
btr_insert_on_non_leaf_level_func(f,i,l,t,__FILE__,__LINE__,m)
#endif /* !UNIV_HOTBACKUP */
@@ -570,7 +570,7 @@ btr_set_min_rec_mark(
/*=================*/
rec_t* rec, /*!< in/out: record */
mtr_t* mtr) /*!< in: mtr */
- __attribute__((nonnull));
+ MY_ATTRIBUTE((nonnull));
#ifndef UNIV_HOTBACKUP
/*************************************************************//**
Deletes on the upper level the node pointer to a page. */
@@ -581,7 +581,7 @@ btr_node_ptr_delete(
dict_index_t* index, /*!< in: index tree */
buf_block_t* block, /*!< in: page whose node pointer is deleted */
mtr_t* mtr) /*!< in: mtr */
- __attribute__((nonnull));
+ MY_ATTRIBUTE((nonnull));
#ifdef UNIV_DEBUG
/************************************************************//**
Checks that the node pointer to a page is appropriate.
@@ -593,7 +593,7 @@ btr_check_node_ptr(
dict_index_t* index, /*!< in: index tree */
buf_block_t* block, /*!< in: index page */
mtr_t* mtr) /*!< in: mtr */
- __attribute__((nonnull, warn_unused_result));
+ MY_ATTRIBUTE((nonnull, warn_unused_result));
#endif /* UNIV_DEBUG */
/*************************************************************//**
Tries to merge the page first to the left immediate brother if such a
@@ -616,7 +616,7 @@ btr_compress(
ibool adjust, /*!< in: TRUE if should adjust the
cursor position even if compression occurs */
mtr_t* mtr) /*!< in/out: mini-transaction */
- __attribute__((nonnull));
+ MY_ATTRIBUTE((nonnull));
/*************************************************************//**
Discards a page from a B-tree. This is used to remove the last record from
a B-tree page: the whole page must be removed at the same time. This cannot
@@ -628,7 +628,7 @@ btr_discard_page(
btr_cur_t* cursor, /*!< in: cursor on the page to discard: not on
the root page */
mtr_t* mtr) /*!< in: mtr */
- __attribute__((nonnull));
+ MY_ATTRIBUTE((nonnull));
#endif /* !UNIV_HOTBACKUP */
/****************************************************************//**
Parses the redo log record for setting an index record as the predefined
@@ -643,7 +643,7 @@ btr_parse_set_min_rec_mark(
ulint comp, /*!< in: nonzero=compact page format */
page_t* page, /*!< in: page or NULL */
mtr_t* mtr) /*!< in: mtr or NULL */
- __attribute__((nonnull(1,2), warn_unused_result));
+ MY_ATTRIBUTE((nonnull(1,2), warn_unused_result));
/***********************************************************//**
Parses a redo log record of reorganizing a page.
@return end of log record or NULL */
@@ -657,7 +657,7 @@ btr_parse_page_reorganize(
bool compressed,/*!< in: true if compressed page */
buf_block_t* block, /*!< in: page to be reorganized, or NULL */
mtr_t* mtr) /*!< in: mtr or NULL */
- __attribute__((nonnull(1,2,3), warn_unused_result));
+ MY_ATTRIBUTE((nonnull(1,2,3), warn_unused_result));
#ifndef UNIV_HOTBACKUP
/**************************************************************//**
Gets the number of pages in a B-tree.
@@ -670,7 +670,7 @@ btr_get_size(
ulint flag, /*!< in: BTR_N_LEAF_PAGES or BTR_TOTAL_SIZE */
mtr_t* mtr) /*!< in/out: mini-transaction where index
is s-latched */
- __attribute__((nonnull, warn_unused_result));
+ MY_ATTRIBUTE((nonnull, warn_unused_result));
/**************************************************************//**
Gets the number of reserved and used pages in a B-tree.
@return number of pages reserved, or ULINT_UNDEFINED if the index
@@ -708,7 +708,7 @@ btr_page_alloc(
mtr_t* init_mtr) /*!< in/out: mini-transaction
for x-latching and initializing
the page */
- __attribute__((nonnull, warn_unused_result));
+ MY_ATTRIBUTE((nonnull, warn_unused_result));
/**************************************************************//**
Frees a file page used in an index tree. NOTE: cannot free field external
storage pages because the page must contain info on its level. */
@@ -719,7 +719,7 @@ btr_page_free(
dict_index_t* index, /*!< in: index tree */
buf_block_t* block, /*!< in: block to be freed, x-latched */
mtr_t* mtr) /*!< in: mtr */
- __attribute__((nonnull));
+ MY_ATTRIBUTE((nonnull));
/**************************************************************//**
Frees a file page used in an index tree. Can be used also to BLOB
external storage pages, because the page level 0 can be given as an
@@ -769,7 +769,7 @@ void
btr_print_size(
/*===========*/
dict_index_t* index) /*!< in: index tree */
- __attribute__((nonnull));
+ MY_ATTRIBUTE((nonnull));
/**************************************************************//**
Prints directories and other info of all nodes in the index. */
UNIV_INTERN
@@ -779,7 +779,7 @@ btr_print_index(
dict_index_t* index, /*!< in: index */
ulint width) /*!< in: print this many entries from start
and end */
- __attribute__((nonnull));
+ MY_ATTRIBUTE((nonnull));
#endif /* UNIV_BTR_PRINT */
/************************************************************//**
Checks the size and number of fields in a record based on the definition of
@@ -794,7 +794,7 @@ btr_index_rec_validate(
ibool dump_on_error) /*!< in: TRUE if the function
should print hex dump of record
and page on error */
- __attribute__((nonnull, warn_unused_result));
+ MY_ATTRIBUTE((nonnull, warn_unused_result));
/**************************************************************//**
Checks the consistency of an index tree.
@return DB_SUCCESS if ok, error code if not */
@@ -804,7 +804,7 @@ btr_validate_index(
/*===============*/
dict_index_t* index, /*!< in: index */
const trx_t* trx) /*!< in: transaction or 0 */
- __attribute__((nonnull(1), warn_unused_result));
+ MY_ATTRIBUTE((nonnull(1), warn_unused_result));
#ifdef UNIV_SYNC_DEBUG
/*************************************************************//**
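Every hunk in btr0btr.h is the same mechanical rename, so the annotations keep their established meaning: nonnull lets the compiler warn when a caller passes a literal NULL for a pointer parameter, and warn_unused_result warns when a return value (here, typically a dberr_t or a validation flag) is silently dropped. A hypothetical, compile-only illustration; check_page is invented for the example and is not part of this patch:

#include <stddef.h>

#if defined(__GNUC__)
# define MY_ATTRIBUTE(A) __attribute__(A)
#else
# define MY_ATTRIBUTE(A)
#endif

/* invented declaration, for illustration only */
int check_page(const void *page) MY_ATTRIBUTE((nonnull, warn_unused_result));

void caller(void)
{
    check_page(NULL); /* GCC/Clang warn twice: NULL reaches a nonnull
                         parameter, and the result is discarded */
}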
diff --git a/storage/innobase/include/btr0btr.ic b/storage/innobase/include/btr0btr.ic
index 5fc621f469d..64b3d5a0975 100644
--- a/storage/innobase/include/btr0btr.ic
+++ b/storage/innobase/include/btr0btr.ic
@@ -1,7 +1,7 @@
/*****************************************************************************
-Copyright (c) 1994, 2012, Oracle and/or its affiliates. All Rights Reserved.
-Copyright (c) 2015, MariaDB Corporation.
+Copyright (c) 1994, 2016, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 2015, 2016, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -165,10 +165,11 @@ ulint
btr_page_get_next(
/*==============*/
const page_t* page, /*!< in: index page */
- mtr_t* mtr __attribute__((unused)))
+ mtr_t* mtr MY_ATTRIBUTE((unused)))
/*!< in: mini-transaction handle */
{
- ut_ad(page && mtr);
+ ut_ad(page != NULL);
+ ut_ad(mtr != NULL);
#ifndef UNIV_INNOCHECKSUM
ut_ad(mtr_memo_contains_page(mtr, page, MTR_MEMO_PAGE_X_FIX)
|| mtr_memo_contains_page(mtr, page, MTR_MEMO_PAGE_S_FIX));
@@ -188,7 +189,8 @@ btr_page_set_next(
ulint next, /*!< in: next page number */
mtr_t* mtr) /*!< in: mini-transaction handle */
{
- ut_ad(page && mtr);
+ ut_ad(page != NULL);
+ ut_ad(mtr != NULL);
if (page_zip) {
mach_write_to_4(page + FIL_PAGE_NEXT, next);
@@ -206,9 +208,10 @@ ulint
btr_page_get_prev(
/*==============*/
const page_t* page, /*!< in: index page */
- mtr_t* mtr __attribute__((unused))) /*!< in: mini-transaction handle */
+ mtr_t* mtr MY_ATTRIBUTE((unused))) /*!< in: mini-transaction handle */
{
- ut_ad(page && mtr);
+ ut_ad(page != NULL);
+ ut_ad(mtr != NULL);
return(mach_read_from_4(page + FIL_PAGE_PREV));
}
@@ -225,7 +228,8 @@ btr_page_set_prev(
ulint prev, /*!< in: previous page number */
mtr_t* mtr) /*!< in: mini-transaction handle */
{
- ut_ad(page && mtr);
+ ut_ad(page != NULL);
+ ut_ad(mtr != NULL);
if (page_zip) {
mach_write_to_4(page + FIL_PAGE_PREV, prev);
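Besides the rename, the only substantive edits in btr0btr.ic split the compound assertion ut_ad(page && mtr) into two separate assertions. ut_ad reports the text of the failed expression, so the split form tells a crash log exactly which pointer was NULL instead of reporting the whole conjunction. In sketch form:

/* before: one message covers two possible causes */
ut_ad(page && mtr);   /* a failure prints "page && mtr" */

/* after: the failing assertion names the culprit */
ut_ad(page != NULL);  /* a failure prints "page != NULL" */
ut_ad(mtr != NULL);   /* a failure prints "mtr != NULL" */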
diff --git a/storage/innobase/include/btr0cur.h b/storage/innobase/include/btr0cur.h
index 88e3b84e04b..aa799e0fc00 100644
--- a/storage/innobase/include/btr0cur.h
+++ b/storage/innobase/include/btr0cur.h
@@ -1,6 +1,6 @@
/*****************************************************************************
-Copyright (c) 1994, 2014, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 1994, 2016, Oracle and/or its affiliates. All Rights Reserved.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -186,7 +186,7 @@ btr_cur_open_at_index_side_func(
const char* file, /*!< in: file name */
ulint line, /*!< in: line where called */
mtr_t* mtr) /*!< in/out: mini-transaction */
- __attribute__((nonnull));
+ MY_ATTRIBUTE((nonnull));
#define btr_cur_open_at_index_side(f,i,l,c,lv,m) \
btr_cur_open_at_index_side_func(f,i,l,c,lv,__FILE__,__LINE__,m)
/**********************************************************************//**
@@ -235,7 +235,7 @@ btr_cur_optimistic_insert(
compressed tablespace, the caller must
mtr_commit(mtr) before latching
any further pages */
- __attribute__((nonnull(2,3,4,5,6,7,10), warn_unused_result));
+ MY_ATTRIBUTE((nonnull(2,3,4,5,6,7,10), warn_unused_result));
/*************************************************************//**
Performs an insert on a page of an index tree. It is assumed that mtr
holds an x-latch on the tree and on the cursor page. If the insert is
@@ -266,7 +266,7 @@ btr_cur_pessimistic_insert(
ulint n_ext, /*!< in: number of externally stored columns */
que_thr_t* thr, /*!< in: query thread or NULL */
mtr_t* mtr) /*!< in/out: mini-transaction */
- __attribute__((nonnull(2,3,4,5,6,7,10), warn_unused_result));
+ MY_ATTRIBUTE((nonnull(2,3,4,5,6,7,10), warn_unused_result));
/*************************************************************//**
See if there is enough place in the page modification log to log
an update-in-place.
@@ -293,7 +293,7 @@ btr_cur_update_alloc_zip_func(
bool create, /*!< in: true=delete-and-insert,
false=update-in-place */
mtr_t* mtr) /*!< in/out: mini-transaction */
- __attribute__((nonnull, warn_unused_result));
+ MY_ATTRIBUTE((nonnull, warn_unused_result));
#ifdef UNIV_DEBUG
# define btr_cur_update_alloc_zip(page_zip,cursor,index,offsets,len,cr,mtr) \
btr_cur_update_alloc_zip_func(page_zip,cursor,index,offsets,len,cr,mtr)
@@ -325,7 +325,7 @@ btr_cur_update_in_place(
is a secondary index, the caller must
mtr_commit(mtr) before latching any
further pages */
- __attribute__((warn_unused_result, nonnull));
+ MY_ATTRIBUTE((warn_unused_result, nonnull));
/***********************************************************//**
Writes a redo log record of updating a record in-place. */
UNIV_INTERN
@@ -339,7 +339,7 @@ btr_cur_update_in_place_log(
trx_id_t trx_id, /*!< in: transaction id */
roll_ptr_t roll_ptr, /*!< in: roll ptr */
mtr_t* mtr) /*!< in: mtr */
- __attribute__((nonnull));
+ MY_ATTRIBUTE((nonnull));
/*************************************************************//**
Tries to update a record on a page in an index tree. It is assumed that mtr
holds an x-latch on the page. The operation does not succeed if there is too
@@ -371,7 +371,7 @@ btr_cur_optimistic_update(
is a secondary index, the caller must
mtr_commit(mtr) before latching any
further pages */
- __attribute__((warn_unused_result, nonnull));
+ MY_ATTRIBUTE((warn_unused_result, nonnull));
/*************************************************************//**
Performs an update of a record on a page of a tree. It is assumed
that mtr holds an x-latch on the tree and on the cursor page. If the
@@ -405,7 +405,7 @@ btr_cur_pessimistic_update(
trx_id_t trx_id, /*!< in: transaction id */
mtr_t* mtr) /*!< in/out: mini-transaction; must be committed
before latching any further pages */
- __attribute__((warn_unused_result, nonnull));
+ MY_ATTRIBUTE((warn_unused_result, nonnull));
/***********************************************************//**
Marks a clustered index record deleted. Writes an undo log record to
undo log on this delete marking. Writes in the trx id field the id
@@ -422,7 +422,7 @@ btr_cur_del_mark_set_clust_rec(
const ulint* offsets,/*!< in: rec_get_offsets(rec) */
que_thr_t* thr, /*!< in: query thread */
mtr_t* mtr) /*!< in/out: mini-transaction */
- __attribute__((nonnull, warn_unused_result));
+ MY_ATTRIBUTE((nonnull, warn_unused_result));
/***********************************************************//**
Sets a secondary index record delete mark to TRUE or FALSE.
@return DB_SUCCESS, DB_LOCK_WAIT, or error number */
@@ -435,7 +435,7 @@ btr_cur_del_mark_set_sec_rec(
ibool val, /*!< in: value to set */
que_thr_t* thr, /*!< in: query thread */
mtr_t* mtr) /*!< in/out: mini-transaction */
- __attribute__((nonnull, warn_unused_result));
+ MY_ATTRIBUTE((nonnull, warn_unused_result));
/*************************************************************//**
Tries to compress a page of the tree if it seems useful. It is assumed
that mtr holds an x-latch on the tree and on the cursor page. To avoid
@@ -453,7 +453,7 @@ btr_cur_compress_if_useful(
ibool adjust, /*!< in: TRUE if should adjust the
cursor position even if compression occurs */
mtr_t* mtr) /*!< in/out: mini-transaction */
- __attribute__((nonnull));
+ MY_ATTRIBUTE((nonnull));
/*******************************************************//**
Removes the record on which the tree cursor is positioned. It is assumed
that the mtr has an x-latch on the page where the cursor is positioned,
@@ -474,7 +474,7 @@ btr_cur_optimistic_delete_func(
TRUE on a leaf page of a secondary
index, the mtr must be committed
before latching any further pages */
- __attribute__((nonnull, warn_unused_result));
+ MY_ATTRIBUTE((nonnull, warn_unused_result));
# ifdef UNIV_DEBUG
# define btr_cur_optimistic_delete(cursor, flags, mtr) \
btr_cur_optimistic_delete_func(cursor, flags, mtr)
@@ -510,7 +510,7 @@ btr_cur_pessimistic_delete(
ulint flags, /*!< in: BTR_CREATE_FLAG or 0 */
enum trx_rb_ctx rb_ctx, /*!< in: rollback context */
mtr_t* mtr) /*!< in: mtr */
- __attribute__((nonnull));
+ MY_ATTRIBUTE((nonnull));
#endif /* !UNIV_HOTBACKUP */
/***********************************************************//**
Parses a redo log record of updating a record in-place.
@@ -604,7 +604,7 @@ btr_cur_disown_inherited_fields(
const ulint* offsets,/*!< in: array returned by rec_get_offsets() */
const upd_t* update, /*!< in: update vector */
mtr_t* mtr) /*!< in/out: mini-transaction */
- __attribute__((nonnull(2,3,4,5,6)));
+ MY_ATTRIBUTE((nonnull(2,3,4,5,6)));
/** Operation code for btr_store_big_rec_extern_fields(). */
enum blob_op {
@@ -624,7 +624,7 @@ ibool
btr_blob_op_is_update(
/*==================*/
enum blob_op op) /*!< in: operation */
- __attribute__((warn_unused_result));
+ MY_ATTRIBUTE((warn_unused_result));
/*******************************************************************//**
Stores the fields in big_rec_vec to the tablespace and puts pointers to
@@ -649,7 +649,7 @@ btr_store_big_rec_extern_fields(
mtr_t* btr_mtr, /*!< in: mtr containing the
latches to the clustered index */
enum blob_op op) /*! in: operation code */
- __attribute__((nonnull, warn_unused_result));
+ MY_ATTRIBUTE((nonnull, warn_unused_result));
/*******************************************************************//**
Frees the space in an externally stored field to the file space
@@ -746,7 +746,7 @@ btr_push_update_extern_fields(
dtuple_t* tuple, /*!< in/out: data tuple */
const upd_t* update, /*!< in: update vector */
mem_heap_t* heap) /*!< in: memory heap */
- __attribute__((nonnull));
+ MY_ATTRIBUTE((nonnull));
/***********************************************************//**
Sets a secondary index record's delete mark to the given value. This
function is only used by the insert buffer merge mechanism. */
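Several declarations in btr0cur.h use the positional form, e.g. MY_ATTRIBUTE((nonnull(2,3,4,5,6,7,10))). The 1-based indices mark only the listed parameters as never-NULL, which matters here because parameters such as a query thread or a memory heap may legitimately be NULL. A toy declaration, assuming the MY_ATTRIBUTE wrapper sketched earlier:

/* only the second parameter must be non-NULL;
   the first may be NULL by design */
void copy_row(void *heap_or_null, const void *rec)
    MY_ATTRIBUTE((nonnull(2)));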
diff --git a/storage/innobase/include/btr0pcur.h b/storage/innobase/include/btr0pcur.h
index d8e7cf6b283..dafe14ce556 100644
--- a/storage/innobase/include/btr0pcur.h
+++ b/storage/innobase/include/btr0pcur.h
@@ -1,6 +1,6 @@
/*****************************************************************************
-Copyright (c) 1996, 2013, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 1996, 2016, Oracle and/or its affiliates. All Rights Reserved.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -155,7 +155,7 @@ btr_pcur_open_at_index_side(
ulint level, /*!< in: level to search for
(0=leaf) */
mtr_t* mtr) /*!< in/out: mini-transaction */
- __attribute__((nonnull));
+ MY_ATTRIBUTE((nonnull));
/**************************************************************//**
Gets the up_match value for a pcur after a search.
@return number of matched fields at the cursor or to the right if
diff --git a/storage/innobase/include/btr0sea.h b/storage/innobase/include/btr0sea.h
index 848bde451a0..c95ca28057a 100644
--- a/storage/innobase/include/btr0sea.h
+++ b/storage/innobase/include/btr0sea.h
@@ -1,6 +1,6 @@
/*****************************************************************************
-Copyright (c) 1996, 2011, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 1996, 2016, Oracle and/or its affiliates. All Rights Reserved.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -69,7 +69,7 @@ btr_search_t*
btr_search_get_info(
/*================*/
dict_index_t* index) /*!< in: index */
- __attribute__((nonnull));
+ MY_ATTRIBUTE((nonnull));
/*****************************************************************//**
Creates and initializes a search info struct.
@return own: search info struct */
diff --git a/storage/innobase/include/btr0types.h b/storage/innobase/include/btr0types.h
index c1a4531f861..04b69d8145c 100644
--- a/storage/innobase/include/btr0types.h
+++ b/storage/innobase/include/btr0types.h
@@ -1,6 +1,6 @@
/*****************************************************************************
-Copyright (c) 1996, 2011, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 1996, 2016, Oracle and/or its affiliates. All Rights Reserved.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -81,7 +81,7 @@ btr_blob_dbg_rbt_insert(
dict_index_t* index, /*!< in/out: index tree */
const btr_blob_dbg_t* b, /*!< in: the reference */
const char* ctx) /*!< in: context (for logging) */
- __attribute__((nonnull));
+ MY_ATTRIBUTE((nonnull));
/** Remove from index->blobs a reference to an off-page column.
@param index the index tree
@@ -94,7 +94,7 @@ btr_blob_dbg_rbt_delete(
dict_index_t* index, /*!< in/out: index tree */
const btr_blob_dbg_t* b, /*!< in: the reference */
const char* ctx) /*!< in: context (for logging) */
- __attribute__((nonnull));
+ MY_ATTRIBUTE((nonnull));
/**************************************************************//**
Add to index->blobs any references to off-page columns from a record.
@@ -107,7 +107,7 @@ btr_blob_dbg_add_rec(
dict_index_t* index, /*!< in/out: index */
const ulint* offsets,/*!< in: offsets */
const char* ctx) /*!< in: context (for logging) */
- __attribute__((nonnull));
+ MY_ATTRIBUTE((nonnull));
/**************************************************************//**
Remove from index->blobs any references to off-page columns from a record.
@return number of references removed */
@@ -119,7 +119,7 @@ btr_blob_dbg_remove_rec(
dict_index_t* index, /*!< in/out: index */
const ulint* offsets,/*!< in: offsets */
const char* ctx) /*!< in: context (for logging) */
- __attribute__((nonnull));
+ MY_ATTRIBUTE((nonnull));
/**************************************************************//**
Count and add to index->blobs any references to off-page columns
from records on a page.
@@ -131,7 +131,7 @@ btr_blob_dbg_add(
const page_t* page, /*!< in: rewritten page */
dict_index_t* index, /*!< in/out: index */
const char* ctx) /*!< in: context (for logging) */
- __attribute__((nonnull));
+ MY_ATTRIBUTE((nonnull));
/**************************************************************//**
Count and remove from index->blobs any references to off-page columns
from records on a page.
@@ -144,7 +144,7 @@ btr_blob_dbg_remove(
const page_t* page, /*!< in: b-tree page */
dict_index_t* index, /*!< in/out: index */
const char* ctx) /*!< in: context (for logging) */
- __attribute__((nonnull));
+ MY_ATTRIBUTE((nonnull));
/**************************************************************//**
Restore in index->blobs any references to off-page columns
Used when page reorganize fails due to compressed page overflow. */
@@ -156,7 +156,7 @@ btr_blob_dbg_restore(
const page_t* page, /*!< in: copy of original page */
dict_index_t* index, /*!< in/out: index */
const char* ctx) /*!< in: context (for logging) */
- __attribute__((nonnull));
+ MY_ATTRIBUTE((nonnull));
/** Operation that processes the BLOB references of an index record
@param[in] rec record on index page
@@ -180,7 +180,7 @@ btr_blob_dbg_op(
dict_index_t* index, /*!< in/out: index */
const char* ctx, /*!< in: context (for logging) */
const btr_blob_dbg_op_f op) /*!< in: operation on records */
- __attribute__((nonnull(1,3,4,5)));
+ MY_ATTRIBUTE((nonnull(1,3,4,5)));
#else /* UNIV_BLOB_DEBUG */
# define btr_blob_dbg_add_rec(rec, index, offsets, ctx) ((void) 0)
# define btr_blob_dbg_add(page, index, ctx) ((void) 0)
diff --git a/storage/innobase/include/buf0buddy.h b/storage/innobase/include/buf0buddy.h
index fab9a4b828b..7fc4408505d 100644
--- a/storage/innobase/include/buf0buddy.h
+++ b/storage/innobase/include/buf0buddy.h
@@ -1,6 +1,6 @@
/*****************************************************************************
-Copyright (c) 2006, 2011, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 2006, 2016, Oracle and/or its affiliates. All Rights Reserved.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -54,7 +54,7 @@ buf_buddy_alloc(
storage was allocated from the
LRU list and buf_pool->mutex was
temporarily released */
- __attribute__((malloc, nonnull));
+ MY_ATTRIBUTE((malloc, nonnull));
/**********************************************************************//**
Deallocate a block. */
@@ -68,7 +68,7 @@ buf_buddy_free(
be pointed to by the buffer pool */
ulint size) /*!< in: block size,
up to UNIV_PAGE_SIZE */
- __attribute__((nonnull));
+ MY_ATTRIBUTE((nonnull));
#ifndef UNIV_NONINL
# include "buf0buddy.ic"
diff --git a/storage/innobase/include/buf0buddy.ic b/storage/innobase/include/buf0buddy.ic
index be2f950162d..4352ebe8945 100644
--- a/storage/innobase/include/buf0buddy.ic
+++ b/storage/innobase/include/buf0buddy.ic
@@ -1,6 +1,6 @@
/*****************************************************************************
-Copyright (c) 2006, 2011, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 2006, 2016, Oracle and/or its affiliates. All Rights Reserved.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -50,7 +50,7 @@ buf_buddy_alloc_low(
allocated from the LRU list and
buf_pool->mutex was temporarily
released */
- __attribute__((malloc, nonnull));
+ MY_ATTRIBUTE((malloc, nonnull));
/**********************************************************************//**
Deallocate a block. */
@@ -63,7 +63,7 @@ buf_buddy_free_low(
pointed to by the buffer pool */
ulint i) /*!< in: index of buf_pool->zip_free[],
or BUF_BUDDY_SIZES */
- __attribute__((nonnull));
+ MY_ATTRIBUTE((nonnull));
/**********************************************************************//**
Get the index of buf_pool->zip_free[] for a given block size.
diff --git a/storage/innobase/include/buf0buf.h b/storage/innobase/include/buf0buf.h
index 985bddd1317..c737f3a6f1d 100644
--- a/storage/innobase/include/buf0buf.h
+++ b/storage/innobase/include/buf0buf.h
@@ -1,7 +1,7 @@
/*****************************************************************************
-Copyright (c) 1995, 2014, Oracle and/or its affiliates. All Rights Reserved.
-Copyright (c) 2013, 2015, MariaDB Corporation.
+Copyright (c) 1995, 2016, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 2013, 2016, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -256,7 +256,7 @@ buf_relocate(
buf_page_get_state(bpage) must be
BUF_BLOCK_ZIP_DIRTY or BUF_BLOCK_ZIP_PAGE */
buf_page_t* dpage) /*!< in/out: destination control block */
- __attribute__((nonnull));
+ MY_ATTRIBUTE((nonnull));
/*********************************************************************//**
Gets the current size of buffer buf_pool in bytes.
@return size in bytes */
@@ -287,7 +287,7 @@ UNIV_INLINE
buf_page_t*
buf_page_alloc_descriptor(void)
/*===========================*/
- __attribute__((malloc));
+ MY_ATTRIBUTE((malloc));
/********************************************************************//**
Free a buf_page_t descriptor. */
UNIV_INLINE
@@ -295,7 +295,7 @@ void
buf_page_free_descriptor(
/*=====================*/
buf_page_t* bpage) /*!< in: bpage descriptor to free. */
- __attribute__((nonnull));
+ MY_ATTRIBUTE((nonnull));
/********************************************************************//**
Allocates a buffer block.
@@ -536,7 +536,7 @@ ulint
buf_page_get_freed_page_clock(
/*==========================*/
const buf_page_t* bpage) /*!< in: block */
- __attribute__((pure));
+ MY_ATTRIBUTE((pure));
/********************************************************************//**
Reads the freed_page_clock of a buffer block.
@return freed_page_clock */
@@ -545,7 +545,7 @@ ulint
buf_block_get_freed_page_clock(
/*===========================*/
const buf_block_t* block) /*!< in: block */
- __attribute__((pure));
+ MY_ATTRIBUTE((pure));
/********************************************************************//**
Tells if a block is still close enough to the MRU end of the LRU list
@@ -608,7 +608,7 @@ buf_block_buf_fix_inc_func(
ulint line, /*!< in: line */
# endif /* UNIV_SYNC_DEBUG */
buf_block_t* block) /*!< in/out: block to bufferfix */
- __attribute__((nonnull));
+ MY_ATTRIBUTE((nonnull));
/*******************************************************************//**
Increments the bufferfix count. */
@@ -654,7 +654,7 @@ buf_page_is_corrupted(
const byte* read_buf, /*!< in: a database page */
ulint zip_size) /*!< in: size of compressed page;
0 for uncompressed pages */
- __attribute__((nonnull, warn_unused_result));
+ MY_ATTRIBUTE((nonnull, warn_unused_result));
/********************************************************************//**
Checks if a page is all zeroes.
@return TRUE if the page is all zeroes */
@@ -684,7 +684,7 @@ ulint
buf_block_get_lock_hash_val(
/*========================*/
const buf_block_t* block) /*!< in: block */
- __attribute__((pure));
+ MY_ATTRIBUTE((pure));
#ifdef UNIV_DEBUG
/*********************************************************************//**
Finds a block in the buffer pool that points to a
@@ -745,7 +745,7 @@ buf_page_print(
ulint flags) /*!< in: 0 or
BUF_PAGE_PRINT_NO_CRASH or
BUF_PAGE_PRINT_NO_FULL */
- UNIV_COLD __attribute__((nonnull));
+ UNIV_COLD MY_ATTRIBUTE((nonnull));
/********************************************************************//**
Decompress a block.
@return TRUE if successful */
@@ -882,7 +882,7 @@ enum buf_page_state
buf_block_get_state(
/*================*/
const buf_block_t* block) /*!< in: pointer to the control block */
- __attribute__((pure));
+ MY_ATTRIBUTE((pure));
/*********************************************************************//**
Sets the state of a block. */
UNIV_INLINE
@@ -907,7 +907,7 @@ ibool
buf_page_in_file(
/*=============*/
const buf_page_t* bpage) /*!< in: pointer to control block */
- __attribute__((pure));
+ MY_ATTRIBUTE((pure));
#ifndef UNIV_HOTBACKUP
/*********************************************************************//**
Determines if a block should be on unzip_LRU list.
@@ -917,7 +917,7 @@ ibool
buf_page_belongs_to_unzip_LRU(
/*==========================*/
const buf_page_t* bpage) /*!< in: pointer to control block */
- __attribute__((pure));
+ MY_ATTRIBUTE((pure));
/*********************************************************************//**
Gets the mutex of a block.
@@ -927,7 +927,7 @@ ib_mutex_t*
buf_page_get_mutex(
/*===============*/
const buf_page_t* bpage) /*!< in: pointer to control block */
- __attribute__((pure));
+ MY_ATTRIBUTE((pure));
/*********************************************************************//**
Get the flush type of a page.
@@ -937,7 +937,7 @@ buf_flush_t
buf_page_get_flush_type(
/*====================*/
const buf_page_t* bpage) /*!< in: buffer page */
- __attribute__((pure));
+ MY_ATTRIBUTE((pure));
/*********************************************************************//**
Set the flush type of a page. */
UNIV_INLINE
@@ -963,7 +963,7 @@ enum buf_io_fix
buf_page_get_io_fix(
/*================*/
const buf_page_t* bpage) /*!< in: pointer to the control block */
- __attribute__((pure));
+ MY_ATTRIBUTE((pure));
/*********************************************************************//**
Gets the io_fix state of a block.
@return io_fix state */
@@ -972,7 +972,7 @@ enum buf_io_fix
buf_block_get_io_fix(
/*================*/
const buf_block_t* block) /*!< in: pointer to the control block */
- __attribute__((pure));
+ MY_ATTRIBUTE((pure));
/*********************************************************************//**
Sets the io_fix state of a block. */
UNIV_INLINE
@@ -1018,7 +1018,7 @@ ibool
buf_page_can_relocate(
/*==================*/
const buf_page_t* bpage) /*!< control block being relocated */
- __attribute__((pure));
+ MY_ATTRIBUTE((pure));
/*********************************************************************//**
Determine if a block has been flagged old.
@@ -1028,7 +1028,7 @@ ibool
buf_page_is_old(
/*============*/
const buf_page_t* bpage) /*!< in: control block */
- __attribute__((pure));
+ MY_ATTRIBUTE((pure));
/*********************************************************************//**
Flag a block old. */
UNIV_INLINE
@@ -1045,7 +1045,7 @@ unsigned
buf_page_is_accessed(
/*=================*/
const buf_page_t* bpage) /*!< in: control block */
- __attribute__((nonnull, pure));
+ MY_ATTRIBUTE((nonnull, pure));
/*********************************************************************//**
Flag a block accessed. */
UNIV_INLINE
@@ -1053,7 +1053,7 @@ void
buf_page_set_accessed(
/*==================*/
buf_page_t* bpage) /*!< in/out: control block */
- __attribute__((nonnull));
+ MY_ATTRIBUTE((nonnull));
/*********************************************************************//**
Gets the buf_block_t handle of a buffered file block if an uncompressed
page frame exists, or NULL. Note: even though bpage is not declared a
@@ -1064,7 +1064,7 @@ buf_block_t*
buf_page_get_block(
/*===============*/
buf_page_t* bpage) /*!< in: control block, or NULL */
- __attribute__((pure));
+ MY_ATTRIBUTE((pure));
#endif /* !UNIV_HOTBACKUP */
#ifdef UNIV_DEBUG
/*********************************************************************//**
@@ -1075,7 +1075,7 @@ buf_frame_t*
buf_block_get_frame(
/*================*/
const buf_block_t* block) /*!< in: pointer to the control block */
- __attribute__((pure));
+ MY_ATTRIBUTE((pure));
#else /* UNIV_DEBUG */
# define buf_block_get_frame(block) (block)->frame
#endif /* UNIV_DEBUG */
@@ -1087,7 +1087,7 @@ ulint
buf_page_get_space(
/*===============*/
const buf_page_t* bpage) /*!< in: pointer to the control block */
- __attribute__((pure));
+ MY_ATTRIBUTE((pure));
/*********************************************************************//**
Gets the space id of a block.
@return space id */
@@ -1096,7 +1096,7 @@ ulint
buf_block_get_space(
/*================*/
const buf_block_t* block) /*!< in: pointer to the control block */
- __attribute__((pure));
+ MY_ATTRIBUTE((pure));
/*********************************************************************//**
Gets the page number of a block.
@return page number */
@@ -1105,7 +1105,7 @@ ulint
buf_page_get_page_no(
/*=================*/
const buf_page_t* bpage) /*!< in: pointer to the control block */
- __attribute__((pure));
+ MY_ATTRIBUTE((pure));
/*********************************************************************//**
Gets the page number of a block.
@return page number */
@@ -1114,7 +1114,7 @@ ulint
buf_block_get_page_no(
/*==================*/
const buf_block_t* block) /*!< in: pointer to the control block */
- __attribute__((pure));
+ MY_ATTRIBUTE((pure));
/*********************************************************************//**
Gets the compressed page size of a block.
@return compressed page size, or 0 */
@@ -1123,7 +1123,7 @@ ulint
buf_page_get_zip_size(
/*==================*/
const buf_page_t* bpage) /*!< in: pointer to the control block */
- __attribute__((pure));
+ MY_ATTRIBUTE((pure));
/*********************************************************************//**
Gets the compressed page size of a block.
@return compressed page size, or 0 */
@@ -1132,7 +1132,7 @@ ulint
buf_block_get_zip_size(
/*===================*/
const buf_block_t* block) /*!< in: pointer to the control block */
- __attribute__((pure));
+ MY_ATTRIBUTE((pure));
/*********************************************************************//**
Gets the compressed page descriptor corresponding to an uncompressed page
if applicable. */
@@ -1223,7 +1223,7 @@ buf_page_address_fold(
/*==================*/
ulint space, /*!< in: space id */
ulint offset) /*!< in: offset of the page within space */
- __attribute__((const));
+ MY_ATTRIBUTE((const));
/********************************************************************//**
Calculates the index of a buffer pool to the buf_pool[] array.
@return the position of the buffer pool in buf_pool[] */
@@ -1232,7 +1232,7 @@ ulint
buf_pool_index(
/*===========*/
const buf_pool_t* buf_pool) /*!< in: buffer pool */
- __attribute__((nonnull, const));
+ MY_ATTRIBUTE((nonnull, const));
/******************************************************************//**
Returns the buffer pool instance given a page instance
@return buf_pool */
@@ -1372,7 +1372,7 @@ buf_pool_watch_is_sentinel(
/*=======================*/
buf_pool_t* buf_pool, /*!< buffer pool instance */
const buf_page_t* bpage) /*!< in: block */
- __attribute__((nonnull, warn_unused_result));
+ MY_ATTRIBUTE((nonnull, warn_unused_result));
/****************************************************************//**
Add watch for the given page to be read in. Caller must have the buffer pool
@return NULL if watch set, block if the page is in the buffer pool */
@@ -1383,7 +1383,7 @@ buf_pool_watch_set(
ulint space, /*!< in: space id */
ulint offset, /*!< in: page number */
ulint fold) /*!< in: buf_page_address_fold(space, offset) */
- __attribute__((warn_unused_result));
+ MY_ATTRIBUTE((warn_unused_result));
/****************************************************************//**
Stop watching if the page has been read in.
buf_pool_watch_set(space,offset) must have returned NULL before. */
@@ -1404,7 +1404,7 @@ buf_pool_watch_occurred(
/*====================*/
ulint space, /*!< in: space id */
ulint offset) /*!< in: page number */
- __attribute__((warn_unused_result));
+ MY_ATTRIBUTE((warn_unused_result));
/********************************************************************//**
Get total buffer pool statistics. */
UNIV_INTERN
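buf0buf.h also carries the pure and const annotations over to the new macro. Both declare a function free of side effects, so the compiler may fold repeated calls; const is the stricter promise (the result depends only on the argument values, with no memory reads), which fits buf_page_address_fold, while the accessors that read a control block through a pointer can only be pure. A sketch under those definitions:

typedef unsigned long ulint;  /* stand-in for InnoDB's ulint */

/* result computed from the arguments alone: ((const)) */
ulint fold(ulint space, ulint offset) MY_ATTRIBUTE((const));

/* reads through the pointer but writes nothing: ((pure)) */
ulint get_state(const void *block) MY_ATTRIBUTE((pure));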
diff --git a/storage/innobase/include/buf0flu.h b/storage/innobase/include/buf0flu.h
index 3ab3f7c308a..f1ca1039ccb 100644
--- a/storage/innobase/include/buf0flu.h
+++ b/storage/innobase/include/buf0flu.h
@@ -1,7 +1,7 @@
/*****************************************************************************
-Copyright (c) 1995, 2013, Oracle and/or its affiliates. All Rights Reserved.
-Copyright (c) 2014, 2014, SkySQL Ab.
+Copyright (c) 1995, 2016, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 2014, 2016, MariaDB Corporation
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -97,7 +97,7 @@ buf_flush_page_try(
/*===============*/
buf_pool_t* buf_pool, /*!< in/out: buffer pool instance */
buf_block_t* block) /*!< in/out: buffer control block */
- __attribute__((nonnull, warn_unused_result));
+ MY_ATTRIBUTE((nonnull, warn_unused_result));
# endif /* UNIV_DEBUG || UNIV_IBUF_DEBUG */
/*******************************************************************//**
This utility flushes dirty blocks from the end of the flush list of
@@ -266,7 +266,7 @@ buf_flush_ready_for_flush(
buf_page_t* bpage, /*!< in: buffer control block, must be
buf_page_in_file(bpage) */
buf_flush_t flush_type)/*!< in: type of flush */
- __attribute__((warn_unused_result));
+ MY_ATTRIBUTE((warn_unused_result));
#ifdef UNIV_DEBUG
/******************************************************************//**
diff --git a/storage/innobase/include/buf0lru.h b/storage/innobase/include/buf0lru.h
index f1f6abd2d68..a7a65df33aa 100644
--- a/storage/innobase/include/buf0lru.h
+++ b/storage/innobase/include/buf0lru.h
@@ -1,6 +1,6 @@
/*****************************************************************************
-Copyright (c) 1995, 2013, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 1995, 2016, Oracle and/or its affiliates. All Rights Reserved.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -93,7 +93,7 @@ buf_LRU_free_page(
buf_page_t* bpage, /*!< in: block to be freed */
bool zip) /*!< in: true if should remove also the
compressed page of an uncompressed page */
- __attribute__((nonnull));
+ MY_ATTRIBUTE((nonnull));
/******************************************************************//**
Try to free a replaceable block.
@return TRUE if found and freed */
@@ -105,7 +105,7 @@ buf_LRU_scan_and_free_block(
ibool scan_all) /*!< in: scan whole LRU list
if TRUE, otherwise scan only
'old' blocks. */
- __attribute__((nonnull,warn_unused_result));
+ MY_ATTRIBUTE((nonnull,warn_unused_result));
/******************************************************************//**
Returns a free block from the buf_pool. The block is taken off the
free list. If it is empty, returns NULL.
@@ -144,7 +144,7 @@ buf_block_t*
buf_LRU_get_free_block(
/*===================*/
buf_pool_t* buf_pool) /*!< in/out: buffer pool instance */
- __attribute__((nonnull,warn_unused_result));
+ MY_ATTRIBUTE((nonnull,warn_unused_result));
/******************************************************************//**
Determines if the unzip_LRU list should be used for evicting a victim
instead of the general LRU list.
@@ -227,7 +227,7 @@ buf_LRU_free_one_page(
buf_page_t* bpage) /*!< in/out: block, must contain a file page and
be in a state where it can be freed; there
may or may not be a hash index to the page */
- __attribute__((nonnull));
+ MY_ATTRIBUTE((nonnull));
/******************************************************************//**
Adjust LRU hazard pointers if needed. */
diff --git a/storage/innobase/include/data0data.h b/storage/innobase/include/data0data.h
index a548c7b89b3..1d954bfc07c 100644
--- a/storage/innobase/include/data0data.h
+++ b/storage/innobase/include/data0data.h
@@ -1,6 +1,6 @@
/*****************************************************************************
-Copyright (c) 1994, 2012, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 1994, 2016, Oracle and/or its affiliates. All Rights Reserved.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -46,7 +46,7 @@ dtype_t*
dfield_get_type(
/*============*/
const dfield_t* field) /*!< in: SQL data field */
- __attribute__((nonnull, warn_unused_result));
+ MY_ATTRIBUTE((nonnull, warn_unused_result));
/*********************************************************************//**
Gets pointer to the data in a field.
@return pointer to data */
@@ -55,7 +55,7 @@ void*
dfield_get_data(
/*============*/
const dfield_t* field) /*!< in: field */
- __attribute__((nonnull, warn_unused_result));
+ MY_ATTRIBUTE((nonnull, warn_unused_result));
#else /* UNIV_DEBUG */
# define dfield_get_type(field) (&(field)->type)
# define dfield_get_data(field) ((field)->data)
@@ -68,7 +68,7 @@ dfield_set_type(
/*============*/
dfield_t* field, /*!< in: SQL data field */
const dtype_t* type) /*!< in: pointer to data type struct */
- __attribute__((nonnull));
+ MY_ATTRIBUTE((nonnull));
/*********************************************************************//**
Gets length of field data.
@return length of data; UNIV_SQL_NULL if SQL null data */
@@ -77,7 +77,7 @@ ulint
dfield_get_len(
/*===========*/
const dfield_t* field) /*!< in: field */
- __attribute__((nonnull, warn_unused_result));
+ MY_ATTRIBUTE((nonnull, warn_unused_result));
/*********************************************************************//**
Sets length in a field. */
UNIV_INLINE
@@ -86,7 +86,7 @@ dfield_set_len(
/*===========*/
dfield_t* field, /*!< in: field */
ulint len) /*!< in: length or UNIV_SQL_NULL */
- __attribute__((nonnull));
+ MY_ATTRIBUTE((nonnull));
/*********************************************************************//**
Determines if a field is SQL NULL
@return nonzero if SQL null data */
@@ -95,7 +95,7 @@ ulint
dfield_is_null(
/*===========*/
const dfield_t* field) /*!< in: field */
- __attribute__((nonnull, warn_unused_result));
+ MY_ATTRIBUTE((nonnull, warn_unused_result));
/*********************************************************************//**
Determines if a field is externally stored
@return nonzero if externally stored */
@@ -104,7 +104,7 @@ ulint
dfield_is_ext(
/*==========*/
const dfield_t* field) /*!< in: field */
- __attribute__((nonnull, warn_unused_result));
+ MY_ATTRIBUTE((nonnull, warn_unused_result));
/*********************************************************************//**
Sets the "external storage" flag */
UNIV_INLINE
@@ -112,7 +112,7 @@ void
dfield_set_ext(
/*===========*/
dfield_t* field) /*!< in/out: field */
- __attribute__((nonnull));
+ MY_ATTRIBUTE((nonnull));
/*********************************************************************//**
Sets pointer to the data and length in a field. */
UNIV_INLINE
@@ -122,7 +122,7 @@ dfield_set_data(
dfield_t* field, /*!< in: field */
const void* data, /*!< in: data */
ulint len) /*!< in: length or UNIV_SQL_NULL */
- __attribute__((nonnull(1)));
+ MY_ATTRIBUTE((nonnull(1)));
/*********************************************************************//**
Sets a data field to SQL NULL. */
UNIV_INLINE
@@ -130,7 +130,7 @@ void
dfield_set_null(
/*============*/
dfield_t* field) /*!< in/out: field */
- __attribute__((nonnull));
+ MY_ATTRIBUTE((nonnull));
/**********************************************************************//**
Writes an SQL null field full of zeros. */
UNIV_INLINE
@@ -139,7 +139,7 @@ data_write_sql_null(
/*================*/
byte* data, /*!< in: pointer to a buffer of size len */
ulint len) /*!< in: SQL null size in bytes */
- __attribute__((nonnull));
+ MY_ATTRIBUTE((nonnull));
/*********************************************************************//**
Copies the data and len fields. */
UNIV_INLINE
@@ -148,7 +148,7 @@ dfield_copy_data(
/*=============*/
dfield_t* field1, /*!< out: field to copy to */
const dfield_t* field2) /*!< in: field to copy from */
- __attribute__((nonnull));
+ MY_ATTRIBUTE((nonnull));
/*********************************************************************//**
Copies a data field to another. */
UNIV_INLINE
@@ -157,7 +157,7 @@ dfield_copy(
/*========*/
dfield_t* field1, /*!< out: field to copy to */
const dfield_t* field2) /*!< in: field to copy from */
- __attribute__((nonnull));
+ MY_ATTRIBUTE((nonnull));
/*********************************************************************//**
Copies the data pointed to by a data field. */
UNIV_INLINE
@@ -166,7 +166,7 @@ dfield_dup(
/*=======*/
dfield_t* field, /*!< in/out: data field */
mem_heap_t* heap) /*!< in: memory heap where allocated */
- __attribute__((nonnull));
+ MY_ATTRIBUTE((nonnull));
#ifndef UNIV_HOTBACKUP
/*********************************************************************//**
Tests if two data fields are equal.
@@ -181,7 +181,7 @@ dfield_datas_are_binary_equal(
const dfield_t* field2, /*!< in: field */
ulint len) /*!< in: maximum prefix to compare,
or 0 to compare the whole field length */
- __attribute__((nonnull, warn_unused_result));
+ MY_ATTRIBUTE((nonnull, warn_unused_result));
/*********************************************************************//**
Tests if dfield data length and content is equal to the given.
@return TRUE if equal */
@@ -192,7 +192,7 @@ dfield_data_is_binary_equal(
const dfield_t* field, /*!< in: field */
ulint len, /*!< in: data length or UNIV_SQL_NULL */
const byte* data) /*!< in: data */
- __attribute__((nonnull, warn_unused_result));
+ MY_ATTRIBUTE((nonnull, warn_unused_result));
#endif /* !UNIV_HOTBACKUP */
/*********************************************************************//**
Gets number of fields in a data tuple.
@@ -202,7 +202,7 @@ ulint
dtuple_get_n_fields(
/*================*/
const dtuple_t* tuple) /*!< in: tuple */
- __attribute__((nonnull, warn_unused_result));
+ MY_ATTRIBUTE((nonnull, warn_unused_result));
#ifdef UNIV_DEBUG
/*********************************************************************//**
Gets nth field of a tuple.
@@ -224,7 +224,7 @@ ulint
dtuple_get_info_bits(
/*=================*/
const dtuple_t* tuple) /*!< in: tuple */
- __attribute__((nonnull, warn_unused_result));
+ MY_ATTRIBUTE((nonnull, warn_unused_result));
/*********************************************************************//**
Sets info bits in a data tuple. */
UNIV_INLINE
@@ -233,7 +233,7 @@ dtuple_set_info_bits(
/*=================*/
dtuple_t* tuple, /*!< in: tuple */
ulint info_bits) /*!< in: info bits */
- __attribute__((nonnull));
+ MY_ATTRIBUTE((nonnull));
/*********************************************************************//**
Gets number of fields used in record comparisons.
@return number of fields used in comparisons in rem0cmp.* */
@@ -242,7 +242,7 @@ ulint
dtuple_get_n_fields_cmp(
/*====================*/
const dtuple_t* tuple) /*!< in: tuple */
- __attribute__((nonnull, warn_unused_result));
+ MY_ATTRIBUTE((nonnull, warn_unused_result));
/*********************************************************************//**
Gets number of fields used in record comparisons. */
UNIV_INLINE
@@ -252,7 +252,7 @@ dtuple_set_n_fields_cmp(
dtuple_t* tuple, /*!< in: tuple */
ulint n_fields_cmp) /*!< in: number of fields used in
comparisons in rem0cmp.* */
- __attribute__((nonnull));
+ MY_ATTRIBUTE((nonnull));
/* Estimate the number of bytes that are going to be allocated when
creating a new dtuple_t object */
@@ -272,7 +272,7 @@ dtuple_create_from_mem(
void* buf, /*!< in, out: buffer to use */
ulint buf_size, /*!< in: buffer size */
ulint n_fields) /*!< in: number of fields */
- __attribute__((nonnull, warn_unused_result));
+ MY_ATTRIBUTE((nonnull, warn_unused_result));
/**********************************************************//**
Creates a data tuple to a memory heap. The default value for number
@@ -286,7 +286,7 @@ dtuple_create(
is created, DTUPLE_EST_ALLOC(n_fields)
bytes will be allocated from this heap */
ulint n_fields)/*!< in: number of fields */
- __attribute__((nonnull, malloc));
+ MY_ATTRIBUTE((nonnull, malloc));
/*********************************************************************//**
Sets number of fields used in a tuple. Normally this is set in
@@ -297,7 +297,7 @@ dtuple_set_n_fields(
/*================*/
dtuple_t* tuple, /*!< in: tuple */
ulint n_fields) /*!< in: number of fields */
- __attribute__((nonnull));
+ MY_ATTRIBUTE((nonnull));
/*********************************************************************//**
Copies a data tuple to another. This is a shallow copy; if a deep copy
is desired, dfield_dup() will have to be invoked on each field.
@@ -309,7 +309,7 @@ dtuple_copy(
const dtuple_t* tuple, /*!< in: tuple to copy from */
mem_heap_t* heap) /*!< in: memory heap
where the tuple is created */
- __attribute__((nonnull, malloc));
+ MY_ATTRIBUTE((nonnull, malloc));
/**********************************************************//**
The following function returns the sum of data lengths of a tuple. The space
occupied by the field structs or the tuple struct is not counted.
@@ -320,7 +320,7 @@ dtuple_get_data_size(
/*=================*/
const dtuple_t* tuple, /*!< in: typed data tuple */
ulint comp) /*!< in: nonzero=ROW_FORMAT=COMPACT */
- __attribute__((nonnull));
+ MY_ATTRIBUTE((nonnull));
/*********************************************************************//**
Computes the number of externally stored fields in a data tuple.
@return number of fields */
@@ -329,7 +329,7 @@ ulint
dtuple_get_n_ext(
/*=============*/
const dtuple_t* tuple) /*!< in: tuple */
- __attribute__((nonnull));
+ MY_ATTRIBUTE((nonnull));
/************************************************************//**
Compare two data tuples, respecting the collation of character fields.
@return 1, 0 , -1 if tuple1 is greater, equal, less, respectively,
@@ -340,7 +340,7 @@ dtuple_coll_cmp(
/*============*/
const dtuple_t* tuple1, /*!< in: tuple 1 */
const dtuple_t* tuple2) /*!< in: tuple 2 */
- __attribute__((nonnull, warn_unused_result));
+ MY_ATTRIBUTE((nonnull, warn_unused_result));
/************************************************************//**
Folds a prefix given as the number of fields of a tuple.
@return the folded value */
@@ -353,7 +353,7 @@ dtuple_fold(
ulint n_bytes,/*!< in: number of bytes to fold in an
incomplete last field */
index_id_t tree_id)/*!< in: index tree id */
- __attribute__((nonnull, pure, warn_unused_result));
+ MY_ATTRIBUTE((nonnull, pure, warn_unused_result));
/*******************************************************************//**
Sets types of fields binary in a tuple. */
UNIV_INLINE
@@ -362,7 +362,7 @@ dtuple_set_types_binary(
/*====================*/
dtuple_t* tuple, /*!< in: data tuple */
ulint n) /*!< in: number of fields to set */
- __attribute__((nonnull));
+ MY_ATTRIBUTE((nonnull));
/**********************************************************************//**
Checks if a dtuple contains an SQL null value.
@return TRUE if some field is SQL null */
@@ -371,7 +371,7 @@ ibool
dtuple_contains_null(
/*=================*/
const dtuple_t* tuple) /*!< in: dtuple */
- __attribute__((nonnull, warn_unused_result));
+ MY_ATTRIBUTE((nonnull, warn_unused_result));
/**********************************************************//**
Checks that a data field is typed. Asserts an error if not.
@return TRUE if ok */
@@ -380,7 +380,7 @@ ibool
dfield_check_typed(
/*===============*/
const dfield_t* field) /*!< in: data field */
- __attribute__((nonnull, warn_unused_result));
+ MY_ATTRIBUTE((nonnull, warn_unused_result));
/**********************************************************//**
Checks that a data tuple is typed. Asserts an error if not.
@return TRUE if ok */
@@ -389,7 +389,7 @@ ibool
dtuple_check_typed(
/*===============*/
const dtuple_t* tuple) /*!< in: tuple */
- __attribute__((nonnull, warn_unused_result));
+ MY_ATTRIBUTE((nonnull, warn_unused_result));
/**********************************************************//**
Checks that a data tuple is typed.
@return TRUE if ok */
@@ -398,7 +398,7 @@ ibool
dtuple_check_typed_no_assert(
/*=========================*/
const dtuple_t* tuple) /*!< in: tuple */
- __attribute__((nonnull, warn_unused_result));
+ MY_ATTRIBUTE((nonnull, warn_unused_result));
#ifdef UNIV_DEBUG
/**********************************************************//**
Validates the consistency of a tuple which must be complete, i.e.,
@@ -409,7 +409,7 @@ ibool
dtuple_validate(
/*============*/
const dtuple_t* tuple) /*!< in: tuple */
- __attribute__((nonnull, warn_unused_result));
+ MY_ATTRIBUTE((nonnull, warn_unused_result));
#endif /* UNIV_DEBUG */
/*************************************************************//**
Pretty prints a dfield value according to its data type. */
@@ -418,7 +418,7 @@ void
dfield_print(
/*=========*/
const dfield_t* dfield) /*!< in: dfield */
- __attribute__((nonnull));
+ MY_ATTRIBUTE((nonnull));
/*************************************************************//**
Pretty prints a dfield value according to its data type. Also the hex string
is printed if a string contains non-printable characters. */
@@ -427,7 +427,7 @@ void
dfield_print_also_hex(
/*==================*/
const dfield_t* dfield) /*!< in: dfield */
- __attribute__((nonnull));
+ MY_ATTRIBUTE((nonnull));
/**********************************************************//**
The following function prints the contents of a tuple. */
UNIV_INTERN
@@ -436,7 +436,7 @@ dtuple_print(
/*=========*/
FILE* f, /*!< in: output stream */
const dtuple_t* tuple) /*!< in: tuple */
- __attribute__((nonnull));
+ MY_ATTRIBUTE((nonnull));
/**************************************************************//**
Moves parts of long fields in entry to the big record vector so that
the size of tuple drops below the maximum record size allowed in the
@@ -453,7 +453,7 @@ dtuple_convert_big_rec(
dtuple_t* entry, /*!< in/out: index entry */
ulint* n_ext) /*!< in/out: number of
externally stored columns */
- __attribute__((nonnull, malloc, warn_unused_result));
+ MY_ATTRIBUTE((nonnull, malloc, warn_unused_result));
/**************************************************************//**
Puts back to entry the data stored in vector. Note that to ensure the
fields in entry can accommodate the data, vector must have been created
@@ -466,7 +466,7 @@ dtuple_convert_back_big_rec(
dtuple_t* entry, /*!< in: entry whose data was put to vector */
big_rec_t* vector) /*!< in, own: big rec vector; it is
freed in this function */
- __attribute__((nonnull));
+ MY_ATTRIBUTE((nonnull));
/**************************************************************//**
Frees the memory in a big rec vector. */
UNIV_INLINE
@@ -475,7 +475,7 @@ dtuple_big_rec_free(
/*================*/
big_rec_t* vector) /*!< in, own: big rec vector; it is
freed in this function */
- __attribute__((nonnull));
+ MY_ATTRIBUTE((nonnull));
/*######################################################################*/
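Several allocators above (dtuple_create(), dtuple_copy(), dtuple_convert_big_rec()) also carry the malloc attribute. It promises the optimizer that the returned pointer does not alias any other pointer live at the call site, enabling more aggressive alias analysis around the call. A hedged illustration with a hypothetical allocator:

/* 'malloc' asserts the result aliases nothing else that is live, so
   loads and stores around the call may be reordered more freely. */
dtuple_t*
tuple_alloc_sketch(mem_heap_t* heap, ulint n_fields)
	MY_ATTRIBUTE((nonnull, malloc, warn_unused_result));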
diff --git a/storage/innobase/include/data0data.ic b/storage/innobase/include/data0data.ic
index 6937d55d211..11499ab928c 100644
--- a/storage/innobase/include/data0data.ic
+++ b/storage/innobase/include/data0data.ic
@@ -1,6 +1,6 @@
/*****************************************************************************
-Copyright (c) 1994, 2012, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 1994, 2016, Oracle and/or its affiliates. All Rights Reserved.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -56,7 +56,8 @@ dfield_set_type(
dfield_t* field, /*!< in: SQL data field */
const dtype_t* type) /*!< in: pointer to data type struct */
{
- ut_ad(field && type);
+ ut_ad(field != NULL);
+ ut_ad(type != NULL);
field->type = *type;
}
@@ -194,7 +195,8 @@ dfield_copy_data(
dfield_t* field1, /*!< out: field to copy to */
const dfield_t* field2) /*!< in: field to copy from */
{
- ut_ad(field1 && field2);
+ ut_ad(field1 != NULL);
+ ut_ad(field2 != NULL);
field1->data = field2->data;
field1->len = field2->len;
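The dfield_set_type() and dfield_copy_data() hunks above also split compound debug assertions into one assertion per pointer: when the combined form fails, the report cannot say which operand was NULL, while the split form pinpoints it by line number. An illustration, with the standard assert() standing in for InnoDB's ut_ad():

assert(field != NULL && type != NULL);	/* one line, one ambiguous report */

assert(field != NULL);			/* split: the failing pointer is */
assert(type != NULL);			/* identified by its line number  */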
diff --git a/storage/innobase/include/dict0boot.h b/storage/innobase/include/dict0boot.h
index a994c9d8ff1..477e1150f43 100644
--- a/storage/innobase/include/dict0boot.h
+++ b/storage/innobase/include/dict0boot.h
@@ -1,6 +1,6 @@
/*****************************************************************************
-Copyright (c) 1996, 2012, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 1996, 2016, Oracle and/or its affiliates. All Rights Reserved.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -95,7 +95,7 @@ UNIV_INTERN
dberr_t
dict_boot(void)
/*===========*/
- __attribute__((warn_unused_result));
+ MY_ATTRIBUTE((warn_unused_result));
/*****************************************************************//**
Creates and initializes the data dictionary at the server bootstrap.
@@ -104,7 +104,7 @@ UNIV_INTERN
dberr_t
dict_create(void)
/*=============*/
- __attribute__((warn_unused_result));
+ MY_ATTRIBUTE((warn_unused_result));
/*********************************************************************//**
Check if a table id belongs to a system table.
@@ -114,7 +114,7 @@ bool
dict_is_sys_table(
/*==============*/
table_id_t id) /*!< in: table id to check */
- __attribute__((warn_unused_result));
+ MY_ATTRIBUTE((warn_unused_result));
/* Space id and page no where the dictionary header resides */
#define DICT_HDR_SPACE 0 /* the SYSTEM tablespace */
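dict_boot() and dict_create() return a dberr_t and are tagged warn_unused_result, so silently discarding the status code becomes a compile-time warning. A hypothetical call site:

dberr_t	err = dict_boot();	/* OK: the result is consumed */
if (err != DB_SUCCESS) {
	/* report the failure and abort startup */
}

dict_boot();			/* warns: return value ignored */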
diff --git a/storage/innobase/include/dict0crea.h b/storage/innobase/include/dict0crea.h
index 564fad35748..150c76b2e65 100644
--- a/storage/innobase/include/dict0crea.h
+++ b/storage/innobase/include/dict0crea.h
@@ -1,6 +1,6 @@
/*****************************************************************************
-Copyright (c) 1996, 2014, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 1996, 2016, Oracle and/or its affiliates. All Rights Reserved.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -137,7 +137,7 @@ dict_create_add_foreign_id(
incremented if used */
const char* name, /*!< in: table name */
dict_foreign_t* foreign)/*!< in/out: foreign key */
- __attribute__((nonnull));
+ MY_ATTRIBUTE((nonnull));
/** Adds the given set of foreign key objects to the dictionary tables
in the database. This function does not modify the dictionary cache. The
@@ -156,7 +156,7 @@ dict_create_add_foreigns_to_dictionary(
const dict_foreign_set& local_fk_set,
const dict_table_t* table,
trx_t* trx)
- __attribute__((nonnull, warn_unused_result));
+ MY_ATTRIBUTE((nonnull, warn_unused_result));
/****************************************************************//**
Creates the tablespaces and datafiles system tables inside InnoDB
at server bootstrap or server start if they are not found or are
@@ -192,7 +192,7 @@ dict_create_add_foreign_to_dictionary(
const char* name, /*!< in: table name */
const dict_foreign_t* foreign,/*!< in: foreign key */
trx_t* trx) /*!< in/out: dictionary transaction */
- __attribute__((nonnull, warn_unused_result));
+ MY_ATTRIBUTE((nonnull, warn_unused_result));
/********************************************************************//**
Construct foreign key constraint definition from data dictionary information.
diff --git a/storage/innobase/include/dict0crea.ic b/storage/innobase/include/dict0crea.ic
index 2d0d9dcb858..1cbaa47032b 100644
--- a/storage/innobase/include/dict0crea.ic
+++ b/storage/innobase/include/dict0crea.ic
@@ -1,6 +1,6 @@
/*****************************************************************************
-Copyright (c) 1996, 2012, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 1996, 2016, Oracle and/or its affiliates. All Rights Reserved.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -33,7 +33,7 @@ UNIV_INTERN
bool
row_is_mysql_tmp_table_name(
/*========================*/
- const char* name) __attribute__((warn_unused_result));
+ const char* name) MY_ATTRIBUTE((warn_unused_result));
/*!< in: table name in the form
'database/tablename' */
diff --git a/storage/innobase/include/dict0dict.h b/storage/innobase/include/dict0dict.h
index b15d364948c..42f93b5a889 100644
--- a/storage/innobase/include/dict0dict.h
+++ b/storage/innobase/include/dict0dict.h
@@ -1,6 +1,6 @@
/*****************************************************************************
-Copyright (c) 1996, 2015, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 1996, 2016, Oracle and/or its affiliates. All Rights Reserved.
Copyright (c) 2012, Facebook Inc.
Copyright (c) 2013, 2015, MariaDB Corporation.
@@ -59,7 +59,7 @@ void
dict_casedn_str(
/*============*/
char* a) /*!< in/out: string to put in lower case */
- __attribute__((nonnull));
+ MY_ATTRIBUTE((nonnull));
/********************************************************************//**
Get the database name length in a table name.
@return database name length */
@@ -69,7 +69,7 @@ dict_get_db_name_len(
/*=================*/
const char* name) /*!< in: table name in the form
dbname '/' tablename */
- __attribute__((nonnull, warn_unused_result));
+ MY_ATTRIBUTE((nonnull, warn_unused_result));
/*********************************************************************//**
Open a table from its database and table name; this is currently used by
the foreign constraint parser to get the referenced table.
@@ -113,7 +113,7 @@ dict_remove_db_name(
/*================*/
const char* name) /*!< in: table name in the form
dbname '/' tablename */
- __attribute__((nonnull, warn_unused_result));
+ MY_ATTRIBUTE((nonnull, warn_unused_result));
/** Operation to perform when opening a table */
enum dict_table_op_t {
@@ -161,7 +161,7 @@ dict_table_close(
ibool try_drop) /*!< in: TRUE=try to drop any orphan
indexes after an aborted online
index creation */
- __attribute__((nonnull));
+ MY_ATTRIBUTE((nonnull));
/**********************************************************************//**
Inits the data dictionary module. */
UNIV_INTERN
@@ -186,7 +186,7 @@ ulint
dict_col_get_mbminlen(
/*==================*/
const dict_col_t* col) /*!< in: column */
- __attribute__((nonnull, warn_unused_result));
+ MY_ATTRIBUTE((nonnull, warn_unused_result));
/*********************************************************************//**
Gets the maximum number of bytes per character.
@return maximum multi-byte char size, in bytes */
@@ -195,7 +195,7 @@ ulint
dict_col_get_mbmaxlen(
/*==================*/
const dict_col_t* col) /*!< in: column */
- __attribute__((nonnull, warn_unused_result));
+ MY_ATTRIBUTE((nonnull, warn_unused_result));
/*********************************************************************//**
Sets the minimum and maximum number of bytes per character. */
UNIV_INLINE
@@ -207,7 +207,7 @@ dict_col_set_mbminmaxlen(
character size, in bytes */
ulint mbmaxlen) /*!< in: maximum multi-byte
character size, in bytes */
- __attribute__((nonnull));
+ MY_ATTRIBUTE((nonnull));
/*********************************************************************//**
Gets the column data type. */
UNIV_INLINE
@@ -216,7 +216,7 @@ dict_col_copy_type(
/*===============*/
const dict_col_t* col, /*!< in: column */
dtype_t* type) /*!< out: data type */
- __attribute__((nonnull));
+ MY_ATTRIBUTE((nonnull));
/**********************************************************************//**
Determine bytes of column prefix to be stored in the undo log. Please
note if the table format is UNIV_FORMAT_A (< UNIV_FORMAT_B), no prefix
@@ -229,7 +229,7 @@ dict_max_field_len_store_undo(
dict_table_t* table, /*!< in: table */
const dict_col_t* col) /*!< in: column which index prefix
is based on */
- __attribute__((nonnull, warn_unused_result));
+ MY_ATTRIBUTE((nonnull, warn_unused_result));
#endif /* !UNIV_HOTBACKUP */
#ifdef UNIV_DEBUG
/*********************************************************************//**
@@ -241,7 +241,7 @@ dict_col_type_assert_equal(
/*=======================*/
const dict_col_t* col, /*!< in: column */
const dtype_t* type) /*!< in: data type */
- __attribute__((nonnull, warn_unused_result));
+ MY_ATTRIBUTE((nonnull, warn_unused_result));
#endif /* UNIV_DEBUG */
#ifndef UNIV_HOTBACKUP
/***********************************************************************//**
@@ -252,7 +252,7 @@ ulint
dict_col_get_min_size(
/*==================*/
const dict_col_t* col) /*!< in: column */
- __attribute__((nonnull, warn_unused_result));
+ MY_ATTRIBUTE((nonnull, warn_unused_result));
/***********************************************************************//**
Returns the maximum size of the column.
@return maximum size */
@@ -261,7 +261,7 @@ ulint
dict_col_get_max_size(
/*==================*/
const dict_col_t* col) /*!< in: column */
- __attribute__((nonnull, warn_unused_result));
+ MY_ATTRIBUTE((nonnull, warn_unused_result));
/***********************************************************************//**
Returns the size of a fixed size column, 0 if not a fixed size column.
@return fixed size, or 0 */
@@ -271,7 +271,7 @@ dict_col_get_fixed_size(
/*====================*/
const dict_col_t* col, /*!< in: column */
ulint comp) /*!< in: nonzero=ROW_FORMAT=COMPACT */
- __attribute__((nonnull, warn_unused_result));
+ MY_ATTRIBUTE((nonnull, warn_unused_result));
/***********************************************************************//**
Returns the ROW_FORMAT=REDUNDANT stored SQL NULL size of a column.
For fixed length types it is the fixed length of the type, otherwise 0.
@@ -282,7 +282,7 @@ dict_col_get_sql_null_size(
/*=======================*/
const dict_col_t* col, /*!< in: column */
ulint comp) /*!< in: nonzero=ROW_FORMAT=COMPACT */
- __attribute__((nonnull, warn_unused_result));
+ MY_ATTRIBUTE((nonnull, warn_unused_result));
/*********************************************************************//**
Gets the column number.
@return col->ind, table column position (starting from 0) */
@@ -291,7 +291,7 @@ ulint
dict_col_get_no(
/*============*/
const dict_col_t* col) /*!< in: column */
- __attribute__((nonnull, warn_unused_result));
+ MY_ATTRIBUTE((nonnull, warn_unused_result));
/*********************************************************************//**
Gets the column position in the clustered index. */
UNIV_INLINE
@@ -300,7 +300,7 @@ dict_col_get_clust_pos(
/*===================*/
const dict_col_t* col, /*!< in: table column */
const dict_index_t* clust_index) /*!< in: clustered index */
- __attribute__((nonnull, warn_unused_result));
+ MY_ATTRIBUTE((nonnull, warn_unused_result));
/****************************************************************//**
If the given column name is reserved for InnoDB system columns, return
TRUE.
@@ -310,7 +310,7 @@ ibool
dict_col_name_is_reserved(
/*======================*/
const char* name) /*!< in: column name */
- __attribute__((nonnull, warn_unused_result));
+ MY_ATTRIBUTE((nonnull, warn_unused_result));
/********************************************************************//**
Acquire the autoinc lock. */
UNIV_INTERN
@@ -318,7 +318,7 @@ void
dict_table_autoinc_lock(
/*====================*/
dict_table_t* table) /*!< in/out: table */
- __attribute__((nonnull));
+ MY_ATTRIBUTE((nonnull));
/********************************************************************//**
Unconditionally set the autoinc counter. */
UNIV_INTERN
@@ -327,7 +327,7 @@ dict_table_autoinc_initialize(
/*==========================*/
dict_table_t* table, /*!< in/out: table */
ib_uint64_t value) /*!< in: next value to assign to a row */
- __attribute__((nonnull));
+ MY_ATTRIBUTE((nonnull));
/** Store autoinc value when the table is evicted.
@param[in] table table evicted */
@@ -352,7 +352,7 @@ ib_uint64_t
dict_table_autoinc_read(
/*====================*/
const dict_table_t* table) /*!< in: table */
- __attribute__((nonnull, warn_unused_result));
+ MY_ATTRIBUTE((nonnull, warn_unused_result));
/********************************************************************//**
Updates the autoinc counter if the value supplied is greater than the
current value. */
@@ -363,7 +363,7 @@ dict_table_autoinc_update_if_greater(
dict_table_t* table, /*!< in/out: table */
ib_uint64_t value) /*!< in: value which was assigned to a row */
- __attribute__((nonnull));
+ MY_ATTRIBUTE((nonnull));
/********************************************************************//**
Release the autoinc lock. */
UNIV_INTERN
@@ -371,7 +371,7 @@ void
dict_table_autoinc_unlock(
/*======================*/
dict_table_t* table) /*!< in/out: table */
- __attribute__((nonnull));
+ MY_ATTRIBUTE((nonnull));
#endif /* !UNIV_HOTBACKUP */
/**********************************************************************//**
Adds system columns to a table object. */
@@ -381,7 +381,7 @@ dict_table_add_system_columns(
/*==========================*/
dict_table_t* table, /*!< in/out: table */
mem_heap_t* heap) /*!< in: temporary heap */
- __attribute__((nonnull));
+ MY_ATTRIBUTE((nonnull));
#ifndef UNIV_HOTBACKUP
/**********************************************************************//**
Adds a table object to the dictionary cache. */
@@ -392,7 +392,7 @@ dict_table_add_to_cache(
dict_table_t* table, /*!< in: table */
ibool can_be_evicted, /*!< in: TRUE if can be evicted */
mem_heap_t* heap) /*!< in: temporary heap */
- __attribute__((nonnull));
+ MY_ATTRIBUTE((nonnull));
/**********************************************************************//**
Removes a table object from the dictionary cache. */
UNIV_INTERN
@@ -400,7 +400,7 @@ void
dict_table_remove_from_cache(
/*=========================*/
dict_table_t* table) /*!< in, own: table */
- __attribute__((nonnull));
+ MY_ATTRIBUTE((nonnull));
/**********************************************************************//**
Removes a table object from the dictionary cache. */
UNIV_INTERN
@@ -423,7 +423,7 @@ dict_table_rename_in_cache(
/*!< in: in ALTER TABLE we want
to preserve the original table name
in constraints which reference it */
- __attribute__((nonnull, warn_unused_result));
+ MY_ATTRIBUTE((nonnull, warn_unused_result));
/**********************************************************************//**
Removes an index from the dictionary cache. */
UNIV_INTERN
@@ -432,7 +432,7 @@ dict_index_remove_from_cache(
/*=========================*/
dict_table_t* table, /*!< in/out: table */
dict_index_t* index) /*!< in, own: index */
- __attribute__((nonnull));
+ MY_ATTRIBUTE((nonnull));
/**********************************************************************//**
Change the id of a table object in the dictionary cache. This is used in
DISCARD TABLESPACE. */
@@ -442,7 +442,7 @@ dict_table_change_id_in_cache(
/*==========================*/
dict_table_t* table, /*!< in/out: table object already in cache */
table_id_t new_id) /*!< in: new id to set */
- __attribute__((nonnull));
+ MY_ATTRIBUTE((nonnull));
/**********************************************************************//**
Removes a foreign constraint struct from the dictionary cache. */
UNIV_INTERN
@@ -450,7 +450,7 @@ void
dict_foreign_remove_from_cache(
/*===========================*/
dict_foreign_t* foreign) /*!< in, own: foreign constraint */
- __attribute__((nonnull));
+ MY_ATTRIBUTE((nonnull));
/**********************************************************************//**
Adds a foreign key constraint object to the dictionary cache. May free
the object if there already is an object with the same identifier in.
@@ -471,7 +471,7 @@ dict_foreign_add_to_cache(
compatibility */
dict_err_ignore_t ignore_err)
/*!< in: error to be ignored */
- __attribute__((nonnull(1), warn_unused_result));
+ MY_ATTRIBUTE((nonnull(1), warn_unused_result));
/*********************************************************************//**
Checks if a table is referenced by foreign keys.
@return TRUE if table is referenced by a foreign key */
@@ -480,7 +480,7 @@ ibool
dict_table_is_referenced_by_foreign_key(
/*====================================*/
const dict_table_t* table) /*!< in: InnoDB table */
- __attribute__((nonnull, warn_unused_result));
+ MY_ATTRIBUTE((nonnull, warn_unused_result));
/**********************************************************************//**
Replace the index passed in with another equivalent index in the
foreign key lists of the table.
@@ -494,7 +494,7 @@ dict_foreign_replace_index(
/*!< in: column names, or NULL
to use table->col_names */
const dict_index_t* index) /*!< in: index to be replaced */
- __attribute__((nonnull(1,3), warn_unused_result));
+ MY_ATTRIBUTE((nonnull(1,3), warn_unused_result));
/**********************************************************************//**
Determines whether a string starts with the specified keyword.
@return TRUE if str starts with keyword */
@@ -505,7 +505,7 @@ dict_str_starts_with_keyword(
THD* thd, /*!< in: MySQL thread handle */
const char* str, /*!< in: string to scan for keyword */
const char* keyword) /*!< in: keyword to look for */
- __attribute__((nonnull, warn_unused_result));
+ MY_ATTRIBUTE((nonnull, warn_unused_result));
/*********************************************************************//**
Scans a table create SQL string and adds to the data dictionary
the foreign key constraints declared in the string. This function
@@ -534,7 +534,7 @@ dict_create_foreign_constraints(
ibool reject_fks) /*!< in: if TRUE, fail with error
code DB_CANNOT_ADD_CONSTRAINT if
any foreign keys are found. */
- __attribute__((nonnull, warn_unused_result));
+ MY_ATTRIBUTE((nonnull, warn_unused_result));
/**********************************************************************//**
Parses the CONSTRAINT id's to be dropped in an ALTER TABLE statement.
@return DB_SUCCESS or DB_CANNOT_DROP_CONSTRAINT if syntax error or the
@@ -551,7 +551,7 @@ dict_foreign_parse_drop_constraints(
to drop */
const char*** constraints_to_drop) /*!< out: id's of the
constraints to drop */
- __attribute__((nonnull, warn_unused_result));
+ MY_ATTRIBUTE((nonnull, warn_unused_result));
/**********************************************************************//**
Returns a table object and increments its open handle count.
NOTE! This is a high-level function to be used mainly from outside the
@@ -570,7 +570,7 @@ dict_table_open_on_name(
dict_err_ignore_t
ignore_err) /*!< in: error to be ignored when
loading the table */
- __attribute__((nonnull, warn_unused_result));
+ MY_ATTRIBUTE((nonnull, warn_unused_result));
/*********************************************************************//**
Tries to find an index whose first fields are the columns in the array,
@@ -607,7 +607,7 @@ dict_foreign_find_index(
/*!< out: index where error
happened */
- __attribute__((nonnull(1,3), warn_unused_result));
+ MY_ATTRIBUTE((nonnull(1,3), warn_unused_result));
/**********************************************************************//**
Returns a column's name.
@return column name. NOTE: not guaranteed to stay valid if table is
@@ -618,7 +618,7 @@ dict_table_get_col_name(
/*====================*/
const dict_table_t* table, /*!< in: table */
ulint col_nr) /*!< in: column number */
- __attribute__((nonnull, warn_unused_result));
+ MY_ATTRIBUTE((nonnull, warn_unused_result));
/**********************************************************************//**
Returns a column's name.
@return column name. NOTE: not guaranteed to stay valid if table is
@@ -629,7 +629,7 @@ dict_table_get_col_name_for_mysql(
/*==============================*/
const dict_table_t* table, /*!< in: table */
const char* col_name)/*!< in: MySQL table column name */
- __attribute__((nonnull, warn_unused_result));
+ MY_ATTRIBUTE((nonnull, warn_unused_result));
/**********************************************************************//**
Prints a table data. */
UNIV_INTERN
@@ -637,7 +637,7 @@ void
dict_table_print(
/*=============*/
dict_table_t* table) /*!< in: table */
- __attribute__((nonnull));
+ MY_ATTRIBUTE((nonnull));
/**********************************************************************//**
Outputs info on foreign keys of a table. */
UNIV_INTERN
@@ -669,7 +669,7 @@ dict_index_name_print(
FILE* file, /*!< in: output stream */
const trx_t* trx, /*!< in: transaction */
const dict_index_t* index) /*!< in: index to print */
- __attribute__((nonnull(1,3)));
+ MY_ATTRIBUTE((nonnull(1,3)));
/*********************************************************************//**
Tries to find an index whose first fields are the columns in the array,
in the same order and is not marked for deletion and is not the same
@@ -705,7 +705,7 @@ dict_foreign_qualify_index(
dict_index_t** err_index)
/*!< out: index where error
happened */
- __attribute__((nonnull(1,3), warn_unused_result));
+ MY_ATTRIBUTE((nonnull(1,3), warn_unused_result));
#ifdef UNIV_DEBUG
/********************************************************************//**
Gets the first index on the table (the clustered index).
@@ -715,7 +715,7 @@ dict_index_t*
dict_table_get_first_index(
/*=======================*/
const dict_table_t* table) /*!< in: table */
- __attribute__((nonnull, warn_unused_result));
+ MY_ATTRIBUTE((nonnull, warn_unused_result));
/********************************************************************//**
Gets the last index on the table.
@return index, NULL if none exists */
@@ -724,7 +724,7 @@ dict_index_t*
dict_table_get_last_index(
/*=======================*/
const dict_table_t* table) /*!< in: table */
- __attribute__((nonnull, warn_unused_result));
+ MY_ATTRIBUTE((nonnull, warn_unused_result));
/********************************************************************//**
Gets the next index on the table.
@return index, NULL if none left */
@@ -733,7 +733,7 @@ dict_index_t*
dict_table_get_next_index(
/*======================*/
const dict_index_t* index) /*!< in: index */
- __attribute__((nonnull, warn_unused_result));
+ MY_ATTRIBUTE((nonnull, warn_unused_result));
#else /* UNIV_DEBUG */
# define dict_table_get_first_index(table) UT_LIST_GET_FIRST((table)->indexes)
# define dict_table_get_last_index(table) UT_LIST_GET_LAST((table)->indexes)
@@ -762,7 +762,7 @@ ulint
dict_index_is_clust(
/*================*/
const dict_index_t* index) /*!< in: index */
- __attribute__((nonnull, pure, warn_unused_result));
+ MY_ATTRIBUTE((nonnull, pure, warn_unused_result));
/********************************************************************//**
Check whether the index is unique.
@return nonzero for unique index, zero for other indexes */
@@ -771,7 +771,7 @@ ulint
dict_index_is_unique(
/*=================*/
const dict_index_t* index) /*!< in: index */
- __attribute__((nonnull, pure, warn_unused_result));
+ MY_ATTRIBUTE((nonnull, pure, warn_unused_result));
/********************************************************************//**
Check whether the index is the insert buffer tree.
@return nonzero for insert buffer, zero for other indexes */
@@ -780,7 +780,7 @@ ulint
dict_index_is_ibuf(
/*===============*/
const dict_index_t* index) /*!< in: index */
- __attribute__((nonnull, pure, warn_unused_result));
+ MY_ATTRIBUTE((nonnull, pure, warn_unused_result));
/********************************************************************//**
Check whether the index is a secondary index or the insert buffer tree.
@return nonzero for a secondary index or the insert buffer tree, zero for other indexes */
@@ -789,7 +789,7 @@ ulint
dict_index_is_sec_or_ibuf(
/*======================*/
const dict_index_t* index) /*!< in: index */
- __attribute__((nonnull, pure, warn_unused_result));
+ MY_ATTRIBUTE((nonnull, pure, warn_unused_result));
/************************************************************************
Gets all the FTS indexes for the table. NOTE: must not be called for
@@ -801,7 +801,7 @@ dict_table_get_all_fts_indexes(
/* out: number of indexes collected */
dict_table_t* table, /* in: table */
ib_vector_t* indexes)/* out: vector for collecting FTS indexes */
- __attribute__((nonnull));
+ MY_ATTRIBUTE((nonnull));
/********************************************************************//**
Gets the number of user-defined columns in a table in the dictionary
cache.
@@ -811,7 +811,7 @@ ulint
dict_table_get_n_user_cols(
/*=======================*/
const dict_table_t* table) /*!< in: table */
- __attribute__((nonnull, pure, warn_unused_result));
+ MY_ATTRIBUTE((nonnull, pure, warn_unused_result));
/********************************************************************//**
Gets the number of system columns in a table in the dictionary cache.
@return number of system (e.g., ROW_ID) columns of a table */
@@ -820,7 +820,7 @@ ulint
dict_table_get_n_sys_cols(
/*======================*/
const dict_table_t* table) /*!< in: table */
- __attribute__((nonnull, pure, warn_unused_result));
+ MY_ATTRIBUTE((nonnull, pure, warn_unused_result));
/********************************************************************//**
Gets the number of all columns (also system) in a table in the dictionary
cache.
@@ -830,7 +830,7 @@ ulint
dict_table_get_n_cols(
/*==================*/
const dict_table_t* table) /*!< in: table */
- __attribute__((nonnull, pure, warn_unused_result));
+ MY_ATTRIBUTE((nonnull, pure, warn_unused_result));
/********************************************************************//**
Gets the estimated number of rows in the table.
@return estimated number of rows */
@@ -839,7 +839,7 @@ ib_uint64_t
dict_table_get_n_rows(
/*==================*/
const dict_table_t* table) /*!< in: table */
- __attribute__((nonnull, warn_unused_result));
+ MY_ATTRIBUTE((nonnull, warn_unused_result));
/********************************************************************//**
Increment the number of rows in the table by one.
Notice that this operation is not protected by any latch, the number is
@@ -849,7 +849,7 @@ void
dict_table_n_rows_inc(
/*==================*/
dict_table_t* table) /*!< in/out: table */
- __attribute__((nonnull));
+ MY_ATTRIBUTE((nonnull));
/********************************************************************//**
Decrement the number of rows in the table by one.
Notice that this operation is not protected by any latch, the number is
@@ -859,7 +859,7 @@ void
dict_table_n_rows_dec(
/*==================*/
dict_table_t* table) /*!< in/out: table */
- __attribute__((nonnull));
+ MY_ATTRIBUTE((nonnull));
#ifdef UNIV_DEBUG
/********************************************************************//**
Gets the nth column of a table.
@@ -870,7 +870,7 @@ dict_table_get_nth_col(
/*===================*/
const dict_table_t* table, /*!< in: table */
ulint pos) /*!< in: position of column */
- __attribute__((nonnull, warn_unused_result));
+ MY_ATTRIBUTE((nonnull, warn_unused_result));
/********************************************************************//**
Gets the given system column of a table.
@return pointer to column object */
@@ -880,7 +880,7 @@ dict_table_get_sys_col(
/*===================*/
const dict_table_t* table, /*!< in: table */
ulint sys) /*!< in: DATA_ROW_ID, ... */
- __attribute__((nonnull, warn_unused_result));
+ MY_ATTRIBUTE((nonnull, warn_unused_result));
#else /* UNIV_DEBUG */
#define dict_table_get_nth_col(table, pos) \
((table)->cols + (pos))
@@ -896,7 +896,7 @@ dict_table_get_sys_col_no(
/*======================*/
const dict_table_t* table, /*!< in: table */
ulint sys) /*!< in: DATA_ROW_ID, ... */
- __attribute__((nonnull, warn_unused_result));
+ MY_ATTRIBUTE((nonnull, warn_unused_result));
#ifndef UNIV_HOTBACKUP
/********************************************************************//**
Returns the minimum data size of an index record.
@@ -906,7 +906,7 @@ ulint
dict_index_get_min_size(
/*====================*/
const dict_index_t* index) /*!< in: index */
- __attribute__((nonnull, warn_unused_result));
+ MY_ATTRIBUTE((nonnull, warn_unused_result));
#endif /* !UNIV_HOTBACKUP */
/********************************************************************//**
Check whether the table uses the compact page format.
@@ -916,7 +916,7 @@ ibool
dict_table_is_comp(
/*===============*/
const dict_table_t* table) /*!< in: table */
- __attribute__((nonnull, warn_unused_result));
+ MY_ATTRIBUTE((nonnull, warn_unused_result));
/********************************************************************//**
Determine the file format of a table.
@return file format version */
@@ -925,7 +925,7 @@ ulint
dict_table_get_format(
/*==================*/
const dict_table_t* table) /*!< in: table */
- __attribute__((nonnull, warn_unused_result));
+ MY_ATTRIBUTE((nonnull, warn_unused_result));
/********************************************************************//**
Determine the file format from a dict_table_t::flags.
@return file format version */
@@ -934,7 +934,7 @@ ulint
dict_tf_get_format(
/*===============*/
ulint flags) /*!< in: dict_table_t::flags */
- __attribute__((warn_unused_result));
+ MY_ATTRIBUTE((warn_unused_result));
/********************************************************************//**
Set the various values in a dict_table_t::flags pointer. */
UNIV_INLINE
@@ -969,7 +969,7 @@ ulint
dict_tf_to_fsp_flags(
/*=================*/
ulint flags) /*!< in: dict_table_t::flags */
- __attribute__((const));
+ MY_ATTRIBUTE((const));
/********************************************************************//**
Extract the compressed page size from table flags.
@return compressed page size, or 0 if not compressed */
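dict_tf_to_fsp_flags() above is annotated const, the strongest optimizer hint in this header; pure (as on dict_index_is_clust() and dict_table_is_discarded()) is its weaker sibling. Roughly, const means the result depends only on the argument values themselves, while pure additionally permits reading global or pointed-to memory; neither allows side effects, so the compiler may fold repeated calls. A sketch with hypothetical declarations:

typedef struct cache cache_t;		/* hypothetical type */

ulint flags_to_fsp_sketch(ulint flags)	/* result from the argument alone */
	MY_ATTRIBUTE((const));
ulint rows_cached_sketch(const cache_t* c) /* may read memory, no writes */
	MY_ATTRIBUTE((nonnull, pure));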
@@ -988,7 +988,7 @@ ulint
dict_table_zip_size(
/*================*/
const dict_table_t* table) /*!< in: table */
- __attribute__((nonnull, warn_unused_result));
+ MY_ATTRIBUTE((nonnull, warn_unused_result));
#ifndef UNIV_HOTBACKUP
/*********************************************************************//**
Obtain exclusive locks on all index trees of the table. This is to prevent
@@ -999,7 +999,7 @@ void
dict_table_x_lock_indexes(
/*======================*/
dict_table_t* table) /*!< in: table */
- __attribute__((nonnull));
+ MY_ATTRIBUTE((nonnull));
/*********************************************************************//**
Release the exclusive locks on all index trees. */
UNIV_INLINE
@@ -1007,7 +1007,7 @@ void
dict_table_x_unlock_indexes(
/*========================*/
dict_table_t* table) /*!< in: table */
- __attribute__((nonnull));
+ MY_ATTRIBUTE((nonnull));
/********************************************************************//**
Checks if a column is in the ordering columns of the clustered index of a
table. Column prefixes are treated like whole columns.
@@ -1018,7 +1018,7 @@ dict_table_col_in_clustered_key(
/*============================*/
const dict_table_t* table, /*!< in: table */
ulint n) /*!< in: column number */
- __attribute__((nonnull, warn_unused_result));
+ MY_ATTRIBUTE((nonnull, warn_unused_result));
/*******************************************************************//**
Check if the table has an FTS index.
@return TRUE if table has an FTS index */
@@ -1027,7 +1027,7 @@ ibool
dict_table_has_fts_index(
/*=====================*/
dict_table_t* table) /*!< in: table */
- __attribute__((nonnull, warn_unused_result));
+ MY_ATTRIBUTE((nonnull, warn_unused_result));
/*******************************************************************//**
Copies types of columns contained in table to tuple and sets all
fields of the tuple to the SQL NULL value. This function should
@@ -1038,7 +1038,7 @@ dict_table_copy_types(
/*==================*/
dtuple_t* tuple, /*!< in/out: data tuple */
const dict_table_t* table) /*!< in: table */
- __attribute__((nonnull));
+ MY_ATTRIBUTE((nonnull));
/********************************************************************
Wait until all the background threads of the given table have exited, i.e.,
bg_threads == 0. Note: bg_threads_mutex must be reserved when
@@ -1050,7 +1050,7 @@ dict_table_wait_for_bg_threads_to_exit(
dict_table_t* table, /* in: table */
ulint delay) /* in: time in microseconds to wait between
checks of bg_threads. */
- __attribute__((nonnull));
+ MY_ATTRIBUTE((nonnull));
/**********************************************************************//**
Looks for an index with the given id. NOTE that we do not reserve
the dictionary mutex: this function is for emergency purposes like
@@ -1061,7 +1061,7 @@ dict_index_t*
dict_index_find_on_id_low(
/*======================*/
index_id_t id) /*!< in: index id */
- __attribute__((warn_unused_result));
+ MY_ATTRIBUTE((warn_unused_result));
/**********************************************************************//**
Make room in the table cache by evicting an unused table. The unused table
should not be part of FK relationship and currently not used in any user
@@ -1087,7 +1087,7 @@ dict_index_add_to_cache(
ibool strict) /*!< in: TRUE=refuse to create the index
if records could be too big to fit in
a B-tree page */
- __attribute__((nonnull, warn_unused_result));
+ MY_ATTRIBUTE((nonnull, warn_unused_result));
/**********************************************************************//**
Removes an index from the dictionary cache. */
UNIV_INTERN
@@ -1096,7 +1096,7 @@ dict_index_remove_from_cache(
/*=========================*/
dict_table_t* table, /*!< in/out: table */
dict_index_t* index) /*!< in, own: index */
- __attribute__((nonnull));
+ MY_ATTRIBUTE((nonnull));
#endif /* !UNIV_HOTBACKUP */
/********************************************************************//**
Gets the number of fields in the internal representation of an index,
@@ -1109,7 +1109,7 @@ dict_index_get_n_fields(
const dict_index_t* index) /*!< in: an internal
representation of index (in
the dictionary cache) */
- __attribute__((nonnull, warn_unused_result));
+ MY_ATTRIBUTE((nonnull, warn_unused_result));
/********************************************************************//**
Gets the number of fields in the internal representation of an index
that uniquely determine the position of an index entry in the index, if
@@ -1122,7 +1122,7 @@ dict_index_get_n_unique(
/*====================*/
const dict_index_t* index) /*!< in: an internal representation
of index (in the dictionary cache) */
- __attribute__((nonnull, warn_unused_result));
+ MY_ATTRIBUTE((nonnull, warn_unused_result));
/********************************************************************//**
Gets the number of fields in the internal representation of an index
which uniquely determine the position of an index entry in the index, if
@@ -1134,7 +1134,7 @@ dict_index_get_n_unique_in_tree(
/*============================*/
const dict_index_t* index) /*!< in: an internal representation
of index (in the dictionary cache) */
- __attribute__((nonnull, warn_unused_result));
+ MY_ATTRIBUTE((nonnull, warn_unused_result));
/********************************************************************//**
Gets the number of user-defined ordering fields in the index. In the internal
representation we add the row id to the ordering fields to make all indexes
@@ -1147,7 +1147,7 @@ dict_index_get_n_ordering_defined_by_user(
/*======================================*/
const dict_index_t* index) /*!< in: an internal representation
of index (in the dictionary cache) */
- __attribute__((nonnull, warn_unused_result));
+ MY_ATTRIBUTE((nonnull, warn_unused_result));
#ifdef UNIV_DEBUG
/********************************************************************//**
Gets the nth field of an index.
@@ -1158,7 +1158,7 @@ dict_index_get_nth_field(
/*=====================*/
const dict_index_t* index, /*!< in: index */
ulint pos) /*!< in: position of field */
- __attribute__((nonnull, warn_unused_result));
+ MY_ATTRIBUTE((nonnull, warn_unused_result));
#else /* UNIV_DEBUG */
# define dict_index_get_nth_field(index, pos) ((index)->fields + (pos))
#endif /* UNIV_DEBUG */
@@ -1171,7 +1171,7 @@ dict_index_get_nth_col(
/*===================*/
const dict_index_t* index, /*!< in: index */
ulint pos) /*!< in: position of the field */
- __attribute__((nonnull, warn_unused_result));
+ MY_ATTRIBUTE((nonnull, warn_unused_result));
/********************************************************************//**
Gets the column number of the nth field in an index.
@return column number */
@@ -1181,7 +1181,7 @@ dict_index_get_nth_col_no(
/*======================*/
const dict_index_t* index, /*!< in: index */
ulint pos) /*!< in: position of the field */
- __attribute__((nonnull, warn_unused_result));
+ MY_ATTRIBUTE((nonnull, warn_unused_result));
/********************************************************************//**
Looks for column n in an index.
@return position in internal representation of the index;
@@ -1218,7 +1218,7 @@ dict_index_contains_col_or_prefix(
/*==============================*/
const dict_index_t* index, /*!< in: index */
ulint n) /*!< in: column number */
- __attribute__((nonnull, warn_unused_result));
+ MY_ATTRIBUTE((nonnull, warn_unused_result));
/********************************************************************//**
Looks for a matching field in an index. The column has to be the same. The
column in index must be complete, or must contain a prefix longer than the
@@ -1233,7 +1233,7 @@ dict_index_get_nth_field_pos(
const dict_index_t* index, /*!< in: index from which to search */
const dict_index_t* index2, /*!< in: index */
ulint n) /*!< in: field number in index2 */
- __attribute__((nonnull, warn_unused_result));
+ MY_ATTRIBUTE((nonnull, warn_unused_result));
/********************************************************************//**
Looks for column n position in the clustered index.
@return position in internal representation of the clustered index */
@@ -1243,7 +1243,7 @@ dict_table_get_nth_col_pos(
/*=======================*/
const dict_table_t* table, /*!< in: table */
ulint n) /*!< in: column number */
- __attribute__((nonnull, warn_unused_result));
+ MY_ATTRIBUTE((nonnull, warn_unused_result));
/********************************************************************//**
Returns the position of a system column in an index.
@return position, ULINT_UNDEFINED if not contained */
@@ -1253,7 +1253,7 @@ dict_index_get_sys_col_pos(
/*=======================*/
const dict_index_t* index, /*!< in: index */
ulint type) /*!< in: DATA_ROW_ID, ... */
- __attribute__((nonnull, warn_unused_result));
+ MY_ATTRIBUTE((nonnull, warn_unused_result));
/*******************************************************************//**
Adds a column to index. */
UNIV_INTERN
@@ -1264,7 +1264,7 @@ dict_index_add_col(
const dict_table_t* table, /*!< in: table */
dict_col_t* col, /*!< in: column */
ulint prefix_len) /*!< in: column prefix length */
- __attribute__((nonnull));
+ MY_ATTRIBUTE((nonnull));
#ifndef UNIV_HOTBACKUP
/*******************************************************************//**
Copies types of fields contained in index to tuple. */
@@ -1276,7 +1276,7 @@ dict_index_copy_types(
const dict_index_t* index, /*!< in: index */
ulint n_fields) /*!< in: number of
field types to copy */
- __attribute__((nonnull));
+ MY_ATTRIBUTE((nonnull));
#endif /* !UNIV_HOTBACKUP */
/*********************************************************************//**
Gets the field column.
@@ -1286,7 +1286,7 @@ const dict_col_t*
dict_field_get_col(
/*===============*/
const dict_field_t* field) /*!< in: index field */
- __attribute__((nonnull, warn_unused_result));
+ MY_ATTRIBUTE((nonnull, warn_unused_result));
#ifndef UNIV_HOTBACKUP
/**********************************************************************//**
Returns an index object if it is found in the dictionary cache.
@@ -1297,7 +1297,7 @@ dict_index_t*
dict_index_get_if_in_cache_low(
/*===========================*/
index_id_t index_id) /*!< in: index id */
- __attribute__((warn_unused_result));
+ MY_ATTRIBUTE((warn_unused_result));
#if defined UNIV_DEBUG || defined UNIV_BUF_DEBUG
/**********************************************************************//**
Returns an index object if it is found in the dictionary cache.
@@ -1307,7 +1307,7 @@ dict_index_t*
dict_index_get_if_in_cache(
/*=======================*/
index_id_t index_id) /*!< in: index id */
- __attribute__((warn_unused_result));
+ MY_ATTRIBUTE((warn_unused_result));
#endif /* UNIV_DEBUG || UNIV_BUF_DEBUG */
#ifdef UNIV_DEBUG
/**********************************************************************//**
@@ -1320,7 +1320,7 @@ dict_index_check_search_tuple(
/*==========================*/
const dict_index_t* index, /*!< in: index tree */
const dtuple_t* tuple) /*!< in: tuple used in a search */
- __attribute__((nonnull, warn_unused_result));
+ MY_ATTRIBUTE((nonnull, warn_unused_result));
/** Whether and when to allow temporary index names */
enum check_name {
/** Require all indexes to be complete. */
@@ -1340,7 +1340,7 @@ dict_table_check_for_dup_indexes(
in this table */
enum check_name check) /*!< in: whether and when to allow
temporary index names */
- __attribute__((nonnull));
+ MY_ATTRIBUTE((nonnull));
#endif /* UNIV_DEBUG */
/**********************************************************************//**
Builds a node pointer out of a physical record and a page number.
@@ -1358,7 +1358,7 @@ dict_index_build_node_ptr(
created */
ulint level) /*!< in: level of rec in tree:
0 means leaf level */
- __attribute__((nonnull, warn_unused_result));
+ MY_ATTRIBUTE((nonnull, warn_unused_result));
/**********************************************************************//**
Copies an initial segment of a physical record, long enough to specify an
index entry uniquely.
@@ -1374,7 +1374,7 @@ dict_index_copy_rec_order_prefix(
byte** buf, /*!< in/out: memory buffer for the
copied prefix, or NULL */
ulint* buf_size)/*!< in/out: buffer size */
- __attribute__((nonnull, warn_unused_result));
+ MY_ATTRIBUTE((nonnull, warn_unused_result));
/**********************************************************************//**
Builds a typed data tuple out of a physical record.
@return own: data tuple */
@@ -1386,7 +1386,7 @@ dict_index_build_data_tuple(
rec_t* rec, /*!< in: record for which to build data tuple */
ulint n_fields,/*!< in: number of data fields */
mem_heap_t* heap) /*!< in: memory heap where tuple created */
- __attribute__((nonnull, warn_unused_result));
+ MY_ATTRIBUTE((nonnull, warn_unused_result));
/*********************************************************************//**
Gets the space id of the root of the index tree.
@return space id */
@@ -1395,7 +1395,7 @@ ulint
dict_index_get_space(
/*=================*/
const dict_index_t* index) /*!< in: index */
- __attribute__((nonnull, warn_unused_result));
+ MY_ATTRIBUTE((nonnull, warn_unused_result));
/*********************************************************************//**
Sets the space id of the root of the index tree. */
UNIV_INLINE
@@ -1404,7 +1404,7 @@ dict_index_set_space(
/*=================*/
dict_index_t* index, /*!< in/out: index */
ulint space) /*!< in: space id */
- __attribute__((nonnull));
+ MY_ATTRIBUTE((nonnull));
/*********************************************************************//**
Gets the page number of the root of the index tree.
@return page number */
@@ -1413,7 +1413,7 @@ ulint
dict_index_get_page(
/*================*/
const dict_index_t* tree) /*!< in: index */
- __attribute__((nonnull, warn_unused_result));
+ MY_ATTRIBUTE((nonnull, warn_unused_result));
/*********************************************************************//**
Gets the read-write lock of the index tree.
@return read-write lock */
@@ -1422,7 +1422,7 @@ rw_lock_t*
dict_index_get_lock(
/*================*/
dict_index_t* index) /*!< in: index */
- __attribute__((nonnull, warn_unused_result));
+ MY_ATTRIBUTE((nonnull, warn_unused_result));
/********************************************************************//**
Returns free space reserved for future updates of records. This is
relevant only in the case of many consecutive inserts, as updates
@@ -1442,7 +1442,7 @@ enum online_index_status
dict_index_get_online_status(
/*=========================*/
const dict_index_t* index) /*!< in: secondary index */
- __attribute__((nonnull, warn_unused_result));
+ MY_ATTRIBUTE((nonnull, warn_unused_result));
/********************************************************************//**
Sets the status of online index creation. */
UNIV_INLINE
@@ -1451,7 +1451,7 @@ dict_index_set_online_status(
/*=========================*/
dict_index_t* index, /*!< in/out: index */
enum online_index_status status) /*!< in: status */
- __attribute__((nonnull));
+ MY_ATTRIBUTE((nonnull));
/********************************************************************//**
Determines if a secondary index is being or has been created online,
or if the table is being rebuilt online, allowing concurrent modifications
@@ -1465,7 +1465,7 @@ bool
dict_index_is_online_ddl(
/*=====================*/
const dict_index_t* index) /*!< in: index */
- __attribute__((nonnull, warn_unused_result));
+ MY_ATTRIBUTE((nonnull, warn_unused_result));
/*********************************************************************//**
Calculates the minimum record length in an index. */
UNIV_INTERN
@@ -1473,7 +1473,7 @@ ulint
dict_index_calc_min_rec_len(
/*========================*/
const dict_index_t* index) /*!< in: index */
- __attribute__((nonnull, warn_unused_result));
+ MY_ATTRIBUTE((nonnull, warn_unused_result));
/********************************************************************//**
Reserves the dictionary system mutex for MySQL. */
UNIV_INTERN
@@ -1541,7 +1541,7 @@ dict_tables_have_same_db(
dbname '/' tablename */
const char* name2) /*!< in: table name in the form
dbname '/' tablename */
- __attribute__((nonnull, warn_unused_result));
+ MY_ATTRIBUTE((nonnull, warn_unused_result));
/*********************************************************************//**
Removes an index from the cache */
UNIV_INTERN
@@ -1550,7 +1550,7 @@ dict_index_remove_from_cache(
/*=========================*/
dict_table_t* table, /*!< in/out: table */
dict_index_t* index) /*!< in, own: index */
- __attribute__((nonnull));
+ MY_ATTRIBUTE((nonnull));
/**********************************************************************//**
Get index by name
@return index, or NULL if it does not exist */
@@ -1560,7 +1560,7 @@ dict_table_get_index_on_name(
/*=========================*/
dict_table_t* table, /*!< in: table */
const char* name) /*!< in: name of the index to find */
- __attribute__((nonnull, warn_unused_result));
+ MY_ATTRIBUTE((nonnull, warn_unused_result));
/**********************************************************************//**
Looks for an index with the given id given a table instance.
@return index or NULL */
@@ -1581,7 +1581,7 @@ dict_table_get_index_on_name_and_min_id(
/*====================================*/
dict_table_t* table, /*!< in: table */
const char* name) /*!< in: name of the index to find */
- __attribute__((nonnull, warn_unused_result));
+ MY_ATTRIBUTE((nonnull, warn_unused_result));
/***************************************************************
Check whether a column exists in an FTS index. */
UNIV_INLINE
@@ -1592,7 +1592,7 @@ dict_table_is_fts_column(
the offset within the vector */
ib_vector_t* indexes,/* in: vector containing only FTS indexes */
ulint col_no) /* in: col number to search for */
- __attribute__((nonnull, warn_unused_result));
+ MY_ATTRIBUTE((nonnull, warn_unused_result));
/**********************************************************************//**
Move a table to the non-LRU end of the LRU list. */
UNIV_INTERN
@@ -1600,7 +1600,7 @@ void
dict_table_move_from_lru_to_non_lru(
/*================================*/
dict_table_t* table) /*!< in: table to move from LRU to non-LRU */
- __attribute__((nonnull));
+ MY_ATTRIBUTE((nonnull));
/**********************************************************************//**
Move a table to the LRU list from the non-LRU list. */
UNIV_INTERN
@@ -1608,7 +1608,7 @@ void
dict_table_move_from_non_lru_to_lru(
/*================================*/
dict_table_t* table) /*!< in: table to move from non-LRU to LRU */
- __attribute__((nonnull));
+ MY_ATTRIBUTE((nonnull));
/**********************************************************************//**
Move to the most recently used segment of the LRU list. */
UNIV_INTERN
@@ -1616,7 +1616,7 @@ void
dict_move_to_mru(
/*=============*/
dict_table_t* table) /*!< in: table to move to MRU */
- __attribute__((nonnull));
+ MY_ATTRIBUTE((nonnull));
/** Maximum number of columns in a foreign key constraint. Please note MySQL
has a much lower limit on the number of columns allowed in a foreign key
@@ -1740,7 +1740,7 @@ dict_table_schema_check(
!= DB_TABLE_NOT_FOUND is
returned */
size_t errstr_sz) /*!< in: errstr size */
- __attribute__((nonnull, warn_unused_result));
+ MY_ATTRIBUTE((nonnull, warn_unused_result));
/* @} */
/*********************************************************************//**
@@ -1758,7 +1758,7 @@ dict_fs2utf8(
size_t db_utf8_size, /*!< in: dbname_utf8 size */
char* table_utf8, /*!< out: table name, e.g. aюbØc */
size_t table_utf8_size)/*!< in: table_utf8 size */
- __attribute__((nonnull));
+ MY_ATTRIBUTE((nonnull));
/**********************************************************************//**
Closes the data dictionary module. */
@@ -1775,7 +1775,7 @@ ulint
dict_table_is_corrupted(
/*====================*/
const dict_table_t* table) /*!< in: table */
- __attribute__((nonnull, warn_unused_result));
+ MY_ATTRIBUTE((nonnull, warn_unused_result));
/**********************************************************************//**
Check whether the index is corrupted.
@@ -1785,7 +1785,7 @@ ulint
dict_index_is_corrupted(
/*====================*/
const dict_index_t* index) /*!< in: index */
- __attribute__((nonnull, warn_unused_result));
+ MY_ATTRIBUTE((nonnull, warn_unused_result));
#endif /* !UNIV_HOTBACKUP */
/**********************************************************************//**
@@ -1798,7 +1798,7 @@ dict_set_corrupted(
dict_index_t* index, /*!< in/out: index */
trx_t* trx, /*!< in/out: transaction */
const char* ctx) /*!< in: context */
- UNIV_COLD __attribute__((nonnull));
+ UNIV_COLD MY_ATTRIBUTE((nonnull));
/**********************************************************************//**
Flags an index corrupted in the data dictionary cache only. This
@@ -1810,7 +1810,7 @@ dict_set_corrupted_index_cache_only(
/*================================*/
dict_index_t* index, /*!< in/out: index */
dict_table_t* table) /*!< in/out: table */
- __attribute__((nonnull));
+ MY_ATTRIBUTE((nonnull));
/**********************************************************************//**
Flags a table with specified space_id corrupted in the table dictionary
@@ -1830,7 +1830,7 @@ bool
dict_tf_is_valid(
/*=============*/
ulint flags) /*!< in: table flags */
- __attribute__((warn_unused_result));
+ MY_ATTRIBUTE((warn_unused_result));
/********************************************************************//**
Check if the tablespace for the table has been discarded.
@@ -1840,7 +1840,7 @@ bool
dict_table_is_discarded(
/*====================*/
const dict_table_t* table) /*!< in: table to check */
- __attribute__((nonnull, pure, warn_unused_result));
+ MY_ATTRIBUTE((nonnull, pure, warn_unused_result));
/********************************************************************//**
Check if it is a temporary table.
@@ -1850,7 +1850,7 @@ bool
dict_table_is_temporary(
/*====================*/
const dict_table_t* table) /*!< in: table to check */
- __attribute__((nonnull, pure, warn_unused_result));
+ MY_ATTRIBUTE((nonnull, pure, warn_unused_result));
#ifndef UNIV_HOTBACKUP
/*********************************************************************//**
@@ -1861,7 +1861,7 @@ void
dict_index_zip_success(
/*===================*/
dict_index_t* index) /*!< in/out: index to be updated. */
- __attribute__((nonnull));
+ MY_ATTRIBUTE((nonnull));
/*********************************************************************//**
This function should be called whenever a page compression attempt
fails. Updates the compression padding information. */
@@ -1870,7 +1870,7 @@ void
dict_index_zip_failure(
/*===================*/
dict_index_t* index) /*!< in/out: index to be updated. */
- __attribute__((nonnull));
+ MY_ATTRIBUTE((nonnull));
/*********************************************************************//**
Return the optimal page size, for which page will likely compress.
@return page size beyond which page may not compress */
@@ -1880,7 +1880,7 @@ dict_index_zip_pad_optimal_page_size(
/*=================================*/
dict_index_t* index) /*!< in: index for which page size
is requested */
- __attribute__((nonnull, warn_unused_result));
+ MY_ATTRIBUTE((nonnull, warn_unused_result));
/*************************************************************//**
Convert table flag to row format string.
@return row format name */
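The MY_ATTRIBUTE() rename that runs through all of these declarations relies on a portability wrapper: GCC-style attributes pass through, other compilers get a no-op. A minimal sketch of the wrapper, assuming a my_compiler.h-style definition (the definition itself is not part of this patch):

#if defined(__GNUC__)
# define MY_ATTRIBUTE(A)  __attribute__(A)   /* GCC/clang: pass through */
#else
# define MY_ATTRIBUTE(A)                     /* other compilers: no-op */
#endif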
diff --git a/storage/innobase/include/dict0dict.ic b/storage/innobase/include/dict0dict.ic
index a3a3446d507..3d2f0dff0da 100644
--- a/storage/innobase/include/dict0dict.ic
+++ b/storage/innobase/include/dict0dict.ic
@@ -1,7 +1,7 @@
/*****************************************************************************
-Copyright (c) 1996, 2015, Oracle and/or its affiliates
-Copyright (c) 2013, 2015, SkySQL Ab
+Copyright (c) 1996, 2016, Oracle and/or its affiliates
+Copyright (c) 2013, 2016, MariaDB Corporation
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -81,7 +81,8 @@ dict_col_copy_type(
const dict_col_t* col, /*!< in: column */
dtype_t* type) /*!< out: data type */
{
- ut_ad(col && type);
+ ut_ad(col != NULL);
+ ut_ad(type != NULL);
type->mtype = col->mtype;
type->prtype = col->prtype;
@@ -358,7 +359,7 @@ UNIV_INLINE
ulint
dict_table_get_n_sys_cols(
/*======================*/
- const dict_table_t* table __attribute__((unused))) /*!< in: table */
+ const dict_table_t* table MY_ATTRIBUTE((unused))) /*!< in: table */
{
ut_ad(table);
ut_ad(table->magic_n == DICT_TABLE_MAGIC_N);
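Splitting ut_ad(col && type) into two assertions means a debug-build failure names the pointer that was NULL instead of reporting the combined expression. The same idea with plain assert(), as a self-contained sketch:

#include <assert.h>

static void copy_pair(const int* src, int* dst)
{
        /* assert(src && dst) would only report "src && dst";
           separate assertions identify the offending pointer. */
        assert(src != NULL);
        assert(dst != NULL);
        *dst = *src;
}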
diff --git a/storage/innobase/include/dict0load.h b/storage/innobase/include/dict0load.h
index 030190b1a8e..dcbc3de8e94 100644
--- a/storage/innobase/include/dict0load.h
+++ b/storage/innobase/include/dict0load.h
@@ -1,6 +1,6 @@
/*****************************************************************************
-Copyright (c) 1996, 2013, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 1996, 2016, Oracle and/or its affiliates. All Rights Reserved.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -243,7 +243,7 @@ dict_load_foreigns(
bool check_charsets, /*!< in: whether to check
charset compatibility */
dict_err_ignore_t ignore_err) /*!< in: error to be ignored */
- __attribute__((nonnull(1), warn_unused_result));
+ MY_ATTRIBUTE((nonnull(1), warn_unused_result));
/********************************************************************//**
Prints to the standard output information on all tables found in the data
dictionary system table. */
diff --git a/storage/innobase/include/dict0mem.h b/storage/innobase/include/dict0mem.h
index eb2a7968bc4..f964447fb8f 100644
--- a/storage/innobase/include/dict0mem.h
+++ b/storage/innobase/include/dict0mem.h
@@ -1,6 +1,6 @@
/*****************************************************************************
-Copyright (c) 1996, 2015, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 1996, 2016, Oracle and/or its affiliates. All Rights Reserved.
Copyright (c) 2012, Facebook Inc.
Copyright (c) 2013, 2016, MariaDB Corporation.
@@ -192,19 +192,19 @@ DEFAULT=0, ON = 1, OFF = 2
/** Bit mask of the COMPACT field */
#define DICT_TF_MASK_COMPACT \
- ((~(~0 << DICT_TF_WIDTH_COMPACT)) \
+ ((~(~0U << DICT_TF_WIDTH_COMPACT)) \
<< DICT_TF_POS_COMPACT)
/** Bit mask of the ZIP_SSIZE field */
#define DICT_TF_MASK_ZIP_SSIZE \
- ((~(~0 << DICT_TF_WIDTH_ZIP_SSIZE)) \
+ ((~(~0U << DICT_TF_WIDTH_ZIP_SSIZE)) \
<< DICT_TF_POS_ZIP_SSIZE)
/** Bit mask of the ATOMIC_BLOBS field */
#define DICT_TF_MASK_ATOMIC_BLOBS \
- ((~(~0 << DICT_TF_WIDTH_ATOMIC_BLOBS)) \
+ ((~(~0U << DICT_TF_WIDTH_ATOMIC_BLOBS)) \
<< DICT_TF_POS_ATOMIC_BLOBS)
/** Bit mask of the DATA_DIR field */
#define DICT_TF_MASK_DATA_DIR \
- ((~(~0 << DICT_TF_WIDTH_DATA_DIR)) \
+ ((~(~0U << DICT_TF_WIDTH_DATA_DIR)) \
<< DICT_TF_POS_DATA_DIR)
/** Bit mask of the PAGE_COMPRESSION field */
#define DICT_TF_MASK_PAGE_COMPRESSION \
@@ -279,7 +279,7 @@ for unknown bits in order to protect backward incompatibility. */
/* @{ */
/** Total number of bits in table->flags2. */
#define DICT_TF2_BITS 7
-#define DICT_TF2_BIT_MASK ~(~0 << DICT_TF2_BITS)
+#define DICT_TF2_BIT_MASK ~(~0U << DICT_TF2_BITS)
/** TEMPORARY; TRUE for tables from CREATE TEMPORARY TABLE. */
#define DICT_TF2_TEMPORARY 1
@@ -367,7 +367,7 @@ dict_mem_table_add_col(
ulint mtype, /*!< in: main datatype */
ulint prtype, /*!< in: precise type */
ulint len) /*!< in: precision */
- __attribute__((nonnull(1)));
+ MY_ATTRIBUTE((nonnull(1)));
/**********************************************************************//**
Renames a column of a table in the data dictionary cache. */
UNIV_INTERN
@@ -378,7 +378,7 @@ dict_mem_table_col_rename(
unsigned nth_col,/*!< in: column index */
const char* from, /*!< in: old column name */
const char* to) /*!< in: new column name */
- __attribute__((nonnull));
+ MY_ATTRIBUTE((nonnull));
/**********************************************************************//**
This function populates a dict_col_t memory structure with
supplied information. */
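The ~0 -> ~0U changes in the DICT_TF masks matter because left-shifting the signed value -1 is undefined behaviour in C; the unsigned form builds the same bit masks portably. A standalone check of the pattern, with hypothetical WIDTH/POS values:

#include <stdio.h>

#define WIDTH   4
#define POS     1
/* WIDTH ones, shifted up to bit position POS */
#define MASK    ((~(~0U << WIDTH)) << POS)

int main(void)
{
        printf("0x%x\n", MASK);   /* prints 0x1e: bits 1..4 set */
        return 0;
}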
diff --git a/storage/innobase/include/dict0stats.h b/storage/innobase/include/dict0stats.h
index abf56b2f0c7..72501bf9429 100644
--- a/storage/innobase/include/dict0stats.h
+++ b/storage/innobase/include/dict0stats.h
@@ -1,6 +1,6 @@
/*****************************************************************************
-Copyright (c) 2009, 2012, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 2009, 2016, Oracle and/or its affiliates. All Rights Reserved.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -77,7 +77,7 @@ dict_stats_set_persistent(
dict_table_t* table, /*!< in/out: table */
ibool ps_on, /*!< in: persistent stats explicitly enabled */
ibool ps_off) /*!< in: persistent stats explicitly disabled */
- __attribute__((nonnull));
+ MY_ATTRIBUTE((nonnull));
/*********************************************************************//**
Check whether persistent statistics is enabled for a given table.
@@ -87,7 +87,7 @@ ibool
dict_stats_is_persistent_enabled(
/*=============================*/
const dict_table_t* table) /*!< in: table */
- __attribute__((nonnull, warn_unused_result));
+ MY_ATTRIBUTE((nonnull, warn_unused_result));
/*********************************************************************//**
Set the auto recalc flag for a given table (only honored for a persistent
@@ -127,7 +127,7 @@ void
dict_stats_deinit(
/*==============*/
dict_table_t* table) /*!< in/out: table */
- __attribute__((nonnull));
+ MY_ATTRIBUTE((nonnull));
/*********************************************************************//**
Calculates new estimates for table and index statistics. The statistics
@@ -179,7 +179,7 @@ void
dict_stats_update_for_index(
/*========================*/
dict_index_t* index) /*!< in/out: index */
- __attribute__((nonnull));
+ MY_ATTRIBUTE((nonnull));
/*********************************************************************//**
Renames a table in InnoDB persistent stats storage.
diff --git a/storage/innobase/include/dict0stats_bg.h b/storage/innobase/include/dict0stats_bg.h
index 32fac3015e8..34dc4657829 100644
--- a/storage/innobase/include/dict0stats_bg.h
+++ b/storage/innobase/include/dict0stats_bg.h
@@ -1,6 +1,6 @@
/*****************************************************************************
-Copyright (c) 2012, 2013, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 2012, 2016, Oracle and/or its affiliates. All Rights Reserved.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -96,7 +96,7 @@ bool
dict_stats_stop_bg(
/*===============*/
dict_table_t* table) /*!< in/out: table */
- __attribute__((warn_unused_result));
+ MY_ATTRIBUTE((warn_unused_result));
/*****************************************************************//**
Wait until background stats thread has stopped using the specified table.
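warn_unused_result turns a dropped status into a compiler warning, which is the point of adding it to dict_stats_stop_bg(). Illustrative caller behaviour (hypothetical snippet, not from this patch):

if (!dict_stats_stop_bg(table)) {
        /* status consumed: back off and retry later */
}

dict_stats_stop_bg(table);        /* -Wunused-result: value ignored */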
diff --git a/storage/innobase/include/dyn0dyn.h b/storage/innobase/include/dyn0dyn.h
index 7f23302d1ff..1bd10b6bf58 100644
--- a/storage/innobase/include/dyn0dyn.h
+++ b/storage/innobase/include/dyn0dyn.h
@@ -1,6 +1,6 @@
/*****************************************************************************
-Copyright (c) 1996, 2013, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 1996, 2016, Oracle and/or its affiliates. All Rights Reserved.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -48,7 +48,7 @@ dyn_array_create(
/*=============*/
	dyn_array_t*	arr)	/*!< in/out: memory buffer of
size sizeof(dyn_array_t) */
- __attribute__((nonnull));
+ MY_ATTRIBUTE((nonnull));
/************************************************************//**
Frees a dynamic array. */
UNIV_INLINE
@@ -56,7 +56,7 @@ void
dyn_array_free(
/*===========*/
dyn_array_t* arr) /*!< in,own: dyn array */
- __attribute__((nonnull));
+ MY_ATTRIBUTE((nonnull));
/*********************************************************************//**
Makes room on top of a dyn array and returns a pointer to a buffer in it.
After copying the elements, the caller must close the buffer using
@@ -69,7 +69,7 @@ dyn_array_open(
dyn_array_t* arr, /*!< in: dynamic array */
ulint size) /*!< in: size in bytes of the buffer; MUST be
smaller than DYN_ARRAY_DATA_SIZE! */
- __attribute__((nonnull, warn_unused_result));
+ MY_ATTRIBUTE((nonnull, warn_unused_result));
/*********************************************************************//**
Closes the buffer returned by dyn_array_open. */
UNIV_INLINE
@@ -78,7 +78,7 @@ dyn_array_close(
/*============*/
dyn_array_t* arr, /*!< in: dynamic array */
const byte* ptr) /*!< in: end of used space */
- __attribute__((nonnull));
+ MY_ATTRIBUTE((nonnull));
/*********************************************************************//**
Makes room on top of a dyn array and returns a pointer to
the added element. The caller must copy the element to
@@ -90,7 +90,7 @@ dyn_array_push(
/*===========*/
dyn_array_t* arr, /*!< in/out: dynamic array */
ulint size) /*!< in: size in bytes of the element */
- __attribute__((nonnull, warn_unused_result));
+ MY_ATTRIBUTE((nonnull, warn_unused_result));
/************************************************************//**
Returns pointer to an element in dyn array.
@return pointer to element */
@@ -101,7 +101,7 @@ dyn_array_get_element(
const dyn_array_t* arr, /*!< in: dyn array */
ulint pos) /*!< in: position of element
in bytes from array start */
- __attribute__((nonnull, warn_unused_result));
+ MY_ATTRIBUTE((nonnull, warn_unused_result));
/************************************************************//**
Returns the size of stored data in a dyn array.
@return data size in bytes */
@@ -110,7 +110,7 @@ ulint
dyn_array_get_data_size(
/*====================*/
const dyn_array_t* arr) /*!< in: dyn array */
- __attribute__((nonnull, warn_unused_result, pure));
+ MY_ATTRIBUTE((nonnull, warn_unused_result, pure));
/************************************************************//**
Gets the first block in a dyn array.
@param arr dyn array
@@ -144,7 +144,7 @@ ulint
dyn_block_get_used(
/*===============*/
const dyn_block_t* block) /*!< in: dyn array block */
- __attribute__((nonnull, warn_unused_result, pure));
+ MY_ATTRIBUTE((nonnull, warn_unused_result, pure));
/********************************************************************//**
Gets pointer to the start of data in a dyn array block.
@return pointer to data */
@@ -153,7 +153,7 @@ byte*
dyn_block_get_data(
/*===============*/
const dyn_block_t* block) /*!< in: dyn array block */
- __attribute__((nonnull, warn_unused_result, pure));
+ MY_ATTRIBUTE((nonnull, warn_unused_result, pure));
/********************************************************//**
Pushes n bytes to a dyn array. */
UNIV_INLINE
@@ -163,7 +163,7 @@ dyn_push_string(
dyn_array_t* arr, /*!< in/out: dyn array */
const byte* str, /*!< in: string to write */
ulint len) /*!< in: string length */
- __attribute__((nonnull));
+ MY_ATTRIBUTE((nonnull));
/*#################################################################*/
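Taken together, the declarations above give the dyn array life cycle: create, push, inspect, free. A hedged caller sketch, assuming InnoDB's byte/ulint typedefs and that dyn_array_push() returns a pointer into the array:

dyn_array_t     arr;

dyn_array_create(&arr);                          /* arr is caller-owned */
dyn_push_string(&arr, (const byte*) "abc", 3);   /* append 3 bytes */

byte*   slot = (byte*) dyn_array_push(&arr, 4);  /* reserve 4 more */
memcpy(slot, "defg", 4);

ut_ad(dyn_array_get_data_size(&arr) == 7);       /* 7 bytes stored */
dyn_array_free(&arr);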
diff --git a/storage/innobase/include/dyn0dyn.ic b/storage/innobase/include/dyn0dyn.ic
index 0296554e2ee..f18f2e6dff9 100644
--- a/storage/innobase/include/dyn0dyn.ic
+++ b/storage/innobase/include/dyn0dyn.ic
@@ -1,6 +1,6 @@
/*****************************************************************************
-Copyright (c) 1996, 2013, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 1996, 2016, Oracle and/or its affiliates. All Rights Reserved.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -36,7 +36,7 @@ dyn_block_t*
dyn_array_add_block(
/*================*/
dyn_array_t* arr) /*!< in/out: dyn array */
- __attribute__((nonnull, warn_unused_result));
+ MY_ATTRIBUTE((nonnull, warn_unused_result));
/********************************************************************//**
Gets the number of used bytes in a dyn array block.
diff --git a/storage/innobase/include/fil0fil.h b/storage/innobase/include/fil0fil.h
index 7fe0cc88618..0c7ed7b3ab0 100644
--- a/storage/innobase/include/fil0fil.h
+++ b/storage/innobase/include/fil0fil.h
@@ -1,7 +1,7 @@
/*****************************************************************************
-Copyright (c) 1995, 2015, Oracle and/or its affiliates.
-Copyright (c) 2013, 2015, MariaDB Corporation.
+Copyright (c) 1995, 2016, Oracle and/or its affiliates.
+Copyright (c) 2013, 2016, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -443,7 +443,7 @@ fil_node_create(
ulint id, /*!< in: space id where to append */
ibool is_raw) /*!< in: TRUE if a raw device or
a raw disk partition */
- __attribute__((nonnull, warn_unused_result));
+ MY_ATTRIBUTE((nonnull, warn_unused_result));
#ifdef UNIV_LOG_ARCHIVE
/****************************************************************//**
Drops files from the start of a file space, so that its size is cut by
@@ -702,7 +702,7 @@ dberr_t
fil_discard_tablespace(
/*===================*/
ulint id) /*!< in: space id */
- __attribute__((warn_unused_result));
+ MY_ATTRIBUTE((warn_unused_result));
#endif /* !UNIV_HOTBACKUP */
/** Test if a tablespace file can be renamed to a new filepath by checking
@@ -1200,7 +1200,7 @@ fil_tablespace_iterate(
dict_table_t* table,
ulint n_io_buffers,
PageCallback& callback)
- __attribute__((nonnull, warn_unused_result));
+ MY_ATTRIBUTE((nonnull, warn_unused_result));
/*******************************************************************//**
Checks if a single-table tablespace for a given table name exists in the
@@ -1224,7 +1224,7 @@ fil_get_space_names(
/*================*/
space_name_list_t& space_name_list)
/*!< in/out: Vector for collecting the names. */
- __attribute__((warn_unused_result));
+ MY_ATTRIBUTE((warn_unused_result));
/** Generate redo log for swapping two .ibd files
@param[in] old_table old table
@@ -1239,7 +1239,7 @@ fil_mtr_rename_log(
const dict_table_t* new_table,
const char* tmp_name,
mtr_t* mtr)
- __attribute__((nonnull));
+ MY_ATTRIBUTE((nonnull));
/*******************************************************************//**
Finds the given page_no of the given space id from the double write buffer,
diff --git a/storage/innobase/include/fsp0fsp.h b/storage/innobase/include/fsp0fsp.h
index 296dfd8d841..b9ff05b4bd4 100644
--- a/storage/innobase/include/fsp0fsp.h
+++ b/storage/innobase/include/fsp0fsp.h
@@ -1,6 +1,6 @@
/*****************************************************************************
-Copyright (c) 1995, 2012, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 1995, 2016, Oracle and/or its affiliates. All Rights Reserved.
Copyright (c) 2013, 2016, MariaDB Corporation. All Rights Reserved.
This program is free software; you can redistribute it and/or modify it under
@@ -109,23 +109,23 @@ dictionary */
/** Bit mask of the POST_ANTELOPE field */
#define FSP_FLAGS_MASK_POST_ANTELOPE \
- ((~(~0 << FSP_FLAGS_WIDTH_POST_ANTELOPE)) \
+ ((~(~0U << FSP_FLAGS_WIDTH_POST_ANTELOPE)) \
<< FSP_FLAGS_POS_POST_ANTELOPE)
/** Bit mask of the ZIP_SSIZE field */
#define FSP_FLAGS_MASK_ZIP_SSIZE \
- ((~(~0 << FSP_FLAGS_WIDTH_ZIP_SSIZE)) \
+ ((~(~0U << FSP_FLAGS_WIDTH_ZIP_SSIZE)) \
<< FSP_FLAGS_POS_ZIP_SSIZE)
/** Bit mask of the ATOMIC_BLOBS field */
#define FSP_FLAGS_MASK_ATOMIC_BLOBS \
- ((~(~0 << FSP_FLAGS_WIDTH_ATOMIC_BLOBS)) \
+ ((~(~0U << FSP_FLAGS_WIDTH_ATOMIC_BLOBS)) \
<< FSP_FLAGS_POS_ATOMIC_BLOBS)
/** Bit mask of the PAGE_SSIZE field */
#define FSP_FLAGS_MASK_PAGE_SSIZE \
- ((~(~0 << FSP_FLAGS_WIDTH_PAGE_SSIZE)) \
+ ((~(~0U << FSP_FLAGS_WIDTH_PAGE_SSIZE)) \
<< FSP_FLAGS_POS_PAGE_SSIZE)
/** Bit mask of the DATA_DIR field */
#define FSP_FLAGS_MASK_DATA_DIR \
- ((~(~0 << FSP_FLAGS_WIDTH_DATA_DIR)) \
+ ((~(~0U << FSP_FLAGS_WIDTH_DATA_DIR)) \
<< FSP_FLAGS_POS_DATA_DIR)
/** Bit mask of the DATA_DIR field */
#define FSP_FLAGS_MASK_DATA_DIR_ORACLE \
@@ -582,7 +582,7 @@ fseg_alloc_free_page_general(
in which the page should be initialized.
If init_mtr!=mtr, but the page is already
latched in mtr, do not initialize the page. */
- __attribute__((warn_unused_result, nonnull));
+ MY_ATTRIBUTE((warn_unused_result, nonnull));
/**********************************************************************//**
Reserves free pages from a tablespace. All mini-transactions which may
use several pages from the tablespace should call this function beforehand
@@ -651,7 +651,7 @@ fseg_page_is_free(
fseg_header_t* seg_header, /*!< in: segment header */
ulint space, /*!< in: space id */
ulint page) /*!< in: page offset */
- __attribute__((nonnull, warn_unused_result));
+ MY_ATTRIBUTE((nonnull, warn_unused_result));
/**********************************************************************//**
Frees part of a segment. This function can be used to free a segment
by repeatedly calling this function in different mini-transactions.
@@ -747,7 +747,7 @@ bool
fsp_flags_is_valid(
/*===============*/
ulint flags) /*!< in: tablespace flags */
- __attribute__((warn_unused_result, const));
+ MY_ATTRIBUTE((warn_unused_result, const));
/********************************************************************//**
Determine if the tablespace is compressed from dict_table_t::flags.
@return TRUE if compressed, FALSE if not compressed */
diff --git a/storage/innobase/include/fts0ast.h b/storage/innobase/include/fts0ast.h
index b2380f78b39..50f62063893 100644
--- a/storage/innobase/include/fts0ast.h
+++ b/storage/innobase/include/fts0ast.h
@@ -1,6 +1,6 @@
/*****************************************************************************
-Copyright (c) 2007, 2014, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 2007, 2016, Oracle and/or its affiliates. All Rights Reserved.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -200,7 +200,7 @@ fts_ast_visit(
and ignored processing an
operator, currently we only
ignore FTS_IGNORE operator */
- __attribute__((nonnull, warn_unused_result));
+ MY_ATTRIBUTE((nonnull, warn_unused_result));
/*****************************************************************//**
Process (nested) sub-expression, create a new result set to store the
sub-expression result by processing nodes under current sub-expression
@@ -213,7 +213,7 @@ fts_ast_visit_sub_exp(
fts_ast_node_t* node, /*!< in: instance to traverse*/
fts_ast_callback visitor, /*!< in: callback */
void* arg) /*!< in: callback arg */
- __attribute__((nonnull, warn_unused_result));
+ MY_ATTRIBUTE((nonnull, warn_unused_result));
/********************************************************************
Create a lex instance.*/
UNIV_INTERN
@@ -223,7 +223,7 @@ fts_lexer_create(
ibool boolean_mode, /*!< in: query type */
const byte* query, /*!< in: query string */
ulint query_len) /*!< in: query string len */
- __attribute__((nonnull, malloc, warn_unused_result));
+ MY_ATTRIBUTE((nonnull, malloc, warn_unused_result));
/********************************************************************
Free an fts_lexer_t instance.*/
UNIV_INTERN
@@ -232,7 +232,7 @@ fts_lexer_free(
/*===========*/
fts_lexer_t* fts_lexer) /*!< in: lexer instance to
free */
- __attribute__((nonnull));
+ MY_ATTRIBUTE((nonnull));
/**
Create an ast string object, with NUL-terminator, so the string
diff --git a/storage/innobase/include/fts0fts.h b/storage/innobase/include/fts0fts.h
index 9f7b0216d9b..68d4d333245 100644
--- a/storage/innobase/include/fts0fts.h
+++ b/storage/innobase/include/fts0fts.h
@@ -94,7 +94,10 @@ those defined in mysql file ft_global.h */
/** Threshold where our optimize thread automatically kicks in */
#define FTS_OPTIMIZE_THRESHOLD 10000000
-#define FTS_DOC_ID_MAX_STEP 10000
+/** Threshold to avoid exhausting doc ids: the difference between
+consecutive doc ids must not exceed FTS_DOC_ID_MAX_STEP */
+#define FTS_DOC_ID_MAX_STEP 65535
+
/** Variable specifying the FTS parallel sort degree */
extern ulong fts_sort_pll_degree;
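The raised FTS_DOC_ID_MAX_STEP still bounds how far a user-supplied FTS_DOC_ID may jump ahead of the last assigned id, so the 64-bit id space cannot be burned through in a few inserts. The guard has this shape (illustrative only; the actual check sits in the insert path, not in this hunk):

if (doc_id - last_doc_id >= FTS_DOC_ID_MAX_STEP) {
        return(DB_FTS_INVALID_DOCID);   /* gap between ids too large */
}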
@@ -408,7 +411,7 @@ fts_get_next_doc_id(
/*================*/
const dict_table_t* table, /*!< in: table */
doc_id_t* doc_id) /*!< out: new document id */
- __attribute__((nonnull));
+ MY_ATTRIBUTE((nonnull));
/*********************************************************************//**
Update the next and last Doc ID in the CONFIG table to be the input
"doc_id" value (+ 1). We would do so after each FTS index build or
@@ -421,7 +424,7 @@ fts_update_next_doc_id(
const dict_table_t* table, /*!< in: table */
const char* table_name, /*!< in: table name, or NULL */
doc_id_t doc_id) /*!< in: DOC ID to set */
- __attribute__((nonnull(2)));
+ MY_ATTRIBUTE((nonnull(2)));
/******************************************************************//**
Create a new document id.
@@ -437,7 +440,7 @@ fts_create_doc_id(
current row that is being
inserted. */
mem_heap_t* heap) /*!< in: heap */
- __attribute__((nonnull));
+ MY_ATTRIBUTE((nonnull));
/******************************************************************//**
Create a new fts_doc_ids_t.
@return new fts_doc_ids_t. */
@@ -466,7 +469,7 @@ fts_trx_add_op(
fts_row_state state, /*!< in: state of the row */
ib_vector_t* fts_indexes) /*!< in: FTS indexes affected
(NULL=all) */
- __attribute__((nonnull(1,2)));
+ MY_ATTRIBUTE((nonnull(1,2)));
/******************************************************************//**
Free an FTS trx. */
@@ -491,7 +494,7 @@ fts_create_common_tables(
index */
const char* name, /*!< in: table name */
bool skip_doc_id_index) /*!< in: Skip index on doc id */
- __attribute__((nonnull, warn_unused_result));
+ MY_ATTRIBUTE((nonnull, warn_unused_result));
/******************************************************************//**
Wrapper function of fts_create_index_tables_low(), create auxiliary
tables for an FTS index
@@ -503,7 +506,7 @@ fts_create_index_tables(
trx_t* trx, /*!< in: transaction handle */
const dict_index_t* index) /*!< in: the FTS index
instance */
- __attribute__((nonnull, warn_unused_result));
+ MY_ATTRIBUTE((nonnull, warn_unused_result));
/******************************************************************//**
Creates the column specific ancillary tables needed for supporting an
FTS index on the given table. row_mysql_lock_data_dictionary must have
@@ -519,7 +522,7 @@ fts_create_index_tables_low(
instance */
const char* table_name, /*!< in: the table name */
table_id_t table_id) /*!< in: the table id */
- __attribute__((nonnull, warn_unused_result));
+ MY_ATTRIBUTE((nonnull, warn_unused_result));
/******************************************************************//**
Add the FTS document id hidden column. */
UNIV_INTERN
@@ -528,7 +531,7 @@ fts_add_doc_id_column(
/*==================*/
dict_table_t* table, /*!< in/out: Table with FTS index */
mem_heap_t* heap) /*!< in: temporary memory heap, or NULL */
- __attribute__((nonnull(1)));
+ MY_ATTRIBUTE((nonnull(1)));
/*********************************************************************//**
Drops the ancillary tables needed for supporting an FTS index on the
@@ -542,7 +545,7 @@ fts_drop_tables(
trx_t* trx, /*!< in: transaction */
dict_table_t* table) /*!< in: table has the FTS
index */
- __attribute__((nonnull));
+ MY_ATTRIBUTE((nonnull));
/******************************************************************//**
The given transaction is about to be committed; do whatever is necessary
from the FTS system's POV.
@@ -552,7 +555,7 @@ dberr_t
fts_commit(
/*=======*/
trx_t* trx) /*!< in: transaction */
- __attribute__((nonnull, warn_unused_result));
+ MY_ATTRIBUTE((nonnull, warn_unused_result));
/*******************************************************************//**
FTS Query entry point.
@@ -569,7 +572,7 @@ fts_query(
in bytes */
fts_result_t** result) /*!< out: query result, to be
freed by the caller.*/
- __attribute__((nonnull, warn_unused_result));
+ MY_ATTRIBUTE((nonnull, warn_unused_result));
/******************************************************************//**
Retrieve the FTS Relevance Ranking result for doc with doc_id
@@ -687,7 +690,7 @@ dberr_t
fts_optimize_table(
/*===============*/
	dict_table_t*	table)	/*!< in: table to optimize */
- __attribute__((nonnull));
+ MY_ATTRIBUTE((nonnull));
/**********************************************************************//**
Startup the optimize thread and create the work queue. */
@@ -713,7 +716,7 @@ fts_drop_index_tables(
/*==================*/
trx_t* trx, /*!< in: transaction */
dict_index_t* index) /*!< in: Index to drop */
- __attribute__((nonnull, warn_unused_result));
+ MY_ATTRIBUTE((nonnull, warn_unused_result));
/******************************************************************//**
Remove the table from the OPTIMIZER's list. We do wait for
@@ -754,7 +757,7 @@ fts_savepoint_take(
trx_t* trx, /*!< in: transaction */
fts_trx_t* fts_trx, /*!< in: fts transaction */
const char* name) /*!< in: savepoint name */
- __attribute__((nonnull));
+ MY_ATTRIBUTE((nonnull));
/**********************************************************************//**
Refresh last statement savepoint. */
UNIV_INTERN
@@ -762,7 +765,7 @@ void
fts_savepoint_laststmt_refresh(
/*===========================*/
trx_t* trx) /*!< in: transaction */
- __attribute__((nonnull));
+ MY_ATTRIBUTE((nonnull));
/**********************************************************************//**
Release the savepoint data identified by name. */
UNIV_INTERN
@@ -780,13 +783,12 @@ fts_cache_destroy(
/*==============*/
fts_cache_t* cache); /*!< in: cache*/
-/*********************************************************************//**
-Clear cache. */
+/** Clear cache.
+@param[in,out] cache fts cache */
UNIV_INTERN
void
fts_cache_clear(
-/*============*/
- fts_cache_t* cache); /*!< in: cache */
+ fts_cache_t* cache);
/*********************************************************************//**
Initialize things in cache. */
@@ -831,7 +833,7 @@ fts_drop_index_split_tables(
/*========================*/
trx_t* trx, /*!< in: transaction */
dict_index_t* index) /*!< in: fts instance */
- __attribute__((nonnull, warn_unused_result));
+ MY_ATTRIBUTE((nonnull, warn_unused_result));
/** Run SYNC on the table, i.e., write out data from the cache to the
FTS auxiliary INDEX table and clear the cache at the end.
@@ -1023,7 +1025,7 @@ fts_drop_index(
dict_table_t* table, /*!< in: Table where indexes are dropped */
dict_index_t* index, /*!< in: Index to be dropped */
trx_t* trx) /*!< in: Transaction for the drop */
- __attribute__((nonnull));
+ MY_ATTRIBUTE((nonnull));
/****************************************************************//**
Rename auxiliary tables for all fts index for a table
diff --git a/storage/innobase/include/fts0priv.h b/storage/innobase/include/fts0priv.h
index b4d9e1d41ec..2d4e9d88fd1 100644
--- a/storage/innobase/include/fts0priv.h
+++ b/storage/innobase/include/fts0priv.h
@@ -1,6 +1,6 @@
/*****************************************************************************
-Copyright (c) 2011, 2013, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 2011, 2016, Oracle and/or its affiliates. All Rights Reserved.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -121,7 +121,7 @@ fts_parse_sql(
fts_table_t* fts_table, /*!< in: FTS aux table */
pars_info_t* info, /*!< in: info struct, or NULL */
const char* sql) /*!< in: SQL string to evaluate */
- __attribute__((nonnull(3), malloc, warn_unused_result));
+ MY_ATTRIBUTE((nonnull(3), malloc, warn_unused_result));
/******************************************************************//**
Evaluate a parsed SQL statement
@return DB_SUCCESS or error code */
@@ -131,7 +131,7 @@ fts_eval_sql(
/*=========*/
trx_t* trx, /*!< in: transaction */
que_t* graph) /*!< in: Parsed statement */
- __attribute__((nonnull, warn_unused_result));
+ MY_ATTRIBUTE((nonnull, warn_unused_result));
/******************************************************************//**
Construct the name of an ancillary FTS table for the given table.
@return own: table name, must be freed with mem_free() */
@@ -141,7 +141,7 @@ fts_get_table_name(
/*===============*/
const fts_table_t*
fts_table) /*!< in: FTS aux table info */
- __attribute__((nonnull, malloc, warn_unused_result));
+ MY_ATTRIBUTE((nonnull, malloc, warn_unused_result));
/******************************************************************//**
Construct the column specification part of the SQL string for selecting the
indexed FTS columns for the given table. Adds the necessary bound
@@ -164,7 +164,7 @@ fts_get_select_columns_str(
dict_index_t* index, /*!< in: FTS index */
pars_info_t* info, /*!< in/out: parser info */
mem_heap_t* heap) /*!< in: memory heap */
- __attribute__((nonnull, warn_unused_result));
+ MY_ATTRIBUTE((nonnull, warn_unused_result));
/** define for fts_doc_fetch_by_doc_id() "option" value, defines whether
we want to get Doc whose ID is equal to or greater or smaller than supplied
@@ -191,7 +191,7 @@ fts_doc_fetch_by_doc_id(
callback, /*!< in: callback to read
records */
void* arg) /*!< in: callback arg */
- __attribute__((nonnull(6)));
+ MY_ATTRIBUTE((nonnull(6)));
/*******************************************************************//**
Callback function for fetch that stores the text of an FTS document,
@@ -203,7 +203,7 @@ fts_query_expansion_fetch_doc(
/*==========================*/
void* row, /*!< in: sel_node_t* */
void* user_arg) /*!< in: fts_doc_t* */
- __attribute__((nonnull));
+ MY_ATTRIBUTE((nonnull));
/********************************************************************
Write out a single word's data as new entry/entries in the INDEX table.
@return DB_SUCCESS if all OK. */
@@ -216,7 +216,7 @@ fts_write_node(
fts_table_t* fts_table, /*!< in: the FTS aux index */
fts_string_t* word, /*!< in: word in UTF-8 */
fts_node_t* node) /*!< in: node columns */
- __attribute__((nonnull, warn_unused_result));
+ MY_ATTRIBUTE((nonnull, warn_unused_result));
/*******************************************************************//**
Tokenize a document. */
UNIV_INTERN
@@ -227,7 +227,7 @@ fts_tokenize_document(
tokenize */
fts_doc_t* result) /*!< out: if provided, save
result tokens here */
- __attribute__((nonnull(1)));
+ MY_ATTRIBUTE((nonnull(1)));
/*******************************************************************//**
Continue to tokenize a document. */
@@ -241,7 +241,7 @@ fts_tokenize_document_next(
tokens from this tokenization */
fts_doc_t* result) /*!< out: if provided, save
result tokens here */
- __attribute__((nonnull(1)));
+ MY_ATTRIBUTE((nonnull(1)));
/******************************************************************//**
Initialize a document. */
UNIV_INTERN
@@ -249,7 +249,7 @@ void
fts_doc_init(
/*=========*/
fts_doc_t* doc) /*!< in: doc to initialize */
- __attribute__((nonnull));
+ MY_ATTRIBUTE((nonnull));
/******************************************************************//**
Do a binary search for a doc id in the array
@@ -263,7 +263,7 @@ fts_bsearch(
int lower, /*!< in: lower bound of array*/
int upper, /*!< in: upper bound of array*/
doc_id_t doc_id) /*!< in: doc id to lookup */
- __attribute__((nonnull, warn_unused_result));
+ MY_ATTRIBUTE((nonnull, warn_unused_result));
/******************************************************************//**
Free document. */
UNIV_INTERN
@@ -271,7 +271,7 @@ void
fts_doc_free(
/*=========*/
fts_doc_t* doc) /*!< in: document */
- __attribute__((nonnull));
+ MY_ATTRIBUTE((nonnull));
/******************************************************************//**
Free fts_optimizer_word_t instance.*/
UNIV_INTERN
@@ -279,7 +279,7 @@ void
fts_word_free(
/*==========*/
fts_word_t* word) /*!< in: instance to free.*/
- __attribute__((nonnull));
+ MY_ATTRIBUTE((nonnull));
/******************************************************************//**
Read the rows from the FTS index
@return DB_SUCCESS or error code */
@@ -293,7 +293,7 @@ fts_index_fetch_nodes(
const fts_string_t*
word, /*!< in: the word to fetch */
fts_fetch_t* fetch) /*!< in: fetch callback.*/
- __attribute__((nonnull));
+ MY_ATTRIBUTE((nonnull));
/******************************************************************//**
Create a fts_optimizer_word_t instance.
@return new instance */
@@ -304,7 +304,7 @@ fts_word_init(
fts_word_t* word, /*!< in: word to initialize */
byte* utf8, /*!< in: UTF-8 string */
ulint len) /*!< in: length of string in bytes */
- __attribute__((nonnull));
+ MY_ATTRIBUTE((nonnull));
/******************************************************************//**
Compare two fts_trx_table_t instances, we actually compare the
table id's here.
@@ -315,7 +315,7 @@ fts_trx_table_cmp(
/*==============*/
const void* v1, /*!< in: id1 */
const void* v2) /*!< in: id2 */
- __attribute__((nonnull, warn_unused_result));
+ MY_ATTRIBUTE((nonnull, warn_unused_result));
/******************************************************************//**
Compare a table id with a trx_table_t table id.
@return < 0 if n1 < n2, 0 if n1 == n2, > 0 if n1 > n2 */
@@ -325,7 +325,7 @@ fts_trx_table_id_cmp(
/*=================*/
const void* p1, /*!< in: id1 */
const void* p2) /*!< in: id2 */
- __attribute__((nonnull, warn_unused_result));
+ MY_ATTRIBUTE((nonnull, warn_unused_result));
/******************************************************************//**
Commit a transaction.
@return DB_SUCCESS if all OK */
@@ -334,7 +334,7 @@ dberr_t
fts_sql_commit(
/*===========*/
trx_t* trx) /*!< in: transaction */
- __attribute__((nonnull));
+ MY_ATTRIBUTE((nonnull));
/******************************************************************//**
Rollback a transaction.
@return DB_SUCCESS if all OK */
@@ -343,7 +343,7 @@ dberr_t
fts_sql_rollback(
/*=============*/
trx_t* trx) /*!< in: transaction */
- __attribute__((nonnull));
+ MY_ATTRIBUTE((nonnull));
/******************************************************************//**
Parse an SQL string. %s is replaced with the table's id. Don't acquire
the dict mutex
@@ -355,7 +355,7 @@ fts_parse_sql_no_dict_lock(
fts_table_t* fts_table, /*!< in: table with FTS index */
pars_info_t* info, /*!< in: parser info */
const char* sql) /*!< in: SQL string to evaluate */
- __attribute__((nonnull(3), malloc, warn_unused_result));
+ MY_ATTRIBUTE((nonnull(3), malloc, warn_unused_result));
/******************************************************************//**
Get value from config table. The caller must ensure that enough
space is allocated for value to hold the column contents
@@ -370,7 +370,7 @@ fts_config_get_value(
this parameter name */
fts_string_t* value) /*!< out: value read from
config table */
- __attribute__((nonnull));
+ MY_ATTRIBUTE((nonnull));
/******************************************************************//**
Get value specific to an FTS index from the config table. The caller
must ensure that enough space is allocated for value to hold the
@@ -386,7 +386,7 @@ fts_config_get_index_value(
this parameter name */
fts_string_t* value) /*!< out: value read from
config table */
- __attribute__((nonnull, warn_unused_result));
+ MY_ATTRIBUTE((nonnull, warn_unused_result));
/******************************************************************//**
Set the value in the config table for name.
@return DB_SUCCESS or error code */
@@ -400,7 +400,7 @@ fts_config_set_value(
this parameter name */
const fts_string_t*
value) /*!< in: value to update */
- __attribute__((nonnull));
+ MY_ATTRIBUTE((nonnull));
/****************************************************************//**
Set an ulint value in the config table.
@return DB_SUCCESS if all OK else error code */
@@ -412,7 +412,7 @@ fts_config_set_ulint(
fts_table_t* fts_table, /*!< in: the indexed FTS table */
const char* name, /*!< in: param name */
ulint int_value) /*!< in: value */
- __attribute__((nonnull, warn_unused_result));
+ MY_ATTRIBUTE((nonnull, warn_unused_result));
/******************************************************************//**
Set the value specific to an FTS index in the config table.
@return DB_SUCCESS or error code */
@@ -426,7 +426,7 @@ fts_config_set_index_value(
this parameter name */
fts_string_t* value) /*!< out: value read from
config table */
- __attribute__((nonnull, warn_unused_result));
+ MY_ATTRIBUTE((nonnull, warn_unused_result));
/******************************************************************//**
Increment the value in the config table for column name.
@return DB_SUCCESS or error code */
@@ -439,7 +439,7 @@ fts_config_increment_value(
const char* name, /*!< in: increment config value
for this parameter name */
ulint delta) /*!< in: increment by this much */
- __attribute__((nonnull, warn_unused_result));
+ MY_ATTRIBUTE((nonnull, warn_unused_result));
/******************************************************************//**
Increment the per index value in the config table for column name.
@return DB_SUCCESS or error code */
@@ -452,7 +452,7 @@ fts_config_increment_index_value(
const char* name, /*!< in: increment config value
for this parameter name */
ulint delta) /*!< in: increment by this much */
- __attribute__((nonnull));
+ MY_ATTRIBUTE((nonnull));
/******************************************************************//**
Get an ulint value from the config table.
@return DB_SUCCESS or error code */
@@ -464,7 +464,7 @@ fts_config_get_index_ulint(
dict_index_t* index, /*!< in: FTS index */
const char* name, /*!< in: param name */
ulint* int_value) /*!< out: value */
- __attribute__((nonnull, warn_unused_result));
+ MY_ATTRIBUTE((nonnull, warn_unused_result));
/******************************************************************//**
Set an ulint value in the config table.
@return DB_SUCCESS or error code */
@@ -476,7 +476,7 @@ fts_config_set_index_ulint(
dict_index_t* index, /*!< in: FTS index */
const char* name, /*!< in: param name */
ulint int_value) /*!< in: value */
- __attribute__((nonnull, warn_unused_result));
+ MY_ATTRIBUTE((nonnull, warn_unused_result));
/******************************************************************//**
Get an ulint value from the config table.
@return DB_SUCCESS or error code */
@@ -488,7 +488,7 @@ fts_config_get_ulint(
fts_table_t* fts_table, /*!< in: the indexed FTS table */
const char* name, /*!< in: param name */
ulint* int_value) /*!< out: value */
- __attribute__((nonnull));
+ MY_ATTRIBUTE((nonnull));
/******************************************************************//**
Search cache for word.
@return the word node vector if found else NULL */
@@ -500,7 +500,7 @@ fts_cache_find_word(
index_cache, /*!< in: cache to search */
const fts_string_t*
text) /*!< in: word to search for */
- __attribute__((nonnull, warn_unused_result));
+ MY_ATTRIBUTE((nonnull, warn_unused_result));
/******************************************************************//**
Check cache for deleted doc id.
@return TRUE if deleted */
@@ -511,7 +511,7 @@ fts_cache_is_deleted_doc_id(
const fts_cache_t*
			cache,		/*!< in: cache to search */
doc_id_t doc_id) /*!< in: doc id to search for */
- __attribute__((nonnull, warn_unused_result));
+ MY_ATTRIBUTE((nonnull, warn_unused_result));
/******************************************************************//**
Append deleted doc ids to vector and sort the vector. */
UNIV_INTERN
@@ -546,7 +546,7 @@ fts_get_total_word_count(
trx_t* trx, /*!< in: transaction */
dict_index_t* index, /*!< in: for this index */
ulint* total) /*!< out: total words */
- __attribute__((nonnull, warn_unused_result));
+ MY_ATTRIBUTE((nonnull, warn_unused_result));
#endif
/******************************************************************//**
Search the index specific cache for a particular FTS index.
@@ -559,7 +559,7 @@ fts_find_index_cache(
cache, /*!< in: cache to search */
const dict_index_t*
index) /*!< in: index to search for */
- __attribute__((nonnull, warn_unused_result));
+ MY_ATTRIBUTE((nonnull, warn_unused_result));
/******************************************************************//**
Write the table id to the given buffer (including final NUL). Buffer must be
at least FTS_AUX_MIN_TABLE_ID_LENGTH bytes long.
@@ -570,10 +570,10 @@ fts_write_object_id(
/*================*/
ib_id_t id, /*!< in: a table/index id */
char* str, /*!< in: buffer to write the id to */
- bool hex_format __attribute__((unused)))
+ bool hex_format MY_ATTRIBUTE((unused)))
/*!< in: true for fixed hex format,
false for old ambiguous format */
- __attribute__((nonnull));
+ MY_ATTRIBUTE((nonnull));
/******************************************************************//**
Read the table id from the string generated by fts_write_object_id().
@return TRUE if parse successful */
@@ -583,7 +583,7 @@ fts_read_object_id(
/*===============*/
ib_id_t* id, /*!< out: a table id */
const char* str) /*!< in: buffer to read from */
- __attribute__((nonnull, warn_unused_result));
+ MY_ATTRIBUTE((nonnull, warn_unused_result));
/******************************************************************//**
Get the table id.
@return number of bytes written */
@@ -596,7 +596,7 @@ fts_get_table_id(
char* table_id) /*!< out: table id, must be at least
FTS_AUX_MIN_TABLE_ID_LENGTH bytes
long */
- __attribute__((nonnull, warn_unused_result));
+ MY_ATTRIBUTE((nonnull, warn_unused_result));
/******************************************************************//**
Add the table to add to the OPTIMIZER's list. */
UNIV_INTERN
@@ -604,7 +604,7 @@ void
fts_optimize_add_table(
/*===================*/
dict_table_t* table) /*!< in: table to add */
- __attribute__((nonnull));
+ MY_ATTRIBUTE((nonnull));
/******************************************************************//**
Optimize a table. */
UNIV_INTERN
@@ -612,7 +612,7 @@ void
fts_optimize_do_table(
/*==================*/
dict_table_t* table) /*!< in: table to optimize */
- __attribute__((nonnull));
+ MY_ATTRIBUTE((nonnull));
/******************************************************************//**
Construct the prefix name of an FTS table.
@return own: table name, must be freed with mem_free() */
@@ -622,7 +622,7 @@ fts_get_table_name_prefix(
/*======================*/
const fts_table_t*
fts_table) /*!< in: Auxiliary table type */
- __attribute__((nonnull, malloc, warn_unused_result));
+ MY_ATTRIBUTE((nonnull, malloc, warn_unused_result));
/******************************************************************//**
Add node positions. */
UNIV_INTERN
@@ -633,7 +633,7 @@ fts_cache_node_add_positions(
fts_node_t* node, /*!< in: word node */
doc_id_t doc_id, /*!< in: doc id */
ib_vector_t* positions) /*!< in: fts_token_t::positions */
- __attribute__((nonnull(2,4)));
+ MY_ATTRIBUTE((nonnull(2,4)));
/******************************************************************//**
Create the config table name for retrieving index specific value.
@@ -644,7 +644,7 @@ fts_config_create_index_param_name(
/*===============================*/
const char* param, /*!< in: base name of param */
const dict_index_t* index) /*!< in: index for config */
- __attribute__((nonnull, malloc, warn_unused_result));
+ MY_ATTRIBUTE((nonnull, malloc, warn_unused_result));
#ifndef UNIV_NONINL
#include "fts0priv.ic"
diff --git a/storage/innobase/include/fts0priv.ic b/storage/innobase/include/fts0priv.ic
index ec61691870b..88f2d67c7b8 100644
--- a/storage/innobase/include/fts0priv.ic
+++ b/storage/innobase/include/fts0priv.ic
@@ -1,6 +1,6 @@
/*****************************************************************************
-Copyright (c) 2011, 2013, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 2011, 2016, Oracle and/or its affiliates. All Rights Reserved.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -33,7 +33,7 @@ fts_write_object_id(
/*================*/
ib_id_t id, /* in: a table/index id */
char* str, /* in: buffer to write the id to */
- bool hex_format __attribute__((unused)))
+ bool hex_format MY_ATTRIBUTE((unused)))
/* in: true for fixed hex format,
false for old ambiguous format */
{
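MY_ATTRIBUTE((unused)) on hex_format suppresses -Wunused-parameter for an argument that only matters in some configurations. A minimal illustration with a hypothetical function, assuming the MY_ATTRIBUTE wrapper sketched earlier:

#include <stdio.h>

static int
fmt_id(unsigned long id, char* str, int hex MY_ATTRIBUTE((unused)))
{
        return(sprintf(str, "%lu", id));   /* hex ignored in this sketch */
}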
diff --git a/storage/innobase/include/ha_prototypes.h b/storage/innobase/include/ha_prototypes.h
index 1177a8fd3cf..12453099ef7 100644
--- a/storage/innobase/include/ha_prototypes.h
+++ b/storage/innobase/include/ha_prototypes.h
@@ -1,6 +1,6 @@
/*****************************************************************************
-Copyright (c) 2006, 2015, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 2006, 2016, Oracle and/or its affiliates. All Rights Reserved.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -136,7 +136,7 @@ enum durability_properties
thd_requested_durability(
/*=====================*/
const THD* thd) /*!< in: thread handle */
- __attribute__((nonnull, warn_unused_result));
+ MY_ATTRIBUTE((nonnull, warn_unused_result));
/******************************************************************//**
Returns true if the transaction this thread is processing has edited
@@ -177,7 +177,7 @@ innobase_mysql_cmp(
const unsigned char* b, /*!< in: data field */
unsigned int b_length) /*!< in: data field length,
not UNIV_SQL_NULL */
- __attribute__((nonnull, warn_unused_result));
+ MY_ATTRIBUTE((nonnull, warn_unused_result));
/*****************************************************************//**
Log code calls this whenever log has been written and/or flushed up
@@ -205,7 +205,7 @@ get_innobase_type_from_mysql_type(
and unsigned integer
types are 'unsigned types' */
const void* field) /*!< in: MySQL Field */
- __attribute__((nonnull));
+ MY_ATTRIBUTE((nonnull));
/******************************************************************//**
Get the variable length bounds of the given character set. */
@@ -315,7 +315,7 @@ innobase_get_stmt(
/*==============*/
THD* thd, /*!< in: MySQL thread handle */
size_t* length) /*!< out: length of the SQL statement */
- __attribute__((nonnull));
+ MY_ATTRIBUTE((nonnull));
/******************************************************************//**
This function is used to find the storage length in bytes of the first n
characters for prefix indexes using a multibyte character set. The function
@@ -341,7 +341,7 @@ enum icp_result
innobase_index_cond(
/*================*/
void* file) /*!< in/out: pointer to ha_innobase */
- __attribute__((nonnull, warn_unused_result));
+ MY_ATTRIBUTE((nonnull, warn_unused_result));
/******************************************************************//**
Returns true if the thread supports XA,
global value of innodb_supports_xa if thd is NULL.
@@ -477,7 +477,7 @@ innobase_format_name(
const char* name, /*!< in: index or table name
to format */
ibool is_index_name) /*!< in: index name */
- __attribute__((nonnull));
+ MY_ATTRIBUTE((nonnull));
/** Corresponds to Sql_condition:enum_warning_level. */
enum ib_log_level_t {
@@ -507,7 +507,7 @@ ib_errf(
ib_uint32_t code, /*!< MySQL error code */
const char* format, /*!< printf format */
...) /*!< Args */
- __attribute__((format(printf, 4, 5)));
+ MY_ATTRIBUTE((format(printf, 4, 5)));
/******************************************************************//**
Use this when the args are passed to the format string from
@@ -538,7 +538,7 @@ ib_logf(
ib_log_level_t level, /*!< in: warning level */
const char* format, /*!< printf format */
...) /*!< Args */
- __attribute__((format(printf, 2, 3)));
+ MY_ATTRIBUTE((format(printf, 2, 3)));
/******************************************************************//**
Returns the NUL terminated value of glob_hostname.
@@ -584,7 +584,7 @@ innobase_next_autoinc(
ulonglong step, /*!< in: AUTOINC increment step */
ulonglong offset, /*!< in: AUTOINC offset */
ulonglong max_value) /*!< in: max value for type */
- __attribute__((pure, warn_unused_result));
+ MY_ATTRIBUTE((pure, warn_unused_result));
/********************************************************************//**
Get the upper limit of the MySQL integral and floating-point type.
@@ -594,7 +594,7 @@ ulonglong
innobase_get_int_col_max_value(
/*===========================*/
const Field* field) /*!< in: MySQL field */
- __attribute__((nonnull, pure, warn_unused_result));
+ MY_ATTRIBUTE((nonnull, pure, warn_unused_result));
/**********************************************************************
Converts an identifier from my_charset_filename to UTF-8 charset. */
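The format(printf, N, M) attributes on ib_errf() and ib_logf() tell GCC which argument is the format string and where the varargs start, so mismatched conversions surface at compile time. Illustrative, assuming the IB_LOG_LEVEL_WARN member of ib_log_level_t:

ib_logf(IB_LOG_LEVEL_WARN, "space %lu", space_id);      /* checked: OK */
ib_logf(IB_LOG_LEVEL_WARN, "space %lu", "not-a-num");   /* -Wformat warning */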
diff --git a/storage/innobase/include/handler0alter.h b/storage/innobase/include/handler0alter.h
index 66b963ae39a..3dd6c99eb6d 100644
--- a/storage/innobase/include/handler0alter.h
+++ b/storage/innobase/include/handler0alter.h
@@ -1,6 +1,6 @@
/*****************************************************************************
-Copyright (c) 2005, 2013, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 2005, 2016, Oracle and/or its affiliates. All Rights Reserved.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -32,7 +32,7 @@ innobase_rec_to_mysql(
const dict_index_t* index, /*!< in: index */
const ulint* offsets)/*!< in: rec_get_offsets(
rec, index, ...) */
- __attribute__((nonnull));
+ MY_ATTRIBUTE((nonnull));
/*************************************************************//**
Copies an InnoDB index entry to table->record[0]. */
@@ -43,7 +43,7 @@ innobase_fields_to_mysql(
struct TABLE* table, /*!< in/out: MySQL table */
const dict_index_t* index, /*!< in: InnoDB index */
const dfield_t* fields) /*!< in: InnoDB index fields */
- __attribute__((nonnull));
+ MY_ATTRIBUTE((nonnull));
/*************************************************************//**
Copies an InnoDB row to table->record[0]. */
@@ -54,7 +54,7 @@ innobase_row_to_mysql(
struct TABLE* table, /*!< in/out: MySQL table */
const dict_table_t* itab, /*!< in: InnoDB table */
const dtuple_t* row) /*!< in: InnoDB row */
- __attribute__((nonnull));
+ MY_ATTRIBUTE((nonnull));
/*************************************************************//**
Resets table->record[0]. */
@@ -63,7 +63,7 @@ void
innobase_rec_reset(
/*===============*/
struct TABLE* table) /*!< in/out: MySQL table */
- __attribute__((nonnull));
+ MY_ATTRIBUTE((nonnull));
/** Generate the next autoinc based on a snapshot of the session
auto_increment_increment and auto_increment_offset variables. */
diff --git a/storage/innobase/include/ibuf0ibuf.h b/storage/innobase/include/ibuf0ibuf.h
index badafe6befd..09c48822b9f 100644
--- a/storage/innobase/include/ibuf0ibuf.h
+++ b/storage/innobase/include/ibuf0ibuf.h
@@ -1,6 +1,6 @@
/*****************************************************************************
-Copyright (c) 1997, 2013, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 1997, 2016, Oracle and/or its affiliates. All Rights Reserved.
Copyright (c) 2016, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify it under
@@ -120,7 +120,7 @@ void
ibuf_mtr_start(
/*===========*/
mtr_t* mtr) /*!< out: mini-transaction */
- __attribute__((nonnull));
+ MY_ATTRIBUTE((nonnull));
/***************************************************************//**
Commits an insert buffer mini-transaction. */
UNIV_INLINE
@@ -128,7 +128,7 @@ void
ibuf_mtr_commit(
/*============*/
mtr_t* mtr) /*!< in/out: mini-transaction */
- __attribute__((nonnull));
+ MY_ATTRIBUTE((nonnull));
/*********************************************************************//**
Initializes an ibuf bitmap page. */
UNIV_INTERN
@@ -254,7 +254,7 @@ ibool
ibuf_inside(
/*========*/
const mtr_t* mtr) /*!< in: mini-transaction */
- __attribute__((nonnull, pure));
+ MY_ATTRIBUTE((nonnull, pure));
/***********************************************************************//**
Checks if a page address is an ibuf bitmap page (level 3 page) address.
@return TRUE if a bitmap page */
@@ -287,7 +287,7 @@ ibuf_page_low(
is not one of the fixed address ibuf
pages, or NULL, in which case a new
transaction is created. */
- __attribute__((warn_unused_result));
+ MY_ATTRIBUTE((warn_unused_result));
#ifdef UNIV_DEBUG
/** Checks if a page is a level 2 or 3 page in the ibuf hierarchy of
pages. Must not be called when recv_no_ibuf_operations==TRUE.
@@ -366,23 +366,31 @@ void
ibuf_delete_for_discarded_space(
/*============================*/
ulint space); /*!< in: space id */
-/*********************************************************************//**
-Contracts insert buffer trees by reading pages to the buffer pool.
+/** Contract the change buffer by reading pages to the buffer pool.
+@param[in] full If true, do a full contraction based
+on PCT_IO(100). If false, the size of contract batch is determined
+based on the current size of the change buffer.
@return a lower limit for the combined size in bytes of entries which
will be merged from ibuf trees to the pages read, 0 if ibuf is
empty */
UNIV_INTERN
ulint
-ibuf_contract_in_background(
-/*========================*/
- table_id_t table_id, /*!< in: if merge should be done only
- for a specific table, for all tables
- this should be 0 */
- ibool full); /*!< in: TRUE if the caller wants to
- do a full contract based on PCT_IO(100).
- If FALSE then the size of contract
- batch is determined based on the
- current size of the ibuf tree. */
+ibuf_merge_in_background(
+	bool	full);	/*!< in: true for a full
+			contraction based on PCT_IO(100);
+			false to size the merge batch
+			from the current size of the
+			ibuf tree */
+
+/** Contracts insert buffer trees by reading pages referring to space_id
+to the buffer pool.
+@return number of pages merged. */
+UNIV_INTERN
+ulint
+ibuf_merge_space(
+/*=============*/
+ ulint space); /*!< in: space id */
+
#endif /* !UNIV_HOTBACKUP */
/*********************************************************************//**
Parses a redo log record of an ibuf bitmap page init.
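The rename splits the old table_id parameter out of the background entry point: periodic merging no longer targets a single table, while tablespace-scoped draining gets its own function. A sketch of the intended call sites (an assumption, not shown in this hunk):

/* master thread, periodic batch sized from the ibuf tree: */
ulint   n_bytes = ibuf_merge_in_background(false);

/* DISCARD TABLESPACE-style path, drain one tablespace: */
ulint   n_pages = ibuf_merge_space(space_id);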
@@ -447,7 +455,7 @@ ibuf_check_bitmap_on_import(
/*========================*/
const trx_t* trx, /*!< in: transaction */
ulint space_id) /*!< in: tablespace identifier */
- __attribute__((nonnull, warn_unused_result));
+ MY_ATTRIBUTE((nonnull, warn_unused_result));
#define IBUF_HEADER_PAGE_NO FSP_IBUF_HEADER_PAGE_NO
#define IBUF_TREE_ROOT_PAGE_NO FSP_IBUF_TREE_ROOT_PAGE_NO
diff --git a/storage/innobase/include/lock0lock.h b/storage/innobase/include/lock0lock.h
index acfb8ef19c6..a6fafd95754 100644
--- a/storage/innobase/include/lock0lock.h
+++ b/storage/innobase/include/lock0lock.h
@@ -1,6 +1,6 @@
/*****************************************************************************
-Copyright (c) 1996, 2014, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 1996, 2016, Oracle and/or its affiliates. All Rights Reserved.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -276,7 +276,7 @@ lock_rec_expl_exist_on_page(
/*========================*/
ulint space, /*!< in: space id */
ulint page_no)/*!< in: page number */
- __attribute__((warn_unused_result));
+ MY_ATTRIBUTE((warn_unused_result));
/*********************************************************************//**
Checks if locks of other transactions prevent an immediate insert of
a record. If they do, first tests if the query thread should anyway
@@ -299,7 +299,7 @@ lock_rec_insert_check_and_lock(
inserted record may need to inherit
LOCK_GAP type locks from the successor
record */
- __attribute__((nonnull(2,3,4,6,7), warn_unused_result));
+ MY_ATTRIBUTE((nonnull(2,3,4,6,7), warn_unused_result));
/*********************************************************************//**
Checks if locks of other transactions prevent an immediate modify (update,
delete mark, or delete unmark) of a clustered index record. If they do,
@@ -320,7 +320,7 @@ lock_clust_rec_modify_check_and_lock(
dict_index_t* index, /*!< in: clustered index */
const ulint* offsets,/*!< in: rec_get_offsets(rec, index) */
que_thr_t* thr) /*!< in: query thread */
- __attribute__((warn_unused_result, nonnull));
+ MY_ATTRIBUTE((warn_unused_result, nonnull));
/*********************************************************************//**
Checks if locks of other transactions prevent an immediate modify
(delete mark or delete unmark) of a secondary index record.
@@ -341,7 +341,7 @@ lock_sec_rec_modify_check_and_lock(
que_thr_t* thr, /*!< in: query thread
(can be NULL if BTR_NO_LOCKING_FLAG) */
mtr_t* mtr) /*!< in/out: mini-transaction */
- __attribute__((warn_unused_result, nonnull(2,3,4,6)));
+ MY_ATTRIBUTE((warn_unused_result, nonnull(2,3,4,6)));
/*********************************************************************//**
Like lock_clust_rec_read_check_and_lock(), but reads a
secondary index record.
@@ -428,7 +428,7 @@ lock_clust_rec_read_check_and_lock_alt(
ulint gap_mode,/*!< in: LOCK_ORDINARY, LOCK_GAP, or
LOCK_REC_NOT_GAP */
que_thr_t* thr) /*!< in: query thread */
- __attribute__((nonnull, warn_unused_result));
+ MY_ATTRIBUTE((nonnull, warn_unused_result));
/*********************************************************************//**
Checks that a record is seen in a consistent read.
@return true if sees, or false if an earlier version of the record
@@ -460,7 +460,7 @@ lock_sec_rec_cons_read_sees(
should be read or passed over
by a read cursor */
const read_view_t* view) /*!< in: consistent read view */
- __attribute__((nonnull, warn_unused_result));
+ MY_ATTRIBUTE((nonnull, warn_unused_result));
/*********************************************************************//**
Locks the specified database table in the mode given. If the lock cannot
be granted immediately, the query thread is put to wait.
@@ -475,7 +475,7 @@ lock_table(
in dictionary cache */
enum lock_mode mode, /*!< in: lock mode */
que_thr_t* thr) /*!< in: query thread */
- __attribute__((nonnull, warn_unused_result));
+ MY_ATTRIBUTE((nonnull, warn_unused_result));
/*********************************************************************//**
Creates a table IX lock object for a resurrected transaction. */
UNIV_INTERN
@@ -530,7 +530,7 @@ lock_rec_fold(
/*==========*/
ulint space, /*!< in: space */
ulint page_no)/*!< in: page number */
- __attribute__((const));
+ MY_ATTRIBUTE((const));
/*********************************************************************//**
Calculates the hash value of a page file address: used in inserting or
searching for a lock in the hash table.
@@ -580,7 +580,7 @@ lock_is_table_exclusive(
/*====================*/
const dict_table_t* table, /*!< in: table */
const trx_t* trx) /*!< in: transaction */
- __attribute__((nonnull));
+ MY_ATTRIBUTE((nonnull));
/*********************************************************************//**
Checks if a lock request lock1 has to wait for request lock2.
@return TRUE if lock1 has to wait for lock2 to be removed */
@@ -604,7 +604,7 @@ lock_report_trx_id_insanity(
dict_index_t* index, /*!< in: index */
const ulint* offsets, /*!< in: rec_get_offsets(rec, index) */
trx_id_t max_trx_id) /*!< in: trx_sys_get_max_trx_id() */
- __attribute__((nonnull));
+ MY_ATTRIBUTE((nonnull));
/*********************************************************************//**
Prints info of a table lock. */
UNIV_INTERN
@@ -631,7 +631,7 @@ lock_print_info_summary(
/*====================*/
FILE* file, /*!< in: file where to print */
ibool nowait) /*!< in: whether to wait for the lock mutex */
- __attribute__((nonnull, warn_unused_result));
+ MY_ATTRIBUTE((nonnull, warn_unused_result));
/*********************************************************************//**
Prints info of locks for each transaction. This function assumes that the
caller holds the lock mutex and more importantly it will release the lock
@@ -651,7 +651,7 @@ ulint
lock_number_of_rows_locked(
/*=======================*/
const trx_lock_t* trx_lock) /*!< in: transaction locks */
- __attribute__((nonnull, warn_unused_result));
+ MY_ATTRIBUTE((nonnull, warn_unused_result));
/*******************************************************************//**
Gets the type of a lock. Non-inline version for using outside of the
@@ -819,7 +819,7 @@ dberr_t
lock_trx_handle_wait(
/*=================*/
trx_t* trx) /*!< in/out: trx lock state */
- __attribute__((nonnull));
+ MY_ATTRIBUTE((nonnull));
/*********************************************************************//**
Get the number of locks on a table.
@return number of locks */
@@ -828,7 +828,7 @@ ulint
lock_table_get_n_locks(
/*===================*/
const dict_table_t* table) /*!< in: table */
- __attribute__((nonnull));
+ MY_ATTRIBUTE((nonnull));
#ifdef UNIV_DEBUG
/*********************************************************************//**
Checks that a transaction id is sensible, i.e., not in the future.
@@ -841,7 +841,7 @@ lock_check_trx_id_sanity(
const rec_t* rec, /*!< in: user record */
dict_index_t* index, /*!< in: index */
const ulint* offsets) /*!< in: rec_get_offsets(rec, index) */
- __attribute__((nonnull, warn_unused_result));
+ MY_ATTRIBUTE((nonnull, warn_unused_result));
/*******************************************************************//**
Check if the transaction holds any locks on the sys tables
or its records.
@@ -851,7 +851,7 @@ const lock_t*
lock_trx_has_sys_table_locks(
/*=========================*/
const trx_t* trx) /*!< in: transaction to check */
- __attribute__((warn_unused_result));
+ MY_ATTRIBUTE((warn_unused_result));
/*******************************************************************//**
Check if the transaction holds an exclusive lock on a record.
@@ -864,7 +864,7 @@ lock_trx_has_rec_x_lock(
const dict_table_t* table, /*!< in: table to check */
const buf_block_t* block, /*!< in: buffer block of the record */
ulint heap_no)/*!< in: record heap number */
- __attribute__((nonnull, warn_unused_result));
+ MY_ATTRIBUTE((nonnull, warn_unused_result));
#endif /* UNIV_DEBUG */
/** Lock modes and types */
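Most of the attributes carried over in this file are behavioral hints rather than decoration: warn_unused_result makes the compiler flag callers that drop a dberr_t, and ((const)) lets it fold repeated calls such as lock_rec_fold(). An illustrative caller that the attribute would catch, assuming the conventional (flags, table, mode, thr) signature of lock_table() (not shown in full above):

	/* With MY_ATTRIBUTE((warn_unused_result)) on lock_table(), a
	compiler that honors the attribute warns here, because a
	DB_LOCK_WAIT or DB_DEADLOCK return would be silently lost: */

	lock_table(0, table, LOCK_IX, thr);	/* warning: result ignored */

	/* The checked form: */
	dberr_t	err = lock_table(0, table, LOCK_IX, thr);
	if (err != DB_SUCCESS) {
		/* handle lock wait or deadlock */
	}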
diff --git a/storage/innobase/include/lock0priv.h b/storage/innobase/include/lock0priv.h
index 16423e6a282..b60dd0d92c8 100644
--- a/storage/innobase/include/lock0priv.h
+++ b/storage/innobase/include/lock0priv.h
@@ -1,7 +1,7 @@
/*****************************************************************************
-Copyright (c) 2007, 2011, Oracle and/or its affiliates. All Rights Reserved.
-Copyright (c) 2015, MariaDB Corporation
+Copyright (c) 2007, 2016, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 2015, 2016, MariaDB Corporation
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -124,7 +124,7 @@ lock_clust_rec_some_has_impl(
const rec_t* rec, /*!< in: user record */
const dict_index_t* index, /*!< in: clustered index */
const ulint* offsets)/*!< in: rec_get_offsets(rec, index) */
- __attribute__((nonnull, warn_unused_result));
+ MY_ATTRIBUTE((nonnull, warn_unused_result));
#ifndef UNIV_NONINL
#include "lock0priv.ic"
diff --git a/storage/innobase/include/log0recv.h b/storage/innobase/include/log0recv.h
index 292953854f7..b6c977bdc74 100644
--- a/storage/innobase/include/log0recv.h
+++ b/storage/innobase/include/log0recv.h
@@ -1,6 +1,6 @@
/*****************************************************************************
-Copyright (c) 1997, 2014, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 1997, 2016, Oracle and/or its affiliates. All Rights Reserved.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -51,7 +51,7 @@ recv_read_checkpoint_info_for_backup(
lsn_t* first_header_lsn)
					/*!< out: lsn of the start of the
first log file */
- __attribute__((nonnull));
+ MY_ATTRIBUTE((nonnull));
/*******************************************************************//**
Scans the log segment and n_bytes_scanned is set to the length of valid
log scanned. */
diff --git a/storage/innobase/include/mach0data.h b/storage/innobase/include/mach0data.h
index d0087f56aaa..9859def0adc 100644
--- a/storage/innobase/include/mach0data.h
+++ b/storage/innobase/include/mach0data.h
@@ -1,6 +1,6 @@
/*****************************************************************************
-Copyright (c) 1995, 2009, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 1995, 2016, Oracle and/or its affiliates. All Rights Reserved.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -53,7 +53,7 @@ ulint
mach_read_from_1(
/*=============*/
const byte* b) /*!< in: pointer to byte */
- __attribute__((nonnull, pure));
+ MY_ATTRIBUTE((nonnull, pure));
/*******************************************************//**
The following function is used to store data in two consecutive
bytes. We store the most significant byte to the lower address. */
@@ -72,7 +72,7 @@ ulint
mach_read_from_2(
/*=============*/
const byte* b) /*!< in: pointer to two bytes */
- __attribute__((nonnull, pure));
+ MY_ATTRIBUTE((nonnull, pure));
/********************************************************//**
The following function is used to convert a 16-bit data item
@@ -84,7 +84,7 @@ uint16
mach_encode_2(
/*==========*/
ulint n) /*!< in: integer in machine-dependent format */
- __attribute__((const));
+ MY_ATTRIBUTE((const));
/********************************************************//**
The following function is used to convert a 16-bit data item
from the canonical format, for fast bytewise equality test
@@ -95,7 +95,7 @@ ulint
mach_decode_2(
/*==========*/
uint16 n) /*!< in: 16-bit integer in canonical format */
- __attribute__((const));
+ MY_ATTRIBUTE((const));
/*******************************************************//**
The following function is used to store data in 3 consecutive
bytes. We store the most significant byte to the lowest address. */
@@ -114,7 +114,7 @@ ulint
mach_read_from_3(
/*=============*/
const byte* b) /*!< in: pointer to 3 bytes */
- __attribute__((nonnull, pure));
+ MY_ATTRIBUTE((nonnull, pure));
/*******************************************************//**
The following function is used to store data in four consecutive
bytes. We store the most significant byte to the lowest address. */
@@ -133,7 +133,7 @@ ulint
mach_read_from_4(
/*=============*/
const byte* b) /*!< in: pointer to four bytes */
- __attribute__((nonnull, pure));
+ MY_ATTRIBUTE((nonnull, pure));
/*********************************************************//**
Writes a ulint in a compressed form (1..5 bytes).
@return stored size in bytes */
@@ -151,7 +151,7 @@ ulint
mach_get_compressed_size(
/*=====================*/
ulint n) /*!< in: ulint integer to be stored */
- __attribute__((const));
+ MY_ATTRIBUTE((const));
/*********************************************************//**
Reads a ulint in a compressed form.
@return read integer */
@@ -160,7 +160,7 @@ ulint
mach_read_compressed(
/*=================*/
const byte* b) /*!< in: pointer to memory from where to read */
- __attribute__((nonnull, pure));
+ MY_ATTRIBUTE((nonnull, pure));
/*******************************************************//**
The following function is used to store data in 6 consecutive
bytes. We store the most significant byte to the lowest address. */
@@ -179,7 +179,7 @@ ib_uint64_t
mach_read_from_6(
/*=============*/
const byte* b) /*!< in: pointer to 6 bytes */
- __attribute__((nonnull, pure));
+ MY_ATTRIBUTE((nonnull, pure));
/*******************************************************//**
The following function is used to store data in 7 consecutive
bytes. We store the most significant byte to the lowest address. */
@@ -198,7 +198,7 @@ ib_uint64_t
mach_read_from_7(
/*=============*/
const byte* b) /*!< in: pointer to 7 bytes */
- __attribute__((nonnull, pure));
+ MY_ATTRIBUTE((nonnull, pure));
/*******************************************************//**
The following function is used to store data in 8 consecutive
bytes. We store the most significant byte to the lowest address. */
@@ -217,7 +217,7 @@ ib_uint64_t
mach_read_from_8(
/*=============*/
const byte* b) /*!< in: pointer to 8 bytes */
- __attribute__((nonnull, pure));
+ MY_ATTRIBUTE((nonnull, pure));
/*********************************************************//**
Writes a 64-bit integer in a compressed form (5..9 bytes).
@return size in bytes */
@@ -243,7 +243,7 @@ ib_uint64_t
mach_ull_read_compressed(
/*=====================*/
const byte* b) /*!< in: pointer to memory from where to read */
- __attribute__((nonnull, pure));
+ MY_ATTRIBUTE((nonnull, pure));
/*********************************************************//**
Writes a 64-bit integer in a compressed form (1..11 bytes).
@return size in bytes */
@@ -261,7 +261,7 @@ ulint
mach_ull_get_much_compressed_size(
/*==============================*/
ib_uint64_t n) /*!< in: 64-bit integer to be stored */
- __attribute__((const));
+ MY_ATTRIBUTE((const));
/*********************************************************//**
Reads a 64-bit integer in a compressed form.
@return the value read */
@@ -270,7 +270,7 @@ ib_uint64_t
mach_ull_read_much_compressed(
/*==========================*/
const byte* b) /*!< in: pointer to memory from where to read */
- __attribute__((nonnull, pure));
+ MY_ATTRIBUTE((nonnull, pure));
/*********************************************************//**
Reads a ulint in a compressed form if the log record fully contains it.
@return pointer to end of the stored field, NULL if not complete */
@@ -301,7 +301,7 @@ double
mach_double_read(
/*=============*/
const byte* b) /*!< in: pointer to memory from where to read */
- __attribute__((nonnull, pure));
+ MY_ATTRIBUTE((nonnull, pure));
/*********************************************************//**
Writes a double. It is stored in a little-endian format. */
UNIV_INLINE
@@ -318,7 +318,7 @@ float
mach_float_read(
/*============*/
const byte* b) /*!< in: pointer to memory from where to read */
- __attribute__((nonnull, pure));
+ MY_ATTRIBUTE((nonnull, pure));
/*********************************************************//**
Writes a float. It is stored in a little-endian format. */
UNIV_INLINE
@@ -336,7 +336,7 @@ mach_read_from_n_little_endian(
/*===========================*/
const byte* buf, /*!< in: from where to read */
ulint buf_size) /*!< in: from how many bytes to read */
- __attribute__((nonnull, pure));
+ MY_ATTRIBUTE((nonnull, pure));
/*********************************************************//**
Writes a ulint in the little-endian format. */
UNIV_INLINE
@@ -354,7 +354,7 @@ ulint
mach_read_from_2_little_endian(
/*===========================*/
const byte* buf) /*!< in: from where to read */
- __attribute__((nonnull, pure));
+ MY_ATTRIBUTE((nonnull, pure));
/*********************************************************//**
Writes a ulint in the little-endian format. */
UNIV_INLINE
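The mach_read_from_N family implements the byte order the comments above spell out: the most significant byte sits at the lowest address. Minimal re-implementations for illustration (not the InnoDB originals, which live in mach0data.ic):

	static inline ulint
	sketch_mach_read_from_2(const byte* b)
	{
		/* big-endian: b[0] is the most significant byte */
		return(((ulint) b[0] << 8) | (ulint) b[1]);
	}

	static inline ulint
	sketch_mach_read_from_4(const byte* b)
	{
		return(((ulint) b[0] << 24) | ((ulint) b[1] << 16)
		       | ((ulint) b[2] << 8) | (ulint) b[3]);
	}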
diff --git a/storage/innobase/include/mem0mem.h b/storage/innobase/include/mem0mem.h
index f30034f3074..de9b8b29fd9 100644
--- a/storage/innobase/include/mem0mem.h
+++ b/storage/innobase/include/mem0mem.h
@@ -1,6 +1,6 @@
/*****************************************************************************
-Copyright (c) 1994, 2010, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 1994, 2016, Oracle and/or its affiliates. All Rights Reserved.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -353,7 +353,7 @@ mem_heap_printf(
/*============*/
mem_heap_t* heap, /*!< in: memory heap */
const char* format, /*!< in: format string */
- ...) __attribute__ ((format (printf, 2, 3)));
+ ...) MY_ATTRIBUTE ((format (printf, 2, 3)));
#ifdef MEM_PERIODIC_CHECK
/******************************************************************//**
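The one functional attribute in this file is format(printf, 2, 3) on mem_heap_printf(): argument 2 is the format string and the variadic arguments start at 3, so the compiler can type-check them against the format. An illustrative misuse the attribute catches at build time:

	/* flagged by the format attribute: %s given an int,
	%lu given a string */
	mem_heap_printf(heap, "name=%s id=%lu", 42, "oops");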
diff --git a/storage/innobase/include/mem0mem.ic b/storage/innobase/include/mem0mem.ic
index 0d983d69e1a..63e68150b61 100644
--- a/storage/innobase/include/mem0mem.ic
+++ b/storage/innobase/include/mem0mem.ic
@@ -1,6 +1,6 @@
/*****************************************************************************
-Copyright (c) 1994, 2010, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 1994, 2016, Oracle and/or its affiliates. All Rights Reserved.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -476,9 +476,9 @@ void
mem_heap_free_func(
/*===============*/
mem_heap_t* heap, /*!< in, own: heap to be freed */
- const char* file_name __attribute__((unused)),
+ const char* file_name MY_ATTRIBUTE((unused)),
/*!< in: file name where freed */
- ulint line __attribute__((unused)))
+ ulint line MY_ATTRIBUTE((unused)))
{
mem_block_t* block;
mem_block_t* prev_block;
diff --git a/storage/innobase/include/mtr0mtr.h b/storage/innobase/include/mtr0mtr.h
index eae981f2fbb..c3307985532 100644
--- a/storage/innobase/include/mtr0mtr.h
+++ b/storage/innobase/include/mtr0mtr.h
@@ -1,7 +1,8 @@
/*****************************************************************************
-Copyright (c) 1995, 2013, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 1995, 2016, Oracle and/or its affiliates. All Rights Reserved.
Copyright (c) 2012, Facebook Inc.
+Copyright (c) 2013, 2016, MariaDB Corporation
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -217,7 +218,7 @@ mtr_start_trx(
/*======*/
mtr_t* mtr, /*!< out: mini-transaction */
trx_t* trx) /*!< in: transaction */
- __attribute__((nonnull (1)));
+ MY_ATTRIBUTE((nonnull (1)));
/***************************************************************//**
Starts a mini-transaction. */
UNIV_INLINE
@@ -228,7 +229,7 @@ mtr_start(
{
mtr_start_trx(mtr, NULL);
}
- __attribute__((nonnull))
+ MY_ATTRIBUTE((nonnull))
/***************************************************************//**
Commits a mini-transaction. */
UNIV_INTERN
@@ -236,7 +237,7 @@ void
mtr_commit(
/*=======*/
mtr_t* mtr) /*!< in/out: mini-transaction */
- __attribute__((nonnull));
+ MY_ATTRIBUTE((nonnull));
/**********************************************************//**
Sets and returns a savepoint in mtr.
@return savepoint */
@@ -341,7 +342,7 @@ mtr_memo_release(
mtr_t* mtr, /*!< in/out: mini-transaction */
void* object, /*!< in: object */
ulint type) /*!< in: object type: MTR_MEMO_S_LOCK, ... */
- __attribute__((nonnull));
+ MY_ATTRIBUTE((nonnull));
#ifdef UNIV_DEBUG
# ifndef UNIV_HOTBACKUP
/**********************************************************//**
@@ -354,7 +355,7 @@ mtr_memo_contains(
mtr_t* mtr, /*!< in: mtr */
const void* object, /*!< in: object to search */
ulint type) /*!< in: type of object */
- __attribute__((warn_unused_result, nonnull));
+ MY_ATTRIBUTE((warn_unused_result, nonnull));
/**********************************************************//**
Checks if memo contains the given page.
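mtr_start()/mtr_commit() bracket every redo-logged page change; the memo released at commit is what mtr_memo_release() and mtr_memo_contains() operate on. A minimal usage sketch from the declarations above (latching and the actual page operations elided):

	mtr_t	mtr;

	mtr_start(&mtr);
	/* ... x-latch pages, make redo-logged changes ... */
	mtr_commit(&mtr);	/* releases memo'd latches and hands the
				accumulated records to the redo log */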
diff --git a/storage/innobase/include/mtr0mtr.ic b/storage/innobase/include/mtr0mtr.ic
index 44d548e9b64..37cea34d4eb 100644
--- a/storage/innobase/include/mtr0mtr.ic
+++ b/storage/innobase/include/mtr0mtr.ic
@@ -1,6 +1,6 @@
/*****************************************************************************
-Copyright (c) 1995, 2013, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 1995, 2016, Oracle and/or its affiliates. All Rights Reserved.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -37,7 +37,7 @@ ibool
mtr_block_dirtied(
/*==============*/
const buf_block_t* block) /*!< in: block being x-fixed */
- __attribute__((nonnull,warn_unused_result));
+ MY_ATTRIBUTE((nonnull,warn_unused_result));
/***************************************************************//**
Starts a mini-transaction. */
diff --git a/storage/innobase/include/os0file.h b/storage/innobase/include/os0file.h
index dd6fd8f9940..2425a682e22 100644
--- a/storage/innobase/include/os0file.h
+++ b/storage/innobase/include/os0file.h
@@ -1,6 +1,6 @@
/***********************************************************************
-Copyright (c) 1995, 2015, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 1995, 2016, Oracle and/or its affiliates. All Rights Reserved.
Copyright (c) 2009, Percona Inc.
Copyright (c) 2013, 2015, MariaDB Corporation.
@@ -637,7 +637,7 @@ pfs_os_file_create_simple_func(
ibool* success,/*!< out: TRUE if succeed, FALSE if error */
const char* src_file,/*!< in: file name where func invoked */
ulint src_line)/*!< in: line where the func invoked */
- __attribute__((nonnull, warn_unused_result));
+ MY_ATTRIBUTE((nonnull, warn_unused_result));
/****************************************************************//**
NOTE! Please use the corresponding macro
@@ -664,7 +664,7 @@ pfs_os_file_create_simple_no_error_handling_func(
value */
const char* src_file,/*!< in: file name where func invoked */
ulint src_line)/*!< in: line where the func invoked */
- __attribute__((nonnull, warn_unused_result));
+ MY_ATTRIBUTE((nonnull, warn_unused_result));
/****************************************************************//**
NOTE! Please use the corresponding macro os_file_create(), not directly
@@ -694,7 +694,7 @@ pfs_os_file_create_func(
value*/
const char* src_file,/*!< in: file name where func invoked */
ulint src_line)/*!< in: line where the func invoked */
- __attribute__((nonnull, warn_unused_result));
+ MY_ATTRIBUTE((nonnull, warn_unused_result));
/***********************************************************************//**
NOTE! Please use the corresponding macro os_file_close(), not directly
@@ -880,7 +880,7 @@ os_offset_t
os_file_get_size(
/*=============*/
os_file_t file) /*!< in: handle to a file */
- __attribute__((warn_unused_result));
+ MY_ATTRIBUTE((warn_unused_result));
/***********************************************************************//**
Write the specified number of zeros to a newly created file.
@return TRUE if success */
@@ -892,7 +892,7 @@ os_file_set_size(
null-terminated string */
os_file_t file, /*!< in: handle to a file */
os_offset_t size) /*!< in: file size */
- __attribute__((nonnull, warn_unused_result));
+ MY_ATTRIBUTE((nonnull, warn_unused_result));
/***********************************************************************//**
Truncates a file at its current position.
@return TRUE if success */
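The pfs_os_file_* declarations take src_file/src_line because callers are expected to go through macros that splice in the call site for Performance Schema accounting. An approximate sketch of that indirection, assuming UNIV_PFS_IO is enabled (the exact macro lives in os0file.h and may differ):

	#define os_file_create_simple(key, name, create_mode, access, success) \
		pfs_os_file_create_simple_func((key), (name), (create_mode), \
					       (access), (success), \
					       __FILE__, __LINE__)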
diff --git a/storage/innobase/include/os0sync.h b/storage/innobase/include/os0sync.h
index 95e724ec48e..1cf4e9ce501 100644
--- a/storage/innobase/include/os0sync.h
+++ b/storage/innobase/include/os0sync.h
@@ -466,7 +466,7 @@ amount to decrement. */
# define os_atomic_decrement_uint64(ptr, amount) \
os_atomic_decrement(ptr, amount)
-# if defined(IB_STRONG_MEMORY_MODEL)
+# if defined(HAVE_ATOMIC_BUILTINS)
/** Do an atomic test and set.
@param[in,out] ptr Memory location to set to non-zero
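The os0sync.h change keys the atomic test-and-set on HAVE_ATOMIC_BUILTINS instead of IB_STRONG_MEMORY_MODEL. With the builtins available, the operation maps naturally onto GCC's __sync primitives; a minimal sketch (the in-tree definition may differ):

	/* Atomically store a non-zero value and return the previous one. */
	static inline ulint
	sketch_os_atomic_test_and_set(volatile ulint* ptr)
	{
		return((ulint) __sync_lock_test_and_set(ptr, (ulint) 1));
	}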
diff --git a/storage/innobase/include/os0thread.h b/storage/innobase/include/os0thread.h
index 37c54afae80..9a1ada8fa0d 100644
--- a/storage/innobase/include/os0thread.h
+++ b/storage/innobase/include/os0thread.h
@@ -1,6 +1,6 @@
/*****************************************************************************
-Copyright (c) 1995, 2011, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 1995, 2016, Oracle and/or its affiliates. All Rights Reserved.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -125,7 +125,7 @@ os_thread_exit(
/*===========*/
void* exit_value) /*!< in: exit value; in Windows this void*
is cast as a DWORD */
- UNIV_COLD __attribute__((noreturn));
+ UNIV_COLD MY_ATTRIBUTE((noreturn));
/*****************************************************************//**
Returns the thread identifier of current thread.
@return current thread identifier */
diff --git a/storage/innobase/include/page0cur.h b/storage/innobase/include/page0cur.h
index b1ad49b4915..f04667ff29c 100644
--- a/storage/innobase/include/page0cur.h
+++ b/storage/innobase/include/page0cur.h
@@ -1,6 +1,6 @@
/*****************************************************************************
-Copyright (c) 1994, 2013, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 1994, 2016, Oracle and/or its affiliates. All Rights Reserved.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -180,7 +180,7 @@ page_cur_tuple_insert(
mem_heap_t** heap, /*!< in/out: pointer to memory heap, or NULL */
ulint n_ext, /*!< in: number of externally stored columns */
mtr_t* mtr) /*!< in: mini-transaction handle, or NULL */
- __attribute__((nonnull(1,2,3,4,5), warn_unused_result));
+ MY_ATTRIBUTE((nonnull(1,2,3,4,5), warn_unused_result));
#endif /* !UNIV_HOTBACKUP */
/***********************************************************//**
Inserts a record next to page cursor. Returns pointer to inserted record if
@@ -218,7 +218,7 @@ page_cur_insert_rec_low(
const rec_t* rec, /*!< in: pointer to a physical record */
ulint* offsets,/*!< in/out: rec_get_offsets(rec, index) */
mtr_t* mtr) /*!< in: mini-transaction handle, or NULL */
- __attribute__((nonnull(1,2,3,4), warn_unused_result));
+ MY_ATTRIBUTE((nonnull(1,2,3,4), warn_unused_result));
/***********************************************************//**
Inserts a record next to page cursor on a compressed and uncompressed
page. Returns pointer to inserted record if succeed, i.e.,
@@ -240,7 +240,7 @@ page_cur_insert_rec_zip(
const rec_t* rec, /*!< in: pointer to a physical record */
ulint* offsets,/*!< in/out: rec_get_offsets(rec, index) */
mtr_t* mtr) /*!< in: mini-transaction handle, or NULL */
- __attribute__((nonnull(1,2,3,4), warn_unused_result));
+ MY_ATTRIBUTE((nonnull(1,2,3,4), warn_unused_result));
/*************************************************************//**
Copies records from page to a newly created page, from a given record onward,
including that record. Infimum and supremum records are not copied.
diff --git a/storage/innobase/include/page0page.h b/storage/innobase/include/page0page.h
index d83b6e1985d..e8b4265bc68 100644
--- a/storage/innobase/include/page0page.h
+++ b/storage/innobase/include/page0page.h
@@ -1,6 +1,7 @@
/*****************************************************************************
-Copyright (c) 1994, 2015, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 1994, 2016, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 2013, 2016, MariaDB Corporation
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -172,7 +173,7 @@ page_t*
page_align(
/*=======*/
const void* ptr) /*!< in: pointer to page frame */
- __attribute__((const));
+ MY_ATTRIBUTE((const));
/************************************************************//**
Gets the offset within a page.
@return offset from the start of the page */
@@ -181,7 +182,7 @@ ulint
page_offset(
/*========*/
const void* ptr) /*!< in: pointer to page frame */
- __attribute__((const));
+ MY_ATTRIBUTE((const));
/*************************************************************//**
Returns the max trx id field value. */
UNIV_INLINE
@@ -239,7 +240,7 @@ page_header_get_offs(
/*=================*/
const page_t* page, /*!< in: page */
ulint field) /*!< in: PAGE_FREE, ... */
- __attribute__((nonnull, pure));
+ MY_ATTRIBUTE((nonnull, pure));
/*************************************************************//**
Returns the pointer stored in the given header field, or NULL. */
@@ -299,7 +300,7 @@ page_rec_get_nth_const(
/*===================*/
const page_t* page, /*!< in: page */
ulint nth) /*!< in: nth record */
- __attribute__((nonnull, warn_unused_result));
+ MY_ATTRIBUTE((nonnull, warn_unused_result));
/************************************************************//**
Returns the nth record of the record list.
This is the inverse function of page_rec_get_n_recs_before().
@@ -310,7 +311,7 @@ page_rec_get_nth(
/*=============*/
	page_t*	page,	/*!< in: page */
ulint nth) /*!< in: nth record */
- __attribute__((nonnull, warn_unused_result));
+ MY_ATTRIBUTE((nonnull, warn_unused_result));
#ifndef UNIV_HOTBACKUP
/************************************************************//**
@@ -323,7 +324,7 @@ rec_t*
page_get_middle_rec(
/*================*/
page_t* page) /*!< in: page */
- __attribute__((nonnull, warn_unused_result));
+ MY_ATTRIBUTE((nonnull, warn_unused_result));
/*************************************************************//**
Compares a data tuple to a physical record. Differs from the function
cmp_dtuple_rec_with_match in the way that the record must reside on an
@@ -536,7 +537,7 @@ bool
page_is_leaf(
/*=========*/
const page_t* page) /*!< in: page */
- __attribute__((nonnull, pure));
+ MY_ATTRIBUTE((nonnull, pure));
#ifndef UNIV_INNOCHECKSUM
/************************************************************//**
Determine whether the page is empty.
@@ -546,7 +547,7 @@ bool
page_is_empty(
/*==========*/
const page_t* page) /*!< in: page */
- __attribute__((nonnull, pure));
+ MY_ATTRIBUTE((nonnull, pure));
/************************************************************//**
Determine whether the page contains garbage.
@return true if the page contains garbage (PAGE_GARBAGE is not 0) */
@@ -555,7 +556,7 @@ bool
page_has_garbage(
/*=============*/
const page_t* page) /*!< in: page */
- __attribute__((nonnull, pure));
+ MY_ATTRIBUTE((nonnull, pure));
/************************************************************//**
Gets the pointer to the next record on the page.
@return pointer to next record */
@@ -627,7 +628,7 @@ ibool
page_rec_is_user_rec_low(
/*=====================*/
ulint offset) /*!< in: record offset on page */
- __attribute__((const));
+ MY_ATTRIBUTE((const));
/************************************************************//**
TRUE if the record is the supremum record on a page.
@return TRUE if the supremum record */
@@ -636,7 +637,7 @@ ibool
page_rec_is_supremum_low(
/*=====================*/
ulint offset) /*!< in: record offset on page */
- __attribute__((const));
+ MY_ATTRIBUTE((const));
/************************************************************//**
TRUE if the record is the infimum record on a page.
@return TRUE if the infimum record */
@@ -645,7 +646,7 @@ ibool
page_rec_is_infimum_low(
/*====================*/
ulint offset) /*!< in: record offset on page */
- __attribute__((const));
+ MY_ATTRIBUTE((const));
/************************************************************//**
TRUE if the record is a user record on the page.
@@ -655,7 +656,7 @@ ibool
page_rec_is_user_rec(
/*=================*/
const rec_t* rec) /*!< in: record */
- __attribute__((const));
+ MY_ATTRIBUTE((const));
/************************************************************//**
TRUE if the record is the supremum record on a page.
@return TRUE if the supremum record */
@@ -664,7 +665,7 @@ ibool
page_rec_is_supremum(
/*=================*/
const rec_t* rec) /*!< in: record */
- __attribute__((const));
+ MY_ATTRIBUTE((const));
/************************************************************//**
TRUE if the record is the infimum record on a page.
@@ -674,7 +675,7 @@ ibool
page_rec_is_infimum(
/*================*/
const rec_t* rec) /*!< in: record */
- __attribute__((const));
+ MY_ATTRIBUTE((const));
/***************************************************************//**
Looks for the record which owns the given record.
@return the owner record */
@@ -694,7 +695,7 @@ page_rec_write_field(
ulint i, /*!< in: index of the field to update */
ulint val, /*!< in: value to write */
mtr_t* mtr) /*!< in/out: mini-transaction */
- __attribute__((nonnull));
+ MY_ATTRIBUTE((nonnull));
#endif /* !UNIV_HOTBACKUP */
/************************************************************//**
Returns the maximum combined size of records which can be inserted on top
@@ -724,7 +725,7 @@ ulint
page_get_free_space_of_empty(
/*=========================*/
ulint comp) /*!< in: nonzero=compact page format */
- __attribute__((const));
+ MY_ATTRIBUTE((const));
/**********************************************************//**
Returns the base extra size of a physical record. This is the
size of the fixed header, independent of the record size.
@@ -810,7 +811,7 @@ page_create_zip(
ulint level, /*!< in: the B-tree level of the page */
trx_id_t max_trx_id, /*!< in: PAGE_MAX_TRX_ID */
mtr_t* mtr) /*!< in/out: mini-transaction */
- __attribute__((nonnull));
+ MY_ATTRIBUTE((nonnull));
/**********************************************************//**
Empty a previously created B-tree index page. */
UNIV_INTERN
@@ -820,7 +821,7 @@ page_create_empty(
buf_block_t* block, /*!< in/out: B-tree block */
dict_index_t* index, /*!< in: the index of the page */
mtr_t* mtr) /*!< in/out: mini-transaction */
- __attribute__((nonnull(1,2)));
+ MY_ATTRIBUTE((nonnull(1,2)));
/*************************************************************//**
Differs from page_copy_rec_list_end, because this function does not
touch the lock table and max trx id on page or compress the page.
@@ -859,7 +860,7 @@ page_copy_rec_list_end(
rec_t* rec, /*!< in: record on page */
dict_index_t* index, /*!< in: record descriptor */
mtr_t* mtr) /*!< in: mtr */
- __attribute__((nonnull));
+ MY_ATTRIBUTE((nonnull));
/*************************************************************//**
Copies records from page to new_page, up to the given record, NOT
including that record. Infimum and supremum records are not copied.
@@ -881,7 +882,7 @@ page_copy_rec_list_start(
rec_t* rec, /*!< in: record on page */
dict_index_t* index, /*!< in: record descriptor */
mtr_t* mtr) /*!< in: mtr */
- __attribute__((nonnull));
+ MY_ATTRIBUTE((nonnull));
/*************************************************************//**
Deletes records from a page from a given record onward, including that record.
The infimum and supremum records are not deleted. */
@@ -898,7 +899,7 @@ page_delete_rec_list_end(
records in the end of the chain to
delete, or ULINT_UNDEFINED if not known */
mtr_t* mtr) /*!< in: mtr */
- __attribute__((nonnull));
+ MY_ATTRIBUTE((nonnull));
/*************************************************************//**
Deletes records from page, up to the given record, NOT including
that record. Infimum and supremum records are not deleted. */
@@ -910,7 +911,7 @@ page_delete_rec_list_start(
buf_block_t* block, /*!< in: buffer block of the page */
dict_index_t* index, /*!< in: record descriptor */
mtr_t* mtr) /*!< in: mtr */
- __attribute__((nonnull));
+ MY_ATTRIBUTE((nonnull));
/*************************************************************//**
Moves record list end to another page. Moved records include
split_rec.
@@ -931,7 +932,7 @@ page_move_rec_list_end(
rec_t* split_rec, /*!< in: first record to move */
dict_index_t* index, /*!< in: record descriptor */
mtr_t* mtr) /*!< in: mtr */
- __attribute__((nonnull(1, 2, 4, 5)));
+ MY_ATTRIBUTE((nonnull(1, 2, 4, 5)));
/*************************************************************//**
Moves record list start to another page. Moved records do not include
split_rec.
@@ -951,7 +952,7 @@ page_move_rec_list_start(
rec_t* split_rec, /*!< in: first record not to move */
dict_index_t* index, /*!< in: record descriptor */
mtr_t* mtr) /*!< in: mtr */
- __attribute__((nonnull(1, 2, 4, 5)));
+ MY_ATTRIBUTE((nonnull(1, 2, 4, 5)));
/****************************************************************//**
Splits a directory slot which owns too many records. */
UNIV_INTERN
@@ -962,7 +963,7 @@ page_dir_split_slot(
page_zip_des_t* page_zip,/*!< in/out: compressed page whose
uncompressed part will be written, or NULL */
ulint slot_no)/*!< in: the directory slot */
- __attribute__((nonnull(1)));
+ MY_ATTRIBUTE((nonnull(1)));
/*************************************************************//**
Tries to balance the given directory slot with too few records
with the upper neighbor, so that there are at least the minimum number
@@ -975,7 +976,7 @@ page_dir_balance_slot(
page_t* page, /*!< in/out: index page */
page_zip_des_t* page_zip,/*!< in/out: compressed page, or NULL */
ulint slot_no)/*!< in: the directory slot */
- __attribute__((nonnull(1)));
+ MY_ATTRIBUTE((nonnull(1)));
/**********************************************************//**
Parses a log record of a record list end or start deletion.
@return end of log record or NULL */
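page_align() and page_offset() are marked ((const)) because they depend only on the pointer value: a page frame is UNIV_PAGE_SIZE-aligned, so frame start and in-page offset fall out of simple masking. Illustrative sketches only (InnoDB's versions go through ut_align_down() in page0page.ic):

	static inline page_t*
	sketch_page_align(const void* ptr)
	{
		return((page_t*) ((ulint) ptr & ~((ulint) UNIV_PAGE_SIZE - 1)));
	}

	static inline ulint
	sketch_page_offset(const void* ptr)
	{
		return(((ulint) ptr) & (UNIV_PAGE_SIZE - 1));
	}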
diff --git a/storage/innobase/include/page0types.h b/storage/innobase/include/page0types.h
index fb9250a5a3b..2892e860875 100644
--- a/storage/innobase/include/page0types.h
+++ b/storage/innobase/include/page0types.h
@@ -1,6 +1,6 @@
/*****************************************************************************
-Copyright (c) 1994, 2013, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 1994, 2016, Oracle and/or its affiliates. All Rights Reserved.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -131,7 +131,7 @@ page_zip_rec_set_deleted(
page_zip_des_t* page_zip,/*!< in/out: compressed page */
const byte* rec, /*!< in: record on the uncompressed page */
ulint flag) /*!< in: the deleted flag (nonzero=TRUE) */
- __attribute__((nonnull));
+ MY_ATTRIBUTE((nonnull));
/**********************************************************************//**
Write the "owned" flag of a record on a compressed page. The n_owned field
@@ -143,7 +143,7 @@ page_zip_rec_set_owned(
page_zip_des_t* page_zip,/*!< in/out: compressed page */
const byte* rec, /*!< in: record on the uncompressed page */
ulint flag) /*!< in: the owned flag (nonzero=TRUE) */
- __attribute__((nonnull));
+ MY_ATTRIBUTE((nonnull));
/**********************************************************************//**
Shift the dense page directory when a record is deleted. */
@@ -156,7 +156,7 @@ page_zip_dir_delete(
dict_index_t* index, /*!< in: index of rec */
const ulint* offsets,/*!< in: rec_get_offsets(rec) */
const byte* free) /*!< in: previous start of the free list */
- __attribute__((nonnull(1,2,3,4)));
+ MY_ATTRIBUTE((nonnull(1,2,3,4)));
/**********************************************************************//**
Add a slot to the dense page directory. */
@@ -167,5 +167,5 @@ page_zip_dir_add_slot(
page_zip_des_t* page_zip, /*!< in/out: compressed page */
ulint is_clustered) /*!< in: nonzero for clustered index,
zero for others */
- __attribute__((nonnull));
+ MY_ATTRIBUTE((nonnull));
#endif
diff --git a/storage/innobase/include/page0zip.h b/storage/innobase/include/page0zip.h
index 0c2abef4b09..4e362cec641 100644
--- a/storage/innobase/include/page0zip.h
+++ b/storage/innobase/include/page0zip.h
@@ -1,8 +1,9 @@
/*****************************************************************************
-Copyright (c) 2005, 2013, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 2005, 2016, Oracle and/or its affiliates. All Rights Reserved.
Copyright (c) 2012, Facebook Inc.
+Copyright (c) 2013, 2016, MariaDB Corporation
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -62,7 +63,7 @@ ulint
page_zip_get_size(
/*==============*/
const page_zip_des_t* page_zip) /*!< in: compressed page */
- __attribute__((nonnull, pure));
+ MY_ATTRIBUTE((nonnull, pure));
/**********************************************************************//**
Set the size of a compressed page in bytes. */
UNIV_INLINE
@@ -85,7 +86,7 @@ page_zip_rec_needs_ext(
ulint n_fields, /*!< in: number of fields in the record;
ignored if zip_size == 0 */
ulint zip_size) /*!< in: compressed page size in bytes, or 0 */
- __attribute__((const));
+ MY_ATTRIBUTE((const));
/**********************************************************************//**
Determine the guaranteed free space on an empty page.
@@ -96,7 +97,7 @@ page_zip_empty_size(
/*================*/
ulint n_fields, /*!< in: number of columns in the index */
ulint zip_size) /*!< in: compressed page size in bytes */
- __attribute__((const));
+ MY_ATTRIBUTE((const));
#endif /* !UNIV_HOTBACKUP */
/**********************************************************************//**
@@ -132,7 +133,7 @@ page_zip_compress(
dict_index_t* index, /*!< in: index of the B-tree node */
ulint level, /*!< in: compression level */
mtr_t* mtr) /*!< in: mini-transaction, or NULL */
- __attribute__((nonnull(1,2,3)));
+ MY_ATTRIBUTE((nonnull(1,2,3)));
/**********************************************************************//**
Decompress a page. This function should tolerate errors on the compressed
@@ -150,7 +151,7 @@ page_zip_decompress(
FALSE=verify but do not copy some
page header fields that should not change
after page creation */
- __attribute__((nonnull(1,2)));
+ MY_ATTRIBUTE((nonnull(1,2)));
#ifndef UNIV_INNOCHECKSUM
#ifdef UNIV_DEBUG
@@ -179,7 +180,7 @@ page_zip_validate_low(
const dict_index_t* index, /*!< in: index of the page, if known */
ibool sloppy) /*!< in: FALSE=strict,
TRUE=ignore the MIN_REC_FLAG */
- __attribute__((nonnull(1,2)));
+ MY_ATTRIBUTE((nonnull(1,2)));
/**********************************************************************//**
Check that the compressed and decompressed pages match. */
UNIV_INTERN
@@ -189,7 +190,7 @@ page_zip_validate(
const page_zip_des_t* page_zip,/*!< in: compressed page */
const page_t* page, /*!< in: uncompressed page */
const dict_index_t* index) /*!< in: index of the page, if known */
- __attribute__((nonnull(1,2)));
+ MY_ATTRIBUTE((nonnull(1,2)));
#endif /* UNIV_ZIP_DEBUG */
#ifndef UNIV_INNOCHECKSUM
@@ -203,7 +204,7 @@ page_zip_max_ins_size(
/*==================*/
const page_zip_des_t* page_zip,/*!< in: compressed page */
ibool is_clust)/*!< in: TRUE if clustered index */
- __attribute__((nonnull, pure));
+ MY_ATTRIBUTE((nonnull, pure));
/**********************************************************************//**
Determine if enough space is available in the modification log.
@@ -217,7 +218,7 @@ page_zip_available(
ulint length, /*!< in: combined size of the record */
ulint create) /*!< in: nonzero=add the record to
the heap */
- __attribute__((nonnull, pure));
+ MY_ATTRIBUTE((nonnull, pure));
/**********************************************************************//**
Write data to the uncompressed header portion of a page. The data must
@@ -230,7 +231,7 @@ page_zip_write_header(
const byte* str, /*!< in: address on the uncompressed page */
ulint length, /*!< in: length of the data */
mtr_t* mtr) /*!< in: mini-transaction, or NULL */
- __attribute__((nonnull(1,2)));
+ MY_ATTRIBUTE((nonnull(1,2)));
/**********************************************************************//**
Write an entire record on the compressed page. The data must already
@@ -244,7 +245,7 @@ page_zip_write_rec(
dict_index_t* index, /*!< in: the index the record belongs to */
const ulint* offsets,/*!< in: rec_get_offsets(rec, index) */
ulint create) /*!< in: nonzero=insert, zero=update */
- __attribute__((nonnull));
+ MY_ATTRIBUTE((nonnull));
/***********************************************************//**
Parses a log record of writing a BLOB pointer of a record.
@@ -273,7 +274,7 @@ page_zip_write_blob_ptr(
ulint n, /*!< in: column index */
mtr_t* mtr) /*!< in: mini-transaction handle,
or NULL if no logging is needed */
- __attribute__((nonnull(1,2,3,4)));
+ MY_ATTRIBUTE((nonnull(1,2,3,4)));
/***********************************************************//**
Parses a log record of writing the node pointer of a record.
@@ -298,7 +299,7 @@ page_zip_write_node_ptr(
ulint size, /*!< in: data size of rec */
ulint ptr, /*!< in: node pointer */
mtr_t* mtr) /*!< in: mini-transaction, or NULL */
- __attribute__((nonnull(1,2)));
+ MY_ATTRIBUTE((nonnull(1,2)));
/**********************************************************************//**
Write the trx_id and roll_ptr of a record on a B-tree leaf node page. */
@@ -312,7 +313,7 @@ page_zip_write_trx_id_and_roll_ptr(
ulint trx_id_col,/*!< in: column number of TRX_ID in rec */
trx_id_t trx_id, /*!< in: transaction identifier */
roll_ptr_t roll_ptr)/*!< in: roll_ptr */
- __attribute__((nonnull));
+ MY_ATTRIBUTE((nonnull));
/**********************************************************************//**
Write the "deleted" flag of a record on a compressed page. The flag must
@@ -324,7 +325,7 @@ page_zip_rec_set_deleted(
page_zip_des_t* page_zip,/*!< in/out: compressed page */
const byte* rec, /*!< in: record on the uncompressed page */
ulint flag) /*!< in: the deleted flag (nonzero=TRUE) */
- __attribute__((nonnull));
+ MY_ATTRIBUTE((nonnull));
/**********************************************************************//**
Write the "owned" flag of a record on a compressed page. The n_owned field
@@ -336,7 +337,7 @@ page_zip_rec_set_owned(
page_zip_des_t* page_zip,/*!< in/out: compressed page */
const byte* rec, /*!< in: record on the uncompressed page */
ulint flag) /*!< in: the owned flag (nonzero=TRUE) */
- __attribute__((nonnull));
+ MY_ATTRIBUTE((nonnull));
/**********************************************************************//**
Insert a record to the dense page directory. */
@@ -363,7 +364,7 @@ page_zip_dir_delete(
const ulint* offsets, /*!< in: rec_get_offsets(rec) */
const byte* free) /*!< in: previous start of
the free list */
- __attribute__((nonnull(1,2,3,4)));
+ MY_ATTRIBUTE((nonnull(1,2,3,4)));
/**********************************************************************//**
Add a slot to the dense page directory. */
@@ -374,7 +375,7 @@ page_zip_dir_add_slot(
page_zip_des_t* page_zip, /*!< in/out: compressed page */
ulint is_clustered) /*!< in: nonzero for clustered index,
zero for others */
- __attribute__((nonnull));
+ MY_ATTRIBUTE((nonnull));
/***********************************************************//**
Parses a log record of writing to the header of a page.
@@ -402,7 +403,7 @@ page_zip_write_header(
const byte* str, /*!< in: address on the uncompressed page */
ulint length, /*!< in: length of the data */
mtr_t* mtr) /*!< in: mini-transaction, or NULL */
- __attribute__((nonnull(1,2)));
+ MY_ATTRIBUTE((nonnull(1,2)));
/**********************************************************************//**
Reorganize and compress a page. This is a low-level operation for
@@ -425,7 +426,7 @@ page_zip_reorganize(
m_start, m_end, m_nonempty */
dict_index_t* index, /*!< in: index of the B-tree node */
mtr_t* mtr) /*!< in: mini-transaction */
- __attribute__((nonnull));
+ MY_ATTRIBUTE((nonnull));
#endif /* !UNIV_INNOCHECKSUM */
#ifndef UNIV_HOTBACKUP
@@ -446,7 +447,7 @@ page_zip_copy_recs(
const page_t* src, /*!< in: page */
dict_index_t* index, /*!< in: index of the B-tree */
mtr_t* mtr) /*!< in: mini-transaction */
- __attribute__((nonnull));
+ MY_ATTRIBUTE((nonnull));
#endif /* !UNIV_HOTBACKUP */
/**********************************************************************//**
@@ -460,7 +461,7 @@ page_zip_parse_compress(
byte* end_ptr,/*!< in: buffer end */
page_t* page, /*!< out: uncompressed page */
page_zip_des_t* page_zip)/*!< out: compressed page */
- __attribute__((nonnull(1,2)));
+ MY_ATTRIBUTE((nonnull(1,2)));
/**********************************************************************//**
Calculate the compressed page checksum.
@@ -472,7 +473,7 @@ page_zip_calc_checksum(
const void* data, /*!< in: compressed page */
ulint size, /*!< in: size of compressed page */
srv_checksum_algorithm_t algo) /*!< in: algorithm to use */
- __attribute__((nonnull));
+ MY_ATTRIBUTE((nonnull));
/**********************************************************************//**
Verify a compressed page's checksum.
@@ -508,7 +509,7 @@ page_zip_parse_compress_no_data(
page_t* page, /*!< in: uncompressed page */
page_zip_des_t* page_zip, /*!< out: compressed page */
dict_index_t* index) /*!< in: index */
- __attribute__((nonnull(1,2)));
+ MY_ATTRIBUTE((nonnull(1,2)));
/**********************************************************************//**
Reset the counters used for filling
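page_zip_calc_checksum() computes the checksum of a compressed page under a chosen algorithm; page_zip_verify_checksum(), declared nearby, is the real verification entry point. A hedged wrapper sketch built from the declarations above (the wrapper name is invented, and the page_zip_des_t::data field is assumed):

	/* Illustrative check: recompute with the configured algorithm
	and compare against a stored value. */
	static ibool
	sketch_checksum_ok(
		const page_zip_des_t*		page_zip,
		srv_checksum_algorithm_t	algo,
		ulint				stored)
	{
		return(stored == page_zip_calc_checksum(
			page_zip->data, page_zip_get_size(page_zip), algo));
	}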
diff --git a/storage/innobase/include/pars0pars.h b/storage/innobase/include/pars0pars.h
index 65ff7533828..73585c78a6a 100644
--- a/storage/innobase/include/pars0pars.h
+++ b/storage/innobase/include/pars0pars.h
@@ -1,6 +1,6 @@
/*****************************************************************************
-Copyright (c) 1996, 2012, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 1996, 2016, Oracle and/or its affiliates. All Rights Reserved.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -472,7 +472,7 @@ pars_complete_graph_for_exec(
query graph, or NULL for dummy graph */
trx_t* trx, /*!< in: transaction handle */
mem_heap_t* heap) /*!< in: memory heap from which allocated */
- __attribute__((nonnull(2,3), warn_unused_result));
+ MY_ATTRIBUTE((nonnull(2,3), warn_unused_result));
/****************************************************************//**
Create parser info struct.
@@ -628,7 +628,7 @@ pars_info_bind_ull_literal(
pars_info_t* info, /*!< in: info struct */
const char* name, /*!< in: name */
const ib_uint64_t* val) /*!< in: value */
- __attribute__((nonnull));
+ MY_ATTRIBUTE((nonnull));
/****************************************************************//**
Add bound id. */
diff --git a/storage/innobase/include/read0read.h b/storage/innobase/include/read0read.h
index 980faddf98e..ae75cfac6f5 100644
--- a/storage/innobase/include/read0read.h
+++ b/storage/innobase/include/read0read.h
@@ -1,6 +1,6 @@
/*****************************************************************************
-Copyright (c) 1997, 2012, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 1997, 2016, Oracle and/or its affiliates. All Rights Reserved.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -82,7 +82,7 @@ read_view_sees_trx_id(
/*==================*/
const read_view_t* view, /*!< in: read view */
trx_id_t trx_id) /*!< in: trx id */
- __attribute__((nonnull, warn_unused_result));
+ MY_ATTRIBUTE((nonnull, warn_unused_result));
/*********************************************************************//**
Prints a read view to stderr. */
UNIV_INTERN
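read_view_sees_trx_id() is the MVCC visibility test: a view sees a transaction id only if that transaction committed before the view was opened. The sketch below uses the classic two-limit scheme; the field and helper names are illustrative, not the real read_view_t layout:

	static inline ibool
	sketch_view_sees(
		trx_id_t	trx_id,
		trx_id_t	up_limit_id,	/* ids below this: visible */
		trx_id_t	low_limit_id)	/* ids at/above this: not */
	{
		if (trx_id < up_limit_id) {
			return(TRUE);
		}
		if (trx_id >= low_limit_id) {
			return(FALSE);
		}
		/* in between: the view's list of transactions that were
		active at view creation decides; conservative default
		for this sketch */
		return(FALSE);
	}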
diff --git a/storage/innobase/include/rem0cmp.h b/storage/innobase/include/rem0cmp.h
index cb3c85ac2c8..65116229fdc 100644
--- a/storage/innobase/include/rem0cmp.h
+++ b/storage/innobase/include/rem0cmp.h
@@ -1,6 +1,6 @@
/*****************************************************************************
-Copyright (c) 1994, 2012, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 1994, 2016, Oracle and/or its affiliates. All Rights Reserved.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -174,7 +174,7 @@ cmp_dtuple_rec_with_match_low(
bytes within the first field not completely
matched; when function returns, contains the
value for current comparison */
- __attribute__((nonnull));
+ MY_ATTRIBUTE((nonnull));
#define cmp_dtuple_rec_with_match(tuple,rec,offsets,fields,bytes) \
cmp_dtuple_rec_with_match_low( \
tuple,rec,offsets,dtuple_get_n_fields_cmp(tuple),fields,bytes)
@@ -218,7 +218,7 @@ cmp_rec_rec_simple(
struct TABLE* table) /*!< in: MySQL table, for reporting
duplicate key value if applicable,
or NULL */
- __attribute__((nonnull(1,2,3,4), warn_unused_result));
+ MY_ATTRIBUTE((nonnull(1,2,3,4), warn_unused_result));
/*************************************************************//**
This function is used to compare two physical records. Only the common
first fields are compared, and if an externally stored field is
diff --git a/storage/innobase/include/rem0rec.h b/storage/innobase/include/rem0rec.h
index 22899c2a815..83286a98f8e 100644
--- a/storage/innobase/include/rem0rec.h
+++ b/storage/innobase/include/rem0rec.h
@@ -1,6 +1,6 @@
/*****************************************************************************
-Copyright (c) 1994, 2012, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 1994, 2016, Oracle and/or its affiliates. All Rights Reserved.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -101,7 +101,7 @@ rec_get_next_ptr_const(
/*===================*/
const rec_t* rec, /*!< in: physical record */
ulint comp) /*!< in: nonzero=compact page format */
- __attribute__((nonnull, pure, warn_unused_result));
+ MY_ATTRIBUTE((nonnull, pure, warn_unused_result));
/******************************************************//**
The following function is used to get the pointer of the next chained record
on the same page.
@@ -112,7 +112,7 @@ rec_get_next_ptr(
/*=============*/
rec_t* rec, /*!< in: physical record */
ulint comp) /*!< in: nonzero=compact page format */
- __attribute__((nonnull, pure, warn_unused_result));
+ MY_ATTRIBUTE((nonnull, pure, warn_unused_result));
/******************************************************//**
The following function is used to get the offset of the
next chained record on the same page.
@@ -123,7 +123,7 @@ rec_get_next_offs(
/*==============*/
const rec_t* rec, /*!< in: physical record */
ulint comp) /*!< in: nonzero=compact page format */
- __attribute__((nonnull, pure, warn_unused_result));
+ MY_ATTRIBUTE((nonnull, pure, warn_unused_result));
/******************************************************//**
The following function is used to set the next record offset field
of an old-style record. */
@@ -133,7 +133,7 @@ rec_set_next_offs_old(
/*==================*/
rec_t* rec, /*!< in: old-style physical record */
ulint next) /*!< in: offset of the next record */
- __attribute__((nonnull));
+ MY_ATTRIBUTE((nonnull));
/******************************************************//**
The following function is used to set the next record offset field
of a new-style record. */
@@ -143,7 +143,7 @@ rec_set_next_offs_new(
/*==================*/
rec_t* rec, /*!< in/out: new-style physical record */
ulint next) /*!< in: offset of the next record */
- __attribute__((nonnull));
+ MY_ATTRIBUTE((nonnull));
/******************************************************//**
The following function is used to get the number of fields
in an old-style record.
@@ -153,7 +153,7 @@ ulint
rec_get_n_fields_old(
/*=================*/
const rec_t* rec) /*!< in: physical record */
- __attribute__((nonnull, pure, warn_unused_result));
+ MY_ATTRIBUTE((nonnull, pure, warn_unused_result));
/******************************************************//**
The following function is used to get the number of fields
in a record.
@@ -164,7 +164,7 @@ rec_get_n_fields(
/*=============*/
const rec_t* rec, /*!< in: physical record */
const dict_index_t* index) /*!< in: record descriptor */
- __attribute__((nonnull, pure, warn_unused_result));
+ MY_ATTRIBUTE((nonnull, pure, warn_unused_result));
/******************************************************//**
The following function is used to get the number of records owned by the
previous directory record.
@@ -174,7 +174,7 @@ ulint
rec_get_n_owned_old(
/*================*/
const rec_t* rec) /*!< in: old-style physical record */
- __attribute__((nonnull, pure, warn_unused_result));
+ MY_ATTRIBUTE((nonnull, pure, warn_unused_result));
/******************************************************//**
The following function is used to set the number of owned records. */
UNIV_INLINE
@@ -183,7 +183,7 @@ rec_set_n_owned_old(
/*================*/
rec_t* rec, /*!< in: old-style physical record */
ulint n_owned) /*!< in: the number of owned */
- __attribute__((nonnull));
+ MY_ATTRIBUTE((nonnull));
/******************************************************//**
The following function is used to get the number of records owned by the
previous directory record.
@@ -193,7 +193,7 @@ ulint
rec_get_n_owned_new(
/*================*/
const rec_t* rec) /*!< in: new-style physical record */
- __attribute__((nonnull, pure, warn_unused_result));
+ MY_ATTRIBUTE((nonnull, pure, warn_unused_result));
/******************************************************//**
The following function is used to set the number of owned records. */
UNIV_INLINE
@@ -203,7 +203,7 @@ rec_set_n_owned_new(
rec_t* rec, /*!< in/out: new-style physical record */
page_zip_des_t* page_zip,/*!< in/out: compressed page, or NULL */
ulint n_owned)/*!< in: the number of owned */
- __attribute__((nonnull(1)));
+ MY_ATTRIBUTE((nonnull(1)));
/******************************************************//**
The following function is used to retrieve the info bits of
a record.
@@ -214,7 +214,7 @@ rec_get_info_bits(
/*==============*/
const rec_t* rec, /*!< in: physical record */
ulint comp) /*!< in: nonzero=compact page format */
- __attribute__((nonnull, pure, warn_unused_result));
+ MY_ATTRIBUTE((nonnull, pure, warn_unused_result));
/******************************************************//**
The following function is used to set the info bits of a record. */
UNIV_INLINE
@@ -223,7 +223,7 @@ rec_set_info_bits_old(
/*==================*/
rec_t* rec, /*!< in: old-style physical record */
ulint bits) /*!< in: info bits */
- __attribute__((nonnull));
+ MY_ATTRIBUTE((nonnull));
/******************************************************//**
The following function is used to set the info bits of a record. */
UNIV_INLINE
@@ -232,7 +232,7 @@ rec_set_info_bits_new(
/*==================*/
rec_t* rec, /*!< in/out: new-style physical record */
ulint bits) /*!< in: info bits */
- __attribute__((nonnull));
+ MY_ATTRIBUTE((nonnull));
/******************************************************//**
The following function retrieves the status bits of a new-style record.
@return status bits */
@@ -241,7 +241,7 @@ ulint
rec_get_status(
/*===========*/
const rec_t* rec) /*!< in: physical record */
- __attribute__((nonnull, pure, warn_unused_result));
+ MY_ATTRIBUTE((nonnull, pure, warn_unused_result));
/******************************************************//**
The following function is used to set the status bits of a new-style record. */
@@ -251,7 +251,7 @@ rec_set_status(
/*===========*/
rec_t* rec, /*!< in/out: physical record */
ulint bits) /*!< in: info bits */
- __attribute__((nonnull));
+ MY_ATTRIBUTE((nonnull));
/******************************************************//**
The following function is used to retrieve the info and status
@@ -263,7 +263,7 @@ rec_get_info_and_status_bits(
/*=========================*/
const rec_t* rec, /*!< in: physical record */
ulint comp) /*!< in: nonzero=compact page format */
- __attribute__((nonnull, pure, warn_unused_result));
+ MY_ATTRIBUTE((nonnull, pure, warn_unused_result));
/******************************************************//**
The following function is used to set the info and status
bits of a record. (Only compact records have status bits.) */
@@ -273,7 +273,7 @@ rec_set_info_and_status_bits(
/*=========================*/
rec_t* rec, /*!< in/out: compact physical record */
ulint bits) /*!< in: info bits */
- __attribute__((nonnull));
+ MY_ATTRIBUTE((nonnull));
/******************************************************//**
The following function tells if record is delete marked.
@@ -284,7 +284,7 @@ rec_get_deleted_flag(
/*=================*/
const rec_t* rec, /*!< in: physical record */
ulint comp) /*!< in: nonzero=compact page format */
- __attribute__((nonnull, pure, warn_unused_result));
+ MY_ATTRIBUTE((nonnull, pure, warn_unused_result));
/******************************************************//**
The following function is used to set the deleted bit. */
UNIV_INLINE
@@ -293,7 +293,7 @@ rec_set_deleted_flag_old(
/*=====================*/
rec_t* rec, /*!< in: old-style physical record */
ulint flag) /*!< in: nonzero if delete marked */
- __attribute__((nonnull));
+ MY_ATTRIBUTE((nonnull));
/******************************************************//**
The following function is used to set the deleted bit. */
UNIV_INLINE
@@ -303,7 +303,7 @@ rec_set_deleted_flag_new(
rec_t* rec, /*!< in/out: new-style physical record */
page_zip_des_t* page_zip,/*!< in/out: compressed page, or NULL */
ulint flag) /*!< in: nonzero if delete marked */
- __attribute__((nonnull(1)));
+ MY_ATTRIBUTE((nonnull(1)));
/******************************************************//**
The following function tells if a new-style record is a node pointer.
@return TRUE if node pointer */
@@ -312,7 +312,7 @@ ibool
rec_get_node_ptr_flag(
/*==================*/
const rec_t* rec) /*!< in: physical record */
- __attribute__((nonnull, pure, warn_unused_result));
+ MY_ATTRIBUTE((nonnull, pure, warn_unused_result));
/******************************************************//**
The following function is used to get the order number
of an old-style record in the heap of the index page.
@@ -322,7 +322,7 @@ ulint
rec_get_heap_no_old(
/*================*/
const rec_t* rec) /*!< in: physical record */
- __attribute__((nonnull, pure, warn_unused_result));
+ MY_ATTRIBUTE((nonnull, pure, warn_unused_result));
/******************************************************//**
The following function is used to set the heap number
field in an old-style record. */
@@ -332,7 +332,7 @@ rec_set_heap_no_old(
/*================*/
rec_t* rec, /*!< in: physical record */
ulint heap_no)/*!< in: the heap number */
- __attribute__((nonnull));
+ MY_ATTRIBUTE((nonnull));
/******************************************************//**
The following function is used to get the order number
of a new-style record in the heap of the index page.
@@ -342,7 +342,7 @@ ulint
rec_get_heap_no_new(
/*================*/
const rec_t* rec) /*!< in: physical record */
- __attribute__((nonnull, pure, warn_unused_result));
+ MY_ATTRIBUTE((nonnull, pure, warn_unused_result));
/******************************************************//**
The following function is used to set the heap number
field in a new-style record. */
@@ -352,7 +352,7 @@ rec_set_heap_no_new(
/*================*/
rec_t* rec, /*!< in/out: physical record */
ulint heap_no)/*!< in: the heap number */
- __attribute__((nonnull));
+ MY_ATTRIBUTE((nonnull));
/******************************************************//**
The following function is used to test whether the data offsets
in the record are stored in one-byte or two-byte format.
@@ -362,7 +362,7 @@ ibool
rec_get_1byte_offs_flag(
/*====================*/
const rec_t* rec) /*!< in: physical record */
- __attribute__((nonnull, pure, warn_unused_result));
+ MY_ATTRIBUTE((nonnull, pure, warn_unused_result));
/******************************************************//**
The following function is used to set the 1-byte offsets flag. */
@@ -372,7 +372,7 @@ rec_set_1byte_offs_flag(
/*====================*/
rec_t* rec, /*!< in: physical record */
ibool flag) /*!< in: TRUE if 1byte form */
- __attribute__((nonnull));
+ MY_ATTRIBUTE((nonnull));
/******************************************************//**
Returns the offset of nth field end if the record is stored in the 1-byte
@@ -385,7 +385,7 @@ rec_1_get_field_end_info(
/*=====================*/
const rec_t* rec, /*!< in: record */
ulint n) /*!< in: field index */
- __attribute__((nonnull, pure, warn_unused_result));
+ MY_ATTRIBUTE((nonnull, pure, warn_unused_result));
/******************************************************//**
Returns the offset of nth field end if the record is stored in the 2-byte
@@ -399,7 +399,7 @@ rec_2_get_field_end_info(
/*=====================*/
const rec_t* rec, /*!< in: record */
ulint n) /*!< in: field index */
- __attribute__((nonnull, pure, warn_unused_result));
+ MY_ATTRIBUTE((nonnull, pure, warn_unused_result));
/******************************************************//**
Returns nonzero if the field is stored off-page.
@@ -411,7 +411,7 @@ rec_2_is_field_extern(
/*==================*/
const rec_t* rec, /*!< in: record */
ulint n) /*!< in: field index */
- __attribute__((nonnull, pure, warn_unused_result));
+ MY_ATTRIBUTE((nonnull, pure, warn_unused_result));
/******************************************************//**
Determine how many of the first n columns in a compact
@@ -424,7 +424,7 @@ rec_get_n_extern_new(
const rec_t* rec, /*!< in: compact physical record */
const dict_index_t* index, /*!< in: record descriptor */
ulint n) /*!< in: number of columns to scan */
- __attribute__((nonnull, warn_unused_result));
+ MY_ATTRIBUTE((nonnull, warn_unused_result));
/******************************************************//**
The following function determines the offsets to each field
@@ -449,9 +449,9 @@ rec_get_offsets_func(
#endif /* UNIV_DEBUG */
mem_heap_t** heap) /*!< in/out: memory heap */
#ifdef UNIV_DEBUG
- __attribute__((nonnull(1,2,5,7),warn_unused_result));
+ MY_ATTRIBUTE((nonnull(1,2,5,7),warn_unused_result));
#else /* UNIV_DEBUG */
- __attribute__((nonnull(1,2,5),warn_unused_result));
+ MY_ATTRIBUTE((nonnull(1,2,5),warn_unused_result));
#endif /* UNIV_DEBUG */
#ifdef UNIV_DEBUG
@@ -478,7 +478,7 @@ rec_get_offsets_reverse(
0=leaf node */
ulint* offsets)/*!< in/out: array consisting of
offsets[0] allocated elements */
- __attribute__((nonnull));
+ MY_ATTRIBUTE((nonnull));
#ifdef UNIV_DEBUG
/************************************************************//**
Validates offsets returned by rec_get_offsets().
@@ -491,7 +491,7 @@ rec_offs_validate(
const dict_index_t* index, /*!< in: record descriptor or NULL */
const ulint* offsets)/*!< in: array returned by
rec_get_offsets() */
- __attribute__((nonnull(3), warn_unused_result));
+ MY_ATTRIBUTE((nonnull(3), warn_unused_result));
/************************************************************//**
Updates debug data in offsets, in order to avoid bogus
rec_offs_validate() failures. */
@@ -503,7 +503,7 @@ rec_offs_make_valid(
const dict_index_t* index, /*!< in: record descriptor */
ulint* offsets)/*!< in: array returned by
rec_get_offsets() */
- __attribute__((nonnull));
+ MY_ATTRIBUTE((nonnull));
#else
# define rec_offs_make_valid(rec, index, offsets) ((void) 0)
#endif /* UNIV_DEBUG */
@@ -520,7 +520,7 @@ rec_get_nth_field_offs_old(
ulint n, /*!< in: index of the field */
ulint* len) /*!< out: length of the field; UNIV_SQL_NULL
if SQL null */
- __attribute__((nonnull));
+ MY_ATTRIBUTE((nonnull));
#define rec_get_nth_field_old(rec, n, len) \
((rec) + rec_get_nth_field_offs_old(rec, n, len))
/************************************************************//**
@@ -534,7 +534,7 @@ rec_get_nth_field_size(
/*===================*/
const rec_t* rec, /*!< in: record */
ulint n) /*!< in: index of the field */
- __attribute__((nonnull, pure, warn_unused_result));
+ MY_ATTRIBUTE((nonnull, pure, warn_unused_result));
/************************************************************//**
The following function is used to get an offset to the nth
data field in a record.
@@ -547,7 +547,7 @@ rec_get_nth_field_offs(
ulint n, /*!< in: index of the field */
ulint* len) /*!< out: length of the field; UNIV_SQL_NULL
if SQL null */
- __attribute__((nonnull));
+ MY_ATTRIBUTE((nonnull));
#define rec_get_nth_field(rec, offsets, n, len) \
((rec) + rec_get_nth_field_offs(offsets, n, len))
/******************************************************//**
@@ -559,7 +559,7 @@ ulint
rec_offs_comp(
/*==========*/
const ulint* offsets)/*!< in: array returned by rec_get_offsets() */
- __attribute__((nonnull, pure, warn_unused_result));
+ MY_ATTRIBUTE((nonnull, pure, warn_unused_result));
/******************************************************//**
Determine if the offsets are for a record containing
externally stored columns.
@@ -569,7 +569,7 @@ ulint
rec_offs_any_extern(
/*================*/
const ulint* offsets)/*!< in: array returned by rec_get_offsets() */
- __attribute__((nonnull, pure, warn_unused_result));
+ MY_ATTRIBUTE((nonnull, pure, warn_unused_result));
/******************************************************//**
Determine if the offsets are for a record containing null BLOB pointers.
@return first field containing a null BLOB pointer, or NULL if none found */
@@ -579,7 +579,7 @@ rec_offs_any_null_extern(
/*=====================*/
const rec_t* rec, /*!< in: record */
const ulint* offsets) /*!< in: rec_get_offsets(rec) */
- __attribute__((nonnull, pure, warn_unused_result));
+ MY_ATTRIBUTE((nonnull, pure, warn_unused_result));
/******************************************************//**
Returns nonzero if the extern bit is set in nth field of rec.
@return nonzero if externally stored */
@@ -589,7 +589,7 @@ rec_offs_nth_extern(
/*================*/
const ulint* offsets,/*!< in: array returned by rec_get_offsets() */
ulint n) /*!< in: nth field */
- __attribute__((nonnull, pure, warn_unused_result));
+ MY_ATTRIBUTE((nonnull, pure, warn_unused_result));
/******************************************************//**
Returns nonzero if the SQL NULL bit is set in nth field of rec.
@return nonzero if SQL NULL */
@@ -599,7 +599,7 @@ rec_offs_nth_sql_null(
/*==================*/
const ulint* offsets,/*!< in: array returned by rec_get_offsets() */
ulint n) /*!< in: nth field */
- __attribute__((nonnull, pure, warn_unused_result));
+ MY_ATTRIBUTE((nonnull, pure, warn_unused_result));
/******************************************************//**
Gets the physical size of a field.
@return length of field */
@@ -609,7 +609,7 @@ rec_offs_nth_size(
/*==============*/
const ulint* offsets,/*!< in: array returned by rec_get_offsets() */
ulint n) /*!< in: nth field */
- __attribute__((nonnull, pure, warn_unused_result));
+ MY_ATTRIBUTE((nonnull, pure, warn_unused_result));
/******************************************************//**
Returns the number of extern bits set in a record.
@@ -619,7 +619,7 @@ ulint
rec_offs_n_extern(
/*==============*/
const ulint* offsets)/*!< in: array returned by rec_get_offsets() */
- __attribute__((nonnull, pure, warn_unused_result));
+ MY_ATTRIBUTE((nonnull, pure, warn_unused_result));
/***********************************************************//**
This is used to modify the value of an already existing field in a record.
The previous value must have exactly the same size as the new value. If len
@@ -639,7 +639,7 @@ rec_set_nth_field(
length as the previous value.
If SQL null, previous value must be
SQL null. */
- __attribute__((nonnull(1,2)));
+ MY_ATTRIBUTE((nonnull(1,2)));
/**********************************************************//**
The following function returns the data size of an old-style physical
record, that is the sum of field lengths. SQL null fields
@@ -651,7 +651,7 @@ ulint
rec_get_data_size_old(
/*==================*/
const rec_t* rec) /*!< in: physical record */
- __attribute__((nonnull, pure, warn_unused_result));
+ MY_ATTRIBUTE((nonnull, pure, warn_unused_result));
/**********************************************************//**
The following function returns the number of allocated elements
for an array of offsets.
@@ -661,7 +661,7 @@ ulint
rec_offs_get_n_alloc(
/*=================*/
const ulint* offsets)/*!< in: array for rec_get_offsets() */
- __attribute__((nonnull, pure, warn_unused_result));
+ MY_ATTRIBUTE((nonnull, pure, warn_unused_result));
/**********************************************************//**
The following function sets the number of allocated elements
for an array of offsets. */
@@ -672,7 +672,7 @@ rec_offs_set_n_alloc(
ulint* offsets, /*!< out: array for rec_get_offsets(),
must be allocated */
ulint n_alloc) /*!< in: number of elements */
- __attribute__((nonnull));
+ MY_ATTRIBUTE((nonnull));
#define rec_offs_init(offsets) \
rec_offs_set_n_alloc(offsets, (sizeof offsets) / sizeof *offsets)
/**********************************************************//**
@@ -683,7 +683,7 @@ ulint
rec_offs_n_fields(
/*==============*/
const ulint* offsets)/*!< in: array returned by rec_get_offsets() */
- __attribute__((nonnull, pure, warn_unused_result));
+ MY_ATTRIBUTE((nonnull, pure, warn_unused_result));
/**********************************************************//**
The following function returns the data size of a physical
record, that is the sum of field lengths. SQL null fields
@@ -695,7 +695,7 @@ ulint
rec_offs_data_size(
/*===============*/
const ulint* offsets)/*!< in: array returned by rec_get_offsets() */
- __attribute__((nonnull, pure, warn_unused_result));
+ MY_ATTRIBUTE((nonnull, pure, warn_unused_result));
/**********************************************************//**
Returns the total size of record minus data size of record.
The value returned by the function is the distance from record
@@ -706,7 +706,7 @@ ulint
rec_offs_extra_size(
/*================*/
const ulint* offsets)/*!< in: array returned by rec_get_offsets() */
- __attribute__((nonnull, pure, warn_unused_result));
+ MY_ATTRIBUTE((nonnull, pure, warn_unused_result));
/**********************************************************//**
Returns the total size of a physical record.
@return size */
@@ -715,7 +715,7 @@ ulint
rec_offs_size(
/*==========*/
const ulint* offsets)/*!< in: array returned by rec_get_offsets() */
- __attribute__((nonnull, pure, warn_unused_result));
+ MY_ATTRIBUTE((nonnull, pure, warn_unused_result));
#ifdef UNIV_DEBUG
/**********************************************************//**
Returns a pointer to the start of the record.
@@ -726,7 +726,7 @@ rec_get_start(
/*==========*/
const rec_t* rec, /*!< in: pointer to record */
const ulint* offsets)/*!< in: array returned by rec_get_offsets() */
- __attribute__((nonnull, pure, warn_unused_result));
+ MY_ATTRIBUTE((nonnull, pure, warn_unused_result));
/**********************************************************//**
Returns a pointer to the end of the record.
@return pointer to end */
@@ -736,7 +736,7 @@ rec_get_end(
/*========*/
const rec_t* rec, /*!< in: pointer to record */
const ulint* offsets)/*!< in: array returned by rec_get_offsets() */
- __attribute__((nonnull, pure, warn_unused_result));
+ MY_ATTRIBUTE((nonnull, pure, warn_unused_result));
#else /* UNIV_DEBUG */
# define rec_get_start(rec, offsets) ((rec) - rec_offs_extra_size(offsets))
# define rec_get_end(rec, offsets) ((rec) + rec_offs_data_size(offsets))
@@ -751,7 +751,7 @@ rec_copy(
void* buf, /*!< in: buffer */
const rec_t* rec, /*!< in: physical record */
const ulint* offsets)/*!< in: array returned by rec_get_offsets() */
- __attribute__((nonnull));
+ MY_ATTRIBUTE((nonnull));
#ifndef UNIV_HOTBACKUP
/**********************************************************//**
Determines the size of a data tuple prefix in a temporary file.
@@ -764,7 +764,7 @@ rec_get_converted_size_temp(
const dfield_t* fields, /*!< in: array of data fields */
ulint n_fields,/*!< in: number of data fields */
ulint* extra) /*!< out: extra size */
- __attribute__((warn_unused_result, nonnull));
+ MY_ATTRIBUTE((warn_unused_result, nonnull));
/******************************************************//**
Determine the offset to each field in temporary file.
@@ -777,7 +777,7 @@ rec_init_offsets_temp(
const dict_index_t* index, /*!< in: record descriptor */
ulint* offsets)/*!< in/out: array of offsets;
in: n=rec_offs_n_fields(offsets) */
- __attribute__((nonnull));
+ MY_ATTRIBUTE((nonnull));
/*********************************************************//**
Builds a temporary file record out of a data tuple.
@@ -790,7 +790,7 @@ rec_convert_dtuple_to_temp(
const dict_index_t* index, /*!< in: record descriptor */
const dfield_t* fields, /*!< in: array of data fields */
ulint n_fields) /*!< in: number of fields */
- __attribute__((nonnull));
+ MY_ATTRIBUTE((nonnull));
/**************************************************************//**
Copies the first n fields of a physical record to a new physical record in
@@ -808,7 +808,7 @@ rec_copy_prefix_to_buf(
for the copied prefix,
or NULL */
ulint* buf_size) /*!< in/out: buffer size */
- __attribute__((nonnull));
+ MY_ATTRIBUTE((nonnull));
/************************************************************//**
Folds a prefix of a physical record to a ulint.
@return the folded value */
@@ -824,7 +824,7 @@ rec_fold(
ulint n_bytes, /*!< in: number of bytes to fold
in an incomplete last field */
index_id_t tree_id) /*!< in: index tree id */
- __attribute__((nonnull, pure, warn_unused_result));
+ MY_ATTRIBUTE((nonnull, pure, warn_unused_result));
#endif /* !UNIV_HOTBACKUP */
/*********************************************************//**
Builds a physical record out of a data tuple and
@@ -840,7 +840,7 @@ rec_convert_dtuple_to_rec(
const dtuple_t* dtuple, /*!< in: data tuple */
ulint n_ext) /*!< in: number of
externally stored columns */
- __attribute__((nonnull, warn_unused_result));
+ MY_ATTRIBUTE((nonnull, warn_unused_result));
/**********************************************************//**
Returns the extra size of an old-style physical record if we know its
data size and number of fields.
@@ -852,7 +852,7 @@ rec_get_converted_extra_size(
ulint data_size, /*!< in: data size */
ulint n_fields, /*!< in: number of fields */
ulint n_ext) /*!< in: number of externally stored columns */
- __attribute__((const));
+ MY_ATTRIBUTE((const));
/**********************************************************//**
Determines the size of a data tuple prefix in ROW_FORMAT=COMPACT.
@return total size */
@@ -864,7 +864,7 @@ rec_get_converted_size_comp_prefix(
const dfield_t* fields, /*!< in: array of data fields */
ulint n_fields,/*!< in: number of data fields */
ulint* extra) /*!< out: extra size */
- __attribute__((warn_unused_result, nonnull(1,2)));
+ MY_ATTRIBUTE((warn_unused_result, nonnull(1,2)));
/**********************************************************//**
Determines the size of a data tuple in ROW_FORMAT=COMPACT.
@return total size */
@@ -880,7 +880,7 @@ rec_get_converted_size_comp(
const dfield_t* fields, /*!< in: array of data fields */
ulint n_fields,/*!< in: number of data fields */
ulint* extra) /*!< out: extra size */
- __attribute__((nonnull(1,3)));
+ MY_ATTRIBUTE((nonnull(1,3)));
/**********************************************************//**
The following function returns the size of a data tuple when converted to
a physical record.
@@ -892,7 +892,7 @@ rec_get_converted_size(
dict_index_t* index, /*!< in: record descriptor */
const dtuple_t* dtuple, /*!< in: data tuple */
ulint n_ext) /*!< in: number of externally stored columns */
- __attribute__((warn_unused_result, nonnull));
+ MY_ATTRIBUTE((warn_unused_result, nonnull));
#ifndef UNIV_HOTBACKUP
/**************************************************************//**
Copies the first n fields of a physical record to a data tuple.
@@ -907,7 +907,7 @@ rec_copy_prefix_to_dtuple(
ulint n_fields, /*!< in: number of fields
to copy */
mem_heap_t* heap) /*!< in: memory heap */
- __attribute__((nonnull));
+ MY_ATTRIBUTE((nonnull));
#endif /* !UNIV_HOTBACKUP */
/***************************************************************//**
Validates the consistency of a physical record.
@@ -918,7 +918,7 @@ rec_validate(
/*=========*/
const rec_t* rec, /*!< in: physical record */
const ulint* offsets)/*!< in: array returned by rec_get_offsets() */
- __attribute__((nonnull));
+ MY_ATTRIBUTE((nonnull));
/***************************************************************//**
Prints an old-style physical record. */
UNIV_INTERN
@@ -927,7 +927,7 @@ rec_print_old(
/*==========*/
FILE* file, /*!< in: file where to print */
const rec_t* rec) /*!< in: physical record */
- __attribute__((nonnull));
+ MY_ATTRIBUTE((nonnull));
#ifndef UNIV_HOTBACKUP
/***************************************************************//**
Prints a physical record in ROW_FORMAT=COMPACT. Ignores the
@@ -939,7 +939,7 @@ rec_print_comp(
FILE* file, /*!< in: file where to print */
const rec_t* rec, /*!< in: physical record */
const ulint* offsets)/*!< in: array returned by rec_get_offsets() */
- __attribute__((nonnull));
+ MY_ATTRIBUTE((nonnull));
/***************************************************************//**
Prints a physical record. */
UNIV_INTERN
@@ -949,7 +949,7 @@ rec_print_new(
FILE* file, /*!< in: file where to print */
const rec_t* rec, /*!< in: physical record */
const ulint* offsets)/*!< in: array returned by rec_get_offsets() */
- __attribute__((nonnull));
+ MY_ATTRIBUTE((nonnull));
/***************************************************************//**
Prints a physical record. */
UNIV_INTERN
@@ -959,7 +959,7 @@ rec_print(
FILE* file, /*!< in: file where to print */
const rec_t* rec, /*!< in: physical record */
const dict_index_t* index) /*!< in: record descriptor */
- __attribute__((nonnull));
+ MY_ATTRIBUTE((nonnull));
# ifdef UNIV_DEBUG
/************************************************************//**
@@ -971,7 +971,7 @@ rec_get_trx_id(
/*===========*/
const rec_t* rec, /*!< in: record */
const dict_index_t* index) /*!< in: clustered index */
- __attribute__((nonnull, warn_unused_result));
+ MY_ATTRIBUTE((nonnull, warn_unused_result));
# endif /* UNIV_DEBUG */
#endif /* UNIV_HOTBACKUP */
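For context on the change running through every hunk of this patch: MY_ATTRIBUTE is the portability wrapper MariaDB uses around GCC's __attribute__ syntax, so the annotations stay active on GCC/Clang and disappear on compilers that do not understand them. A minimal sketch of the definition, assuming the simplified form found in include/my_attribute.h:

#ifndef MY_ATTRIBUTE
# if defined(__GNUC__)
   /* GCC-compatible compilers understand __attribute__ */
#  define MY_ATTRIBUTE(A) __attribute__(A)
# else
   /* elsewhere the annotation expands to nothing */
#  define MY_ATTRIBUTE(A)
# endif
#endif

/* A hypothetical declaration in the style of the headers above:
   on GCC this keeps the nonnull/warn_unused_result diagnostics,
   while e.g. MSVC sees a plain declaration. */
unsigned long
rec_example_get_field(const unsigned char* rec)
	MY_ATTRIBUTE((nonnull, warn_unused_result));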
diff --git a/storage/innobase/include/rem0rec.ic b/storage/innobase/include/rem0rec.ic
index a539320dd2a..5811a77a48b 100644
--- a/storage/innobase/include/rem0rec.ic
+++ b/storage/innobase/include/rem0rec.ic
@@ -1,6 +1,6 @@
/*****************************************************************************
-Copyright (c) 1994, 2013, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 1994, 2016, Oracle and/or its affiliates. All Rights Reserved.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -1540,7 +1540,8 @@ rec_copy(
ulint extra_len;
ulint data_len;
- ut_ad(rec && buf);
+ ut_ad(rec != NULL);
+ ut_ad(buf != NULL);
ut_ad(rec_offs_validate(rec, NULL, offsets));
ut_ad(rec_validate(rec, offsets));
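Besides the copyright bump, the rec_copy() hunk above splits one compound debug assertion into two, so a failure report names the exact NULL pointer instead of the opaque condition "rec && buf". A self-contained sketch, assuming ut_ad() behaves like assert() when UNIV_DEBUG is defined:

#include <assert.h>
#include <stddef.h>

/* Stand-in for InnoDB's ut_ad(); like assert(), it prints the
   text of the failing expression. */
#define ut_ad(expr) assert(expr)

static void
rec_copy_sketch(const unsigned char* rec, void* buf)
{
	/* Separate assertions: a failure now reports either
	   "rec != NULL" or "buf != NULL", pinpointing the culprit. */
	ut_ad(rec != NULL);
	ut_ad(buf != NULL);
	(void) rec;
	(void) buf;
}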
diff --git a/storage/innobase/include/row0ftsort.h b/storage/innobase/include/row0ftsort.h
index eeef10f3397..00bd3317de3 100644
--- a/storage/innobase/include/row0ftsort.h
+++ b/storage/innobase/include/row0ftsort.h
@@ -1,7 +1,7 @@
/*****************************************************************************
-Copyright (c) 2010, 2012, Oracle and/or its affiliates. All Rights Reserved.
-Copyright (c) 2015, MariaDB Corporation.
+Copyright (c) 2010, 2016, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 2015, 2016, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -193,7 +193,7 @@ row_fts_psort_info_init(
instantiated */
fts_psort_t** merge) /*!< out: parallel merge info
to be instantiated */
- __attribute__((nonnull));
+ MY_ATTRIBUTE((nonnull));
/********************************************************************//**
Clean up and deallocate FTS parallel sort structures, and close
temporary merge sort files */
@@ -281,5 +281,5 @@ row_fts_merge_insert(
fts_psort_t* psort_info, /*!< parallel sort info */
ulint id) /*!< in: which auxiliary table's data
to insert to */
- __attribute__((nonnull));
+ MY_ATTRIBUTE((nonnull));
#endif /* row0ftsort_h */
diff --git a/storage/innobase/include/row0import.h b/storage/innobase/include/row0import.h
index aa46fdb7c27..a821c230a3b 100644
--- a/storage/innobase/include/row0import.h
+++ b/storage/innobase/include/row0import.h
@@ -1,6 +1,6 @@
/*****************************************************************************
-Copyright (c) 2012, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 2012, 2016, Oracle and/or its affiliates. All Rights Reserved.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -46,7 +46,7 @@ row_import_for_mysql(
dict_table_t* table, /*!< in/out: table */
row_prebuilt_t* prebuilt) /*!< in: prebuilt struct
in MySQL */
- __attribute__((nonnull, warn_unused_result));
+ MY_ATTRIBUTE((nonnull, warn_unused_result));
/*****************************************************************//**
Update the DICT_TF2_DISCARDED flag in SYS_TABLES.
@@ -64,7 +64,7 @@ row_import_update_discarded_flag(
bool dict_locked) /*!< in: Set to true if the
caller already owns the
dict_sys_t:: mutex. */
- __attribute__((nonnull, warn_unused_result));
+ MY_ATTRIBUTE((nonnull, warn_unused_result));
/*****************************************************************//**
Update the (space, root page) of a table's indexes from the values
@@ -83,7 +83,7 @@ row_import_update_index_root(
bool dict_locked) /*!< in: Set to true if the
caller already owns the
dict_sys_t:: mutex. */
- __attribute__((nonnull, warn_unused_result));
+ MY_ATTRIBUTE((nonnull, warn_unused_result));
#ifndef UNIV_NONINL
#include "row0import.ic"
#endif
diff --git a/storage/innobase/include/row0ins.h b/storage/innobase/include/row0ins.h
index 2a892d2f5df..71ee39070ef 100644
--- a/storage/innobase/include/row0ins.h
+++ b/storage/innobase/include/row0ins.h
@@ -1,6 +1,6 @@
/*****************************************************************************
-Copyright (c) 1996, 2012, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 1996, 2016, Oracle and/or its affiliates. All Rights Reserved.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -53,7 +53,7 @@ row_ins_check_foreign_constraint(
table, else the referenced table */
dtuple_t* entry, /*!< in: index entry for index */
que_thr_t* thr) /*!< in: query thread */
- __attribute__((nonnull, warn_unused_result));
+ MY_ATTRIBUTE((nonnull, warn_unused_result));
/*********************************************************************//**
Creates an insert node struct.
@return own: insert node struct */
@@ -98,7 +98,7 @@ row_ins_clust_index_entry_low(
dtuple_t* entry, /*!< in/out: index entry to insert */
ulint n_ext, /*!< in: number of externally stored columns */
que_thr_t* thr) /*!< in: query thread or NULL */
- __attribute__((nonnull, warn_unused_result));
+ MY_ATTRIBUTE((nonnull, warn_unused_result));
/***************************************************************//**
Tries to insert an entry into a secondary index. If a record with exactly the
same fields is found, the other record is necessarily marked deleted.
@@ -123,7 +123,7 @@ row_ins_sec_index_entry_low(
trx_id_t trx_id, /*!< in: PAGE_MAX_TRX_ID during
row_log_table_apply(), or 0 */
que_thr_t* thr) /*!< in: query thread */
- __attribute__((nonnull, warn_unused_result));
+ MY_ATTRIBUTE((nonnull, warn_unused_result));
/***************************************************************//**
Tries to insert the externally stored fields (off-page columns)
of a clustered index entry.
@@ -142,7 +142,7 @@ row_ins_index_entry_big_rec_func(
const void* thd, /*!< in: connection, or NULL */
#endif /* DBUG_OFF */
ulint line) /*!< in: line number of caller */
- __attribute__((nonnull(1,2,3,4,5,6), warn_unused_result));
+ MY_ATTRIBUTE((nonnull(1,2,3,4,5,6), warn_unused_result));
#ifdef DBUG_OFF
# define row_ins_index_entry_big_rec(e,big,ofs,heap,index,thd,file,line) \
row_ins_index_entry_big_rec_func(e,big,ofs,heap,index,file,line)
@@ -164,7 +164,7 @@ row_ins_clust_index_entry(
dtuple_t* entry, /*!< in/out: index entry to insert */
que_thr_t* thr, /*!< in: query thread */
ulint n_ext) /*!< in: number of externally stored columns */
- __attribute__((nonnull, warn_unused_result));
+ MY_ATTRIBUTE((nonnull, warn_unused_result));
/***************************************************************//**
Inserts an entry into a secondary index. Tries first optimistic,
then pessimistic descent down the tree. If the entry matches enough
@@ -178,7 +178,7 @@ row_ins_sec_index_entry(
dict_index_t* index, /*!< in: secondary index */
dtuple_t* entry, /*!< in/out: index entry to insert */
que_thr_t* thr) /*!< in: query thread */
- __attribute__((nonnull, warn_unused_result));
+ MY_ATTRIBUTE((nonnull, warn_unused_result));
/***********************************************************//**
Inserts a row to a table. This is a high-level function used in
SQL execution graphs.
diff --git a/storage/innobase/include/row0log.h b/storage/innobase/include/row0log.h
index e127504c484..5ff148ff045 100644
--- a/storage/innobase/include/row0log.h
+++ b/storage/innobase/include/row0log.h
@@ -1,6 +1,6 @@
/*****************************************************************************
-Copyright (c) 2011, 2015, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 2011, 2016, Oracle and/or its affiliates. All Rights Reserved.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -58,7 +58,7 @@ row_log_allocate(
const ulint* col_map,/*!< in: mapping of old column
numbers to new ones, or NULL if !table */
const char* path) /*!< in: where to create temporary file */
- __attribute__((nonnull(1), warn_unused_result));
+ MY_ATTRIBUTE((nonnull(1), warn_unused_result));
/******************************************************//**
Free the row log for an index that was being created online. */
@@ -67,7 +67,7 @@ void
row_log_free(
/*=========*/
row_log_t*& log) /*!< in,own: row log */
- __attribute__((nonnull));
+ MY_ATTRIBUTE((nonnull));
/******************************************************//**
Free the row log for an index on which online creation was aborted. */
@@ -76,7 +76,7 @@ void
row_log_abort_sec(
/*==============*/
dict_index_t* index) /*!< in/out: index (x-latched) */
- __attribute__((nonnull));
+ MY_ATTRIBUTE((nonnull));
/******************************************************//**
Try to log an operation to a secondary index that is
@@ -91,7 +91,7 @@ row_log_online_op_try(
const dtuple_t* tuple, /*!< in: index tuple */
trx_id_t trx_id) /*!< in: transaction ID for insert,
or 0 for delete */
- __attribute__((nonnull, warn_unused_result));
+ MY_ATTRIBUTE((nonnull, warn_unused_result));
/******************************************************//**
Logs an operation to a secondary index that is (or was) being created. */
UNIV_INTERN
@@ -102,7 +102,7 @@ row_log_online_op(
const dtuple_t* tuple, /*!< in: index tuple */
trx_id_t trx_id) /*!< in: transaction ID for insert,
or 0 for delete */
- UNIV_COLD __attribute__((nonnull));
+ UNIV_COLD MY_ATTRIBUTE((nonnull));
/******************************************************//**
Gets the error status of the online index rebuild log.
@@ -113,7 +113,7 @@ row_log_table_get_error(
/*====================*/
const dict_index_t* index) /*!< in: clustered index of a table
that is being rebuilt online */
- __attribute__((nonnull, warn_unused_result));
+ MY_ATTRIBUTE((nonnull, warn_unused_result));
/******************************************************//**
Logs a delete operation to a table that is being rebuilt.
@@ -129,7 +129,7 @@ row_log_table_delete(
const ulint* offsets,/*!< in: rec_get_offsets(rec,index) */
const byte* sys) /*!< in: DB_TRX_ID,DB_ROLL_PTR that should
be logged, or NULL to use those in rec */
- UNIV_COLD __attribute__((nonnull(1,2,3)));
+ UNIV_COLD MY_ATTRIBUTE((nonnull(1,2,3)));
/******************************************************//**
Logs an update operation to a table that is being rebuilt.
@@ -145,7 +145,7 @@ row_log_table_update(
const ulint* offsets,/*!< in: rec_get_offsets(rec,index) */
const dtuple_t* old_pk) /*!< in: row_log_table_get_pk()
before the update */
- UNIV_COLD __attribute__((nonnull(1,2,3)));
+ UNIV_COLD MY_ATTRIBUTE((nonnull(1,2,3)));
/******************************************************//**
Constructs the old PRIMARY KEY and DB_TRX_ID,DB_ROLL_PTR
@@ -165,7 +165,7 @@ row_log_table_get_pk(
byte* sys, /*!< out: DB_TRX_ID,DB_ROLL_PTR for
row_log_table_delete(), or NULL */
mem_heap_t** heap) /*!< in/out: memory heap where allocated */
- UNIV_COLD __attribute__((nonnull(1,2,5), warn_unused_result));
+ UNIV_COLD MY_ATTRIBUTE((nonnull(1,2,5), warn_unused_result));
/******************************************************//**
Logs an insert to a table that is being rebuilt.
@@ -179,7 +179,7 @@ row_log_table_insert(
dict_index_t* index, /*!< in/out: clustered index, S-latched
or X-latched */
const ulint* offsets)/*!< in: rec_get_offsets(rec,index) */
- UNIV_COLD __attribute__((nonnull));
+ UNIV_COLD MY_ATTRIBUTE((nonnull));
/******************************************************//**
Notes that a BLOB is being freed during online ALTER TABLE. */
UNIV_INTERN
@@ -188,7 +188,7 @@ row_log_table_blob_free(
/*====================*/
dict_index_t* index, /*!< in/out: clustered index, X-latched */
ulint page_no)/*!< in: starting page number of the BLOB */
- UNIV_COLD __attribute__((nonnull));
+ UNIV_COLD MY_ATTRIBUTE((nonnull));
/******************************************************//**
Notes that a BLOB is being allocated during online ALTER TABLE. */
UNIV_INTERN
@@ -197,7 +197,7 @@ row_log_table_blob_alloc(
/*=====================*/
dict_index_t* index, /*!< in/out: clustered index, X-latched */
ulint page_no)/*!< in: starting page number of the BLOB */
- UNIV_COLD __attribute__((nonnull));
+ UNIV_COLD MY_ATTRIBUTE((nonnull));
/******************************************************//**
Apply the row_log_table log to a table upon completing rebuild.
@return DB_SUCCESS, or error code on failure */
@@ -210,7 +210,7 @@ row_log_table_apply(
/*!< in: old table */
struct TABLE* table) /*!< in/out: MySQL table
(for reporting duplicates) */
- __attribute__((nonnull, warn_unused_result));
+ MY_ATTRIBUTE((nonnull, warn_unused_result));
/******************************************************//**
Get the latest transaction ID that has invoked row_log_online_op()
@@ -221,7 +221,7 @@ trx_id_t
row_log_get_max_trx(
/*================*/
dict_index_t* index) /*!< in: index, must be locked */
- __attribute__((nonnull, warn_unused_result));
+ MY_ATTRIBUTE((nonnull, warn_unused_result));
/******************************************************//**
Merge the row log to the index upon completing index creation.
@@ -235,7 +235,7 @@ row_log_apply(
dict_index_t* index, /*!< in/out: secondary index */
struct TABLE* table) /*!< in/out: MySQL table
(for reporting duplicates) */
- __attribute__((nonnull, warn_unused_result));
+ MY_ATTRIBUTE((nonnull, warn_unused_result));
#ifndef UNIV_NONINL
#include "row0log.ic"
diff --git a/storage/innobase/include/row0merge.h b/storage/innobase/include/row0merge.h
index 53164b5197f..04d4010ad48 100644
--- a/storage/innobase/include/row0merge.h
+++ b/storage/innobase/include/row0merge.h
@@ -1,6 +1,6 @@
/*****************************************************************************
-Copyright (c) 2005, 2015, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 2005, 2016, Oracle and/or its affiliates. All Rights Reserved.
Copyright (c) 2015, 2016, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify it under
@@ -144,7 +144,7 @@ row_merge_dup_report(
/*=================*/
row_merge_dup_t* dup, /*!< in/out: for reporting duplicates */
const dfield_t* entry) /*!< in: duplicate index entry */
- __attribute__((nonnull));
+ MY_ATTRIBUTE((nonnull));
/*********************************************************************//**
Sets an exclusive lock on a table, for the duration of creating indexes.
@return error code or DB_SUCCESS */
@@ -155,7 +155,7 @@ row_merge_lock_table(
trx_t* trx, /*!< in/out: transaction */
dict_table_t* table, /*!< in: table to lock */
enum lock_mode mode) /*!< in: LOCK_X or LOCK_S */
- __attribute__((nonnull, warn_unused_result));
+ MY_ATTRIBUTE((nonnull, warn_unused_result));
/*********************************************************************//**
Drop indexes that were created before an error occurred.
The data dictionary must have been locked exclusively by the caller,
@@ -166,7 +166,7 @@ row_merge_drop_indexes_dict(
/*========================*/
trx_t* trx, /*!< in/out: dictionary transaction */
table_id_t table_id)/*!< in: table identifier */
- __attribute__((nonnull));
+ MY_ATTRIBUTE((nonnull));
/*********************************************************************//**
Drop those indexes which were created before an error occurred.
The data dictionary must have been locked exclusively by the caller,
@@ -179,7 +179,7 @@ row_merge_drop_indexes(
dict_table_t* table, /*!< in/out: table containing the indexes */
ibool locked) /*!< in: TRUE=table locked,
FALSE=may need to do a lazy drop */
- __attribute__((nonnull));
+ MY_ATTRIBUTE((nonnull));
/*********************************************************************//**
Drop all partially created indexes during crash recovery. */
UNIV_INTERN
@@ -195,7 +195,7 @@ UNIV_INTERN
int
row_merge_file_create_low(
const char* path)
- __attribute__((warn_unused_result));
+ MY_ATTRIBUTE((warn_unused_result));
/*********************************************************************//**
Destroy a merge file. And de-register the file from Performance Schema
if UNIV_PFS_IO is defined. */
@@ -231,7 +231,7 @@ row_merge_rename_tables_dict(
old_table->name */
const char* tmp_name, /*!< in: new name for old_table */
trx_t* trx) /*!< in/out: dictionary transaction */
- __attribute__((nonnull, warn_unused_result));
+ MY_ATTRIBUTE((nonnull, warn_unused_result));
/*********************************************************************//**
Rename an index in the dictionary that was created. The data
@@ -245,7 +245,7 @@ row_merge_rename_index_to_add(
trx_t* trx, /*!< in/out: transaction */
table_id_t table_id, /*!< in: table identifier */
index_id_t index_id) /*!< in: index identifier */
- __attribute__((nonnull));
+ MY_ATTRIBUTE((nonnull));
/*********************************************************************//**
Rename an index in the dictionary that is to be dropped. The data
dictionary must have been locked exclusively by the caller, because
@@ -258,7 +258,7 @@ row_merge_rename_index_to_drop(
trx_t* trx, /*!< in/out: transaction */
table_id_t table_id, /*!< in: table identifier */
index_id_t index_id) /*!< in: index identifier */
- __attribute__((nonnull));
+ MY_ATTRIBUTE((nonnull));
/*********************************************************************//**
Create the index and load in to the dictionary.
@return index, or NULL on error */
@@ -294,7 +294,7 @@ row_merge_drop_table(
/*=================*/
trx_t* trx, /*!< in: transaction */
dict_table_t* table) /*!< in: table instance to drop */
- __attribute__((nonnull));
+ MY_ATTRIBUTE((nonnull));
/*********************************************************************//**
Build indexes on a table by reading a clustered index,
creating a temporary file containing index entries, merge sorting
@@ -327,7 +327,7 @@ row_merge_build_indexes(
AUTO_INCREMENT column, or
ULINT_UNDEFINED if none is added */
ib_sequence_t& sequence) /*!< in/out: autoinc sequence */
- __attribute__((nonnull(1,2,3,5,6,8), warn_unused_result));
+ MY_ATTRIBUTE((nonnull(1,2,3,5,6,8), warn_unused_result));
/********************************************************************//**
Write a buffer to a block. */
UNIV_INTERN
@@ -337,7 +337,7 @@ row_merge_buf_write(
const row_merge_buf_t* buf, /*!< in: sorted buffer */
const merge_file_t* of, /*!< in: output file */
row_merge_block_t* block) /*!< out: buffer for writing to file */
- __attribute__((nonnull));
+ MY_ATTRIBUTE((nonnull));
/********************************************************************//**
Sort a buffer. */
UNIV_INTERN
@@ -347,7 +347,7 @@ row_merge_buf_sort(
row_merge_buf_t* buf, /*!< in/out: sort buffer */
row_merge_dup_t* dup) /*!< in/out: reporter of duplicates
(NULL if non-unique index) */
- __attribute__((nonnull(1)));
+ MY_ATTRIBUTE((nonnull(1)));
/********************************************************************//**
Write a merge block to the file system.
@return TRUE if request was successful, FALSE if fail */
@@ -371,7 +371,7 @@ row_merge_buf_t*
row_merge_buf_empty(
/*================*/
row_merge_buf_t* buf) /*!< in,own: sort buffer */
- __attribute__((warn_unused_result, nonnull));
+ MY_ATTRIBUTE((warn_unused_result, nonnull));
/** Create a merge file in the given location.
@param[out] merge_file merge file structure
@@ -412,7 +412,7 @@ row_merge_buf_t*
row_merge_buf_create(
/*=================*/
dict_index_t* index) /*!< in: secondary index */
- __attribute__((warn_unused_result, nonnull, malloc));
+ MY_ATTRIBUTE((warn_unused_result, nonnull, malloc));
/*********************************************************************//**
Deallocate a sort buffer. */
UNIV_INTERN
@@ -420,7 +420,7 @@ void
row_merge_buf_free(
/*===============*/
row_merge_buf_t* buf) /*!< in,own: sort buffer to be freed */
- __attribute__((nonnull));
+ MY_ATTRIBUTE((nonnull));
/*********************************************************************//**
Destroy a merge file. */
UNIV_INTERN
@@ -428,7 +428,7 @@ void
row_merge_file_destroy(
/*===================*/
merge_file_t* merge_file) /*!< in/out: merge file structure */
- __attribute__((nonnull));
+ MY_ATTRIBUTE((nonnull));
/********************************************************************//**
Read a merge block from the file system.
@return TRUE if request was successful, FALSE if fail */
diff --git a/storage/innobase/include/row0mysql.h b/storage/innobase/include/row0mysql.h
index 7fda64e21b7..71e3b9bb19e 100644
--- a/storage/innobase/include/row0mysql.h
+++ b/storage/innobase/include/row0mysql.h
@@ -1,6 +1,6 @@
/*****************************************************************************
-Copyright (c) 2000, 2012, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 2000, 2016, Oracle and/or its affiliates. All Rights Reserved.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -168,7 +168,7 @@ row_mysql_handle_errors(
trx_t* trx, /*!< in: transaction */
que_thr_t* thr, /*!< in: query thread, or NULL */
trx_savept_t* savept) /*!< in: savepoint, or NULL */
- __attribute__((nonnull(1,2)));
+ MY_ATTRIBUTE((nonnull(1,2)));
/********************************************************************//**
Create a prebuilt struct for a MySQL table handle.
@return own: a prebuilt struct */
@@ -210,7 +210,7 @@ row_lock_table_autoinc_for_mysql(
/*=============================*/
row_prebuilt_t* prebuilt) /*!< in: prebuilt struct in the MySQL
table handle */
- __attribute__((nonnull, warn_unused_result));
+ MY_ATTRIBUTE((nonnull, warn_unused_result));
/*********************************************************************//**
Sets a table lock on the table mentioned in prebuilt.
@return error code or DB_SUCCESS */
@@ -226,7 +226,7 @@ row_lock_table_for_mysql(
prebuilt->select_lock_type */
ulint mode) /*!< in: lock mode of table
(ignored if table==NULL) */
- __attribute__((nonnull(1)));
+ MY_ATTRIBUTE((nonnull(1)));
/*********************************************************************//**
Does an insert for MySQL.
@return error code or DB_SUCCESS */
@@ -237,7 +237,7 @@ row_insert_for_mysql(
byte* mysql_rec, /*!< in: row in the MySQL format */
row_prebuilt_t* prebuilt) /*!< in: prebuilt struct in MySQL
handle */
- __attribute__((nonnull, warn_unused_result));
+ MY_ATTRIBUTE((nonnull, warn_unused_result));
/*********************************************************************//**
Builds a dummy query graph used in selects. */
UNIV_INTERN
@@ -277,7 +277,7 @@ row_update_for_mysql(
the MySQL format */
row_prebuilt_t* prebuilt) /*!< in: prebuilt struct in MySQL
handle */
- __attribute__((nonnull, warn_unused_result));
+ MY_ATTRIBUTE((nonnull, warn_unused_result));
/*********************************************************************//**
This can only be used when srv_locks_unsafe_for_binlog is TRUE or this
session is using a READ COMMITTED or READ UNCOMMITTED isolation level.
@@ -298,7 +298,7 @@ row_unlock_for_mysql(
the records under pcur and
clust_pcur, and we do not need
to reposition the cursors. */
- __attribute__((nonnull));
+ MY_ATTRIBUTE((nonnull));
/*********************************************************************//**
Checks if a table name contains the string "/#sql" which denotes temporary
tables in MySQL.
@@ -307,7 +307,7 @@ UNIV_INTERN
bool
row_is_mysql_tmp_table_name(
/*========================*/
- const char* name) __attribute__((warn_unused_result));
+ const char* name) MY_ATTRIBUTE((warn_unused_result));
/*!< in: table name in the form
'database/tablename' */
@@ -332,7 +332,7 @@ row_update_cascade_for_mysql(
upd_node_t* node, /*!< in: update node used in the cascade
or set null operation */
dict_table_t* table) /*!< in: table where we do the operation */
- __attribute__((nonnull, warn_unused_result));
+ MY_ATTRIBUTE((nonnull, warn_unused_result));
/*********************************************************************//**
Locks the data dictionary exclusively for performing a table create or other
data dictionary modification operation. */
@@ -409,7 +409,7 @@ row_create_index_for_mysql(
index columns, which are
then checked for not being too
large. */
- __attribute__((nonnull(1,2), warn_unused_result));
+ MY_ATTRIBUTE((nonnull(1,2), warn_unused_result));
/*********************************************************************//**
Scans a table create SQL string and adds to the data dictionary
the foreign key constraints declared in the string. This function
@@ -435,7 +435,7 @@ row_table_add_foreign_constraints(
ibool reject_fks) /*!< in: if TRUE, fail with error
code DB_CANNOT_ADD_CONSTRAINT if
any foreign keys are found. */
- __attribute__((nonnull, warn_unused_result));
+ MY_ATTRIBUTE((nonnull, warn_unused_result));
/*********************************************************************//**
The master thread in srv0srv.cc calls this regularly to drop tables which
we must drop in background after queries to them have ended. Such lazy
@@ -464,7 +464,7 @@ row_mysql_lock_table(
dict_table_t* table, /*!< in: table to lock */
enum lock_mode mode, /*!< in: LOCK_X or LOCK_S */
const char* op_info) /*!< in: string for trx->op_info */
- __attribute__((nonnull, warn_unused_result));
+ MY_ATTRIBUTE((nonnull, warn_unused_result));
/*********************************************************************//**
Truncates a table for MySQL.
@@ -475,7 +475,7 @@ row_truncate_table_for_mysql(
/*=========================*/
dict_table_t* table, /*!< in: table handle */
trx_t* trx) /*!< in: transaction handle */
- __attribute__((nonnull, warn_unused_result));
+ MY_ATTRIBUTE((nonnull, warn_unused_result));
/*********************************************************************//**
Drops a table for MySQL. If the name of the dropped table ends in
one of "innodb_monitor", "innodb_lock_monitor", "innodb_tablespace_monitor",
@@ -491,10 +491,13 @@ row_drop_table_for_mysql(
const char* name, /*!< in: table name */
trx_t* trx, /*!< in: dictionary transaction handle */
bool drop_db,/*!< in: true=dropping whole database */
+ ibool create_failed,/*!<in: TRUE=create table failed
+ because e.g. foreign key column
+ type mismatch. */
bool nonatomic = true)
/*!< in: whether it is permitted
to release and reacquire dict_operation_lock */
- __attribute__((nonnull));
+ MY_ATTRIBUTE((nonnull));
/*********************************************************************//**
Drop all temporary tables during crash recovery. */
UNIV_INTERN
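The row_drop_table_for_mysql() hunk above also widens the signature with a create_failed flag, letting callers distinguish a drop that cleans up after a failed CREATE TABLE (e.g. a foreign key column type mismatch, per the new parameter comment) from an ordinary DROP TABLE. A hypothetical call-site sketch; every name except the function itself is an assumption for illustration:

/* #include "row0mysql.h" -- declares row_drop_table_for_mysql() */

/* Hypothetical cleanup helper: the table was created, a foreign
   key column type mismatch was then detected, so drop it again
   with create_failed = TRUE (nonatomic keeps its default). */
static dberr_t
drop_after_failed_create(const char* normalized_name, trx_t* trx)
{
	return(row_drop_table_for_mysql(
		normalized_name, /* table name in "db/table" form */
		trx,             /* dictionary transaction handle */
		false,           /* drop_db: not dropping a database */
		TRUE));          /* create_failed: CREATE rolled back */
}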
@@ -513,7 +516,7 @@ row_discard_tablespace_for_mysql(
/*=============================*/
const char* name, /*!< in: table name */
trx_t* trx) /*!< in: transaction handle */
- __attribute__((nonnull, warn_unused_result));
+ MY_ATTRIBUTE((nonnull, warn_unused_result));
/*****************************************************************//**
Imports a tablespace. The space id in the .ibd file must match the space id
of the table in the data dictionary.
@@ -524,7 +527,7 @@ row_import_tablespace_for_mysql(
/*============================*/
dict_table_t* table, /*!< in/out: table */
row_prebuilt_t* prebuilt) /*!< in: prebuilt struct in MySQL */
- __attribute__((nonnull, warn_unused_result));
+ MY_ATTRIBUTE((nonnull, warn_unused_result));
/*********************************************************************//**
Drops a database for MySQL.
@return error code or DB_SUCCESS */
@@ -534,7 +537,7 @@ row_drop_database_for_mysql(
/*========================*/
const char* name, /*!< in: database name which ends to '/' */
trx_t* trx) /*!< in: transaction handle */
- __attribute__((nonnull));
+ MY_ATTRIBUTE((nonnull));
/*********************************************************************//**
Renames a table for MySQL.
@return error code or DB_SUCCESS */
@@ -546,7 +549,7 @@ row_rename_table_for_mysql(
const char* new_name, /*!< in: new table name */
trx_t* trx, /*!< in/out: transaction */
bool commit) /*!< in: whether to commit trx */
- __attribute__((nonnull, warn_unused_result));
+ MY_ATTRIBUTE((nonnull, warn_unused_result));
/*********************************************************************//**
Checks that the index contains entries in an ascending order, unique
constraint is not broken, and calculates the number of index entries
@@ -561,7 +564,7 @@ row_check_index_for_mysql(
const dict_index_t* index, /*!< in: index */
ulint* n_rows) /*!< out: number of entries
seen in the consistent read */
- __attribute__((nonnull, warn_unused_result));
+ MY_ATTRIBUTE((nonnull, warn_unused_result));
/*********************************************************************//**
Determines if a table is a magic monitor table.
@return true if monitor table */
@@ -571,7 +574,7 @@ row_is_magic_monitor_table(
/*=======================*/
const char* table_name) /*!< in: name of the table, in the
form database/table_name */
- __attribute__((nonnull, warn_unused_result));
+ MY_ATTRIBUTE((nonnull, warn_unused_result));
/*********************************************************************//**
Initialize this module */
UNIV_INTERN
@@ -596,7 +599,7 @@ row_mysql_table_id_reassign(
dict_table_t* table, /*!< in/out: table */
trx_t* trx, /*!< in/out: transaction */
table_id_t* new_id) /*!< out: new table id */
- __attribute__((nonnull, warn_unused_result));
+ MY_ATTRIBUTE((nonnull, warn_unused_result));
/* A struct describing a place for an individual column in the MySQL
row format which is presented to the table handler in ha_innobase.
diff --git a/storage/innobase/include/row0purge.h b/storage/innobase/include/row0purge.h
index 888289a6c79..5df899bc399 100644
--- a/storage/innobase/include/row0purge.h
+++ b/storage/innobase/include/row0purge.h
@@ -1,6 +1,6 @@
/*****************************************************************************
-Copyright (c) 1997, 2015, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 1997, 2016, Oracle and/or its affiliates. All Rights Reserved.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -47,7 +47,7 @@ row_purge_node_create(
que_thr_t* parent, /*!< in: parent node, i.e., a
thr node */
mem_heap_t* heap) /*!< in: memory heap where created */
- __attribute__((nonnull, warn_unused_result));
+ MY_ATTRIBUTE((nonnull, warn_unused_result));
/***********************************************************//**
Determines if it is possible to remove a secondary index entry.
Removal is possible if the secondary index entry does not refer to any
@@ -70,7 +70,7 @@ row_purge_poss_sec(
purge_node_t* node, /*!< in/out: row purge node */
dict_index_t* index, /*!< in: secondary index */
const dtuple_t* entry) /*!< in: secondary index entry */
- __attribute__((nonnull, warn_unused_result));
+ MY_ATTRIBUTE((nonnull, warn_unused_result));
/***************************************************************
Does the purge operation for a single undo log record. This is a high-level
function used in an SQL execution graph.
@@ -80,7 +80,7 @@ que_thr_t*
row_purge_step(
/*===========*/
que_thr_t* thr) /*!< in: query thread */
- __attribute__((nonnull, warn_unused_result));
+ MY_ATTRIBUTE((nonnull, warn_unused_result));
/* Purge node structure */
diff --git a/storage/innobase/include/row0quiesce.h b/storage/innobase/include/row0quiesce.h
index 1d6d11291b8..35d8184d33c 100644
--- a/storage/innobase/include/row0quiesce.h
+++ b/storage/innobase/include/row0quiesce.h
@@ -1,6 +1,6 @@
/*****************************************************************************
-Copyright (c) 2012, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 2012, 2016, Oracle and/or its affiliates. All Rights Reserved.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -43,7 +43,7 @@ row_quiesce_table_start(
/*====================*/
dict_table_t* table, /*!< in: quiesce this table */
trx_t* trx) /*!< in/out: transaction/session */
- __attribute__((nonnull));
+ MY_ATTRIBUTE((nonnull));
/*********************************************************************//**
Set a table's quiesce state.
@@ -55,7 +55,7 @@ row_quiesce_set_state(
dict_table_t* table, /*!< in: quiesce this table */
ib_quiesce_t state, /*!< in: quiesce state to set */
trx_t* trx) /*!< in/out: transaction */
- __attribute__((nonnull, warn_unused_result));
+ MY_ATTRIBUTE((nonnull, warn_unused_result));
/*********************************************************************//**
Cleanup after table quiesce. */
@@ -65,7 +65,7 @@ row_quiesce_table_complete(
/*=======================*/
dict_table_t* table, /*!< in: quiesce this table */
trx_t* trx) /*!< in/out: transaction/session */
- __attribute__((nonnull));
+ MY_ATTRIBUTE((nonnull));
#ifndef UNIV_NONINL
#include "row0quiesce.ic"
diff --git a/storage/innobase/include/row0row.h b/storage/innobase/include/row0row.h
index a4e5e0dd2fa..b04068c5a5d 100644
--- a/storage/innobase/include/row0row.h
+++ b/storage/innobase/include/row0row.h
@@ -1,6 +1,6 @@
/*****************************************************************************
-Copyright (c) 1996, 2012, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 1996, 2016, Oracle and/or its affiliates. All Rights Reserved.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -47,7 +47,7 @@ row_get_trx_id_offset(
/*==================*/
const dict_index_t* index, /*!< in: clustered index */
const ulint* offsets)/*!< in: record offsets */
- __attribute__((nonnull, warn_unused_result));
+ MY_ATTRIBUTE((nonnull, warn_unused_result));
/*********************************************************************//**
Reads the trx id field from a clustered index record.
@return value of the field */
@@ -58,7 +58,7 @@ row_get_rec_trx_id(
const rec_t* rec, /*!< in: record */
const dict_index_t* index, /*!< in: clustered index */
const ulint* offsets)/*!< in: rec_get_offsets(rec, index) */
- __attribute__((nonnull, warn_unused_result));
+ MY_ATTRIBUTE((nonnull, warn_unused_result));
/*********************************************************************//**
Reads the roll pointer field from a clustered index record.
@return value of the field */
@@ -69,7 +69,7 @@ row_get_rec_roll_ptr(
const rec_t* rec, /*!< in: record */
const dict_index_t* index, /*!< in: clustered index */
const ulint* offsets)/*!< in: rec_get_offsets(rec, index) */
- __attribute__((nonnull, warn_unused_result));
+ MY_ATTRIBUTE((nonnull, warn_unused_result));
/*****************************************************************//**
When an insert or purge to a table is performed, this function builds
the entry to be inserted into or purged from an index on the table.
@@ -88,7 +88,7 @@ row_build_index_entry_low(
mem_heap_t* heap) /*!< in: memory heap from which
the memory for the index entry
is allocated */
- __attribute__((warn_unused_result, nonnull(1,3,4)));
+ MY_ATTRIBUTE((warn_unused_result, nonnull(1,3,4)));
/*****************************************************************//**
When an insert or purge to a table is performed, this function builds
the entry to be inserted into or purged from an index on the table.
@@ -107,7 +107,7 @@ row_build_index_entry(
mem_heap_t* heap) /*!< in: memory heap from which
the memory for the index entry
is allocated */
- __attribute__((warn_unused_result, nonnull(1,3,4)));
+ MY_ATTRIBUTE((warn_unused_result, nonnull(1,3,4)));
/*******************************************************************//**
An inverse function to row_build_index_entry. Builds a row from a
record in a clustered index.
@@ -155,7 +155,7 @@ row_build(
prefixes, or NULL */
mem_heap_t* heap) /*!< in: memory heap from which
the memory needed is allocated */
- __attribute__((nonnull(2,3,9)));
+ MY_ATTRIBUTE((nonnull(2,3,9)));
/*******************************************************************//**
Converts an index record to a typed data tuple.
@return index entry built; does not set info_bits, and the data fields
@@ -171,7 +171,7 @@ row_rec_to_index_entry_low(
stored columns */
mem_heap_t* heap) /*!< in: memory heap from which
the memory needed is allocated */
- __attribute__((nonnull, warn_unused_result));
+ MY_ATTRIBUTE((nonnull, warn_unused_result));
/*******************************************************************//**
Converts an index record to a typed data tuple. NOTE that externally
stored (often big) fields are NOT copied to heap.
@@ -187,7 +187,7 @@ row_rec_to_index_entry(
stored columns */
mem_heap_t* heap) /*!< in: memory heap from which
the memory needed is allocated */
- __attribute__((nonnull, warn_unused_result));
+ MY_ATTRIBUTE((nonnull, warn_unused_result));
/*******************************************************************//**
Builds from a secondary index record a row reference with which we can
search the clustered index record.
@@ -210,7 +210,7 @@ row_build_row_ref(
as long as the row reference is used! */
mem_heap_t* heap) /*!< in: memory heap from which the memory
needed is allocated */
- __attribute__((nonnull, warn_unused_result));
+ MY_ATTRIBUTE((nonnull, warn_unused_result));
/*******************************************************************//**
Builds from a secondary index record a row reference with which we can
search the clustered index record. */
@@ -232,7 +232,7 @@ row_build_row_ref_in_tuple(
ulint* offsets,/*!< in: rec_get_offsets(rec, index)
or NULL */
trx_t* trx) /*!< in: transaction or NULL */
- __attribute__((nonnull(1,2,3)));
+ MY_ATTRIBUTE((nonnull(1,2,3)));
/*******************************************************************//**
Builds from a secondary index record a row reference with which we can
search the clustered index record. */
@@ -263,7 +263,7 @@ row_search_on_row_ref(
const dict_table_t* table, /*!< in: table */
const dtuple_t* ref, /*!< in: row reference */
mtr_t* mtr) /*!< in/out: mtr */
- __attribute__((nonnull, warn_unused_result));
+ MY_ATTRIBUTE((nonnull, warn_unused_result));
/*********************************************************************//**
Fetches the clustered index record for a secondary index record. The latches
on the secondary index record are preserved.
@@ -277,7 +277,7 @@ row_get_clust_rec(
dict_index_t* index, /*!< in: secondary index */
dict_index_t** clust_index,/*!< out: clustered index */
mtr_t* mtr) /*!< in: mtr */
- __attribute__((nonnull, warn_unused_result));
+ MY_ATTRIBUTE((nonnull, warn_unused_result));
/** Result of row_search_index_entry */
enum row_search_result {
@@ -305,7 +305,7 @@ row_search_index_entry(
btr_pcur_t* pcur, /*!< in/out: persistent cursor, which must
be closed by the caller */
mtr_t* mtr) /*!< in: mtr */
- __attribute__((nonnull, warn_unused_result));
+ MY_ATTRIBUTE((nonnull, warn_unused_result));
#define ROW_COPY_DATA 1
#define ROW_COPY_POINTERS 2
@@ -334,7 +334,7 @@ row_raw_format(
char* buf, /*!< out: output buffer */
ulint buf_size) /*!< in: output buffer size
in bytes */
- __attribute__((nonnull, warn_unused_result));
+ MY_ATTRIBUTE((nonnull, warn_unused_result));
#ifndef UNIV_NONINL
#include "row0row.ic"
diff --git a/storage/innobase/include/row0sel.h b/storage/innobase/include/row0sel.h
index c8be80f89d9..fd5bc755a22 100644
--- a/storage/innobase/include/row0sel.h
+++ b/storage/innobase/include/row0sel.h
@@ -1,6 +1,6 @@
/*****************************************************************************
-Copyright (c) 1997, 2012, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 1997, 2016, Oracle and/or its affiliates. All Rights Reserved.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -168,7 +168,7 @@ row_search_for_mysql(
then prebuilt must have a pcur
with stored position! In opening of a
cursor 'direction' should be 0. */
- __attribute__((nonnull, warn_unused_result));
+ MY_ATTRIBUTE((nonnull, warn_unused_result));
/*******************************************************************//**
Checks if MySQL is currently allowed to retrieve a consistent read
result for this table, or to store it in the query cache.
@@ -190,7 +190,7 @@ row_search_max_autoinc(
dict_index_t* index, /*!< in: index to search */
const char* col_name, /*!< in: autoinc column name */
ib_uint64_t* value) /*!< out: AUTOINC value read */
- __attribute__((nonnull, warn_unused_result));
+ MY_ATTRIBUTE((nonnull, warn_unused_result));
/** A structure for caching column values for prefetched rows */
struct sel_buf_t{
diff --git a/storage/innobase/include/row0uins.h b/storage/innobase/include/row0uins.h
index ebf4881208a..89e334e5433 100644
--- a/storage/innobase/include/row0uins.h
+++ b/storage/innobase/include/row0uins.h
@@ -1,6 +1,6 @@
/*****************************************************************************
-Copyright (c) 1997, 2012, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 1997, 2016, Oracle and/or its affiliates. All Rights Reserved.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -46,7 +46,7 @@ dberr_t
row_undo_ins(
/*=========*/
undo_node_t* node) /*!< in: row undo node */
- __attribute__((nonnull, warn_unused_result));
+ MY_ATTRIBUTE((nonnull, warn_unused_result));
#ifndef UNIV_NONINL
#include "row0uins.ic"
#endif
diff --git a/storage/innobase/include/row0umod.h b/storage/innobase/include/row0umod.h
index f89d5a334fc..4f1d8e1f66c 100644
--- a/storage/innobase/include/row0umod.h
+++ b/storage/innobase/include/row0umod.h
@@ -1,6 +1,6 @@
/*****************************************************************************
-Copyright (c) 1997, 2012, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 1997, 2016, Oracle and/or its affiliates. All Rights Reserved.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -43,7 +43,7 @@ row_undo_mod(
/*=========*/
undo_node_t* node, /*!< in: row undo node */
que_thr_t* thr) /*!< in: query thread */
- __attribute__((nonnull, warn_unused_result));
+ MY_ATTRIBUTE((nonnull, warn_unused_result));
#ifndef UNIV_NONINL
#include "row0umod.ic"
diff --git a/storage/innobase/include/row0upd.h b/storage/innobase/include/row0upd.h
index 27dedeb65a7..e59ec58b63c 100644
--- a/storage/innobase/include/row0upd.h
+++ b/storage/innobase/include/row0upd.h
@@ -1,6 +1,6 @@
/*****************************************************************************
-Copyright (c) 1996, 2012, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 1996, 2016, Oracle and/or its affiliates. All Rights Reserved.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -91,7 +91,7 @@ upd_get_field_by_field_no(
/*======================*/
const upd_t* update, /*!< in: update vector */
ulint no) /*!< in: field_no */
- __attribute__((nonnull, pure));
+ MY_ATTRIBUTE((nonnull, pure));
/*********************************************************************//**
Writes into the redo log the values of trx id and roll ptr and enough info
to determine their positions within a clustered index record.
@@ -174,7 +174,7 @@ bool
row_upd_changes_disowned_external(
/*==============================*/
const upd_t* update) /*!< in: update vector */
- __attribute__((nonnull, warn_unused_result));
+ MY_ATTRIBUTE((nonnull, warn_unused_result));
#endif /* !UNIV_HOTBACKUP */
/***********************************************************//**
Replaces the new column values stored in the update vector to the
@@ -207,7 +207,7 @@ row_upd_build_sec_rec_difference_binary(
const ulint* offsets,/*!< in: rec_get_offsets(rec, index) */
const dtuple_t* entry, /*!< in: entry to insert */
mem_heap_t* heap) /*!< in: memory heap from which allocated */
- __attribute__((warn_unused_result, nonnull));
+ MY_ATTRIBUTE((warn_unused_result, nonnull));
/***************************************************************//**
Builds an update vector from those fields, excluding the roll ptr and
trx id fields, which in an index entry differ from a record that has
@@ -227,7 +227,7 @@ row_upd_build_difference_binary(
trx_t* trx, /*!< in: transaction (for diagnostics),
or NULL */
mem_heap_t* heap) /*!< in: memory heap from which allocated */
- __attribute__((nonnull(1,2,3,7), warn_unused_result));
+ MY_ATTRIBUTE((nonnull(1,2,3,7), warn_unused_result));
/***********************************************************//**
Replaces the new column values stored in the update vector to the index entry
given. */
@@ -250,7 +250,7 @@ row_upd_index_replace_new_col_vals_index_pos(
does not work for non-clustered indexes. */
mem_heap_t* heap) /*!< in: memory heap for allocating and
copying the new values */
- __attribute__((nonnull));
+ MY_ATTRIBUTE((nonnull));
/***********************************************************//**
Replaces the new column values stored in the update vector to the index entry
given. */
@@ -269,7 +269,7 @@ row_upd_index_replace_new_col_vals(
an upd_field is the clustered index position */
mem_heap_t* heap) /*!< in: memory heap for allocating and
copying the new values */
- __attribute__((nonnull));
+ MY_ATTRIBUTE((nonnull));
/***********************************************************//**
Replaces the new column values stored in the update vector. */
UNIV_INTERN
@@ -311,7 +311,7 @@ row_upd_changes_ord_field_binary_func(
compile time */
const row_ext_t*ext) /*!< NULL, or prefixes of the externally
stored columns in the old row */
- __attribute__((nonnull(1,2), warn_unused_result));
+ MY_ATTRIBUTE((nonnull(1,2), warn_unused_result));
#ifdef UNIV_DEBUG
# define row_upd_changes_ord_field_binary(index,update,thr,row,ext) \
row_upd_changes_ord_field_binary_func(index,update,thr,row,ext)
@@ -338,7 +338,7 @@ row_upd_changes_doc_id(
/*===================*/
dict_table_t* table, /*!< in: table */
upd_field_t* upd_field) /*!< in: field to check */
- __attribute__((nonnull, warn_unused_result));
+ MY_ATTRIBUTE((nonnull, warn_unused_result));
/***********************************************************//**
Checks if an update vector changes an ordering field of an index record.
This function is fast if the update vector is short or the number of ordering
diff --git a/storage/innobase/include/row0vers.h b/storage/innobase/include/row0vers.h
index 1df5b4d3e98..7b850215701 100644
--- a/storage/innobase/include/row0vers.h
+++ b/storage/innobase/include/row0vers.h
@@ -1,6 +1,6 @@
/*****************************************************************************
-Copyright (c) 1997, 2012, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 1997, 2016, Oracle and/or its affiliates. All Rights Reserved.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -110,7 +110,7 @@ row_vers_build_for_consistent_read(
if the history is missing or the record
does not exist in the view, that is,
it was freshly inserted afterwards */
- __attribute__((nonnull(1,2,3,4,5,6,7)));
+ MY_ATTRIBUTE((nonnull(1,2,3,4,5,6,7)));
/*****************************************************************//**
Constructs the last committed version of a clustered index record,
@@ -136,7 +136,7 @@ row_vers_build_for_semi_consistent_read(
const rec_t** old_vers)/*!< out: rec, old version, or NULL if the
record does not exist in the view, that is,
it was freshly inserted afterwards */
- __attribute__((nonnull(1,2,3,4,5)));
+ MY_ATTRIBUTE((nonnull(1,2,3,4,5)));
#ifndef UNIV_NONINL
diff --git a/storage/innobase/include/srv0srv.h b/storage/innobase/include/srv0srv.h
index 9a6107f4330..04c8cbecf99 100644
--- a/storage/innobase/include/srv0srv.h
+++ b/storage/innobase/include/srv0srv.h
@@ -1,6 +1,6 @@
/*****************************************************************************
-Copyright (c) 1995, 2015, Oracle and/or its affiliates. All rights reserved.
+Copyright (c) 1995, 2016, Oracle and/or its affiliates. All rights reserved.
Copyright (c) 2008, 2009, Google Inc.
Copyright (c) 2009, Percona Inc.
Copyright (c) 2013, 2016, MariaDB Corporation
@@ -858,7 +858,7 @@ UNIV_INTERN
os_thread_ret_t
DECLARE_THREAD(srv_purge_coordinator_thread)(
/*=========================================*/
- void* arg __attribute__((unused))); /*!< in: a dummy parameter
+ void* arg MY_ATTRIBUTE((unused))); /*!< in: a dummy parameter
required by os_thread_create */
/*********************************************************************//**
@@ -868,7 +868,7 @@ UNIV_INTERN
os_thread_ret_t
DECLARE_THREAD(srv_worker_thread)(
/*==============================*/
- void* arg __attribute__((unused))); /*!< in: a dummy parameter
+ void* arg MY_ATTRIBUTE((unused))); /*!< in: a dummy parameter
required by os_thread_create */
} /* extern "C" */
diff --git a/storage/innobase/include/srv0start.h b/storage/innobase/include/srv0start.h
index e1c19982ba5..d2e70f969b7 100644
--- a/storage/innobase/include/srv0start.h
+++ b/storage/innobase/include/srv0start.h
@@ -1,6 +1,6 @@
/*****************************************************************************
-Copyright (c) 1995, 2012, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 1995, 2016, Oracle and/or its affiliates. All Rights Reserved.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -106,7 +106,7 @@ srv_path_copy(
ulint dest_len, /*!< in: max bytes to copy */
const char* basedir, /*!< in: base directory */
const char* table_name) /*!< in: source table name */
- __attribute__((nonnull, warn_unused_result));
+ MY_ATTRIBUTE((nonnull, warn_unused_result));
/*****************************************************************//**
Get the meta-data filename from the table name. */
@@ -117,7 +117,7 @@ srv_get_meta_data_filename(
dict_table_t* table, /*!< in: table */
char* filename, /*!< out: filename */
ulint max_len) /*!< in: filename max length */
- __attribute__((nonnull));
+ MY_ATTRIBUTE((nonnull));
/** Log sequence number at shutdown */
extern lsn_t srv_shutdown_lsn;
diff --git a/storage/innobase/include/sync0arr.h b/storage/innobase/include/sync0arr.h
index 6c3225b1826..880d7d2a473 100644
--- a/storage/innobase/include/sync0arr.h
+++ b/storage/innobase/include/sync0arr.h
@@ -1,6 +1,6 @@
/*****************************************************************************
-Copyright (c) 1995, 2013, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 1995, 2016, Oracle and/or its affiliates. All Rights Reserved.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -110,7 +110,7 @@ sync_array_print_long_waits(
/*========================*/
os_thread_id_t* waiter, /*!< out: longest waiting thread */
const void** sema) /*!< out: longest-waited-for semaphore */
- __attribute__((nonnull));
+ MY_ATTRIBUTE((nonnull));
/********************************************************************//**
Validates the integrity of the wait array. Checks
that the number of reserved cells equals the count variable. */
diff --git a/storage/innobase/include/sync0rw.h b/storage/innobase/include/sync0rw.h
index d212ac17871..b0fa214be81 100644
--- a/storage/innobase/include/sync0rw.h
+++ b/storage/innobase/include/sync0rw.h
@@ -1,6 +1,6 @@
/*****************************************************************************
-Copyright (c) 1995, 2014, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 1995, 2016, Oracle and/or its affiliates. All Rights Reserved.
Copyright (c) 2008, Google Inc.
Portions of this file contain modifications contributed and copyrighted by
@@ -330,7 +330,7 @@ ibool
rw_lock_s_lock_low(
/*===============*/
rw_lock_t* lock, /*!< in: pointer to rw-lock */
- ulint pass __attribute__((unused)),
+ ulint pass MY_ATTRIBUTE((unused)),
/*!< in: pass value; != 0, if the lock will be
passed to another thread to unlock */
const char* file_name, /*!< in: file name where lock requested */
@@ -498,7 +498,7 @@ rw_lock_own(
rw_lock_t* lock, /*!< in: rw-lock */
ulint lock_type) /*!< in: lock type: RW_LOCK_SHARED,
RW_LOCK_EX */
- __attribute__((warn_unused_result));
+ MY_ATTRIBUTE((warn_unused_result));
#endif /* UNIV_SYNC_DEBUG */
/******************************************************************//**
Checks if somebody has locked the rw-lock in the specified mode. */
diff --git a/storage/innobase/include/sync0rw.ic b/storage/innobase/include/sync0rw.ic
index 8c4e938002a..27970188165 100644
--- a/storage/innobase/include/sync0rw.ic
+++ b/storage/innobase/include/sync0rw.ic
@@ -1,6 +1,6 @@
/*****************************************************************************
-Copyright (c) 1995, 2011, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 1995, 2016, Oracle and/or its affiliates. All Rights Reserved.
Copyright (c) 2008, Google Inc.
Portions of this file contain modifications contributed and copyrighted by
@@ -306,7 +306,7 @@ ibool
rw_lock_s_lock_low(
/*===============*/
rw_lock_t* lock, /*!< in: pointer to rw-lock */
- ulint pass __attribute__((unused)),
+ ulint pass MY_ATTRIBUTE((unused)),
/*!< in: pass value; != 0, if the lock will be
passed to another thread to unlock */
const char* file_name, /*!< in: file name where lock requested */
diff --git a/storage/innobase/include/sync0sync.h b/storage/innobase/include/sync0sync.h
index c2002de9114..6dff729ee60 100644
--- a/storage/innobase/include/sync0sync.h
+++ b/storage/innobase/include/sync0sync.h
@@ -1,6 +1,6 @@
/*****************************************************************************
-Copyright (c) 1995, 2015, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 1995, 2016, Oracle and/or its affiliates. All Rights Reserved.
Copyright (c) 2008, Google Inc.
Copyright (c) 2012, Facebook Inc.
@@ -403,7 +403,7 @@ ibool
mutex_own(
/*======*/
const ib_mutex_t* mutex) /*!< in: mutex */
- __attribute__((warn_unused_result));
+ MY_ATTRIBUTE((warn_unused_result));
#endif /* UNIV_DEBUG */
#ifdef UNIV_SYNC_DEBUG
/******************************************************************//**
@@ -418,7 +418,7 @@ sync_thread_add_level(
ulint level, /*!< in: level in the latching order; if
SYNC_LEVEL_VARYING, nothing is done */
ibool relock) /*!< in: TRUE if re-entering an x-lock */
- __attribute__((nonnull));
+ MY_ATTRIBUTE((nonnull));
/******************************************************************//**
Removes a latch from the thread level array if it is found there.
@return TRUE if found in the array; it is no error if the latch is
@@ -448,7 +448,7 @@ sync_thread_levels_nonempty_gen(
/*============================*/
ibool dict_mutex_allowed) /*!< in: TRUE if dictionary mutex is
allowed to be owned by the thread */
- __attribute__((warn_unused_result));
+ MY_ATTRIBUTE((warn_unused_result));
/******************************************************************//**
Checks if the level array for the current thread is empty,
except for data dictionary latches. */
@@ -465,7 +465,7 @@ sync_thread_levels_nonempty_trx(
ibool has_search_latch)
/*!< in: TRUE if and only if the thread
is supposed to hold btr_search_latch */
- __attribute__((warn_unused_result));
+ MY_ATTRIBUTE((warn_unused_result));
/******************************************************************//**
Gets the debug information for a reserved mutex. */
diff --git a/storage/innobase/include/trx0rec.h b/storage/innobase/include/trx0rec.h
index 50da55d2ea3..359937e3583 100644
--- a/storage/innobase/include/trx0rec.h
+++ b/storage/innobase/include/trx0rec.h
@@ -1,6 +1,6 @@
/*****************************************************************************
-Copyright (c) 1996, 2013, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 1996, 2016, Oracle and/or its affiliates. All Rights Reserved.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -86,7 +86,7 @@ ulint
trx_undo_rec_get_offset(
/*====================*/
undo_no_t undo_no) /*!< in: undo no read from node */
- __attribute__((const));
+ MY_ATTRIBUTE((const));
/**********************************************************************//**
Returns the start of the undo record data area. */
@@ -109,7 +109,7 @@ trx_undo_rec_get_pars(
externally stored field */
undo_no_t* undo_no, /*!< out: undo log record number */
table_id_t* table_id) /*!< out: table id */
- __attribute__((nonnull));
+ MY_ATTRIBUTE((nonnull));
/*******************************************************************//**
Builds a row reference from an undo log record.
@return pointer to remaining part of undo record */
@@ -201,7 +201,7 @@ trx_undo_rec_get_partial_row(
only in the assertion. */
mem_heap_t* heap) /*!< in: memory heap from which the memory
needed is allocated */
- __attribute__((nonnull, warn_unused_result));
+ MY_ATTRIBUTE((nonnull, warn_unused_result));
/***********************************************************************//**
Writes information to an undo log about an insert, update, or a delete marking
of a clustered index record. This information is used in a rollback of the
@@ -233,7 +233,7 @@ trx_undo_report_row_operation(
inserted undo log record,
0 if BTR_NO_UNDO_LOG
flag was specified */
- __attribute__((nonnull(3,4,10), warn_unused_result));
+ MY_ATTRIBUTE((nonnull(3,4,10), warn_unused_result));
/******************************************************************//**
Copies an undo record to heap. This function can be called if we know that
the undo log record exists.
@@ -244,7 +244,7 @@ trx_undo_get_undo_rec_low(
/*======================*/
roll_ptr_t roll_ptr, /*!< in: roll pointer to record */
mem_heap_t* heap) /*!< in: memory heap where copied */
- __attribute__((nonnull, warn_unused_result));
+ MY_ATTRIBUTE((nonnull, warn_unused_result));
/*******************************************************************//**
Build a previous version of a clustered index record. The caller must
hold a latch on the index page of the clustered index record.
@@ -268,7 +268,7 @@ trx_undo_prev_version_build(
rec_t** old_vers)/*!< out, own: previous version, or NULL if
rec is the first inserted version, or if
history data has been deleted */
- __attribute__((nonnull));
+ MY_ATTRIBUTE((nonnull));
#endif /* !UNIV_HOTBACKUP */
/***********************************************************//**
Parses a redo log record of adding an undo log record.
diff --git a/storage/innobase/include/trx0roll.h b/storage/innobase/include/trx0roll.h
index d5ab83d7767..98a667b2ec1 100644
--- a/storage/innobase/include/trx0roll.h
+++ b/storage/innobase/include/trx0roll.h
@@ -1,6 +1,6 @@
/*****************************************************************************
-Copyright (c) 1996, 2014, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 1996, 2016, Oracle and/or its affiliates. All Rights Reserved.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -125,7 +125,7 @@ extern "C" UNIV_INTERN
os_thread_ret_t
DECLARE_THREAD(trx_rollback_or_clean_all_recovered)(
/*================================================*/
- void* arg __attribute__((unused)));
+ void* arg MY_ATTRIBUTE((unused)));
/*!< in: a dummy parameter required by
os_thread_create */
/*********************************************************************//**
@@ -152,7 +152,7 @@ dberr_t
trx_rollback_for_mysql(
/*===================*/
trx_t* trx) /*!< in/out: transaction */
- __attribute__((nonnull));
+ MY_ATTRIBUTE((nonnull));
/*******************************************************************//**
Rollback the latest SQL statement for MySQL.
@return error code or DB_SUCCESS */
@@ -161,7 +161,7 @@ dberr_t
trx_rollback_last_sql_stat_for_mysql(
/*=================================*/
trx_t* trx) /*!< in/out: transaction */
- __attribute__((nonnull));
+ MY_ATTRIBUTE((nonnull));
/*******************************************************************//**
Rollback a transaction to a given savepoint or do a complete rollback.
@return error code or DB_SUCCESS */
@@ -173,7 +173,7 @@ trx_rollback_to_savepoint(
trx_savept_t* savept) /*!< in: pointer to savepoint undo number, if
partial rollback requested, or NULL for
complete rollback */
- __attribute__((nonnull(1)));
+ MY_ATTRIBUTE((nonnull(1)));
/*******************************************************************//**
Rolls back a transaction back to a named savepoint. Modifications after the
savepoint are undone but InnoDB does NOT release the corresponding locks
@@ -195,7 +195,7 @@ trx_rollback_to_savepoint_for_mysql(
information to remove the
binlog entries of the queries
executed after the savepoint */
- __attribute__((nonnull, warn_unused_result));
+ MY_ATTRIBUTE((nonnull, warn_unused_result));
/*******************************************************************//**
Creates a named savepoint. If the transaction is not yet started, starts it.
If there is already a savepoint of the same name, this call erases that old
@@ -212,7 +212,7 @@ trx_savepoint_for_mysql(
position corresponding to this
connection at the time of the
savepoint */
- __attribute__((nonnull));
+ MY_ATTRIBUTE((nonnull));
/*******************************************************************//**
Releases a named savepoint. Savepoints which
were set after this savepoint are deleted.
@@ -224,7 +224,7 @@ trx_release_savepoint_for_mysql(
/*============================*/
trx_t* trx, /*!< in: transaction handle */
const char* savepoint_name) /*!< in: savepoint name */
- __attribute__((nonnull, warn_unused_result));
+ MY_ATTRIBUTE((nonnull, warn_unused_result));
/*******************************************************************//**
Frees savepoint structs starting from savep. */
UNIV_INTERN
diff --git a/storage/innobase/include/trx0sys.h b/storage/innobase/include/trx0sys.h
index 9ffc8d99a7f..8c6b13f9dd4 100644
--- a/storage/innobase/include/trx0sys.h
+++ b/storage/innobase/include/trx0sys.h
@@ -1,6 +1,6 @@
/*****************************************************************************
-Copyright (c) 1996, 2012, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 1996, 2016, Oracle and/or its affiliates. All Rights Reserved.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -270,7 +270,7 @@ ibool
trx_in_trx_list(
/*============*/
const trx_t* in_trx) /*!< in: transaction */
- __attribute__((nonnull, warn_unused_result));
+ MY_ATTRIBUTE((nonnull, warn_unused_result));
#endif /* UNIV_DEBUG */
#if defined UNIV_DEBUG || defined UNIV_BLOB_LIGHT_DEBUG
/***********************************************************//**
@@ -281,7 +281,7 @@ ibool
trx_assert_recovered(
/*=================*/
trx_id_t trx_id) /*!< in: transaction identifier */
- __attribute__((warn_unused_result));
+ MY_ATTRIBUTE((warn_unused_result));
#endif /* UNIV_DEBUG || UNIV_BLOB_LIGHT_DEBUG */
/*****************************************************************//**
Updates the offset information about the end of the MySQL binlog entry
diff --git a/storage/innobase/include/trx0trx.h b/storage/innobase/include/trx0trx.h
index 4f0fac4c899..add5e311957 100644
--- a/storage/innobase/include/trx0trx.h
+++ b/storage/innobase/include/trx0trx.h
@@ -1,6 +1,6 @@
/*****************************************************************************
-Copyright (c) 1996, 2013, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 1996, 2016, Oracle and/or its affiliates. All Rights Reserved.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -103,7 +103,7 @@ void
trx_free_prepared(
/*==============*/
trx_t* trx) /*!< in, own: trx object */
- UNIV_COLD __attribute__((nonnull));
+ UNIV_COLD MY_ATTRIBUTE((nonnull));
/********************************************************************//**
Frees a transaction object for MySQL. */
UNIV_INTERN
@@ -169,7 +169,7 @@ trx_start_for_ddl_low(
/*==================*/
trx_t* trx, /*!< in/out: transaction */
trx_dict_op_t op) /*!< in: dictionary operation type */
- __attribute__((nonnull));
+ MY_ATTRIBUTE((nonnull));
#ifdef UNIV_DEBUG
#define trx_start_for_ddl(t, o) \
@@ -191,7 +191,7 @@ void
trx_commit(
/*=======*/
trx_t* trx) /*!< in/out: transaction */
- __attribute__((nonnull));
+ MY_ATTRIBUTE((nonnull));
/****************************************************************//**
Commits a transaction and a mini-transaction. */
UNIV_INTERN
@@ -201,7 +201,7 @@ trx_commit_low(
trx_t* trx, /*!< in/out: transaction */
mtr_t* mtr) /*!< in/out: mini-transaction (will be committed),
or NULL if trx made no modifications */
- __attribute__((nonnull(1)));
+ MY_ATTRIBUTE((nonnull(1)));
/****************************************************************//**
Cleans up a transaction at database startup. The cleanup is needed if
the transaction already got to the middle of a commit when the database
@@ -255,7 +255,7 @@ void
trx_commit_complete_for_mysql(
/*==========================*/
trx_t* trx) /*!< in/out: transaction */
- __attribute__((nonnull));
+ MY_ATTRIBUTE((nonnull));
/**********************************************************************//**
Marks the latest SQL statement ended. */
UNIV_INTERN
@@ -317,7 +317,7 @@ trx_print_low(
/*!< in: length of trx->lock.trx_locks */
ulint heap_size)
/*!< in: mem_heap_get_size(trx->lock.lock_heap) */
- __attribute__((nonnull));
+ MY_ATTRIBUTE((nonnull));
/**********************************************************************//**
Prints info about a transaction.
@@ -331,7 +331,7 @@ trx_print_latched(
const trx_t* trx, /*!< in: transaction */
ulint max_query_len) /*!< in: max query length to print,
or 0 to use the default max length */
- __attribute__((nonnull));
+ MY_ATTRIBUTE((nonnull));
/**********************************************************************//**
Prints info about a transaction.
@@ -344,7 +344,7 @@ trx_print(
const trx_t* trx, /*!< in: transaction */
ulint max_query_len) /*!< in: max query length to print,
or 0 to use the default max length */
- __attribute__((nonnull));
+ MY_ATTRIBUTE((nonnull));
/**********************************************************************//**
Determine if a transaction is a dictionary operation.
@@ -354,7 +354,7 @@ enum trx_dict_op_t
trx_get_dict_operation(
/*===================*/
const trx_t* trx) /*!< in: transaction */
- __attribute__((pure));
+ MY_ATTRIBUTE((pure));
/**********************************************************************//**
Flag a transaction a dictionary operation. */
UNIV_INLINE
@@ -383,7 +383,7 @@ trx_state_eq(
if state != TRX_STATE_NOT_STARTED
asserts that
trx->state != TRX_STATE_NOT_STARTED */
- __attribute__((nonnull, warn_unused_result));
+ MY_ATTRIBUTE((nonnull, warn_unused_result));
# ifdef UNIV_DEBUG
/**********************************************************************//**
Asserts that a transaction has been started.
@@ -394,7 +394,7 @@ ibool
trx_assert_started(
/*===============*/
const trx_t* trx) /*!< in: transaction */
- __attribute__((nonnull, warn_unused_result));
+ MY_ATTRIBUTE((nonnull, warn_unused_result));
# endif /* UNIV_DEBUG */
/**********************************************************************//**
diff --git a/storage/innobase/include/trx0undo.h b/storage/innobase/include/trx0undo.h
index 45733921212..42ac62916e0 100644
--- a/storage/innobase/include/trx0undo.h
+++ b/storage/innobase/include/trx0undo.h
@@ -1,6 +1,7 @@
/*****************************************************************************
-Copyright (c) 1996, 2013, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 1996, 2016, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 2013, 2016, MariaDB Corporation
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -75,7 +76,7 @@ bool
trx_undo_trx_id_is_insert(
/*======================*/
const byte* trx_id) /*!< in: DB_TRX_ID, followed by DB_ROLL_PTR */
- __attribute__((nonnull, pure, warn_unused_result));
+ MY_ATTRIBUTE((nonnull, pure, warn_unused_result));
#endif /* !UNIV_HOTBACKUP */
/*****************************************************************//**
Writes a roll ptr to an index page. In case that the size changes in
@@ -216,7 +217,7 @@ trx_undo_add_page(
mtr_t* mtr) /*!< in: mtr which does not have a latch to any
undo log page; the caller must have reserved
the rollback segment mutex */
- __attribute__((nonnull, warn_unused_result));
+ MY_ATTRIBUTE((nonnull, warn_unused_result));
/********************************************************************//**
Frees the last undo log page.
The caller must hold the rollback segment mutex. */
@@ -231,7 +232,7 @@ trx_undo_free_last_page_func(
mtr_t* mtr) /*!< in/out: mini-transaction which does not
have a latch to any undo log page or which
has allocated the undo log page */
- __attribute__((nonnull));
+ MY_ATTRIBUTE((nonnull));
#ifdef UNIV_DEBUG
# define trx_undo_free_last_page(trx,undo,mtr) \
trx_undo_free_last_page_func(trx,undo,mtr)
@@ -251,7 +252,7 @@ trx_undo_truncate_end(
trx_undo_t* undo, /*!< in/out: undo log */
undo_no_t limit) /*!< in: all undo records with undo number
>= this value should be truncated */
- __attribute__((nonnull));
+ MY_ATTRIBUTE((nonnull));
/***********************************************************************//**
Truncates an undo log from the start. This function is used during a purge
@@ -293,7 +294,7 @@ trx_undo_assign_undo(
/*=================*/
trx_t* trx, /*!< in: transaction */
ulint type) /*!< in: TRX_UNDO_INSERT or TRX_UNDO_UPDATE */
- __attribute__((nonnull, warn_unused_result));
+ MY_ATTRIBUTE((nonnull, warn_unused_result));
/******************************************************************//**
Sets the state of the undo log segment at a transaction finish.
@return undo log segment header page, x-latched */
@@ -343,7 +344,7 @@ void
trx_undo_free_prepared(
/*===================*/
trx_t* trx) /*!< in/out: PREPARED transaction */
- UNIV_COLD __attribute__((nonnull));
+ UNIV_COLD MY_ATTRIBUTE((nonnull));
#endif /* !UNIV_HOTBACKUP */
/***********************************************************//**
Parses the redo log entry of an undo log page initialization.
diff --git a/storage/innobase/include/univ.i b/storage/innobase/include/univ.i
index 56a257241d9..cd379f0fadd 100644
--- a/storage/innobase/include/univ.i
+++ b/storage/innobase/include/univ.i
@@ -1,6 +1,6 @@
/*****************************************************************************
-Copyright (c) 1994, 2013, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 1994, 2016, Oracle and/or its affiliates. All Rights Reserved.
Copyright (c) 2008, Google Inc.
Copyright (c) 2013, 2015, MariaDB Corporation.
@@ -45,7 +45,7 @@ Created 1/20/1994 Heikki Tuuri
#define INNODB_VERSION_MAJOR 5
#define INNODB_VERSION_MINOR 6
-#define INNODB_VERSION_BUGFIX 30
+#define INNODB_VERSION_BUGFIX 31
/* The following is the InnoDB version as shown in
SELECT plugin_version FROM information_schema.plugins;
@@ -249,8 +249,9 @@ operations (very slow); also UNIV_DEBUG must be defined */
that are only referenced from within InnoDB, not from MySQL. We disable the
GCC visibility directive on all Sun operating systems because there is no
easy way to get it to work. See http://bugs.mysql.com/bug.php?id=52263. */
+#define MY_ATTRIBUTE __attribute__
#if defined(__GNUC__) && (__GNUC__ >= 4) && !defined(sun) || defined(__INTEL_COMPILER)
-# define UNIV_INTERN __attribute__((visibility ("hidden")))
+# define UNIV_INTERN MY_ATTRIBUTE((visibility ("hidden")))
#else
# define UNIV_INTERN
#endif
@@ -265,7 +266,7 @@ appears close together improving code locality of non-cold parts of
program. The paths leading to call of cold functions within code are
marked as unlikely by the branch prediction mechanism. Optimize a
rarely invoked function for size instead of speed. */
-# define UNIV_COLD __attribute__((cold))
+# define UNIV_COLD MY_ATTRIBUTE((cold))
#else
# define UNIV_COLD /* empty */
#endif
@@ -562,7 +563,7 @@ contains the sum of the following flag and the locally stored len. */
#if defined(__GNUC__) && (__GNUC__ > 2) && ! defined(__INTEL_COMPILER)
#define HAVE_GCC_GT_2
/* Tell the compiler that a variable/function is unused. */
-# define UNIV_UNUSED __attribute__ ((unused))
+# define UNIV_UNUSED MY_ATTRIBUTE ((unused))
#else
# define UNIV_UNUSED
#endif /* CHECK FOR GCC VER_GT_2 */
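
The univ.i hunk above is the pivot of the whole rename: MY_ATTRIBUTE is aliased directly to __attribute__, and UNIV_INTERN, UNIV_COLD and UNIV_UNUSED are then re-expressed through it. A more defensive shim would guard the expansion by compiler, roughly as follows (a sketch under the assumption that only GCC-compatible compilers accept attribute syntax; this is not the upstream definition):

/* Guarded portability shim: expand to __attribute__ only where
   the compiler is known to support it, otherwise to nothing. */
#if defined(__GNUC__) || defined(__clang__)
# define MY_ATTRIBUTE(A)	__attribute__(A)
#else
# define MY_ATTRIBUTE(A)	/* attributes are only hints */
#endif

/* Call sites keep the double parentheses, so the whole attribute
   list is passed as one parenthesized macro argument: */
void log_msg(const char* fmt, ...)
	MY_ATTRIBUTE((format(printf, 1, 2)));

The double-parenthesis convention is what lets a single-argument macro swallow comma-separated lists such as (nonnull, warn_unused_result) without any variadic tricks.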
diff --git a/storage/innobase/include/ut0byte.h b/storage/innobase/include/ut0byte.h
index 5bdd553ca80..4893ab9f9af 100644
--- a/storage/innobase/include/ut0byte.h
+++ b/storage/innobase/include/ut0byte.h
@@ -1,6 +1,6 @@
/*****************************************************************************
-Copyright (c) 1994, 2009, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 1994, 2016, Oracle and/or its affiliates. All Rights Reserved.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -39,7 +39,7 @@ ut_ull_create(
/*==========*/
ulint high, /*!< in: high-order 32 bits */
ulint low) /*!< in: low-order 32 bits */
- __attribute__((const));
+ MY_ATTRIBUTE((const));
/********************************************************//**
Rounds a 64-bit integer downward to a multiple of a power of 2.
@@ -80,7 +80,7 @@ ut_align_down(
/*==========*/
const void* ptr, /*!< in: pointer */
ulint align_no) /*!< in: align by this number */
- __attribute__((const));
+ MY_ATTRIBUTE((const));
/*********************************************************//**
The following function computes the offset of a pointer from the nearest
aligned address.
@@ -91,7 +91,7 @@ ut_align_offset(
/*============*/
const void* ptr, /*!< in: pointer */
ulint align_no) /*!< in: align by this number */
- __attribute__((const));
+ MY_ATTRIBUTE((const));
/*****************************************************************//**
Gets the nth bit of a ulint.
@return TRUE if nth bit is 1; 0th bit is defined to be the least significant */
diff --git a/storage/innobase/include/ut0dbg.h b/storage/innobase/include/ut0dbg.h
index 6a4afe99597..3f5baef0a3c 100644
--- a/storage/innobase/include/ut0dbg.h
+++ b/storage/innobase/include/ut0dbg.h
@@ -1,6 +1,6 @@
/*****************************************************************************
-Copyright (c) 1994, 2009, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 1994, 2016, Oracle and/or its affiliates. All Rights Reserved.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -59,7 +59,7 @@ ut_dbg_assertion_failed(
const char* expr, /*!< in: the failed assertion */
const char* file, /*!< in: source file containing the assertion */
ulint line) /*!< in: line number of the assertion */
- UNIV_COLD __attribute__((nonnull(2)));
+ UNIV_COLD MY_ATTRIBUTE((nonnull(2)));
/** Abort the execution. */
# define UT_DBG_PANIC abort()
diff --git a/storage/innobase/include/ut0mem.h b/storage/innobase/include/ut0mem.h
index af7eb4e9b1d..81470358f2f 100644
--- a/storage/innobase/include/ut0mem.h
+++ b/storage/innobase/include/ut0mem.h
@@ -1,6 +1,6 @@
/*****************************************************************************
-Copyright (c) 1994, 2011, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 1994, 2016, Oracle and/or its affiliates. All Rights Reserved.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -87,7 +87,7 @@ ut_malloc_low(
ulint n, /*!< in: number of bytes to allocate */
ibool assert_on_error) /*!< in: if TRUE, we crash mysqld if
the memory cannot be allocated */
- __attribute__((malloc));
+ MY_ATTRIBUTE((malloc));
/**********************************************************************//**
Allocates memory. */
#define ut_malloc(n) ut_malloc_low(n, TRUE)
diff --git a/storage/innobase/include/ut0rnd.h b/storage/innobase/include/ut0rnd.h
index 53b769849a5..6ed3ee3b2e5 100644
--- a/storage/innobase/include/ut0rnd.h
+++ b/storage/innobase/include/ut0rnd.h
@@ -1,6 +1,6 @@
/*****************************************************************************
-Copyright (c) 1994, 2009, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 1994, 2016, Oracle and/or its affiliates. All Rights Reserved.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -96,7 +96,7 @@ ulint
ut_fold_ull(
/*========*/
ib_uint64_t d) /*!< in: 64-bit integer */
- __attribute__((const));
+ MY_ATTRIBUTE((const));
/*************************************************************//**
Folds a character string ending in the null character.
@return folded value */
@@ -105,7 +105,7 @@ ulint
ut_fold_string(
/*===========*/
const char* str) /*!< in: null-terminated string */
- __attribute__((pure));
+ MY_ATTRIBUTE((pure));
/***********************************************************//**
Looks for a prime number slightly greater than the given argument.
The prime is chosen so that it is not near any power of 2.
@@ -115,7 +115,7 @@ ulint
ut_find_prime(
/*==========*/
ulint n) /*!< in: positive number > 100 */
- __attribute__((const));
+ MY_ATTRIBUTE((const));
#endif /* !UNIV_INNOCHECKSUM */
@@ -128,7 +128,7 @@ ut_fold_ulint_pair(
/*===============*/
ulint n1, /*!< in: ulint */
ulint n2) /*!< in: ulint */
- __attribute__((const));
+ MY_ATTRIBUTE((const));
/*************************************************************//**
Folds a binary string.
@return folded value */
@@ -138,7 +138,7 @@ ut_fold_binary(
/*===========*/
const byte* str, /*!< in: string of bytes */
ulint len) /*!< in: length */
- __attribute__((pure));
+ MY_ATTRIBUTE((pure));
#ifndef UNIV_NONINL
diff --git a/storage/innobase/include/ut0ut.h b/storage/innobase/include/ut0ut.h
index 176f132704a..6786ad166e8 100644
--- a/storage/innobase/include/ut0ut.h
+++ b/storage/innobase/include/ut0ut.h
@@ -1,6 +1,6 @@
/*****************************************************************************
-Copyright (c) 1994, 2014, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 1994, 2016, Oracle and/or its affiliates. All Rights Reserved.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -241,7 +241,7 @@ ulint
ut_2_power_up(
/*==========*/
ulint n) /*!< in: number != 0 */
- __attribute__((const));
+ MY_ATTRIBUTE((const));
#endif /* !UNIV_INNOCHECKSUM */
@@ -324,7 +324,7 @@ void
ut_print_timestamp(
/*===============*/
FILE* file) /*!< in: file where to print */
- UNIV_COLD __attribute__((nonnull));
+ UNIV_COLD MY_ATTRIBUTE((nonnull));
#ifndef UNIV_INNOCHECKSUM
@@ -524,7 +524,7 @@ ut_ulint_sort(
ulint* aux_arr, /*!< in/out: aux array to use in sort */
ulint low, /*!< in: lower bound */
ulint high) /*!< in: upper bound */
- __attribute__((nonnull));
+ MY_ATTRIBUTE((nonnull));
#ifndef UNIV_NONINL
#include "ut0ut.ic"
diff --git a/storage/innobase/lock/lock0lock.cc b/storage/innobase/lock/lock0lock.cc
index 165db2d33ed..b6fd1a451bf 100644
--- a/storage/innobase/lock/lock0lock.cc
+++ b/storage/innobase/lock/lock0lock.cc
@@ -1,7 +1,7 @@
/*****************************************************************************
-Copyright (c) 1996, 2015, Oracle and/or its affiliates. All Rights Reserved.
-Copyright (c) 2014, 2015, MariaDB Corporation
+Copyright (c) 1996, 2016, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 2014, 2016, MariaDB Corporation
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -429,7 +429,7 @@ ibool
lock_rec_validate_page(
/*===================*/
const buf_block_t* block) /*!< in: buffer block */
- __attribute__((nonnull, warn_unused_result));
+ MY_ATTRIBUTE((nonnull, warn_unused_result));
#endif /* UNIV_DEBUG */
/* The lock system */
@@ -513,7 +513,7 @@ Checks that a transaction id is sensible, i.e., not in the future.
#ifdef UNIV_DEBUG
UNIV_INTERN
#else
-static __attribute__((nonnull, warn_unused_result))
+static MY_ATTRIBUTE((nonnull, warn_unused_result))
#endif
bool
lock_check_trx_id_sanity(
@@ -4017,7 +4017,7 @@ lock_get_next_lock(
ut_ad(heap_no == ULINT_UNDEFINED);
ut_ad(lock_get_type_low(lock) == LOCK_TABLE);
- lock = UT_LIST_GET_PREV(un_member.tab_lock.locks, lock);
+ lock = UT_LIST_GET_NEXT(un_member.tab_lock.locks, lock);
}
} while (lock != NULL
&& lock->trx->lock.deadlock_mark > ctx->mark_start);
@@ -4067,7 +4067,8 @@ lock_get_first_lock(
} else {
*heap_no = ULINT_UNDEFINED;
ut_ad(lock_get_type_low(lock) == LOCK_TABLE);
- lock = UT_LIST_GET_PREV(un_member.tab_lock.locks, lock);
+ dict_table_t* table = lock->un_member.tab_lock.table;
+ lock = UT_LIST_GET_FIRST(table->locks);
}
ut_a(lock != NULL);
@@ -4999,7 +5000,8 @@ lock_table(
dberr_t err;
const lock_t* wait_for;
- ut_ad(table && thr);
+ ut_ad(table != NULL);
+ ut_ad(thr != NULL);
if (flags & BTR_NO_LOCKING_FLAG) {
@@ -6431,7 +6433,7 @@ lock_validate_table_locks(
/*********************************************************************//**
Validate record locks up to a limit.
@return lock at limit or NULL if no more locks in the hash bucket */
-static __attribute__((nonnull, warn_unused_result))
+static MY_ATTRIBUTE((nonnull, warn_unused_result))
const lock_t*
lock_rec_validate(
/*==============*/
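
Not everything in the lock0lock.cc hunks is renaming. Two behavioral fixes ride along: the deadlock walker now advances through table locks with UT_LIST_GET_NEXT from the head of the table's lock list rather than stepping backwards with UT_LIST_GET_PREV, and the compound assertion ut_ad(table && thr) is split into one assertion per pointer. The split is a general diagnostic pattern; a minimal illustration with plain assert (stand-in code, not InnoDB's):

#include <assert.h>
#include <stddef.h>

static void lock_table_sketch(const void* table, const void* thr)
{
	/* One assertion per pointer: the failure message quotes the
	   exact expression, so it names the argument that was NULL. */
	assert(table != NULL);
	assert(thr != NULL);

	/* The combined form, assert(table && thr), reports only
	   "table && thr" and hides which operand failed. */
}

int main(void)
{
	int t = 0, h = 0;
	lock_table_sketch(&t, &h);	/* both non-NULL: passes */
	return 0;
}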
diff --git a/storage/innobase/lock/lock0wait.cc b/storage/innobase/lock/lock0wait.cc
index a87adcf94c0..c7bd223c491 100644
--- a/storage/innobase/lock/lock0wait.cc
+++ b/storage/innobase/lock/lock0wait.cc
@@ -1,6 +1,6 @@
/*****************************************************************************
-Copyright (c) 1996, 2013, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 1996, 2016, Oracle and/or its affiliates. All Rights Reserved.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -512,7 +512,7 @@ extern "C" UNIV_INTERN
os_thread_ret_t
DECLARE_THREAD(lock_wait_timeout_thread)(
/*=====================================*/
- void* arg __attribute__((unused)))
+ void* arg MY_ATTRIBUTE((unused)))
/* in: a dummy parameter required by
os_thread_create */
{
diff --git a/storage/innobase/log/log0log.cc b/storage/innobase/log/log0log.cc
index 31104b395c1..89b616aba01 100644
--- a/storage/innobase/log/log0log.cc
+++ b/storage/innobase/log/log0log.cc
@@ -1,6 +1,6 @@
/*****************************************************************************
-Copyright (c) 1995, 2015, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 1995, 2016, Oracle and/or its affiliates. All Rights Reserved.
Copyright (c) 2009, Google Inc.
Copyright (C) 2014, 2016, MariaDB Corporation. All Rights Reserved.
@@ -981,7 +981,7 @@ log_group_init(
ulint space_id, /*!< in: space id of the file space
which contains the log files of this
group */
- ulint archive_space_id __attribute__((unused)))
+ ulint archive_space_id MY_ATTRIBUTE((unused)))
/*!< in: space id of the file space
which contains some archived log
files for this group; currently, only
@@ -2396,7 +2396,7 @@ void
log_archived_file_name_gen(
/*=======================*/
char* buf, /*!< in: buffer where to write */
- ulint id __attribute__((unused)),
+ ulint id MY_ATTRIBUTE((unused)),
/*!< in: group id;
currently we only archive the first group */
ulint file_no)/*!< in: file number */
diff --git a/storage/innobase/log/log0recv.cc b/storage/innobase/log/log0recv.cc
index d574cd55397..9fde18757c5 100644
--- a/storage/innobase/log/log0recv.cc
+++ b/storage/innobase/log/log0recv.cc
@@ -1,6 +1,6 @@
/*****************************************************************************
-Copyright (c) 1997, 2015, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 1997, 2016, Oracle and/or its affiliates. All Rights Reserved.
Copyright (c) 2012, Facebook Inc.
Copyright (c) 2013, 2016, MariaDB Corporation. All Rights Reserved.
@@ -334,7 +334,7 @@ extern "C" UNIV_INTERN
os_thread_ret_t
DECLARE_THREAD(recv_writer_thread)(
/*===============================*/
- void* arg __attribute__((unused)))
+ void* arg MY_ATTRIBUTE((unused)))
/*!< in: a dummy parameter required by
os_thread_create */
{
@@ -755,7 +755,7 @@ recv_check_cp_is_consistent(
/********************************************************//**
Looks for the maximum consistent checkpoint from the log groups.
@return error code or DB_SUCCESS */
-static __attribute__((nonnull, warn_unused_result))
+static MY_ATTRIBUTE((nonnull, warn_unused_result))
dberr_t
recv_find_max_checkpoint(
/*=====================*/
diff --git a/storage/innobase/mem/mem0dbg.cc b/storage/innobase/mem/mem0dbg.cc
index 308c2979551..a77785a369a 100644
--- a/storage/innobase/mem/mem0dbg.cc
+++ b/storage/innobase/mem/mem0dbg.cc
@@ -1,6 +1,6 @@
/*****************************************************************************
-Copyright (c) 1994, 2011, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 1994, 2016, Oracle and/or its affiliates. All Rights Reserved.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -248,7 +248,7 @@ void
mem_field_erase(
/*============*/
byte* buf, /*!< in: memory field */
- ulint n __attribute__((unused)))
+ ulint n MY_ATTRIBUTE((unused)))
/*!< in: how many bytes the user requested */
{
byte* usr_buf;
@@ -450,7 +450,7 @@ void
mem_heap_validate_or_print(
/*=======================*/
mem_heap_t* heap, /*!< in: memory heap */
- byte* top __attribute__((unused)),
+ byte* top MY_ATTRIBUTE((unused)),
/*!< in: calculate and validate only until
this top pointer in the heap is reached,
if this pointer is NULL, ignored */
diff --git a/storage/innobase/mtr/mtr0mtr.cc b/storage/innobase/mtr/mtr0mtr.cc
index 400aa9bff57..5843dd80524 100644
--- a/storage/innobase/mtr/mtr0mtr.cc
+++ b/storage/innobase/mtr/mtr0mtr.cc
@@ -1,6 +1,6 @@
/*****************************************************************************
-Copyright (c) 1995, 2013, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 1995, 2016, Oracle and/or its affiliates. All Rights Reserved.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -58,7 +58,7 @@ mtr_block_dirtied(
/*****************************************************************//**
Releases the item in the slot given. */
-static __attribute__((nonnull))
+static MY_ATTRIBUTE((nonnull))
void
mtr_memo_slot_release_func(
/*=======================*/
@@ -105,7 +105,7 @@ mtr_memo_slot_release_func(
Releases the mlocks and other objects stored in an mtr memo.
They are released in the order opposite to which they were pushed
to the memo. */
-static __attribute__((nonnull))
+static MY_ATTRIBUTE((nonnull))
void
mtr_memo_pop_all(
/*=============*/
@@ -395,7 +395,7 @@ mtr_read_ulint(
/*===========*/
const byte* ptr, /*!< in: pointer from where to read */
ulint type, /*!< in: MLOG_1BYTE, MLOG_2BYTES, MLOG_4BYTES */
- mtr_t* mtr __attribute__((unused)))
+ mtr_t* mtr MY_ATTRIBUTE((unused)))
/*!< in: mini-transaction handle */
{
ut_ad(mtr->state == MTR_ACTIVE);
diff --git a/storage/innobase/os/os0file.cc b/storage/innobase/os/os0file.cc
index f71a4cf125c..2db53c25b04 100644
--- a/storage/innobase/os/os0file.cc
+++ b/storage/innobase/os/os0file.cc
@@ -1,6 +1,6 @@
/***********************************************************************
-Copyright (c) 1995, 2015, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 1995, 2016, Oracle and/or its affiliates. All Rights Reserved.
Copyright (c) 2009, Percona Inc.
Copyright (c) 2013, 2016, MariaDB Corporation.
@@ -1681,8 +1681,8 @@ os_file_set_nocache(
__attribute__((unused)),
const char* file_name /*!< in: used in the diagnostic
message */
- __attribute__((unused)),
- const char* operation_name __attribute__((unused)))
+ MY_ATTRIBUTE((unused)),
+ const char* operation_name MY_ATTRIBUTE((unused)))
/*!< in: "open" or "create"; used
in the diagnostic message */
{
@@ -2615,7 +2615,7 @@ os_file_flush_func(
/*******************************************************************//**
Does a synchronous read operation in Posix.
@return number of bytes read, -1 if error */
-static __attribute__((nonnull, warn_unused_result))
+static MY_ATTRIBUTE((nonnull, warn_unused_result))
ssize_t
os_file_pread(
/*==========*/
@@ -2726,7 +2726,7 @@ os_file_pread(
/*******************************************************************//**
Does a synchronous write operation in Posix.
@return number of bytes written, -1 if error */
-static __attribute__((nonnull, warn_unused_result))
+static MY_ATTRIBUTE((nonnull, warn_unused_result))
ssize_t
os_file_pwrite(
/*===========*/
diff --git a/storage/innobase/page/page0page.cc b/storage/innobase/page/page0page.cc
index fca8641342c..a09f270a54f 100644
--- a/storage/innobase/page/page0page.cc
+++ b/storage/innobase/page/page0page.cc
@@ -1,6 +1,6 @@
/*****************************************************************************
-Copyright (c) 1994, 2015, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 1994, 2016, Oracle and/or its affiliates. All Rights Reserved.
Copyright (c) 2012, Facebook Inc.
This program is free software; you can redistribute it and/or modify it under
@@ -304,7 +304,7 @@ byte*
page_parse_create(
/*==============*/
byte* ptr, /*!< in: buffer */
- byte* end_ptr __attribute__((unused)), /*!< in: buffer end */
+ byte* end_ptr MY_ATTRIBUTE((unused)), /*!< in: buffer end */
ulint comp, /*!< in: nonzero=compact page format */
buf_block_t* block, /*!< in: block or NULL */
mtr_t* mtr) /*!< in: mtr or NULL */
diff --git a/storage/innobase/page/page0zip.cc b/storage/innobase/page/page0zip.cc
index 0842971c8d0..2bf1f324784 100644
--- a/storage/innobase/page/page0zip.cc
+++ b/storage/innobase/page/page0zip.cc
@@ -1,6 +1,6 @@
/*****************************************************************************
-Copyright (c) 2005, 2015, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 2005, 2016, Oracle and/or its affiliates. All Rights Reserved.
Copyright (c) 2012, Facebook Inc.
This program is free software; you can redistribute it and/or modify it under
@@ -131,7 +131,7 @@ independently of any UNIV_ debugging conditions. */
#ifndef UNIV_INNOCHECKSUM
#if defined UNIV_DEBUG || defined UNIV_ZIP_DEBUG
# include <stdarg.h>
-__attribute__((format (printf, 1, 2)))
+MY_ATTRIBUTE((format (printf, 1, 2)))
/**********************************************************************//**
Report a failure to decompress or compress.
@return number of characters printed */
@@ -752,8 +752,8 @@ static
void
page_zip_free(
/*==========*/
- void* opaque __attribute__((unused)), /*!< in: memory heap */
- void* address __attribute__((unused)))/*!< in: object to free */
+ void* opaque MY_ATTRIBUTE((unused)), /*!< in: memory heap */
+ void* address MY_ATTRIBUTE((unused)))/*!< in: object to free */
{
}
@@ -4807,7 +4807,8 @@ page_zip_parse_compress(
ulint size;
ulint trailer_size;
- ut_ad(ptr && end_ptr);
+ ut_ad(ptr != NULL);
+ ut_ad(end_ptr != NULL);
ut_ad(!page == !page_zip);
if (UNIV_UNLIKELY(ptr + (2 + 2) > end_ptr)) {
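
The ut_ad(ptr && end_ptr) split above (mirrored in the rem0cmp.cc, rem0rec.cc, row0mysql.cc, row0purge.cc and row0row.cc hunks below) is a debuggability fix rather than a behavior change: a compound assertion reports one expression whichever operand failed, while one assertion per pointer names the exact line. A minimal illustration, with the standard assert macro standing in for ut_ad:

    #include <assert.h>
    #include <stddef.h>

    static void parse(const unsigned char* ptr,
                      const unsigned char* end_ptr)
    {
            /* If end_ptr is NULL, the failure points at the second
               assertion; assert(ptr && end_ptr) could not say
               which pointer was bad. */
            assert(ptr != NULL);
            assert(end_ptr != NULL);
            (void) ptr;
            (void) end_ptr;
    }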
@@ -4928,10 +4929,10 @@ page_zip_verify_checksum(
stored = static_cast<ib_uint32_t>(mach_read_from_4(
static_cast<const unsigned char*>(data) + FIL_PAGE_SPACE_OR_CHKSUM));
- ulint page_no __attribute__((unused)) =
+ ulint page_no MY_ATTRIBUTE((unused)) =
mach_read_from_4(static_cast<const unsigned char*>
(data) + FIL_PAGE_OFFSET);
- ulint space_id __attribute__((unused)) =
+ ulint space_id MY_ATTRIBUTE((unused)) =
mach_read_from_4(static_cast<const unsigned char*>
(data) + FIL_PAGE_SPACE_ID);
diff --git a/storage/innobase/pars/lexyy.cc b/storage/innobase/pars/lexyy.cc
index 1c01becd9ed..bfa8e2ea950 100644
--- a/storage/innobase/pars/lexyy.cc
+++ b/storage/innobase/pars/lexyy.cc
@@ -295,7 +295,7 @@ static int yy_start = 0; /* start state number */
static int yy_did_buffer_switch_on_eof;
void yyrestart (FILE *input_file );
-__attribute__((unused)) static void yy_switch_to_buffer (YY_BUFFER_STATE new_buffer );
+MY_ATTRIBUTE((unused)) static void yy_switch_to_buffer (YY_BUFFER_STATE new_buffer );
static YY_BUFFER_STATE yy_create_buffer (FILE *file,int size );
void yy_delete_buffer (YY_BUFFER_STATE b );
void yy_flush_buffer (YY_BUFFER_STATE b );
@@ -916,7 +916,7 @@ char *yytext;
#line 1 "pars0lex.l"
/*****************************************************************************
-Copyright (c) 1997, 2011, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 1997, 2016, Oracle and/or its affiliates. All Rights Reserved.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -1027,7 +1027,7 @@ static int yy_init_globals (void );
/* Accessor methods to globals.
These are made visible to non-reentrant scanners for convenience. */
-__attribute__((unused)) static int yylex_destroy (void );
+MY_ATTRIBUTE((unused)) static int yylex_destroy (void );
int yyget_debug (void );
@@ -2664,7 +2664,7 @@ static int yy_get_next_buffer (void)
* @param new_buffer The new input buffer.
*
*/
- __attribute__((unused)) static void yy_switch_to_buffer (YY_BUFFER_STATE new_buffer )
+ MY_ATTRIBUTE((unused)) static void yy_switch_to_buffer (YY_BUFFER_STATE new_buffer )
{
/* TODO. We should be able to replace this entire function body
@@ -3042,7 +3042,7 @@ static int yy_init_globals (void)
}
/* yylex_destroy is for both reentrant and non-reentrant scanners. */
-__attribute__((unused)) static int yylex_destroy (void)
+MY_ATTRIBUTE((unused)) static int yylex_destroy (void)
{
/* Pop the buffer stack, destroying each element. */
diff --git a/storage/innobase/pars/make_flex.sh b/storage/innobase/pars/make_flex.sh
index 581fc2342aa..c3db8aea298 100755
--- a/storage/innobase/pars/make_flex.sh
+++ b/storage/innobase/pars/make_flex.sh
@@ -1,6 +1,6 @@
#!/bin/bash
#
-# Copyright (c) 1994, 2011, Oracle and/or its affiliates. All Rights Reserved.
+# Copyright (c) 1994, 2016, Oracle and/or its affiliates. All Rights Reserved.
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
@@ -33,15 +33,15 @@ sed -e '
s/'"$TMPFILE"'/'"$OUTFILE"'/;
s/\(int offset = \)\((yy_c_buf_p) - (yytext_ptr)\);/\1(int)(\2);/;
s/\(void yy\(restart\|_\(delete\|flush\)_buffer\)\)/static \1/;
-s/\(void yy_switch_to_buffer\)/__attribute__((unused)) static \1/;
-s/\(void yy\(push\|pop\)_buffer_state\)/__attribute__((unused)) static \1/;
+s/\(void yy_switch_to_buffer\)/MY_ATTRIBUTE((unused)) static \1/;
+s/\(void yy\(push\|pop\)_buffer_state\)/MY_ATTRIBUTE((unused)) static \1/;
s/\(YY_BUFFER_STATE yy_create_buffer\)/static \1/;
-s/\(\(int\|void\) yy[gs]et_\)/__attribute__((unused)) static \1/;
+s/\(\(int\|void\) yy[gs]et_\)/MY_ATTRIBUTE((unused)) static \1/;
s/\(void \*\?yy\(\(re\)\?alloc\|free\)\)/static \1/;
s/\(extern \)\?\(int yy\(leng\|lineno\|_flex_debug\)\)/static \2/;
-s/\(int yylex_destroy\)/__attribute__((unused)) static \1/;
+s/\(int yylex_destroy\)/MY_ATTRIBUTE((unused)) static \1/;
s/\(extern \)\?\(int yylex \)/UNIV_INTERN \2/;
-s/^\(\(FILE\|char\) *\* *yyget\)/__attribute__((unused)) static \1/;
+s/^\(\(FILE\|char\) *\* *yyget\)/MY_ATTRIBUTE((unused)) static \1/;
s/^\(extern \)\?\(\(FILE\|char\) *\* *yy\)/static \2/;
' < $TMPFILE >> $OUTFILE
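
The effect of the adjusted sed pass is exactly what the lexyy.cc hunks above show; the raw flex output is rewritten so the generated helpers carry the portable macro:

    /* flex output, before the sed pass: */
    void yy_switch_to_buffer (YY_BUFFER_STATE new_buffer );
    /* after the sed pass: */
    MY_ATTRIBUTE((unused)) static void yy_switch_to_buffer (YY_BUFFER_STATE new_buffer );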
diff --git a/storage/innobase/pars/pars0pars.cc b/storage/innobase/pars/pars0pars.cc
index da08939d78a..e6af3d25e86 100644
--- a/storage/innobase/pars/pars0pars.cc
+++ b/storage/innobase/pars/pars0pars.cc
@@ -1,6 +1,6 @@
/*****************************************************************************
-Copyright (c) 1996, 2013, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 1996, 2016, Oracle and/or its affiliates. All Rights Reserved.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -1926,7 +1926,7 @@ pars_create_table(
sym_node_t* column_defs, /*!< in: list of column names */
sym_node_t* compact, /* in: non-NULL if COMPACT table. */
sym_node_t* block_size, /* in: block size (can be NULL) */
- void* not_fit_in_memory __attribute__((unused)))
+ void* not_fit_in_memory MY_ATTRIBUTE((unused)))
/*!< in: a non-NULL pointer means that
this is a table which in simulations
should be simulated as not fitting
@@ -2143,7 +2143,7 @@ UNIV_INTERN
que_fork_t*
pars_stored_procedure_call(
/*=======================*/
- sym_node_t* sym_node __attribute__((unused)))
+ sym_node_t* sym_node MY_ATTRIBUTE((unused)))
/*!< in: stored procedure name */
{
ut_error;
@@ -2203,7 +2203,7 @@ UNIV_INTERN
void
yyerror(
/*====*/
- const char* s __attribute__((unused)))
+ const char* s MY_ATTRIBUTE((unused)))
/*!< in: error message string */
{
ut_ad(s);
diff --git a/storage/innobase/rem/rem0cmp.cc b/storage/innobase/rem/rem0cmp.cc
index 426cf9e3ac5..616ef322fb5 100644
--- a/storage/innobase/rem/rem0cmp.cc
+++ b/storage/innobase/rem/rem0cmp.cc
@@ -1,6 +1,6 @@
/*****************************************************************************
-Copyright (c) 1994, 2012, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 1994, 2016, Oracle and/or its affiliates. All Rights Reserved.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -75,7 +75,7 @@ cmp_debug_dtuple_rec_with_match(
completely matched fields; when function
returns, contains the value for current
comparison */
- __attribute__((nonnull, warn_unused_result));
+ MY_ATTRIBUTE((nonnull, warn_unused_result));
#endif /* UNIV_DEBUG */
/*************************************************************//**
This function is used to compare two data fields for which the data type
@@ -659,7 +659,10 @@ cmp_dtuple_rec_with_match_low(
in current field */
int ret; /* return value */
- ut_ad(dtuple && rec && matched_fields && matched_bytes);
+ ut_ad(dtuple != NULL);
+ ut_ad(rec != NULL);
+ ut_ad(matched_fields != NULL);
+ ut_ad(matched_bytes != NULL);
ut_ad(dtuple_check_typed(dtuple));
ut_ad(rec_offs_validate(rec, NULL, offsets));
@@ -920,7 +923,7 @@ Compare two physical record fields.
@retval 1 if rec1 field is greater than rec2
@retval -1 if rec1 field is less than rec2
@retval 0 if rec1 field equals to rec2 */
-static __attribute__((nonnull, warn_unused_result))
+static MY_ATTRIBUTE((nonnull, warn_unused_result))
int
cmp_rec_rec_simple_field(
/*=====================*/
@@ -1139,7 +1142,9 @@ cmp_rec_rec_with_match(
int ret = 0; /* return value */
ulint comp;
- ut_ad(rec1 && rec2 && index);
+ ut_ad(rec1 != NULL);
+ ut_ad(rec2 != NULL);
+ ut_ad(index != NULL);
ut_ad(rec_offs_validate(rec1, index, offsets1));
ut_ad(rec_offs_validate(rec2, index, offsets2));
ut_ad(rec_offs_comp(offsets1) == rec_offs_comp(offsets2));
@@ -1375,7 +1380,9 @@ cmp_debug_dtuple_rec_with_match(
int ret; /* return value */
ulint cur_field; /* current field number */
- ut_ad(dtuple && rec && matched_fields);
+ ut_ad(dtuple != NULL);
+ ut_ad(rec != NULL);
+ ut_ad(matched_fields != NULL);
ut_ad(dtuple_check_typed(dtuple));
ut_ad(rec_offs_validate(rec, NULL, offsets));
diff --git a/storage/innobase/rem/rem0rec.cc b/storage/innobase/rem/rem0rec.cc
index 3ff71d5c59e..9218e757f6a 100644
--- a/storage/innobase/rem/rem0rec.cc
+++ b/storage/innobase/rem/rem0rec.cc
@@ -1,6 +1,6 @@
/*****************************************************************************
-Copyright (c) 1994, 2013, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 1994, 2016, Oracle and/or its affiliates. All Rights Reserved.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -244,7 +244,7 @@ rec_get_n_extern_new(
Determine the offset to each field in a leaf-page record
in ROW_FORMAT=COMPACT. This is a special case of
rec_init_offsets() and rec_get_offsets_func(). */
-UNIV_INLINE __attribute__((nonnull))
+UNIV_INLINE MY_ATTRIBUTE((nonnull))
void
rec_init_offsets_comp_ordinary(
/*===========================*/
@@ -788,7 +788,7 @@ rec_get_nth_field_offs_old(
/**********************************************************//**
Determines the size of a data tuple prefix in ROW_FORMAT=COMPACT.
@return total size */
-UNIV_INLINE __attribute__((warn_unused_result, nonnull(1,2)))
+UNIV_INLINE MY_ATTRIBUTE((warn_unused_result, nonnull(1,2)))
ulint
rec_get_converted_size_comp_prefix_low(
/*===================================*/
@@ -1133,7 +1133,7 @@ rec_convert_dtuple_to_rec_old(
/*********************************************************//**
Builds a ROW_FORMAT=COMPACT record out of a data tuple. */
-UNIV_INLINE __attribute__((nonnull))
+UNIV_INLINE MY_ATTRIBUTE((nonnull))
void
rec_convert_dtuple_to_rec_comp(
/*===========================*/
@@ -1341,7 +1341,9 @@ rec_convert_dtuple_to_rec(
{
rec_t* rec;
- ut_ad(buf && index && dtuple);
+ ut_ad(buf != NULL);
+ ut_ad(index != NULL);
+ ut_ad(dtuple != NULL);
ut_ad(dtuple_validate(dtuple));
ut_ad(dtuple_check_typed(dtuple));
diff --git a/storage/innobase/row/row0ftsort.cc b/storage/innobase/row/row0ftsort.cc
index 9e340f24213..bdd0e63c0a6 100644
--- a/storage/innobase/row/row0ftsort.cc
+++ b/storage/innobase/row/row0ftsort.cc
@@ -1,6 +1,6 @@
/*****************************************************************************
-Copyright (c) 2010, 2015, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 2010, 2016, Oracle and/or its affiliates. All Rights Reserved.
Copyright (c) 2015, 2016, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify it under
@@ -557,8 +557,18 @@ row_merge_fts_doc_tokenize(
dfield_dup(field, buf->heap);
/* One variable length column, word with its length less than
- fts_max_token_size, add one extra size and one extra byte */
- cur_len += 2;
+ fts_max_token_size, add one extra size and one extra byte.
+
+ Since the max length of an FTS token can now exceed 255, the
+ length byte itself must be accounted for: a length below 128
+ bytes fits in one length byte, anything longer needs two. */
+ if (t_str.f_len < 128) {
+ /* Extra size is one byte. */
+ cur_len += 2;
+ } else {
+ /* Extra size is two bytes. */
+ cur_len += 3;
+ }
/* Reserve one byte for the end marker of row_merge_block_t
and we have reserved ROW_MERGE_RESERVE_SIZE (= 4) for
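
The new branch implements a variable-length size prefix: with fts_max_token_size allowed to exceed 255, the length byte itself has to be sized, so tokens shorter than 128 bytes are charged one length byte plus the extra byte, and longer tokens two plus one. The accounting reduced to a sketch (the helper name is illustrative, not from the source):

    /* Sketch: extra bytes added to cur_len per tokenized word,
       mirroring the 128-byte threshold introduced above. */
    static inline unsigned long
    fts_token_extra_size(unsigned long f_len)
    {
            /* one length byte below 128, two otherwise,
               plus one extra byte either way */
            return (f_len < 128) ? 2 : 3;
    }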
@@ -1042,7 +1052,7 @@ row_fts_start_parallel_merge(
/********************************************************************//**
Insert processed FTS data into auxiliary index tables.
@return DB_SUCCESS if insertion runs fine */
-static __attribute__((nonnull))
+static MY_ATTRIBUTE((nonnull))
dberr_t
row_merge_write_fts_word(
/*=====================*/
diff --git a/storage/innobase/row/row0import.cc b/storage/innobase/row/row0import.cc
index 893ffcab3da..6dd3c4eea94 100644
--- a/storage/innobase/row/row0import.cc
+++ b/storage/innobase/row/row0import.cc
@@ -1,7 +1,7 @@
/*****************************************************************************
-Copyright (c) 2012, 2015, Oracle and/or its affiliates. All Rights Reserved.
-Copyright (c) 2015, MariaDB Corporation.
+Copyright (c) 2012, 2016, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 2015, 2016, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -2181,7 +2181,7 @@ PageConverter::operator() (
Clean up after import tablespace failure, this function will acquire
the dictionary latches on behalf of the transaction if the transaction
hasn't already acquired them. */
-static __attribute__((nonnull))
+static MY_ATTRIBUTE((nonnull))
void
row_import_discard_changes(
/*=======================*/
@@ -2232,7 +2232,7 @@ row_import_discard_changes(
/*****************************************************************//**
Clean up after import tablespace. */
-static __attribute__((nonnull, warn_unused_result))
+static MY_ATTRIBUTE((nonnull, warn_unused_result))
dberr_t
row_import_cleanup(
/*===============*/
@@ -2267,7 +2267,7 @@ row_import_cleanup(
/*****************************************************************//**
Report error during tablespace import. */
-static __attribute__((nonnull, warn_unused_result))
+static MY_ATTRIBUTE((nonnull, warn_unused_result))
dberr_t
row_import_error(
/*=============*/
@@ -2295,7 +2295,7 @@ row_import_error(
Adjust the root page index node and leaf node segment headers, update
with the new space id. For all the table's secondary indexes.
@return error code */
-static __attribute__((nonnull, warn_unused_result))
+static MY_ATTRIBUTE((nonnull, warn_unused_result))
dberr_t
row_import_adjust_root_pages_of_secondary_indexes(
/*==============================================*/
@@ -2411,7 +2411,7 @@ row_import_adjust_root_pages_of_secondary_indexes(
/*****************************************************************//**
Ensure that dict_sys->row_id exceeds SELECT MAX(DB_ROW_ID).
@return error code */
-static __attribute__((nonnull, warn_unused_result))
+static MY_ATTRIBUTE((nonnull, warn_unused_result))
dberr_t
row_import_set_sys_max_row_id(
/*==========================*/
@@ -2561,7 +2561,7 @@ row_import_cfg_read_string(
/*********************************************************************//**
Write the meta data (index user fields) config file.
@return DB_SUCCESS or error code. */
-static __attribute__((nonnull, warn_unused_result))
+static MY_ATTRIBUTE((nonnull, warn_unused_result))
dberr_t
row_import_cfg_read_index_fields(
/*=============================*/
@@ -2644,7 +2644,7 @@ row_import_cfg_read_index_fields(
Read the index names and root page numbers of the indexes and set the values.
Row format [root_page_no, len of str, str ... ]
@return DB_SUCCESS or error code. */
-static __attribute__((nonnull, warn_unused_result))
+static MY_ATTRIBUTE((nonnull, warn_unused_result))
dberr_t
row_import_read_index_data(
/*=======================*/
@@ -2839,7 +2839,7 @@ row_import_read_indexes(
/*********************************************************************//**
Read the meta data (table columns) config file. Deserialise the contents of
dict_col_t structure, along with the column name. */
-static __attribute__((nonnull, warn_unused_result))
+static MY_ATTRIBUTE((nonnull, warn_unused_result))
dberr_t
row_import_read_columns(
/*====================*/
@@ -2964,7 +2964,7 @@ row_import_read_columns(
/*****************************************************************//**
Read the contents of the <tablespace>.cfg file.
@return DB_SUCCESS or error code. */
-static __attribute__((nonnull, warn_unused_result))
+static MY_ATTRIBUTE((nonnull, warn_unused_result))
dberr_t
row_import_read_v1(
/*===============*/
@@ -3130,7 +3130,7 @@ row_import_read_v1(
/**
Read the contents of the <tablespace>.cfg file.
@return DB_SUCCESS or error code. */
-static __attribute__((nonnull, warn_unused_result))
+static MY_ATTRIBUTE((nonnull, warn_unused_result))
dberr_t
row_import_read_meta_data(
/*======================*/
@@ -3173,7 +3173,7 @@ row_import_read_meta_data(
/**
Read the contents of the <tablename>.cfg file.
@return DB_SUCCESS or error code. */
-static __attribute__((nonnull, warn_unused_result))
+static MY_ATTRIBUTE((nonnull, warn_unused_result))
dberr_t
row_import_read_cfg(
/*================*/
diff --git a/storage/innobase/row/row0ins.cc b/storage/innobase/row/row0ins.cc
index d3670bcfddd..e3817ea4de7 100644
--- a/storage/innobase/row/row0ins.cc
+++ b/storage/innobase/row/row0ins.cc
@@ -1,6 +1,6 @@
/*****************************************************************************
-Copyright (c) 1996, 2015, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 1996, 2016, Oracle and/or its affiliates. All Rights Reserved.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -224,7 +224,7 @@ Does an insert operation by updating a delete-marked existing record
in the index. This situation can occur if the delete-marked record is
kept in the index for consistent reads.
@return DB_SUCCESS or error code */
-static __attribute__((nonnull, warn_unused_result))
+static MY_ATTRIBUTE((nonnull, warn_unused_result))
dberr_t
row_ins_sec_index_entry_by_modify(
/*==============================*/
@@ -319,7 +319,7 @@ Does an insert operation by delete unmarking and updating a delete marked
existing record in the index. This situation can occur if the delete marked
record is kept in the index for consistent reads.
@return DB_SUCCESS, DB_FAIL, or error code */
-static __attribute__((nonnull, warn_unused_result))
+static MY_ATTRIBUTE((nonnull, warn_unused_result))
dberr_t
row_ins_clust_index_entry_by_modify(
/*================================*/
@@ -427,7 +427,7 @@ row_ins_cascade_ancestor_updates_table(
Returns the number of ancestor UPDATE or DELETE nodes of a
cascaded update/delete node.
@return number of ancestors */
-static __attribute__((nonnull, warn_unused_result))
+static MY_ATTRIBUTE((nonnull, warn_unused_result))
ulint
row_ins_cascade_n_ancestors(
/*========================*/
@@ -453,7 +453,7 @@ a cascaded update.
can also be 0 if no foreign key fields changed; the returned value is
ULINT_UNDEFINED if the column type in the child table is too short to
fit the new value in the parent table: that means the update fails */
-static __attribute__((nonnull, warn_unused_result))
+static MY_ATTRIBUTE((nonnull, warn_unused_result))
ulint
row_ins_cascade_calc_update_vec(
/*============================*/
@@ -942,7 +942,7 @@ Perform referential actions or checks when a parent row is deleted or updated
and the constraint had an ON DELETE or ON UPDATE condition which was not
RESTRICT.
@return DB_SUCCESS, DB_LOCK_WAIT, or error code */
-static __attribute__((nonnull, warn_unused_result))
+static MY_ATTRIBUTE((nonnull, warn_unused_result))
dberr_t
row_ins_foreign_check_on_constraint(
/*================================*/
@@ -1784,7 +1784,7 @@ Otherwise does searches to the indexes of referenced tables and
sets shared locks which lock either the success or the failure of
a constraint.
@return DB_SUCCESS or error code */
-static __attribute__((nonnull, warn_unused_result))
+static MY_ATTRIBUTE((nonnull, warn_unused_result))
dberr_t
row_ins_check_foreign_constraints(
/*==============================*/
@@ -1925,7 +1925,7 @@ Scans a unique non-clustered index at a given index entry to determine
whether a uniqueness violation has occurred for the key value of the entry.
Set shared locks on possible duplicate records.
@return DB_SUCCESS, DB_DUPLICATE_KEY, or DB_LOCK_WAIT */
-static __attribute__((nonnull, warn_unused_result))
+static MY_ATTRIBUTE((nonnull, warn_unused_result))
dberr_t
row_ins_scan_sec_index_for_duplicate(
/*=================================*/
@@ -2067,7 +2067,7 @@ end_scan:
@retval DB_SUCCESS_LOCKED_REC when rec is an exact match of entry or
a newer version of entry (the entry should not be inserted)
@retval DB_DUPLICATE_KEY when entry is a duplicate of rec */
-static __attribute__((nonnull, warn_unused_result))
+static MY_ATTRIBUTE((nonnull, warn_unused_result))
dberr_t
row_ins_duplicate_online(
/*=====================*/
@@ -2108,7 +2108,7 @@ row_ins_duplicate_online(
@retval DB_SUCCESS_LOCKED_REC when rec is an exact match of entry or
a newer version of entry (the entry should not be inserted)
@retval DB_DUPLICATE_KEY when entry is a duplicate of rec */
-static __attribute__((nonnull, warn_unused_result))
+static MY_ATTRIBUTE((nonnull, warn_unused_result))
dberr_t
row_ins_duplicate_error_in_clust_online(
/*====================================*/
@@ -2151,7 +2151,7 @@ for a clustered index!
record
@retval DB_SUCCESS_LOCKED_REC if an exact match of the record was found
in online table rebuild (flags & (BTR_KEEP_SYS_FLAG | BTR_NO_LOCKING_FLAG)) */
-static __attribute__((nonnull, warn_unused_result))
+static MY_ATTRIBUTE((nonnull, warn_unused_result))
dberr_t
row_ins_duplicate_error_in_clust(
/*=============================*/
@@ -2576,7 +2576,7 @@ func_exit:
/***************************************************************//**
Starts a mini-transaction and checks if the index will be dropped.
@return true if the index is to be dropped */
-static __attribute__((nonnull, warn_unused_result))
+static MY_ATTRIBUTE((nonnull, warn_unused_result))
bool
row_ins_sec_mtr_start_trx_and_check_if_aborted(
/*=======================================*/
@@ -3032,7 +3032,7 @@ row_ins_index_entry(
/***********************************************************//**
Sets the values of the dtuple fields in entry from the values of appropriate
columns in row. */
-static __attribute__((nonnull))
+static MY_ATTRIBUTE((nonnull))
void
row_ins_index_entry_set_vals(
/*=========================*/
@@ -3085,7 +3085,7 @@ row_ins_index_entry_set_vals(
Inserts a single index entry to the table.
@return DB_SUCCESS if operation successfully completed, else error
code or DB_LOCK_WAIT */
-static __attribute__((nonnull, warn_unused_result))
+static MY_ATTRIBUTE((nonnull, warn_unused_result))
dberr_t
row_ins_index_entry_step(
/*=====================*/
@@ -3208,7 +3208,7 @@ row_ins_get_row_from_select(
Inserts a row to a table.
@return DB_SUCCESS if operation successfully completed, else error
code or DB_LOCK_WAIT */
-static __attribute__((nonnull, warn_unused_result))
+static MY_ATTRIBUTE((nonnull, warn_unused_result))
dberr_t
row_ins(
/*====*/
diff --git a/storage/innobase/row/row0log.cc b/storage/innobase/row/row0log.cc
index bddc81d9fb3..5e32663ad32 100644
--- a/storage/innobase/row/row0log.cc
+++ b/storage/innobase/row/row0log.cc
@@ -1,6 +1,6 @@
/*****************************************************************************
-Copyright (c) 2011, 2015, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 2011, 2016, Oracle and/or its affiliates. All Rights Reserved.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -205,7 +205,7 @@ struct row_log_t {
/** Create the file or online log if it does not exist.
@param[in,out] log online rebuild log
@return file descriptor. */
-static __attribute__((warn_unused_result))
+static MY_ATTRIBUTE((warn_unused_result))
int
row_log_tmpfile(
row_log_t* log)
@@ -221,7 +221,7 @@ row_log_tmpfile(
/** Allocate the memory for the log buffer.
@param[in,out] log_buf Buffer used for log operation
@return TRUE if success, false if not */
-static __attribute__((warn_unused_result))
+static MY_ATTRIBUTE((warn_unused_result))
bool
row_log_block_allocate(
row_log_buf_t& log_buf)
@@ -411,7 +411,7 @@ row_log_table_get_error(
/******************************************************//**
Starts logging an operation to a table that is being rebuilt.
@return pointer to log, or NULL if no logging is necessary */
-static __attribute__((nonnull, warn_unused_result))
+static MY_ATTRIBUTE((nonnull, warn_unused_result))
byte*
row_log_table_open(
/*===============*/
@@ -446,7 +446,7 @@ err_exit:
/******************************************************//**
Stops logging an operation to a table that is being rebuilt. */
-static __attribute__((nonnull))
+static MY_ATTRIBUTE((nonnull))
void
row_log_table_close_func(
/*=====================*/
@@ -820,7 +820,7 @@ row_log_table_low_redundant(
/******************************************************//**
Logs an insert or update to a table that is being rebuilt. */
-static __attribute__((nonnull(1,2,3)))
+static MY_ATTRIBUTE((nonnull(1,2,3)))
void
row_log_table_low(
/*==============*/
@@ -1320,7 +1320,7 @@ row_log_table_blob_alloc(
/******************************************************//**
Converts a log record to a table row.
@return converted row, or NULL if the conversion fails */
-static __attribute__((nonnull, warn_unused_result))
+static MY_ATTRIBUTE((nonnull, warn_unused_result))
const dtuple_t*
row_log_table_apply_convert_mrec(
/*=============================*/
@@ -1474,7 +1474,7 @@ blob_done:
/******************************************************//**
Replays an insert operation on a table that was rebuilt.
@return DB_SUCCESS or error code */
-static __attribute__((nonnull, warn_unused_result))
+static MY_ATTRIBUTE((nonnull, warn_unused_result))
dberr_t
row_log_table_apply_insert_low(
/*===========================*/
@@ -1556,7 +1556,7 @@ row_log_table_apply_insert_low(
/******************************************************//**
Replays an insert operation on a table that was rebuilt.
@return DB_SUCCESS or error code */
-static __attribute__((nonnull, warn_unused_result))
+static MY_ATTRIBUTE((nonnull, warn_unused_result))
dberr_t
row_log_table_apply_insert(
/*=======================*/
@@ -1608,7 +1608,7 @@ row_log_table_apply_insert(
/******************************************************//**
Deletes a record from a table that is being rebuilt.
@return DB_SUCCESS or error code */
-static __attribute__((nonnull(1, 2, 4, 5), warn_unused_result))
+static MY_ATTRIBUTE((nonnull(1, 2, 4, 5), warn_unused_result))
dberr_t
row_log_table_apply_delete_low(
/*===========================*/
@@ -1706,7 +1706,7 @@ flag_ok:
/******************************************************//**
Replays a delete operation on a table that was rebuilt.
@return DB_SUCCESS or error code */
-static __attribute__((nonnull(1, 3, 4, 5, 6, 7), warn_unused_result))
+static MY_ATTRIBUTE((nonnull(1, 3, 4, 5, 6, 7), warn_unused_result))
dberr_t
row_log_table_apply_delete(
/*=======================*/
@@ -1828,7 +1828,7 @@ all_done:
/******************************************************//**
Replays an update operation on a table that was rebuilt.
@return DB_SUCCESS or error code */
-static __attribute__((nonnull, warn_unused_result))
+static MY_ATTRIBUTE((nonnull, warn_unused_result))
dberr_t
row_log_table_apply_update(
/*=======================*/
@@ -2191,7 +2191,7 @@ func_exit_committed:
Applies an operation to a table that was rebuilt.
@return NULL on failure (mrec corruption) or when out of data;
pointer to next record on success */
-static __attribute__((nonnull, warn_unused_result))
+static MY_ATTRIBUTE((nonnull, warn_unused_result))
const mrec_t*
row_log_table_apply_op(
/*===================*/
@@ -2482,7 +2482,7 @@ row_log_table_apply_op(
/******************************************************//**
Applies operations to a table that was rebuilt.
@return DB_SUCCESS, or error code on failure */
-static __attribute__((nonnull, warn_unused_result))
+static MY_ATTRIBUTE((nonnull, warn_unused_result))
dberr_t
row_log_table_apply_ops(
/*====================*/
@@ -2982,7 +2982,7 @@ row_log_get_max_trx(
/******************************************************//**
Applies an operation to a secondary index that was being created. */
-static __attribute__((nonnull))
+static MY_ATTRIBUTE((nonnull))
void
row_log_apply_op_low(
/*=================*/
@@ -3209,7 +3209,7 @@ func_exit:
Applies an operation to a secondary index that was being created.
@return NULL on failure (mrec corruption) or when out of data;
pointer to next record on success */
-static __attribute__((nonnull, warn_unused_result))
+static MY_ATTRIBUTE((nonnull, warn_unused_result))
const mrec_t*
row_log_apply_op(
/*=============*/
@@ -3334,7 +3334,7 @@ corrupted:
/******************************************************//**
Applies operations to a secondary index that was being created.
@return DB_SUCCESS, or error code on failure */
-static __attribute__((nonnull))
+static MY_ATTRIBUTE((nonnull))
dberr_t
row_log_apply_ops(
/*==============*/
diff --git a/storage/innobase/row/row0merge.cc b/storage/innobase/row/row0merge.cc
index 9b5b1574280..1d36bfc5a61 100644
--- a/storage/innobase/row/row0merge.cc
+++ b/storage/innobase/row/row0merge.cc
@@ -164,7 +164,7 @@ row_merge_decrypt_buf(
#ifdef UNIV_DEBUG
/******************************************************//**
Display a merge tuple. */
-static __attribute__((nonnull))
+static MY_ATTRIBUTE((nonnull))
void
row_merge_tuple_print(
/*==================*/
@@ -199,7 +199,7 @@ row_merge_tuple_print(
/******************************************************//**
Encode an index record. */
-static __attribute__((nonnull))
+static MY_ATTRIBUTE((nonnull))
void
row_merge_buf_encode(
/*=================*/
@@ -236,7 +236,7 @@ row_merge_buf_encode(
/******************************************************//**
Allocate a sort buffer.
@return own: sort buffer */
-static __attribute__((malloc, nonnull))
+static MY_ATTRIBUTE((malloc, nonnull))
row_merge_buf_t*
row_merge_buf_create_low(
/*=====================*/
@@ -736,7 +736,7 @@ row_merge_dup_report(
/*************************************************************//**
Compare two tuples.
@return 1, 0, -1 if a is greater, equal, less, respectively, than b */
-static __attribute__((warn_unused_result))
+static MY_ATTRIBUTE((warn_unused_result))
int
row_merge_tuple_cmp(
/*================*/
@@ -815,7 +815,7 @@ UT_SORT_FUNCTION_BODY().
/**********************************************************************//**
Merge sort the tuple buffer in main memory. */
-static __attribute__((nonnull(4,5)))
+static MY_ATTRIBUTE((nonnull(4,5)))
void
row_merge_tuple_sort(
/*=================*/
@@ -1391,7 +1391,7 @@ row_merge_write_eof(
@param[in,out] tmpfd temporary file handle
@param[in] path path to create temporary file
@return file descriptor, or -1 on failure */
-static __attribute__((warn_unused_result))
+static MY_ATTRIBUTE((warn_unused_result))
int
row_merge_tmpfile_if_needed(
int* tmpfd,
@@ -1410,7 +1410,7 @@ row_merge_tmpfile_if_needed(
@param[in] nrec number of records in the file
@param[in] path path to create temporary files
@return file descriptor, or -1 on failure */
-static __attribute__((warn_unused_result))
+static MY_ATTRIBUTE((warn_unused_result))
int
row_merge_file_create_if_needed(
merge_file_t* file,
@@ -1459,7 +1459,7 @@ containing the index entries for the indexes to be built.
@param[in] crypt_data crypt data or NULL
@param[in,out] crypt_block crypted file buffer
return DB_SUCCESS or error */
-static __attribute__((nonnull(1,2,3,4,6,9,10,16), warn_unused_result))
+static MY_ATTRIBUTE((nonnull(1,2,3,4,6,9,10,16), warn_unused_result))
dberr_t
row_merge_read_clustered_index(
trx_t* trx,
@@ -2625,10 +2625,16 @@ row_merge_sort(
of file marker). Thus, it must be at least one block. */
ut_ad(file->offset > 0);
+ /* These thd_progress* calls will crash on sol10-64 when innodb_plugin
+ is used. MDEV-9356: innodb.innodb_bug53290 fails (crashes) on
+ sol10-64 in buildbot.
+ */
+#ifndef UNIV_SOLARIS
/* Progress report only for "normal" indexes. */
if (!(dup->index->type & DICT_FTS)) {
thd_progress_init(trx->mysql_thd, 1);
}
+#endif /* UNIV_SOLARIS */
sql_print_information("InnoDB: Online DDL : merge-sorting has estimated %lu runs", num_runs);
@@ -2637,9 +2643,11 @@ row_merge_sort(
/* Report progress of merge sort to MySQL for
show processlist progress field */
/* Progress report only for "normal" indexes. */
+#ifndef UNIV_SOLARIS
if (!(dup->index->type & DICT_FTS)) {
thd_progress_report(trx->mysql_thd, file->offset - num_runs, file->offset);
}
+#endif /* UNIV_SOLARIS */
error = row_merge(trx, dup, file, block, tmpfd,
&num_runs, run_offset,
@@ -2664,16 +2672,18 @@ row_merge_sort(
mem_free(run_offset);
/* Progress report only for "normal" indexes. */
+#ifndef UNIV_SOLARIS
if (!(dup->index->type & DICT_FTS)) {
thd_progress_end(trx->mysql_thd);
}
+#endif /* UNIV_SOLARIS */
DBUG_RETURN(error);
}
/*************************************************************//**
Copy externally stored columns to the data tuple. */
-static __attribute__((nonnull))
+static MY_ATTRIBUTE((nonnull))
void
row_merge_copy_blobs(
/*=================*/
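
All three thd_progress_* call sites in the row_merge_sort() hunks above receive the same UNIV_SOLARIS guard, since any of them can trigger the MDEV-9356 crash on sol10-64. An alternative shape would centralize the guard in a no-op wrapper; a hypothetical sketch, not what this merge does:

    /* Hypothetical helper: one platform guard instead of
       #ifndef at every call site. */
    static inline void
    merge_progress_report(trx_t* trx, ulint done, ulint total)
    {
    #ifndef UNIV_SOLARIS
            thd_progress_report(trx->mysql_thd, done, total);
    #else
            (void) trx; (void) done; (void) total;
    #endif /* UNIV_SOLARIS */
    }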
@@ -3755,7 +3765,7 @@ row_merge_rename_tables_dict(
/*********************************************************************//**
Create and execute a query graph for creating an index.
@return DB_SUCCESS or error code */
-static __attribute__((nonnull, warn_unused_result))
+static MY_ATTRIBUTE((nonnull, warn_unused_result))
dberr_t
row_merge_create_index_graph(
/*=========================*/
@@ -3907,7 +3917,7 @@ row_merge_drop_table(
/* There must be no open transactions on the table. */
ut_a(table->n_ref_count == 0);
- return(row_drop_table_for_mysql(table->name, trx, false, false));
+ return(row_drop_table_for_mysql(table->name, trx, false, false, false));
}
/*********************************************************************//**
diff --git a/storage/innobase/row/row0mysql.cc b/storage/innobase/row/row0mysql.cc
index 24d25a0b2c8..a5ab4f4911e 100644
--- a/storage/innobase/row/row0mysql.cc
+++ b/storage/innobase/row/row0mysql.cc
@@ -1,6 +1,6 @@
/*****************************************************************************
-Copyright (c) 2000, 2015, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 2000, 2016, Oracle and/or its affiliates. All Rights Reserved.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -1432,9 +1432,12 @@ error_exit:
}
/* The difference between Doc IDs is restricted to a
- 4 bytes integer. See fts_get_encoded_len() */
+ 4-byte integer. See fts_get_encoded_len(). Consecutive
+ doc_ids must therefore not differ by more than the
+ FTS_DOC_ID_MAX_STEP value. */
- if (doc_id - next_doc_id >= FTS_DOC_ID_MAX_STEP) {
+ if (next_doc_id > 1
+ && doc_id - next_doc_id >= FTS_DOC_ID_MAX_STEP) {
fprintf(stderr,
"InnoDB: Doc ID " UINT64PF " is too"
" big. Its difference with largest"
@@ -1700,7 +1703,8 @@ row_update_for_mysql(
trx_t* trx = prebuilt->trx;
ulint fk_depth = 0;
- ut_ad(prebuilt && trx);
+ ut_ad(prebuilt != NULL);
+ ut_ad(trx != NULL);
UT_NOT_USED(mysql_rec);
if (prebuilt->table->ibd_file_missing) {
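
Returning to the Doc ID check in the @@ -1432 hunk above: the added next_doc_id > 1 term matters because Doc ID gaps are delta-encoded in at most four bytes (see fts_get_encoded_len()), and the delta is only meaningful once a predecessor Doc ID exists; without the term, a large first user-supplied Doc ID would be rejected spuriously. The guard in isolation, as a sketch (the function name is illustrative):

    /* Sketch: accept doc_id unless a predecessor exists and the
       gap would overflow the 4-byte delta encoding. */
    static inline bool
    fts_doc_id_step_ok(unsigned long long doc_id,
                       unsigned long long next_doc_id,
                       unsigned long long max_step)
    {
            return (next_doc_id <= 1
                    || doc_id - next_doc_id < max_step);
    }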
@@ -1917,7 +1921,8 @@ row_unlock_for_mysql(
btr_pcur_t* clust_pcur = &prebuilt->clust_pcur;
trx_t* trx = prebuilt->trx;
- ut_ad(prebuilt && trx);
+ ut_ad(prebuilt != NULL);
+ ut_ad(trx != NULL);
if (UNIV_UNLIKELY
(!srv_locks_unsafe_for_binlog
@@ -2405,7 +2410,7 @@ err_exit:
dict_table_close(table, TRUE, FALSE);
- row_drop_table_for_mysql(table->name, trx, FALSE);
+ row_drop_table_for_mysql(table->name, trx, FALSE, TRUE);
if (commit) {
trx_commit_for_mysql(trx);
@@ -2565,7 +2570,7 @@ error_handling:
trx_rollback_to_savepoint(trx, NULL);
- row_drop_table_for_mysql(table_name, trx, FALSE);
+ row_drop_table_for_mysql(table_name, trx, FALSE, TRUE);
trx_commit_for_mysql(trx);
@@ -2642,7 +2647,7 @@ row_table_add_foreign_constraints(
trx_rollback_to_savepoint(trx, NULL);
- row_drop_table_for_mysql(name, trx, FALSE);
+ row_drop_table_for_mysql(name, trx, FALSE, TRUE);
trx_commit_for_mysql(trx);
@@ -2683,7 +2688,7 @@ row_drop_table_for_mysql_in_background(
/* Try to drop the table in InnoDB */
- error = row_drop_table_for_mysql(name, trx, FALSE);
+ error = row_drop_table_for_mysql(name, trx, FALSE, FALSE);
/* Flush the log to reduce probability that the .frm files and
the InnoDB data dictionary get out-of-sync if the user runs
@@ -3830,6 +3835,9 @@ row_drop_table_for_mysql(
const char* name, /*!< in: table name */
trx_t* trx, /*!< in: transaction handle */
bool drop_db,/*!< in: true=dropping whole database */
+ ibool create_failed,/*!< in: TRUE=CREATE TABLE failed,
+ e.g. due to a foreign key
+ column type mismatch */
bool nonatomic)
/*!< in: whether it is permitted
to release and reacquire dict_operation_lock */
@@ -4035,7 +4043,12 @@ row_drop_table_for_mysql(
name,
foreign->foreign_table_name_lookup);
- if (foreign->foreign_table != table && !ref_ok) {
+ /* We should allow dropping a referenced table if creating
+ that referenced table has failed for some reason. For example,
+ the referenced table may have been created, but the column
+ types being referenced do not match. */
+ if (foreign->foreign_table != table &&
+ !create_failed && !ref_ok) {
FILE* ef = dict_foreign_err_file;
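
With the added create_failed parameter, every row_drop_table_for_mysql() caller in this file gains one argument, as the surrounding hunks show. On the CREATE TABLE error paths the flag is TRUE, so a half-created table can be dropped even while another table still references it; a representative call, following the hunks above:

    /* CREATE TABLE failed (e.g. an FK column type mismatch):
       drop the partially created table anyway. */
    err = row_drop_table_for_mysql(table_name, trx,
                                   FALSE,  /* drop_db */
                                   TRUE);  /* create_failed */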
@@ -4578,7 +4591,7 @@ row_mysql_drop_temp_tables(void)
table = dict_load_table(table_name, TRUE, DICT_ERR_IGNORE_NONE);
if (table) {
- row_drop_table_for_mysql(table_name, trx, FALSE);
+ row_drop_table_for_mysql(table_name, trx, FALSE, FALSE);
trx_commit_for_mysql(trx);
}
@@ -4598,7 +4611,7 @@ row_mysql_drop_temp_tables(void)
Drop all foreign keys in a database, see Bug#18942.
Called at the end of row_drop_database_for_mysql().
@return error code or DB_SUCCESS */
-static __attribute__((nonnull, warn_unused_result))
+static MY_ATTRIBUTE((nonnull, warn_unused_result))
dberr_t
drop_all_foreign_keys_in_db(
/*========================*/
@@ -4747,7 +4760,7 @@ loop:
goto loop;
}
- err = row_drop_table_for_mysql(table_name, trx, TRUE);
+ err = row_drop_table_for_mysql(table_name, trx, TRUE, FALSE);
trx_commit_for_mysql(trx);
if (err != DB_SUCCESS) {
@@ -4790,7 +4803,7 @@ loop:
Checks if a table name contains the string "/#sql" which denotes temporary
tables in MySQL.
@return true if temporary table */
-UNIV_INTERN __attribute__((warn_unused_result))
+UNIV_INTERN MY_ATTRIBUTE((warn_unused_result))
bool
row_is_mysql_tmp_table_name(
/*========================*/
@@ -4804,7 +4817,7 @@ row_is_mysql_tmp_table_name(
/****************************************************************//**
Delete a single constraint.
@return error code or DB_SUCCESS */
-static __attribute__((nonnull, warn_unused_result))
+static MY_ATTRIBUTE((nonnull, warn_unused_result))
dberr_t
row_delete_constraint_low(
/*======================*/
@@ -4827,7 +4840,7 @@ row_delete_constraint_low(
/****************************************************************//**
Delete a single constraint.
@return error code or DB_SUCCESS */
-static __attribute__((nonnull, warn_unused_result))
+static MY_ATTRIBUTE((nonnull, warn_unused_result))
dberr_t
row_delete_constraint(
/*==================*/
diff --git a/storage/innobase/row/row0purge.cc b/storage/innobase/row/row0purge.cc
index b26ba971a95..bc2e0b0e1cb 100644
--- a/storage/innobase/row/row0purge.cc
+++ b/storage/innobase/row/row0purge.cc
@@ -1,6 +1,6 @@
/*****************************************************************************
-Copyright (c) 1997, 2015, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 1997, 2016, Oracle and/or its affiliates. All Rights Reserved.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -69,7 +69,8 @@ row_purge_node_create(
{
purge_node_t* node;
- ut_ad(parent && heap);
+ ut_ad(parent != NULL);
+ ut_ad(heap != NULL);
node = static_cast<purge_node_t*>(
mem_heap_zalloc(heap, sizeof(*node)));
@@ -120,7 +121,7 @@ row_purge_reposition_pcur(
Removes a delete marked clustered index record if possible.
@retval true if the row was not found, or it was successfully removed
@retval false if the row was modified after the delete marking */
-static __attribute__((nonnull, warn_unused_result))
+static MY_ATTRIBUTE((nonnull, warn_unused_result))
bool
row_purge_remove_clust_if_poss_low(
/*===============================*/
@@ -202,7 +203,7 @@ marking.
@retval true if the row was not found, or it was successfully removed
@retval false the purge needs to be suspended because of running out
of file space. */
-static __attribute__((nonnull, warn_unused_result))
+static MY_ATTRIBUTE((nonnull, warn_unused_result))
bool
row_purge_remove_clust_if_poss(
/*===========================*/
@@ -274,7 +275,7 @@ row_purge_poss_sec(
Removes a secondary index entry if possible, by modifying the
index tree. Does not try to buffer the delete.
@return TRUE if success or if not found */
-static __attribute__((nonnull, warn_unused_result))
+static MY_ATTRIBUTE((nonnull, warn_unused_result))
ibool
row_purge_remove_sec_if_poss_tree(
/*==============================*/
@@ -396,7 +397,7 @@ Removes a secondary index entry without modifying the index tree,
if possible.
@retval true if success or if not found
@retval false if row_purge_remove_sec_if_poss_tree() should be invoked */
-static __attribute__((nonnull, warn_unused_result))
+static MY_ATTRIBUTE((nonnull, warn_unused_result))
bool
row_purge_remove_sec_if_poss_leaf(
/*==============================*/
@@ -507,7 +508,7 @@ row_purge_remove_sec_if_poss_leaf(
/***********************************************************//**
Removes a secondary index entry if possible. */
-UNIV_INLINE __attribute__((nonnull(1,2)))
+UNIV_INLINE MY_ATTRIBUTE((nonnull(1,2)))
void
row_purge_remove_sec_if_poss(
/*=========================*/
@@ -554,7 +555,7 @@ Purges a delete marking of a record.
@retval true if the row was not found, or it was successfully removed
@retval false the purge needs to be suspended because of
running out of file space */
-static __attribute__((nonnull, warn_unused_result))
+static MY_ATTRIBUTE((nonnull, warn_unused_result))
bool
row_purge_del_mark(
/*===============*/
@@ -745,7 +746,8 @@ row_purge_parse_undo_rec(
ulint info_bits;
ulint type;
- ut_ad(node && thr);
+ ut_ad(node != NULL);
+ ut_ad(thr != NULL);
ptr = trx_undo_rec_get_pars(
undo_rec, &type, &node->cmpl_info,
@@ -830,7 +832,7 @@ err_exit:
/***********************************************************//**
Purges the parsed record.
@return true if purged, false if skipped */
-static __attribute__((nonnull, warn_unused_result))
+static MY_ATTRIBUTE((nonnull, warn_unused_result))
bool
row_purge_record_func(
/*==================*/
@@ -895,7 +897,7 @@ row_purge_record_func(
Fetches an undo log record and does the purge for the recorded operation.
If none left, or the current purge completed, returns the control to the
parent node, which is always a query thread node. */
-static __attribute__((nonnull))
+static MY_ATTRIBUTE((nonnull))
void
row_purge(
/*======*/
diff --git a/storage/innobase/row/row0quiesce.cc b/storage/innobase/row/row0quiesce.cc
index ecd6f47947b..583fbe60fb3 100644
--- a/storage/innobase/row/row0quiesce.cc
+++ b/storage/innobase/row/row0quiesce.cc
@@ -1,6 +1,6 @@
/*****************************************************************************
-Copyright (c) 2012, 2014, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 2012, 2016, Oracle and/or its affiliates. All Rights Reserved.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -37,7 +37,7 @@ Created 2012-02-08 by Sunny Bains.
/*********************************************************************//**
Write the meta data (index user fields) config file.
@return DB_SUCCESS or error code. */
-static __attribute__((nonnull, warn_unused_result))
+static MY_ATTRIBUTE((nonnull, warn_unused_result))
dberr_t
row_quiesce_write_index_fields(
/*===========================*/
@@ -97,7 +97,7 @@ row_quiesce_write_index_fields(
/*********************************************************************//**
Write the meta data config file index information.
@return DB_SUCCESS or error code. */
-static __attribute__((nonnull, warn_unused_result))
+static MY_ATTRIBUTE((nonnull, warn_unused_result))
dberr_t
row_quiesce_write_indexes(
/*======================*/
@@ -210,7 +210,7 @@ Write the meta data (table columns) config file. Serialise the contents of
dict_col_t structure, along with the column name. All fields are serialized
as ib_uint32_t.
@return DB_SUCCESS or error code. */
-static __attribute__((nonnull, warn_unused_result))
+static MY_ATTRIBUTE((nonnull, warn_unused_result))
dberr_t
row_quiesce_write_table(
/*====================*/
@@ -293,7 +293,7 @@ row_quiesce_write_table(
/*********************************************************************//**
Write the meta data config file header.
@return DB_SUCCESS or error code. */
-static __attribute__((nonnull, warn_unused_result))
+static MY_ATTRIBUTE((nonnull, warn_unused_result))
dberr_t
row_quiesce_write_header(
/*=====================*/
@@ -415,7 +415,7 @@ row_quiesce_write_header(
/*********************************************************************//**
Write the table meta data after quiesce.
@return DB_SUCCESS or error code */
-static __attribute__((nonnull, warn_unused_result))
+static MY_ATTRIBUTE((nonnull, warn_unused_result))
dberr_t
row_quiesce_write_cfg(
/*==================*/
@@ -530,10 +530,8 @@ row_quiesce_table_start(
trx_purge_stop();
}
- ut_a(table->id > 0);
-
for (ulint count = 0;
- ibuf_contract_in_background(table->id, TRUE) != 0
+ ibuf_merge_space(table->space) != 0
&& !trx_is_interrupted(trx);
++count) {
if (!(count % 20)) {
diff --git a/storage/innobase/row/row0row.cc b/storage/innobase/row/row0row.cc
index be786f954fb..96d25e15777 100644
--- a/storage/innobase/row/row0row.cc
+++ b/storage/innobase/row/row0row.cc
@@ -1,6 +1,6 @@
/*****************************************************************************
-Copyright (c) 1996, 2012, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 1996, 2016, Oracle and/or its affiliates. All Rights Reserved.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -240,7 +240,9 @@ row_build(
ulint offsets_[REC_OFFS_NORMAL_SIZE];
rec_offs_init(offsets_);
- ut_ad(index && rec && heap);
+ ut_ad(index != NULL);
+ ut_ad(rec != NULL);
+ ut_ad(heap != NULL);
ut_ad(dict_index_is_clust(index));
ut_ad(!mutex_own(&trx_sys->mutex));
ut_ad(!col_map || col_table);
@@ -409,7 +411,9 @@ row_rec_to_index_entry_low(
ulint len;
ulint rec_len;
- ut_ad(rec && heap && index);
+ ut_ad(rec != NULL);
+ ut_ad(heap != NULL);
+ ut_ad(index != NULL);
/* Because this function may be invoked by row0merge.cc
on a record whose header is in different format, the check
rec_offs_validate(rec, index, offsets) must be avoided here. */
@@ -464,7 +468,9 @@ row_rec_to_index_entry(
byte* buf;
const rec_t* copy_rec;
- ut_ad(rec && heap && index);
+ ut_ad(rec != NULL);
+ ut_ad(heap != NULL);
+ ut_ad(index != NULL);
ut_ad(rec_offs_validate(rec, index, offsets));
/* Take a copy of rec to heap */
@@ -523,7 +529,9 @@ row_build_row_ref(
ulint* offsets = offsets_;
rec_offs_init(offsets_);
- ut_ad(index && rec && heap);
+ ut_ad(index != NULL);
+ ut_ad(rec != NULL);
+ ut_ad(heap != NULL);
ut_ad(!dict_index_is_clust(index));
offsets = rec_get_offsets(rec, index, offsets,
diff --git a/storage/innobase/row/row0sel.cc b/storage/innobase/row/row0sel.cc
index ea2f93dd788..dab1bc58db5 100644
--- a/storage/innobase/row/row0sel.cc
+++ b/storage/innobase/row/row0sel.cc
@@ -1,6 +1,6 @@
/*****************************************************************************
-Copyright (c) 1997, 2015, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 1997, 2016, Oracle and/or its affiliates. All Rights Reserved.
Copyright (c) 2008, Google Inc.
Copyright (c) 2015, MariaDB Corporation.
@@ -678,7 +678,7 @@ sel_enqueue_prefetched_row(
/*********************************************************************//**
Builds a previous version of a clustered index record for a consistent read
@return DB_SUCCESS or error code */
-static __attribute__((nonnull, warn_unused_result))
+static MY_ATTRIBUTE((nonnull, warn_unused_result))
dberr_t
row_sel_build_prev_vers(
/*====================*/
@@ -713,7 +713,7 @@ row_sel_build_prev_vers(
/*********************************************************************//**
Builds the last committed version of a clustered index record for a
semi-consistent read. */
-static __attribute__((nonnull))
+static MY_ATTRIBUTE((nonnull))
void
row_sel_build_committed_vers_for_mysql(
/*===================================*/
@@ -811,7 +811,7 @@ row_sel_test_other_conds(
Retrieves the clustered index record corresponding to a record in a
non-clustered index. Does the necessary locking.
@return DB_SUCCESS or error code */
-static __attribute__((nonnull, warn_unused_result))
+static MY_ATTRIBUTE((nonnull, warn_unused_result))
dberr_t
row_sel_get_clust_rec(
/*==================*/
@@ -1314,7 +1314,7 @@ func_exit:
/*********************************************************************//**
Performs a select step.
@return DB_SUCCESS or error code */
-static __attribute__((nonnull, warn_unused_result))
+static MY_ATTRIBUTE((nonnull, warn_unused_result))
dberr_t
row_sel(
/*====*/
@@ -2561,7 +2561,7 @@ row_sel_store_row_id_to_prebuilt(
/**************************************************************//**
Stores a non-SQL-NULL field in the MySQL format. The counterpart of this
function is row_mysql_store_col_in_innobase_format() in row0mysql.cc. */
-static __attribute__((nonnull))
+static MY_ATTRIBUTE((nonnull))
void
row_sel_field_store_in_mysql_format_func(
/*=====================================*/
@@ -2750,7 +2750,7 @@ row_sel_field_store_in_mysql_format_func(
#endif /* UNIV_DEBUG */
/**************************************************************//**
Convert a field in the Innobase format to a field in the MySQL format. */
-static __attribute__((warn_unused_result))
+static MY_ATTRIBUTE((warn_unused_result))
ibool
row_sel_store_mysql_field_func(
/*===========================*/
@@ -2900,7 +2900,7 @@ Note that the template in prebuilt may advise us to copy only a few
columns to mysql_rec, other columns are left blank. All columns may not
be needed in the query.
@return TRUE on success, FALSE if not all columns could be retrieved */
-static __attribute__((warn_unused_result))
+static MY_ATTRIBUTE((warn_unused_result))
ibool
row_sel_store_mysql_rec(
/*====================*/
@@ -2967,7 +2967,7 @@ row_sel_store_mysql_rec(
/*********************************************************************//**
Builds a previous version of a clustered index record for a consistent read
@return DB_SUCCESS or error code */
-static __attribute__((nonnull, warn_unused_result))
+static MY_ATTRIBUTE((nonnull, warn_unused_result))
dberr_t
row_sel_build_prev_vers_for_mysql(
/*==============================*/
@@ -3004,7 +3004,7 @@ Retrieves the clustered index record corresponding to a record in a
non-clustered index. Does the necessary locking. Used in the MySQL
interface.
@return DB_SUCCESS, DB_SUCCESS_LOCKED_REC, or error code */
-static __attribute__((nonnull, warn_unused_result))
+static MY_ATTRIBUTE((nonnull, warn_unused_result))
dberr_t
row_sel_get_clust_rec_for_mysql(
/*============================*/
diff --git a/storage/innobase/row/row0uins.cc b/storage/innobase/row/row0uins.cc
index 849bf096492..651042fb820 100644
--- a/storage/innobase/row/row0uins.cc
+++ b/storage/innobase/row/row0uins.cc
@@ -1,6 +1,6 @@
/*****************************************************************************
-Copyright (c) 1997, 2014, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 1997, 2016, Oracle and/or its affiliates. All Rights Reserved.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -61,7 +61,7 @@ introduced where a call to log_free_check() is bypassed. */
Removes a clustered index record. The pcur in node was positioned on the
record, now it is detached.
@return DB_SUCCESS or DB_OUT_OF_FILE_SPACE */
-static __attribute__((nonnull, warn_unused_result))
+static MY_ATTRIBUTE((nonnull, warn_unused_result))
dberr_t
row_undo_ins_remove_clust_rec(
/*==========================*/
@@ -176,7 +176,7 @@ func_exit:
/***************************************************************//**
Removes a secondary index entry if found.
@return DB_SUCCESS, DB_FAIL, or DB_OUT_OF_FILE_SPACE */
-static __attribute__((nonnull, warn_unused_result))
+static MY_ATTRIBUTE((nonnull, warn_unused_result))
dberr_t
row_undo_ins_remove_sec_low(
/*========================*/
@@ -251,7 +251,7 @@ func_exit_no_pcur:
Removes a secondary index entry from the index if found. Tries first
optimistic, then pessimistic descent down the tree.
@return DB_SUCCESS or DB_OUT_OF_FILE_SPACE */
-static __attribute__((nonnull, warn_unused_result))
+static MY_ATTRIBUTE((nonnull, warn_unused_result))
dberr_t
row_undo_ins_remove_sec(
/*====================*/
@@ -350,7 +350,7 @@ close_table:
/***************************************************************//**
Removes secondary index records.
@return DB_SUCCESS or DB_OUT_OF_FILE_SPACE */
-static __attribute__((nonnull, warn_unused_result))
+static MY_ATTRIBUTE((nonnull, warn_unused_result))
dberr_t
row_undo_ins_remove_sec_rec(
/*========================*/
diff --git a/storage/innobase/row/row0umod.cc b/storage/innobase/row/row0umod.cc
index 7649add4b33..a64e41786d6 100644
--- a/storage/innobase/row/row0umod.cc
+++ b/storage/innobase/row/row0umod.cc
@@ -1,6 +1,6 @@
/*****************************************************************************
-Copyright (c) 1997, 2014, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 1997, 2016, Oracle and/or its affiliates. All Rights Reserved.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -72,7 +72,7 @@ introduced where a call to log_free_check() is bypassed. */
/***********************************************************//**
Undoes a modify in a clustered index record.
@return DB_SUCCESS, DB_FAIL, or error code: we may run out of file space */
-static __attribute__((nonnull, warn_unused_result))
+static MY_ATTRIBUTE((nonnull, warn_unused_result))
dberr_t
row_undo_mod_clust_low(
/*===================*/
@@ -154,7 +154,7 @@ This is attempted when the record was inserted by updating a
delete-marked record and there no longer exist transactions
that would see the delete-marked record.
@return DB_SUCCESS, DB_FAIL, or error code: we may run out of file space */
-static __attribute__((nonnull, warn_unused_result))
+static MY_ATTRIBUTE((nonnull, warn_unused_result))
dberr_t
row_undo_mod_remove_clust_low(
/*==========================*/
@@ -273,7 +273,7 @@ row_undo_mod_remove_clust_low(
Undoes a modify in a clustered index record. Sets also the node state for the
next round of undo.
@return DB_SUCCESS or error code: we may run out of file space */
-static __attribute__((nonnull, warn_unused_result))
+static MY_ATTRIBUTE((nonnull, warn_unused_result))
dberr_t
row_undo_mod_clust(
/*===============*/
@@ -417,7 +417,7 @@ row_undo_mod_clust(
/***********************************************************//**
Delete marks or removes a secondary index entry if found.
@return DB_SUCCESS, DB_FAIL, or DB_OUT_OF_FILE_SPACE */
-static __attribute__((nonnull, warn_unused_result))
+static MY_ATTRIBUTE((nonnull, warn_unused_result))
dberr_t
row_undo_mod_del_mark_or_remove_sec_low(
/*====================================*/
@@ -553,7 +553,7 @@ not cause problems because in row0sel.cc, in queries we always retrieve the
clustered index record or an earlier version of it, if the secondary index
record through which we do the search is delete-marked.
@return DB_SUCCESS or DB_OUT_OF_FILE_SPACE */
-static __attribute__((nonnull, warn_unused_result))
+static MY_ATTRIBUTE((nonnull, warn_unused_result))
dberr_t
row_undo_mod_del_mark_or_remove_sec(
/*================================*/
@@ -586,7 +586,7 @@ fields but alphabetically they stayed the same, e.g., 'abc' -> 'aBc'.
@retval DB_OUT_OF_FILE_SPACE when running out of tablespace
@retval DB_DUPLICATE_KEY if the value was missing
and an insert would lead to a duplicate */
-static __attribute__((nonnull, warn_unused_result))
+static MY_ATTRIBUTE((nonnull, warn_unused_result))
dberr_t
row_undo_mod_del_unmark_sec_and_undo_update(
/*========================================*/
@@ -782,7 +782,7 @@ func_exit_no_pcur:
/***********************************************************//**
Flags a secondary index corrupted. */
-static __attribute__((nonnull))
+static MY_ATTRIBUTE((nonnull))
void
row_undo_mod_sec_flag_corrupted(
/*============================*/
@@ -814,7 +814,7 @@ row_undo_mod_sec_flag_corrupted(
/***********************************************************//**
Undoes a modify in secondary indexes when undo record type is UPD_DEL.
@return DB_SUCCESS or DB_OUT_OF_FILE_SPACE */
-static __attribute__((nonnull, warn_unused_result))
+static MY_ATTRIBUTE((nonnull, warn_unused_result))
dberr_t
row_undo_mod_upd_del_sec(
/*=====================*/
@@ -881,7 +881,7 @@ row_undo_mod_upd_del_sec(
/***********************************************************//**
Undoes a modify in secondary indexes when undo record type is DEL_MARK.
@return DB_SUCCESS or DB_OUT_OF_FILE_SPACE */
-static __attribute__((nonnull, warn_unused_result))
+static MY_ATTRIBUTE((nonnull, warn_unused_result))
dberr_t
row_undo_mod_del_mark_sec(
/*======================*/
@@ -949,7 +949,7 @@ row_undo_mod_del_mark_sec(
/***********************************************************//**
Undoes a modify in secondary indexes when undo record type is UPD_EXIST.
@return DB_SUCCESS or DB_OUT_OF_FILE_SPACE */
-static __attribute__((nonnull, warn_unused_result))
+static MY_ATTRIBUTE((nonnull, warn_unused_result))
dberr_t
row_undo_mod_upd_exist_sec(
/*=======================*/
@@ -1065,7 +1065,7 @@ row_undo_mod_upd_exist_sec(
/***********************************************************//**
Parses the row reference and other info in a modify undo log record. */
-static __attribute__((nonnull))
+static MY_ATTRIBUTE((nonnull))
void
row_undo_mod_parse_undo_rec(
/*========================*/
@@ -1142,7 +1142,8 @@ row_undo_mod(
dberr_t err;
ibool dict_locked;
- ut_ad(node && thr);
+ ut_ad(node != NULL);
+ ut_ad(thr != NULL);
ut_ad(node->state == UNDO_NODE_MODIFY);
dict_locked = thr_get_trx(thr)->dict_operation_lock_mode == RW_X_LATCH;
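Splitting the compound ut_ad(node && thr) into two assertions is a small diagnostics improvement: when a debug build aborts, the printed expression now names the pointer that was NULL instead of the opaque conjunction. The same idea, sketched with standard assert(3) as a stand-in for InnoDB's debug-only ut_ad:

    #include <assert.h>
    #include <stddef.h>

    static void check(const void *node, const void *thr)
    {
      /* Before: "Assertion `node && thr' failed" - but which one was NULL? */
      /* assert(node && thr); */

      /* After: the failing expression identifies the culprit. */
      assert(node != NULL);
      assert(thr != NULL);
    }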
diff --git a/storage/innobase/row/row0undo.cc b/storage/innobase/row/row0undo.cc
index 9977a1e8f04..149dc671930 100644
--- a/storage/innobase/row/row0undo.cc
+++ b/storage/innobase/row/row0undo.cc
@@ -1,6 +1,6 @@
/*****************************************************************************
-Copyright (c) 1997, 2012, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 1997, 2016, Oracle and/or its affiliates. All Rights Reserved.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -245,7 +245,7 @@ Fetches an undo log record and does the undo for the recorded operation.
If none left, or a partial rollback completed, returns control to the
parent node, which is always a query thread node.
@return DB_SUCCESS if operation successfully completed, else error code */
-static __attribute__((nonnull, warn_unused_result))
+static MY_ATTRIBUTE((nonnull, warn_unused_result))
dberr_t
row_undo(
/*=====*/
@@ -257,7 +257,8 @@ row_undo(
roll_ptr_t roll_ptr;
ibool locked_data_dict;
- ut_ad(node && thr);
+ ut_ad(node != NULL);
+ ut_ad(thr != NULL);
trx = node->trx;
diff --git a/storage/innobase/row/row0upd.cc b/storage/innobase/row/row0upd.cc
index 0ea4865d15f..85cd0e6d172 100644
--- a/storage/innobase/row/row0upd.cc
+++ b/storage/innobase/row/row0upd.cc
@@ -1,6 +1,6 @@
/*****************************************************************************
-Copyright (c) 1996, 2014, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 1996, 2016, Oracle and/or its affiliates. All Rights Reserved.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -219,7 +219,7 @@ NOTE that this function will temporarily commit mtr and lose the
pcur position!
@return DB_SUCCESS or an error code */
-static __attribute__((nonnull, warn_unused_result))
+static MY_ATTRIBUTE((nonnull, warn_unused_result))
dberr_t
row_upd_check_references_constraints(
/*=================================*/
@@ -775,7 +775,7 @@ row_upd_write_sys_vals_to_log(
roll_ptr_t roll_ptr,/*!< in: roll ptr of the undo log record */
byte* log_ptr,/*!< pointer to a buffer of size > 20 opened
in mlog */
- mtr_t* mtr __attribute__((unused))) /*!< in: mtr */
+ mtr_t* mtr MY_ATTRIBUTE((unused))) /*!< in: mtr */
{
ut_ad(dict_index_is_clust(index));
ut_ad(mtr);
@@ -1810,7 +1810,7 @@ row_upd_store_row(
Updates a secondary index entry of a row.
@return DB_SUCCESS if operation successfully completed, else error
code or DB_LOCK_WAIT */
-static __attribute__((nonnull, warn_unused_result))
+static MY_ATTRIBUTE((nonnull, warn_unused_result))
dberr_t
row_upd_sec_index_entry(
/*====================*/
@@ -2049,7 +2049,7 @@ Updates the secondary index record if it is changed in the row update or
deletes it if this is a delete.
@return DB_SUCCESS if operation successfully completed, else error
code or DB_LOCK_WAIT */
-static __attribute__((nonnull, warn_unused_result))
+static MY_ATTRIBUTE((nonnull, warn_unused_result))
dberr_t
row_upd_sec_step(
/*=============*/
@@ -2082,7 +2082,7 @@ updated. We must mark them as inherited in entry, so that they are not
freed in a rollback. A limited version of this function used to be
called btr_cur_mark_dtuple_inherited_extern().
@return TRUE if any columns were inherited */
-static __attribute__((warn_unused_result))
+static MY_ATTRIBUTE((warn_unused_result))
ibool
row_upd_clust_rec_by_insert_inherit_func(
/*=====================================*/
@@ -2161,7 +2161,7 @@ fields of the clustered index record change. This should be quite rare in
database applications.
@return DB_SUCCESS if operation successfully completed, else error
code or DB_LOCK_WAIT */
-static __attribute__((nonnull, warn_unused_result))
+static MY_ATTRIBUTE((nonnull, warn_unused_result))
dberr_t
row_upd_clust_rec_by_insert(
/*========================*/
@@ -2320,7 +2320,7 @@ Updates a clustered index record of a row when the ordering fields do
not change.
@return DB_SUCCESS if operation successfully completed, else error
code or DB_LOCK_WAIT */
-static __attribute__((nonnull, warn_unused_result))
+static MY_ATTRIBUTE((nonnull, warn_unused_result))
dberr_t
row_upd_clust_rec(
/*==============*/
@@ -2479,7 +2479,7 @@ func_exit:
/***********************************************************//**
Delete marks a clustered index record.
@return DB_SUCCESS if operation successfully completed, else error code */
-static __attribute__((nonnull, warn_unused_result))
+static MY_ATTRIBUTE((nonnull, warn_unused_result))
dberr_t
row_upd_del_mark_clust_rec(
/*=======================*/
@@ -2572,7 +2572,7 @@ row_upd_del_mark_clust_rec(
Updates the clustered index record.
@return DB_SUCCESS if operation successfully completed, DB_LOCK_WAIT
in case of a lock wait, else error code */
-static __attribute__((nonnull, warn_unused_result))
+static MY_ATTRIBUTE((nonnull, warn_unused_result))
dberr_t
row_upd_clust_step(
/*===============*/
@@ -2780,7 +2780,7 @@ to this node, we assume that we have a persistent cursor which was on a
record, and the position of the cursor is stored in the cursor.
@return DB_SUCCESS if operation successfully completed, else error
code or DB_LOCK_WAIT */
-static __attribute__((nonnull, warn_unused_result))
+static MY_ATTRIBUTE((nonnull, warn_unused_result))
dberr_t
row_upd(
/*====*/
@@ -2789,7 +2789,8 @@ row_upd(
{
dberr_t err = DB_SUCCESS;
- ut_ad(node && thr);
+ ut_ad(node != NULL);
+ ut_ad(thr != NULL);
if (UNIV_LIKELY(node->in_mysql_interface)) {
diff --git a/storage/innobase/srv/srv0srv.cc b/storage/innobase/srv/srv0srv.cc
index c28f889dfcc..e17b27b44fc 100644
--- a/storage/innobase/srv/srv0srv.cc
+++ b/storage/innobase/srv/srv0srv.cc
@@ -1686,7 +1686,7 @@ extern "C" UNIV_INTERN
os_thread_ret_t
DECLARE_THREAD(srv_monitor_thread)(
/*===============================*/
- void* arg __attribute__((unused)))
+ void* arg MY_ATTRIBUTE((unused)))
/*!< in: a dummy parameter required by
os_thread_create */
{
@@ -1863,7 +1863,7 @@ extern "C" UNIV_INTERN
os_thread_ret_t
DECLARE_THREAD(srv_error_monitor_thread)(
/*=====================================*/
- void* arg __attribute__((unused)))
+ void* arg MY_ATTRIBUTE((unused)))
/*!< in: a dummy parameter required by
os_thread_create */
{
@@ -2309,7 +2309,7 @@ srv_master_do_active_tasks(void)
/* Do an ibuf merge */
srv_main_thread_op_info = "doing insert buffer merge";
counter_time = ut_time_us(NULL);
- ibuf_contract_in_background(0, FALSE);
+ ibuf_merge_in_background(false);
MONITOR_INC_TIME_IN_MICRO_SECS(
MONITOR_SRV_IBUF_MERGE_MICROSECOND, counter_time);
@@ -2404,7 +2404,7 @@ srv_master_do_idle_tasks(void)
/* Do an ibuf merge */
counter_time = ut_time_us(NULL);
srv_main_thread_op_info = "doing insert buffer merge";
- ibuf_contract_in_background(0, TRUE);
+ ibuf_merge_in_background(true);
MONITOR_INC_TIME_IN_MICRO_SECS(
MONITOR_SRV_IBUF_MERGE_MICROSECOND, counter_time);
@@ -2482,7 +2482,7 @@ srv_master_do_shutdown_tasks(
/* Do an ibuf merge */
srv_main_thread_op_info = "doing insert buffer merge";
- n_bytes_merged = ibuf_contract_in_background(0, TRUE);
+ n_bytes_merged = ibuf_merge_in_background(true);
/* Flush logs if needed */
srv_sync_log_buffer_in_background();
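The three call sites above move from ibuf_contract_in_background(0, ...) to ibuf_merge_in_background(...): the always-zero first argument is dropped and the ibool flag becomes a plain bool. Judging from the call sites, the flag distinguishes a bounded merge while the server is busy (false) from an unrestricted one during idle time and shutdown (true). A sketch of the assumed interface; the authoritative declaration is in include/ibuf0ibuf.h:

    /* Assumed shape of the new call; returns the number of bytes merged. */
    ulint ibuf_merge_in_background(bool full);

    n_bytes_merged = ibuf_merge_in_background(true); /* idle/shutdown: merge freely */
    (void) ibuf_merge_in_background(false);          /* active server: bounded work */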
@@ -2522,7 +2522,7 @@ extern "C" UNIV_INTERN
os_thread_ret_t
DECLARE_THREAD(srv_master_thread)(
/*==============================*/
- void* arg __attribute__((unused)))
+ void* arg MY_ATTRIBUTE((unused)))
/*!< in: a dummy parameter required by
os_thread_create */
{
@@ -2666,7 +2666,7 @@ extern "C" UNIV_INTERN
os_thread_ret_t
DECLARE_THREAD(srv_worker_thread)(
/*==============================*/
- void* arg __attribute__((unused))) /*!< in: a dummy parameter
+ void* arg MY_ATTRIBUTE((unused))) /*!< in: a dummy parameter
required by os_thread_create */
{
srv_slot_t* slot;
@@ -2924,7 +2924,7 @@ extern "C" UNIV_INTERN
os_thread_ret_t
DECLARE_THREAD(srv_purge_coordinator_thread)(
/*=========================================*/
- void* arg __attribute__((unused))) /*!< in: a dummy parameter
+ void* arg MY_ATTRIBUTE((unused))) /*!< in: a dummy parameter
required by os_thread_create */
{
srv_slot_t* slot;
diff --git a/storage/innobase/srv/srv0start.cc b/storage/innobase/srv/srv0start.cc
index 4a457ae6f00..0b81ad86f1c 100644
--- a/storage/innobase/srv/srv0start.cc
+++ b/storage/innobase/srv/srv0start.cc
@@ -1,6 +1,6 @@
/*****************************************************************************
-Copyright (c) 1996, 2015, Oracle and/or its affiliates. All rights reserved.
+Copyright (c) 1996, 2016, Oracle and/or its affiliates. All rights reserved.
Copyright (c) 2008, Google Inc.
Copyright (c) 2009, Percona Inc.
Copyright (c) 2013, 2015, MariaDB Corporation
@@ -532,7 +532,7 @@ UNIV_INTERN
void
srv_normalize_path_for_win(
/*=======================*/
- char* str __attribute__((unused))) /*!< in/out: null-terminated
+ char* str MY_ATTRIBUTE((unused))) /*!< in/out: null-terminated
character string */
{
#ifdef __WIN__
@@ -549,7 +549,7 @@ srv_normalize_path_for_win(
/*********************************************************************//**
Creates a log file.
@return DB_SUCCESS or error code */
-static __attribute__((nonnull, warn_unused_result))
+static MY_ATTRIBUTE((nonnull, warn_unused_result))
dberr_t
create_log_file(
/*============*/
@@ -757,7 +757,7 @@ create_log_files_rename(
/*********************************************************************//**
Opens a log file.
@return DB_SUCCESS or error code */
-static __attribute__((nonnull, warn_unused_result))
+static MY_ATTRIBUTE((nonnull, warn_unused_result))
dberr_t
open_log_file(
/*==========*/
@@ -785,7 +785,7 @@ open_log_file(
/*********************************************************************//**
Creates or opens database data files and closes them.
@return DB_SUCCESS or error code */
-static __attribute__((nonnull, warn_unused_result))
+static MY_ATTRIBUTE((nonnull, warn_unused_result))
dberr_t
open_or_create_data_files(
/*======================*/
diff --git a/storage/innobase/sync/sync0sync.cc b/storage/innobase/sync/sync0sync.cc
index 7eb3d0bd6f2..3e3ce353724 100644
--- a/storage/innobase/sync/sync0sync.cc
+++ b/storage/innobase/sync/sync0sync.cc
@@ -1,6 +1,6 @@
/*****************************************************************************
-Copyright (c) 1995, 2015, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 1995, 2016, Oracle and/or its affiliates. All Rights Reserved.
Copyright (c) 2008, Google Inc.
Portions of this file contain modifications contributed and copyrighted by
@@ -386,10 +386,10 @@ ulint
mutex_enter_nowait_func(
/*====================*/
ib_mutex_t* mutex, /*!< in: pointer to mutex */
- const char* file_name __attribute__((unused)),
+ const char* file_name MY_ATTRIBUTE((unused)),
/*!< in: file name where mutex
requested */
- ulint line __attribute__((unused)))
+ ulint line MY_ATTRIBUTE((unused)))
/*!< in: line where requested */
{
ut_ad(mutex_validate(mutex));
diff --git a/storage/innobase/trx/trx0purge.cc b/storage/innobase/trx/trx0purge.cc
index 56d46311f62..efc600d16b1 100644
--- a/storage/innobase/trx/trx0purge.cc
+++ b/storage/innobase/trx/trx0purge.cc
@@ -1,6 +1,6 @@
/*****************************************************************************
-Copyright (c) 1996, 2012, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 1996, 2016, Oracle and/or its affiliates. All Rights Reserved.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -913,7 +913,7 @@ Fetches the next undo log record from the history list to purge. It must be
released with the corresponding release function.
@return copy of an undo log record or pointer to trx_purge_dummy_rec,
if the whole undo log can be skipped in purge; NULL if none left */
-static __attribute__((warn_unused_result, nonnull))
+static MY_ATTRIBUTE((warn_unused_result, nonnull))
trx_undo_rec_t*
trx_purge_fetch_next_rec(
/*=====================*/
diff --git a/storage/innobase/trx/trx0rec.cc b/storage/innobase/trx/trx0rec.cc
index fa3fe0904b8..74a63b60286 100644
--- a/storage/innobase/trx/trx0rec.cc
+++ b/storage/innobase/trx/trx0rec.cc
@@ -1,6 +1,6 @@
/*****************************************************************************
-Copyright (c) 1996, 2012, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 1996, 2016, Oracle and/or its affiliates. All Rights Reserved.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -1133,7 +1133,7 @@ trx_undo_rec_get_partial_row(
/***********************************************************************//**
Erases the unused undo log page end.
@return TRUE if the page contained something, FALSE if it was empty */
-static __attribute__((nonnull))
+static MY_ATTRIBUTE((nonnull))
ibool
trx_undo_erase_page_end(
/*====================*/
@@ -1159,7 +1159,7 @@ byte*
trx_undo_parse_erase_page_end(
/*==========================*/
byte* ptr, /*!< in: buffer */
- byte* end_ptr __attribute__((unused)), /*!< in: buffer end */
+ byte* end_ptr MY_ATTRIBUTE((unused)), /*!< in: buffer end */
page_t* page, /*!< in: page or NULL */
mtr_t* mtr) /*!< in: mtr or NULL */
{
@@ -1442,7 +1442,7 @@ NOTE: the caller must have latches on the clustered index page.
@retval true if the undo log has been
truncated and we cannot fetch the old version
@retval false if the undo log record is available */
-static __attribute__((nonnull, warn_unused_result))
+static MY_ATTRIBUTE((nonnull, warn_unused_result))
bool
trx_undo_get_undo_rec(
/*==================*/
@@ -1470,7 +1470,7 @@ trx_undo_get_undo_rec(
#ifdef UNIV_DEBUG
#define ATTRIB_USED_ONLY_IN_DEBUG
#else /* UNIV_DEBUG */
-#define ATTRIB_USED_ONLY_IN_DEBUG __attribute__((unused))
+#define ATTRIB_USED_ONLY_IN_DEBUG MY_ATTRIBUTE((unused))
#endif /* UNIV_DEBUG */
/*******************************************************************//**
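ATTRIB_USED_ONLY_IN_DEBUG marks a parameter that is referenced only from ut_ad() assertions: genuinely used under UNIV_DEBUG, but dead weight in release builds, where the attribute silences the unused-parameter warning. Typical usage on a hypothetical function:

    static void
    check_trx(
    /*======*/
        trx_t*  trx ATTRIB_USED_ONLY_IN_DEBUG) /*!< in: read only by ut_ad() */
    {
        ut_ad(trx != NULL); /* compiled out unless UNIV_DEBUG is defined */
    }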
diff --git a/storage/innobase/trx/trx0roll.cc b/storage/innobase/trx/trx0roll.cc
index e2c3c0b949c..c65d95a9817 100644
--- a/storage/innobase/trx/trx0roll.cc
+++ b/storage/innobase/trx/trx0roll.cc
@@ -1,6 +1,6 @@
/*****************************************************************************
-Copyright (c) 1996, 2014, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 1996, 2016, Oracle and/or its affiliates. All Rights Reserved.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -336,7 +336,7 @@ the row, these locks are naturally released in the rollback. Savepoints which
were set after this savepoint are deleted.
@return if no savepoint of the name found then DB_NO_SAVEPOINT,
otherwise DB_SUCCESS */
-static __attribute__((nonnull, warn_unused_result))
+static MY_ATTRIBUTE((nonnull, warn_unused_result))
dberr_t
trx_rollback_to_savepoint_for_mysql_low(
/*====================================*/
@@ -641,7 +641,7 @@ trx_rollback_active(
"in recovery",
table->name, trx->table_id);
- err = row_drop_table_for_mysql(table->name, trx, TRUE);
+ err = row_drop_table_for_mysql(table->name, trx, TRUE, FALSE);
trx_commit_for_mysql(trx);
ut_a(err == DB_SUCCESS);
@@ -796,7 +796,7 @@ extern "C" UNIV_INTERN
os_thread_ret_t
DECLARE_THREAD(trx_rollback_or_clean_all_recovered)(
/*================================================*/
- void* arg __attribute__((unused)))
+ void* arg MY_ATTRIBUTE((unused)))
/*!< in: a dummy parameter required by
os_thread_create */
{
diff --git a/storage/innobase/trx/trx0trx.cc b/storage/innobase/trx/trx0trx.cc
index 7f3cfa22255..1a99e159d41 100644
--- a/storage/innobase/trx/trx0trx.cc
+++ b/storage/innobase/trx/trx0trx.cc
@@ -1,6 +1,6 @@
/*****************************************************************************
-Copyright (c) 1996, 2015, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 1996, 2016, Oracle and/or its affiliates. All Rights Reserved.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -973,7 +973,7 @@ trx_serialisation_number_get(
/****************************************************************//**
Assign the transaction its history serialisation number and write the
update UNDO log record to the assigned rollback segment. */
-static __attribute__((nonnull))
+static MY_ATTRIBUTE((nonnull))
void
trx_write_serialisation_history(
/*============================*/
@@ -1060,7 +1060,7 @@ trx_write_serialisation_history(
/********************************************************************
Finalize a transaction containing updates for a FTS table. */
-static __attribute__((nonnull))
+static MY_ATTRIBUTE((nonnull))
void
trx_finalize_for_fts_table(
/*=======================*/
@@ -1093,7 +1093,7 @@ trx_finalize_for_fts_table(
/******************************************************************//**
Finalize a transaction containing updates to FTS tables. */
-static __attribute__((nonnull))
+static MY_ATTRIBUTE((nonnull))
void
trx_finalize_for_fts(
/*=================*/
@@ -1161,7 +1161,7 @@ trx_flush_log_if_needed_low(
/**********************************************************************//**
If required, flushes the log to disk based on the value of
innodb_flush_log_at_trx_commit. */
-static __attribute__((nonnull))
+static MY_ATTRIBUTE((nonnull))
void
trx_flush_log_if_needed(
/*====================*/
@@ -1176,7 +1176,7 @@ trx_flush_log_if_needed(
/****************************************************************//**
Commits a transaction in memory. */
-static __attribute__((nonnull))
+static MY_ATTRIBUTE((nonnull))
void
trx_commit_in_memory(
/*=================*/
@@ -2165,7 +2165,7 @@ which is in the prepared state
@return trx on match, the trx->xid will be invalidated;
note that the trx may have been committed, unless the caller is
holding lock_sys->mutex */
-static __attribute__((nonnull, warn_unused_result))
+static MY_ATTRIBUTE((nonnull, warn_unused_result))
trx_t*
trx_get_trx_by_xid_low(
/*===================*/
diff --git a/storage/innobase/trx/trx0undo.cc b/storage/innobase/trx/trx0undo.cc
index edb85a89c17..cdd23726f2e 100644
--- a/storage/innobase/trx/trx0undo.cc
+++ b/storage/innobase/trx/trx0undo.cc
@@ -1,6 +1,6 @@
/*****************************************************************************
-Copyright (c) 1996, 2013, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 1996, 2016, Oracle and/or its affiliates. All Rights Reserved.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -419,11 +419,11 @@ trx_undo_page_init(
Creates a new undo log segment in file.
@return DB_SUCCESS if page creation succeeded; possible error codes are:
DB_TOO_MANY_CONCURRENT_TRXS, DB_OUT_OF_FILE_SPACE */
-static __attribute__((nonnull, warn_unused_result))
+static MY_ATTRIBUTE((nonnull, warn_unused_result))
dberr_t
trx_undo_seg_create(
/*================*/
- trx_rseg_t* rseg __attribute__((unused)),/*!< in: rollback segment */
+ trx_rseg_t* rseg MY_ATTRIBUTE((unused)),/*!< in: rollback segment */
trx_rsegf_t* rseg_hdr,/*!< in: rollback segment header, page
x-latched */
ulint type, /*!< in: type of the segment: TRX_UNDO_INSERT or
@@ -443,7 +443,9 @@ trx_undo_seg_create(
ibool success;
dberr_t err = DB_SUCCESS;
- ut_ad(mtr && id && rseg_hdr);
+ ut_ad(mtr != NULL);
+ ut_ad(id != NULL);
+ ut_ad(rseg_hdr != NULL);
ut_ad(mutex_own(&(rseg->mutex)));
/* fputs(type == TRX_UNDO_INSERT
@@ -827,7 +829,7 @@ byte*
trx_undo_parse_discard_latest(
/*==========================*/
byte* ptr, /*!< in: buffer */
- byte* end_ptr __attribute__((unused)), /*!< in: buffer end */
+ byte* end_ptr MY_ATTRIBUTE((unused)), /*!< in: buffer end */
page_t* page, /*!< in: page or NULL */
mtr_t* mtr) /*!< in: mtr or NULL */
{
@@ -1555,7 +1557,7 @@ Creates a new undo log.
@return DB_SUCCESS if successful in creating the new undo log object;
possible error codes are: DB_TOO_MANY_CONCURRENT_TRXS,
DB_OUT_OF_FILE_SPACE, DB_OUT_OF_MEMORY */
-static __attribute__((nonnull, warn_unused_result))
+static MY_ATTRIBUTE((nonnull, warn_unused_result))
dberr_t
trx_undo_create(
/*============*/
diff --git a/storage/maria/ma_checkpoint.c b/storage/maria/ma_checkpoint.c
index 36b729e6053..21b4758a720 100644
--- a/storage/maria/ma_checkpoint.c
+++ b/storage/maria/ma_checkpoint.c
@@ -46,7 +46,7 @@ static mysql_mutex_t LOCK_checkpoint;
static mysql_cond_t COND_checkpoint;
/** @brief control structure for checkpoint background thread */
static MA_SERVICE_THREAD_CONTROL checkpoint_control=
- {THREAD_DEAD, FALSE, &LOCK_checkpoint, &COND_checkpoint};
+ {0, FALSE, FALSE, &LOCK_checkpoint, &COND_checkpoint};
/* is ulong like pagecache->blocks_changed */
static ulong pages_to_flush_before_next_checkpoint;
static PAGECACHE_FILE *dfiles, /**< data files to flush in background */
@@ -326,7 +326,6 @@ end:
int ma_checkpoint_init(ulong interval)
{
- pthread_t th;
int res= 0;
DBUG_ENTER("ma_checkpoint_init");
if (ma_service_thread_control_init(&checkpoint_control))
@@ -334,14 +333,14 @@ int ma_checkpoint_init(ulong interval)
else if (interval > 0)
{
compile_time_assert(sizeof(void *) >= sizeof(ulong));
- if (!(res= mysql_thread_create(key_thread_checkpoint,
- &th, NULL, ma_checkpoint_background,
- (void *)interval)))
- {
- /* thread lives, will have to be killed */
- checkpoint_control.status= THREAD_RUNNING;
- }
+ if ((res= mysql_thread_create(key_thread_checkpoint,
+ &checkpoint_control.thread, NULL,
+ ma_checkpoint_background,
+ (void*) interval)))
+ checkpoint_control.killed= TRUE;
}
+ else
+ checkpoint_control.killed= TRUE;
DBUG_RETURN(res);
}
@@ -573,8 +572,6 @@ pthread_handler_t ma_checkpoint_background(void *arg)
sleeps= 1;
pages_to_flush_before_next_checkpoint= 0;
- pthread_detach_this_thread();
-
for(;;) /* iterations of checkpoints and dirty page flushing */
{
#if 0 /* good for testing, to do a lot of checkpoints, finds a lot of bugs */
@@ -723,7 +720,6 @@ pthread_handler_t ma_checkpoint_background(void *arg)
DBUG_EXECUTE_IF("maria_checkpoint_indirect", level= CHECKPOINT_INDIRECT;);
ma_checkpoint_execute(level, FALSE);
}
- my_service_thread_signal_end(&checkpoint_control);
my_thread_end();
return 0;
}
diff --git a/storage/maria/ma_loghandler.c b/storage/maria/ma_loghandler.c
index 1e7cc9483a2..8bcb84c2a20 100644
--- a/storage/maria/ma_loghandler.c
+++ b/storage/maria/ma_loghandler.c
@@ -54,7 +54,7 @@ static mysql_mutex_t LOCK_soft_sync;
static mysql_cond_t COND_soft_sync;
/** @brief control structure for checkpoint background thread */
static MA_SERVICE_THREAD_CONTROL soft_sync_control=
- {THREAD_DEAD, FALSE, &LOCK_soft_sync, &COND_soft_sync};
+ {0, FALSE, FALSE, &LOCK_soft_sync, &COND_soft_sync};
/* transaction log file descriptor */
@@ -8790,7 +8790,6 @@ ma_soft_sync_background( void *arg __attribute__((unused)))
if (my_service_thread_sleep(&soft_sync_control, sleep))
break;
}
- my_service_thread_signal_end(&soft_sync_control);
my_thread_end();
DBUG_RETURN(0);
}
@@ -8803,7 +8802,6 @@ ma_soft_sync_background( void *arg __attribute__((unused)))
int translog_soft_sync_start(void)
{
- pthread_t th;
int res= 0;
uint32 min, max;
DBUG_ENTER("translog_soft_sync_start");
@@ -8818,9 +8816,10 @@ int translog_soft_sync_start(void)
soft_need_sync= 1;
if (!(res= ma_service_thread_control_init(&soft_sync_control)))
- if (!(res= mysql_thread_create(key_thread_soft_sync,
- &th, NULL, ma_soft_sync_background, NULL)))
- soft_sync_control.status= THREAD_RUNNING;
+ if ((res= mysql_thread_create(key_thread_soft_sync,
+ &soft_sync_control.thread, NULL,
+ ma_soft_sync_background, NULL)))
+ soft_sync_control.killed= TRUE;
DBUG_RETURN(res);
}
diff --git a/storage/maria/ma_pagecache.c b/storage/maria/ma_pagecache.c
index bba99703119..4630a84334b 100644
--- a/storage/maria/ma_pagecache.c
+++ b/storage/maria/ma_pagecache.c
@@ -500,8 +500,8 @@ static void test_key_cache(PAGECACHE *pagecache,
const char *where, my_bool lock);
#endif
-#define PAGECACHE_HASH(p, f, pos) (((ulong) (pos) + \
- (ulong) (f).file) & (p->hash_entries-1))
+#define PAGECACHE_HASH(p, f, pos) (((size_t) (pos) + \
+ (size_t) (f).file) & (p->hash_entries-1))
#define FILE_HASH(f,cache) ((uint) (f).file & (cache->changed_blocks_hash_size-1))
#define DEFAULT_PAGECACHE_DEBUG_LOG "pagecache_debug.log"
@@ -641,10 +641,10 @@ static my_bool pagecache_fwrite(PAGECACHE *pagecache,
{
char buff[80];
uint len= my_sprintf(buff,
- (buff, "fwrite: fd: %d id: %u page: %lu",
+ (buff, "fwrite: fd: %d id: %u page: %llu",
filedesc->file,
_ma_file_callback_to_id(filedesc->callback_data),
- (ulong) pageno));
+ pageno));
(void) translog_log_debug_info(0, LOGREC_DEBUG_INFO_QUERY,
(uchar*) buff, len);
}
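Both pagecache hunks above are 64-bit correctness fixes. On LLP64 platforms (64-bit Windows) ulong is only 32 bits wide while pointers, size_t and page numbers are 64 bits, so hashing through (ulong) or printing a pgcache_page_no_t with %lu silently truncates the value. The rule of thumb, sketched:

    #include <stdio.h>
    #include <stddef.h>

    int main(void)
    {
      unsigned long long pageno = 1ULL << 33;     /* does not fit in a 32-bit long */
      size_t hash = (size_t) pageno & (1024 - 1); /* size_t keeps all the bits     */
      printf("page: %llu hash: %zu\n", pageno, hash); /* %llu matches 64 bits      */
      return 0;
    }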
@@ -745,12 +745,12 @@ static inline uint next_power(uint value)
*/
-ulong init_pagecache(PAGECACHE *pagecache, size_t use_mem,
+size_t init_pagecache(PAGECACHE *pagecache, size_t use_mem,
uint division_limit, uint age_threshold,
uint block_size, uint changed_blocks_hash_size,
myf my_readwrite_flags)
{
- ulong blocks, hash_links, length;
+ size_t blocks, hash_links, length;
int error;
DBUG_ENTER("init_pagecache");
DBUG_ASSERT(block_size >= 512);
@@ -787,10 +787,10 @@ ulong init_pagecache(PAGECACHE *pagecache, size_t use_mem,
DBUG_PRINT("info", ("block_size: %u", block_size));
DBUG_ASSERT(((uint)(1 << pagecache->shift)) == block_size);
- blocks= (ulong) (use_mem / (sizeof(PAGECACHE_BLOCK_LINK) +
+ blocks= use_mem / (sizeof(PAGECACHE_BLOCK_LINK) +
2 * sizeof(PAGECACHE_HASH_LINK) +
sizeof(PAGECACHE_HASH_LINK*) *
- 5/4 + block_size));
+ 5/4 + block_size);
/* Changed blocks hash needs to be a power of 2 */
changed_blocks_hash_size= my_round_up_to_next_power(MY_MAX(changed_blocks_hash_size,
MIN_PAGECACHE_CHANGED_BLOCKS_HASH_SIZE));
@@ -826,7 +826,7 @@ ulong init_pagecache(PAGECACHE *pagecache, size_t use_mem,
blocks--;
/* Allocate memory for cache page buffers */
if ((pagecache->block_mem=
- my_large_malloc((ulong) blocks * pagecache->block_size,
+ my_large_malloc(blocks * pagecache->block_size,
MYF(MY_WME))))
{
/*
@@ -857,7 +857,7 @@ ulong init_pagecache(PAGECACHE *pagecache, size_t use_mem,
blocks= blocks / 4*3;
}
pagecache->blocks_unused= blocks;
- pagecache->disk_blocks= (long) blocks;
+ pagecache->disk_blocks= blocks;
pagecache->hash_links= hash_links;
pagecache->hash_links_used= 0;
pagecache->free_hash_list= NULL;
@@ -894,7 +894,7 @@ ulong init_pagecache(PAGECACHE *pagecache, size_t use_mem,
pagecache->hash_links, (long) pagecache->hash_link_root));
pagecache->blocks= pagecache->disk_blocks > 0 ? pagecache->disk_blocks : 0;
- DBUG_RETURN((ulong) pagecache->disk_blocks);
+ DBUG_RETURN((size_t)pagecache->disk_blocks);
err:
error= my_errno;
@@ -985,11 +985,11 @@ static int flush_all_key_blocks(PAGECACHE *pagecache)
So we disable it for now.
*/
#if NOT_USED /* keep disabled until code is fixed see above !! */
-ulong resize_pagecache(PAGECACHE *pagecache,
+size_t resize_pagecache(PAGECACHE *pagecache,
size_t use_mem, uint division_limit,
uint age_threshold, uint changed_blocks_hash_size)
{
- ulong blocks;
+ size_t blocks;
struct st_my_thread_var *thread;
WQUEUE *wqueue;
DBUG_ENTER("resize_pagecache");
@@ -1385,7 +1385,7 @@ static void link_block(PAGECACHE *pagecache, PAGECACHE_BLOCK_LINK *block,
("linked block: %u:%1u status: %x #requests: %u #available: %u",
PCBLOCK_NUMBER(pagecache, block), at_end, block->status,
block->requests, pagecache->blocks_available));
- KEYCACHE_DBUG_ASSERT((ulong) pagecache->blocks_available <=
+ KEYCACHE_DBUG_ASSERT(pagecache->blocks_available <=
pagecache->blocks_used);
#endif
DBUG_VOID_RETURN;
@@ -2024,7 +2024,7 @@ restart:
/* There are some never used blocks, take first of them */
block= &pagecache->block_root[pagecache->blocks_used];
block->buffer= ADD_TO_PTR(pagecache->block_mem,
- ((ulong) pagecache->blocks_used*
+ (pagecache->blocks_used*
pagecache->block_size),
uchar*);
pagecache->blocks_used++;
@@ -4875,7 +4875,7 @@ my_bool pagecache_collect_changed_blocks_with_lsn(PAGECACHE *pagecache,
LSN *min_rec_lsn)
{
my_bool error= 0;
- ulong stored_list_size= 0;
+ size_t stored_list_size= 0;
uint file_hash;
char *ptr;
LSN minimum_rec_lsn= LSN_MAX;
diff --git a/storage/maria/ma_pagecache.h b/storage/maria/ma_pagecache.h
index e212a7b7029..207ad69711f 100644
--- a/storage/maria/ma_pagecache.h
+++ b/storage/maria/ma_pagecache.h
@@ -131,21 +131,21 @@ typedef struct st_pagecache_hash_link PAGECACHE_HASH_LINK;
typedef struct st_pagecache
{
size_t mem_size; /* specified size of the cache memory */
- ulong min_warm_blocks; /* min number of warm blocks; */
- ulong age_threshold; /* age threshold for hot blocks */
+ size_t min_warm_blocks; /* min number of warm blocks; */
+ size_t age_threshold; /* age threshold for hot blocks */
ulonglong time; /* total number of block link operations */
- ulong hash_entries; /* max number of entries in the hash table */
- ulong changed_blocks_hash_size; /* Number of hash buckets for file blocks */
- long hash_links; /* max number of hash links */
- long hash_links_used; /* number of hash links taken from free links pool */
- long disk_blocks; /* max number of blocks in the cache */
- ulong blocks_used; /* maximum number of concurrently used blocks */
- ulong blocks_unused; /* number of currently unused blocks */
- ulong blocks_changed; /* number of currently dirty blocks */
- ulong warm_blocks; /* number of blocks in warm sub-chain */
- ulong cnt_for_resize_op; /* counter to block resize operation */
- ulong blocks_available; /* number of blocks available in the LRU chain */
- long blocks; /* max number of blocks in the cache */
+ size_t hash_entries; /* max number of entries in the hash table */
+ size_t changed_blocks_hash_size;/* Number of hash buckets for file blocks */
+ ssize_t hash_links; /* max number of hash links */
+ ssize_t hash_links_used; /* number of hash links taken from free links pool */
+ ssize_t disk_blocks; /* max number of blocks in the cache */
+ size_t blocks_used; /* maximum number of concurrently used blocks */
+ size_t blocks_unused; /* number of currently unused blocks */
+ size_t blocks_changed; /* number of currently dirty blocks */
+ size_t warm_blocks; /* number of blocks in warm sub-chain */
+ size_t cnt_for_resize_op; /* counter to block resize operation */
+ size_t blocks_available; /* number of blocks available in the LRU chain */
+ ssize_t blocks; /* max number of blocks in the cache */
uint32 block_size; /* size of the page buffer of a cache block */
PAGECACHE_HASH_LINK **hash_root;/* arr. of entries into hash table buckets */
PAGECACHE_HASH_LINK *hash_link_root;/* memory for hash table links */
@@ -170,12 +170,12 @@ typedef struct st_pagecache
*/
ulonglong param_buff_size; /* size the memory allocated for the cache */
- ulong param_block_size; /* size of the blocks in the key cache */
- ulong param_division_limit; /* min. percentage of warm blocks */
- ulong param_age_threshold; /* determines when hot block is downgraded */
+ size_t param_block_size; /* size of the blocks in the key cache */
+ size_t param_division_limit; /* min. percentage of warm blocks */
+ size_t param_age_threshold; /* determines when hot block is downgraded */
/* Statistics variables. These are reset in reset_pagecache_counters(). */
- ulong global_blocks_changed; /* number of currently dirty blocks */
+ size_t global_blocks_changed; /* number of currently dirty blocks */
ulonglong global_cache_w_requests;/* number of write requests (write hits) */
ulonglong global_cache_write; /* number of writes from cache to files */
ulonglong global_cache_r_requests;/* number of read requests (read hits) */
@@ -208,11 +208,11 @@ typedef enum pagecache_flush_filter_result
/* The default key cache */
extern PAGECACHE dflt_pagecache_var, *dflt_pagecache;
-extern ulong init_pagecache(PAGECACHE *pagecache, size_t use_mem,
+extern size_t init_pagecache(PAGECACHE *pagecache, size_t use_mem,
uint division_limit, uint age_threshold,
uint block_size, uint changed_blocks_hash_size,
myf my_read_flags);
-extern ulong resize_pagecache(PAGECACHE *pagecache,
+extern size_t resize_pagecache(PAGECACHE *pagecache,
size_t use_mem, uint division_limit,
uint age_threshold, uint changed_blocks_hash_size);
extern void change_pagecache_param(PAGECACHE *pagecache, uint division_limit,
diff --git a/storage/maria/ma_servicethread.c b/storage/maria/ma_servicethread.c
index e5c949a7571..d92c5315933 100644
--- a/storage/maria/ma_servicethread.c
+++ b/storage/maria/ma_servicethread.c
@@ -33,7 +33,7 @@ int ma_service_thread_control_init(MA_SERVICE_THREAD_CONTROL *control)
DBUG_ENTER("ma_service_thread_control_init");
DBUG_PRINT("init", ("control 0x%lx", (ulong) control));
control->inited= TRUE;
- control->status= THREAD_DEAD; /* not yet born == dead */
+ control->killed= FALSE;
res= (mysql_mutex_init(key_SERVICE_THREAD_CONTROL_lock,
control->LOCK_control, MY_MUTEX_INIT_SLOW) ||
mysql_cond_init(key_SERVICE_THREAD_CONTROL_cond,
@@ -60,20 +60,17 @@ void ma_service_thread_control_end(MA_SERVICE_THREAD_CONTROL *control)
DBUG_PRINT("init", ("control 0x%lx", (ulong) control));
DBUG_ASSERT(control->inited);
mysql_mutex_lock(control->LOCK_control);
- if (control->status != THREAD_DEAD) /* thread was started OK */
+ if (!control->killed)
{
DBUG_PRINT("info",("killing Maria background thread"));
- control->status= THREAD_DYING; /* kill it */
- do /* and wait for it to be dead */
- {
- /* wake it up if it was in a sleep */
- mysql_cond_broadcast(control->COND_control);
- DBUG_PRINT("info",("waiting for Maria background thread to die"));
- mysql_cond_wait(control->COND_control, control->LOCK_control);
- }
- while (control->status != THREAD_DEAD);
+ control->killed= TRUE; /* kill it */
+ mysql_cond_broadcast(control->COND_control);
+ mysql_mutex_unlock(control->LOCK_control);
+ DBUG_PRINT("info", ("waiting for Maria background thread to die"));
+ pthread_join(control->thread, NULL);
}
- mysql_mutex_unlock(control->LOCK_control);
+ else
+ mysql_mutex_unlock(control->LOCK_control);
mysql_mutex_destroy(control->LOCK_control);
mysql_cond_destroy(control->COND_control);
control->inited= FALSE;
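The Aria service-thread shutdown switches from a condition-variable handshake (set THREAD_DYING, then wait for the worker to report THREAD_DEAD) to the simpler and less race-prone pattern of a boolean kill flag plus pthread_join(): the worker no longer has to announce its own death, and the join guarantees it has fully exited before the mutex and condition variable are destroyed. The controller side of the pattern, sketched in isolation:

    /* Controller (cf. ma_service_thread_control_end above): */
    mysql_mutex_lock(control->LOCK_control);
    control->killed= TRUE;                        /* ask the worker to stop   */
    mysql_cond_broadcast(control->COND_control);  /* wake it if it is asleep  */
    mysql_mutex_unlock(control->LOCK_control);
    pthread_join(control->thread, NULL);          /* wait until it has exited */
    /* Only now is it safe to destroy LOCK_control and COND_control. */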
@@ -100,7 +97,7 @@ my_bool my_service_thread_sleep(MA_SERVICE_THREAD_CONTROL *control,
DBUG_ENTER("my_service_thread_sleep");
DBUG_PRINT("init", ("control 0x%lx", (ulong) control));
mysql_mutex_lock(control->LOCK_control);
- if (control->status == THREAD_DYING)
+ if (control->killed)
{
mysql_mutex_unlock(control->LOCK_control);
DBUG_RETURN(TRUE);
@@ -119,34 +116,8 @@ my_bool my_service_thread_sleep(MA_SERVICE_THREAD_CONTROL *control,
control->LOCK_control, &abstime);
}
#endif
- if (control->status == THREAD_DYING)
+ if (control->killed)
res= TRUE;
mysql_mutex_unlock(control->LOCK_control);
DBUG_RETURN(res);
}
-
-
-/**
- inform about thread exiting
-
- @param control control block
-*/
-
-void my_service_thread_signal_end(MA_SERVICE_THREAD_CONTROL *control)
-{
- DBUG_ENTER("my_service_thread_signal_end");
- DBUG_PRINT("init", ("control 0x%lx", (ulong) control));
- mysql_mutex_lock(control->LOCK_control);
- control->status = THREAD_DEAD; /* indicate that we are dead */
- /*
- wake up ma_service_thread_control_end which may be waiting for
- our death
- */
- mysql_cond_broadcast(control->COND_control);
- /*
- broadcast was inside unlock because ma_service_thread_control_end
- destroys mutex
- */
- mysql_mutex_unlock(control->LOCK_control);
- DBUG_VOID_RETURN;
-}
diff --git a/storage/maria/ma_servicethread.h b/storage/maria/ma_servicethread.h
index ed578d93c24..254225bd608 100644
--- a/storage/maria/ma_servicethread.h
+++ b/storage/maria/ma_servicethread.h
@@ -16,12 +16,10 @@
#include <my_pthread.h>
-enum ma_service_thread_state {THREAD_RUNNING, THREAD_DYING, THREAD_DEAD};
-
typedef struct st_ma_service_thread_control
{
- /** 'kill' flag for the background thread */
- enum ma_service_thread_state status;
+ pthread_t thread;
+ my_bool killed;
/** if thread module was inited or not */
my_bool inited;
/** for killing the background thread */
@@ -35,4 +33,3 @@ int ma_service_thread_control_init(MA_SERVICE_THREAD_CONTROL *control);
void ma_service_thread_control_end(MA_SERVICE_THREAD_CONTROL *control);
my_bool my_service_thread_sleep(MA_SERVICE_THREAD_CONTROL *control,
ulonglong sleep_time);
-void my_service_thread_signal_end(MA_SERVICE_THREAD_CONTROL *control);
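With pthread_join() in place, my_service_thread_signal_end() disappears entirely: a joinable thread needs neither pthread_detach_this_thread() nor an explicit "I am dead" broadcast, because returning from the thread function is the signal. The worker side of the protocol now reduces to roughly the following sketch, where do_one_round_of_work is a placeholder for the real checkpoint or soft-sync work:

    /* Worker (cf. ma_checkpoint_background), sketched: */
    for (;;)
    {
      if (my_service_thread_sleep(control, sleep_time))
        break;                  /* returns TRUE once control->killed is set */
      do_one_round_of_work();   /* placeholder */
    }
    my_thread_end();
    return 0;                   /* the controller's pthread_join() reaps us */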
diff --git a/storage/maria/ma_sort.c b/storage/maria/ma_sort.c
index ef6e8506ac6..ac166cf4084 100644
--- a/storage/maria/ma_sort.c
+++ b/storage/maria/ma_sort.c
@@ -364,192 +364,196 @@ err:
} /* find_all_keys */
-/* Search after all keys and place them in a temp. file */
-
-pthread_handler_t _ma_thr_find_all_keys(void *arg)
+static my_bool _ma_thr_find_all_keys_exec(MARIA_SORT_PARAM* sort_param)
{
- MARIA_SORT_PARAM *sort_param= (MARIA_SORT_PARAM*) arg;
- int error;
- size_t memavl, old_memavl;
+ int error= 0;
+ ulonglong memavl, old_memavl;
longlong sortbuff_size;
ha_keys UNINIT_VAR(keys), idx;
uint sort_length;
uint maxbuffer;
- uchar **sort_keys=0;
+ uchar **sort_keys= NULL;
+ DBUG_ENTER("_ma_thr_find_all_keys_exec");
+ DBUG_PRINT("enter", ("master: %d", sort_param->master));
- error=1;
+ if (sort_param->sort_info->got_error)
+ DBUG_RETURN(TRUE);
- if (my_thread_init())
- goto err;
-
- { /* Add extra block since DBUG_ENTER declare variables */
- DBUG_ENTER("_ma_thr_find_all_keys");
- DBUG_PRINT("enter", ("master: %d", sort_param->master));
- if (sort_param->sort_info->got_error)
- goto err;
+ set_sort_param_read_write(sort_param);
- set_sort_param_read_write(sort_param);
+ my_b_clear(&sort_param->tempfile);
+ my_b_clear(&sort_param->tempfile_for_exceptions);
+ bzero((char*) &sort_param->buffpek, sizeof(sort_param->buffpek));
+ bzero((char*) &sort_param->unique, sizeof(sort_param->unique));
- my_b_clear(&sort_param->tempfile);
- my_b_clear(&sort_param->tempfile_for_exceptions);
- bzero((char*) &sort_param->buffpek,sizeof(sort_param->buffpek));
- bzero((char*) &sort_param->unique, sizeof(sort_param->unique));
+ sortbuff_size= sort_param->sortbuff_size;
+ memavl= MY_MAX(sortbuff_size, MIN_SORT_MEMORY);
+ idx= (ha_keys) sort_param->sort_info->max_records;
+ sort_length= sort_param->key_length;
+ maxbuffer= 1;
- sortbuff_size= sort_param->sortbuff_size;
- memavl= MY_MAX(sortbuff_size, MIN_SORT_MEMORY);
- idx= (ha_keys) sort_param->sort_info->max_records;
- sort_length= sort_param->key_length;
- maxbuffer= 1;
-
- while (memavl >= MIN_SORT_MEMORY)
+ while (memavl >= MIN_SORT_MEMORY)
+ {
+ if ((my_off_t) (idx+1)*(sort_length+sizeof(char*)) <= (my_off_t) memavl)
+ keys= idx+1;
+ else if ((sort_param->sort_info->param->testflag &
+ (T_FORCE_SORT_MEMORY | T_CREATE_MISSING_KEYS)) ==
+ T_FORCE_SORT_MEMORY)
{
- if ((my_off_t) (idx+1)*(sort_length+sizeof(char*)) <= (my_off_t) memavl)
- keys= idx+1;
- else if ((sort_param->sort_info->param->testflag &
- (T_FORCE_SORT_MEMORY | T_CREATE_MISSING_KEYS)) ==
- T_FORCE_SORT_MEMORY)
- {
- /*
- Use all of the given sort buffer for key data.
- Allocate 1000 buffers at a start for new data. More buffers
- will be allocated when needed.
- */
- keys= memavl / (sort_length+sizeof(char*));
- maxbuffer= (uint) MY_MIN((ulonglong) 1000, (idx / keys)+1);
- }
- else
- {
- uint maxbuffer_org;
- do
- {
- maxbuffer_org= maxbuffer;
- if (memavl < sizeof(BUFFPEK)*maxbuffer ||
- (keys=(memavl-sizeof(BUFFPEK)*maxbuffer)/
- (sort_length+sizeof(char*))) <= 1 ||
- keys < maxbuffer)
- {
- _ma_check_print_error(sort_param->sort_info->param,
- "aria_sort_buffer_size is too small. Current aria_sort_buffer_size: %llu rows: %llu sort_length: %u",
- sortbuff_size, (ulonglong) idx, sort_length);
- goto err;
- }
- }
- while ((maxbuffer= (uint) (idx/(keys-1)+1)) != maxbuffer_org);
- }
- if ((sort_keys= (uchar **)
- my_malloc(keys*(sort_length+sizeof(char*))+
- ((sort_param->keyinfo->flag & HA_FULLTEXT) ?
- HA_FT_MAXBYTELEN : 0), MYF(0))))
+ /*
+ Use all of the given sort buffer for key data.
+ Allocate 1000 buffers at a start for new data. More buffers
+ will be allocated when needed.
+ */
+ keys= memavl / (sort_length+sizeof(char*));
+ maxbuffer= (uint) MY_MIN((ulonglong) 1000, (idx / keys)+1);
+ }
+ else
+ {
+ uint maxbuffer_org;
+ do
{
- if (my_init_dynamic_array(&sort_param->buffpek, sizeof(BUFFPEK),
- maxbuffer, MY_MIN(maxbuffer/2, 1000), MYF(0)))
+ maxbuffer_org= maxbuffer;
+ if (memavl < sizeof(BUFFPEK)*maxbuffer ||
+ (keys=(memavl-sizeof(BUFFPEK)*maxbuffer)/
+ (sort_length+sizeof(char*))) <= 1 ||
+ keys < maxbuffer)
{
- my_free(sort_keys);
- sort_keys= (uchar **) NULL; /* for err: label */
+ _ma_check_print_error(sort_param->sort_info->param,
+ "aria_sort_buffer_size is too small. Current aria_sort_buffer_size: %llu rows: %llu sort_length: %u",
+ sortbuff_size, (ulonglong) idx, sort_length);
+ goto err;
}
- else
- break;
}
- old_memavl= memavl;
- if ((memavl= memavl/4*3) < MIN_SORT_MEMORY &&
- old_memavl > MIN_SORT_MEMORY)
- memavl= MIN_SORT_MEMORY;
+ while ((maxbuffer= (uint) (idx/(keys-1)+1)) != maxbuffer_org);
}
- if (memavl < MIN_SORT_MEMORY)
+ if ((sort_keys= (uchar **)
+ my_malloc(keys*(sort_length+sizeof(char*))+
+ ((sort_param->keyinfo->flag & HA_FULLTEXT) ?
+ HA_FT_MAXBYTELEN : 0), MYF(0))))
{
- /* purecov: begin inspected */
- _ma_check_print_error(sort_param->sort_info->param,
- "aria_sort_buffer_size is too small. Current aria_sort_buffer_size: %llu rows: %llu sort_length: %u",
- sortbuff_size, (ulonglong) idx, sort_length);
- my_errno= ENOMEM;
- goto err;
- /* purecov: end inspected */
+ if (my_init_dynamic_array(&sort_param->buffpek, sizeof(BUFFPEK),
+ maxbuffer, MY_MIN(maxbuffer / 2, 1000), MYF(0)))
+ {
+ my_free(sort_keys);
+ sort_keys= NULL; /* Safety against double free on error. */
+ }
+ else
+ break;
}
+ old_memavl= memavl;
+ if ((memavl= memavl/4*3) < MIN_SORT_MEMORY &&
+ old_memavl > MIN_SORT_MEMORY)
+ memavl= MIN_SORT_MEMORY;
+ }
+ if (memavl < MIN_SORT_MEMORY)
+ {
+ /* purecov: begin inspected */
+ _ma_check_print_error(sort_param->sort_info->param,
+ "aria_sort_buffer_size is too small. Current aria_sort_buffer_size: %llu rows: %llu sort_length: %u",
+ sortbuff_size, (ulonglong) idx, sort_length);
+ my_errno= ENOMEM;
+ goto err;
+ /* purecov: end inspected */
+ }
- if (sort_param->sort_info->param->testflag & T_VERBOSE)
- my_fprintf(stdout,
- "Key %d - Allocating buffer for %llu keys\n",
- sort_param->key + 1, (ulonglong) keys);
- sort_param->sort_keys= sort_keys;
+ if (sort_param->sort_info->param->testflag & T_VERBOSE)
+ my_fprintf(stdout,
+ "Key %d - Allocating buffer for %llu keys\n",
+ sort_param->key + 1, (ulonglong) keys);
+ sort_param->sort_keys= sort_keys;
- idx= error= 0;
- sort_keys[0]= (uchar*) (sort_keys+keys);
+ idx= error= 0;
+ sort_keys[0]= (uchar*) (sort_keys+keys);
- DBUG_PRINT("info", ("reading keys"));
- while (!(error= sort_param->sort_info->got_error) &&
- !(error= (*sort_param->key_read)(sort_param, sort_keys[idx])))
+ DBUG_PRINT("info", ("reading keys"));
+ while (!(error= sort_param->sort_info->got_error) &&
+ !(error= (*sort_param->key_read)(sort_param, sort_keys[idx])))
+ {
+ if (sort_param->real_key_length > sort_param->key_length)
{
- if (sort_param->real_key_length > sort_param->key_length)
- {
- if (write_key(sort_param, sort_keys[idx],
- &sort_param->tempfile_for_exceptions))
- goto err;
- continue;
- }
-
- if (++idx == keys)
- {
- if (sort_param->write_keys(sort_param, sort_keys, idx - 1,
- (BUFFPEK *)alloc_dynamic(&sort_param->
- buffpek),
- &sort_param->tempfile))
- goto err;
- sort_keys[0]= (uchar*) (sort_keys+keys);
- memcpy(sort_keys[0], sort_keys[idx - 1],
- (size_t) sort_param->key_length);
- idx= 1;
- }
- sort_keys[idx]= sort_keys[idx - 1] + sort_param->key_length;
+ if (write_key(sort_param, sort_keys[idx],
+ &sort_param->tempfile_for_exceptions))
+ goto err;
+ continue;
}
- if (error > 0)
- goto err;
- if (sort_param->buffpek.elements)
+
+ if (++idx == keys)
{
- if (sort_param->write_keys(sort_param,sort_keys, idx,
- (BUFFPEK *) alloc_dynamic(&sort_param->
- buffpek),
+ if (sort_param->write_keys(sort_param, sort_keys, idx - 1,
+ (BUFFPEK *)alloc_dynamic(&sort_param->buffpek),
&sort_param->tempfile))
goto err;
- sort_param->keys= (sort_param->buffpek.elements - 1) * (keys - 1) + idx;
+ sort_keys[0]= (uchar*) (sort_keys+keys);
+ memcpy(sort_keys[0], sort_keys[idx - 1], (size_t) sort_param->key_length);
+ idx= 1;
}
- else
- sort_param->keys= idx;
+ sort_keys[idx]= sort_keys[idx - 1] + sort_param->key_length;
+ }
+ if (error > 0)
+ goto err;
+ if (sort_param->buffpek.elements)
+ {
+ if (sort_param->write_keys(sort_param,sort_keys, idx,
+ (BUFFPEK *) alloc_dynamic(&sort_param->buffpek),
+ &sort_param->tempfile))
+ goto err;
+ sort_param->keys= (sort_param->buffpek.elements - 1) * (keys - 1) + idx;
+ }
+ else
+ sort_param->keys= idx;
- goto ok;
+ DBUG_RETURN(FALSE);
err:
- DBUG_PRINT("error", ("got some error"));
- sort_param->sort_info->got_error= 1; /* no need to protect with a mutex */
- my_free(sort_keys);
- sort_param->sort_keys= 0;
- delete_dynamic(& sort_param->buffpek);
- close_cached_file(&sort_param->tempfile);
- close_cached_file(&sort_param->tempfile_for_exceptions);
-
-ok:
- free_root(&sort_param->wordroot, MYF(0));
- /*
- Detach from the share if the writer is involved. Avoid others to
- be blocked. This includes a flush of the write buffer. This will
- also indicate EOF to the readers.
- That means that a writer always gets here first and readers -
- only when they see EOF. But if a reader finishes prematurely
- because of an error it may reach this earlier - don't allow it
- to detach the writer thread.
- */
- if (sort_param->master && sort_param->sort_info->info->rec_cache.share)
- remove_io_thread(&sort_param->sort_info->info->rec_cache);
-
- /* Readers detach from the share if any. Avoid others to be blocked. */
- if (sort_param->read_cache.share)
- remove_io_thread(&sort_param->read_cache);
-
- mysql_mutex_lock(&sort_param->sort_info->mutex);
- if (!--sort_param->sort_info->threads_running)
- mysql_cond_signal(&sort_param->sort_info->cond);
- mysql_mutex_unlock(&sort_param->sort_info->mutex);
- DBUG_PRINT("exit", ("======== ending thread ========"));
- }
+ DBUG_PRINT("error", ("got some error"));
+ my_free(sort_keys);
+ sort_param->sort_keys= 0;
+ delete_dynamic(& sort_param->buffpek);
+ close_cached_file(&sort_param->tempfile);
+ close_cached_file(&sort_param->tempfile_for_exceptions);
+
+ DBUG_RETURN(TRUE);
+}
+
+/* Search for all keys and place them in a temp. file */
+
+pthread_handler_t _ma_thr_find_all_keys(void *arg)
+{
+ MARIA_SORT_PARAM *sort_param= (MARIA_SORT_PARAM*) arg;
+ my_bool error= FALSE;
+  /* Set the error flag if my_thread_init() or the key search fails. */
+ if (my_thread_init() || _ma_thr_find_all_keys_exec(sort_param))
+ error= TRUE;
+
+ /*
+ Thread must clean up after itself.
+ */
+ free_root(&sort_param->wordroot, MYF(0));
+ /*
+ Detach from the share if the writer is involved. Avoid others to
+ be blocked. This includes a flush of the write buffer. This will
+ also indicate EOF to the readers.
+ That means that a writer always gets here first and readers -
+ only when they see EOF. But if a reader finishes prematurely
+ because of an error it may reach this earlier - don't allow it
+ to detach the writer thread.
+ */
+ if (sort_param->master && sort_param->sort_info->info->rec_cache.share)
+ remove_io_thread(&sort_param->sort_info->info->rec_cache);
+
+  /* Readers detach from the share if any, to avoid blocking others. */
+ if (sort_param->read_cache.share)
+ remove_io_thread(&sort_param->read_cache);
+
+ mysql_mutex_lock(&sort_param->sort_info->mutex);
+ if (error)
+ sort_param->sort_info->got_error= 1;
+
+ if (!--sort_param->sort_info->threads_running)
+ mysql_cond_signal(&sort_param->sort_info->cond);
+ mysql_mutex_unlock(&sort_param->sort_info->mutex);
+
my_thread_end();
return NULL;
}
@@ -559,7 +563,7 @@ int _ma_thr_write_keys(MARIA_SORT_PARAM *sort_param)
{
MARIA_SORT_INFO *sort_info=sort_param->sort_info;
HA_CHECK *param=sort_info->param;
- ulong UNINIT_VAR(length), keys;
+ size_t UNINIT_VAR(length), keys;
double *rec_per_key_part= param->new_rec_per_key_part;
int got_error=sort_info->got_error;
uint i;
diff --git a/storage/maria/maria_def.h b/storage/maria/maria_def.h
index 7337b01a981..a4fac8c088a 100644
--- a/storage/maria/maria_def.h
+++ b/storage/maria/maria_def.h
@@ -67,7 +67,8 @@ typedef struct st_maria_sort_info
pgcache_page_no_t page;
ha_rows max_records;
uint current_key, total_keys;
- uint got_error, threads_running;
+ volatile uint got_error;
+ uint threads_running;
myf myf_rw;
enum data_file_type new_data_file_type, org_data_file_type;
} MARIA_SORT_INFO;
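Making got_error volatile (and separating it from threads_running) documents its role as a cross-thread flag: it is raised by whichever sort thread fails first and polled by the others on every loop iteration, so the compiler must not cache it in a register. volatile is not a memory barrier, though; the real handshakes (threads_running and the condition variable) remain mutex-protected. A sketch, with hypothetical helpers:

    /* Writer: any thread that hits an error raises the shared flag. */
    sort_info->got_error= 1;

    /* Reader: the key-read loop re-checks the flag on each pass. */
    while (!sort_info->got_error &&              /* fresh load every time */
           !read_next_key(sort_param))           /* hypothetical reader   */
      process_key(sort_param);                   /* hypothetical          */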
diff --git a/storage/mroonga/ha_mroonga.cpp b/storage/mroonga/ha_mroonga.cpp
index 947a019827c..597ec4690cf 100644
--- a/storage/mroonga/ha_mroonga.cpp
+++ b/storage/mroonga/ha_mroonga.cpp
@@ -4218,9 +4218,12 @@ int ha_mroonga::storage_open(const char *name, int mode, uint test_if_locked)
if (!(ha_thd()->open_options & HA_OPEN_FOR_REPAIR)) {
error = storage_open_indexes(name);
if (error) {
- // TODO: free grn_columns and set NULL;
grn_obj_unlink(ctx, grn_table);
grn_table = NULL;
+ // TODO: unlink elements
+ free(grn_columns);
+ // TODO: unlink elements
+ free(grn_column_ranges);
DBUG_RETURN(error);
}
diff --git a/storage/myisam/mysql-test/storage_engine/alter_table_online.rdiff b/storage/myisam/mysql-test/storage_engine/alter_table_online.rdiff
index 3a7fef61d3b..5ae99e2035c 100644
--- a/storage/myisam/mysql-test/storage_engine/alter_table_online.rdiff
+++ b/storage/myisam/mysql-test/storage_engine/alter_table_online.rdiff
@@ -1,41 +1,5 @@
--- suite/storage_engine/alter_table_online.result 2013-11-08 20:01:16.000000000 +0400
+++ suite/storage_engine/alter_table_online.reject 2013-11-08 20:02:03.000000000 +0400
-@@ -2,8 +2,35 @@
- CREATE TABLE t1 (a <INT_COLUMN>, b <INT_COLUMN>, c <CHAR_COLUMN>) ENGINE=<STORAGE_ENGINE> <CUSTOM_TABLE_OPTIONS>;
- INSERT INTO t1 (a,b,c) VALUES (1,100,'a'),(2,200,'b'),(3,300,'c');
- ALTER ONLINE TABLE t1 MODIFY b <INT_COLUMN> DEFAULT 5;
-+ERROR 0A000: LOCK=NONE/SHARED is not supported for this operation. Try LOCK=EXCLUSIVE.
-+# ERROR: Statement ended with errno 1845, errname ER_ALTER_OPERATION_NOT_SUPPORTED (expected to succeed)
-+# ------------ UNEXPECTED RESULT ------------
-+# The statement|command finished with ER_ALTER_OPERATION_NOT_SUPPORTED.
-+# Functionality or the mix could be unsupported|malfunctioning, or the problem was caused by previous errors.
-+# You can change the engine code, or create an rdiff, or disable the test by adding it to disabled.def.
-+# Further in this test, the message might sometimes be suppressed; a part of the test might be skipped.
-+# Also, this problem may cause a chain effect (more errors of different kinds in the test).
-+# -------------------------------------------
- ALTER ONLINE TABLE t1 CHANGE b new_name <INT_COLUMN>;
-+ERROR 0A000: LOCK=NONE/SHARED is not supported for this operation. Try LOCK=EXCLUSIVE.
-+# ERROR: Statement ended with errno 1845, errname ER_ALTER_OPERATION_NOT_SUPPORTED (expected to succeed)
-+# ------------ UNEXPECTED RESULT ------------
-+# The statement|command finished with ER_ALTER_OPERATION_NOT_SUPPORTED.
-+# Functionality or the mix could be unsupported|malfunctioning, or the problem was caused by previous errors.
-+# You can change the engine code, or create an rdiff, or disable the test by adding it to disabled.def.
-+# Further in this test, the message might sometimes be suppressed; a part of the test might be skipped.
-+# Also, this problem may cause a chain effect (more errors of different kinds in the test).
-+# -------------------------------------------
- ALTER ONLINE TABLE t1 COMMENT 'new comment';
-+ERROR 0A000: LOCK=NONE/SHARED is not supported for this operation. Try LOCK=EXCLUSIVE.
-+# ERROR: Statement ended with errno 1845, errname ER_ALTER_OPERATION_NOT_SUPPORTED (expected to succeed)
-+# ------------ UNEXPECTED RESULT ------------
-+# The statement|command finished with ER_ALTER_OPERATION_NOT_SUPPORTED.
-+# Functionality or the mix could be unsupported|malfunctioning, or the problem was caused by previous errors.
-+# You can change the engine code, or create an rdiff, or disable the test by adding it to disabled.def.
-+# Further in this test, the message might sometimes be suppressed; a part of the test might be skipped.
-+# Also, this problem may cause a chain effect (more errors of different kinds in the test).
-+# -------------------------------------------
- ALTER ONLINE TABLE t1 RENAME TO t2;
- ERROR 0A000: LOCK=NONE/SHARED is not supported for this operation. Try LOCK=EXCLUSIVE.
- DROP TABLE IF EXISTS t2;
@@ -23,12 +50,30 @@
CREATE TABLE t1 (a <INT_COLUMN>, b <INT_COLUMN>, c <CHAR_COLUMN>) ENGINE=<STORAGE_ENGINE> <CUSTOM_TABLE_OPTIONS>;
INSERT INTO t1 (a,b,c) VALUES (1,100,'a'),(2,200,'b'),(3,300,'c');
diff --git a/storage/myisam/sort.c b/storage/myisam/sort.c
index f490e5f0b44..f9838d4dcb5 100644
--- a/storage/myisam/sort.c
+++ b/storage/myisam/sort.c
@@ -344,192 +344,197 @@ static ha_rows find_all_keys(MI_SORT_PARAM *info, ha_rows keys,
DBUG_RETURN((*maxbuffer)*(keys-1)+idx);
} /* find_all_keys */
-/* Search after all keys and place them in a temp. file */
-
-pthread_handler_t thr_find_all_keys(void *arg)
+static my_bool thr_find_all_keys_exec(MI_SORT_PARAM *sort_param)
{
- MI_SORT_PARAM *sort_param= (MI_SORT_PARAM*) arg;
- int error;
ulonglong memavl, old_memavl, sortbuff_size;
ha_keys UNINIT_VAR(keys), idx;
uint sort_length;
uint maxbuffer;
- uchar **sort_keys=0;
+ uchar **sort_keys= NULL;
+ int error= 0;
+ DBUG_ENTER("thr_find_all_keys");
+ DBUG_PRINT("enter", ("master: %d", sort_param->master));
- error=1;
+ if (sort_param->sort_info->got_error)
+ DBUG_RETURN(TRUE);
- if (my_thread_init())
- goto err;
+ set_sort_param_read_write(sort_param);
- { /* Add extra block since DBUG_ENTER declare variables */
- DBUG_ENTER("thr_find_all_keys");
- DBUG_PRINT("enter", ("master: %d", sort_param->master));
- if (sort_param->sort_info->got_error)
- goto err;
-
- set_sort_param_read_write(sort_param);
-
- my_b_clear(&sort_param->tempfile);
- my_b_clear(&sort_param->tempfile_for_exceptions);
- bzero((char*) &sort_param->buffpek, sizeof(sort_param->buffpek));
- bzero((char*) &sort_param->unique, sizeof(sort_param->unique));
- sort_keys= (uchar **) NULL;
+ my_b_clear(&sort_param->tempfile);
+ my_b_clear(&sort_param->tempfile_for_exceptions);
+ bzero((char*) &sort_param->buffpek, sizeof(sort_param->buffpek));
+ bzero((char*) &sort_param->unique, sizeof(sort_param->unique));
- sortbuff_size= sort_param->sortbuff_size;
- memavl= MY_MAX(sortbuff_size, MIN_SORT_BUFFER);
- idx= (ha_keys) sort_param->sort_info->max_records;
- sort_length= sort_param->key_length;
- maxbuffer= 1;
+ sortbuff_size= sort_param->sortbuff_size;
+ memavl= MY_MAX(sortbuff_size, MIN_SORT_BUFFER);
+ idx= (ha_keys) sort_param->sort_info->max_records;
+ sort_length= sort_param->key_length;
+ maxbuffer= 1;
- while (memavl >= MIN_SORT_BUFFER)
+ while (memavl >= MIN_SORT_BUFFER)
+ {
+ if ((my_off_t) (idx+1)*(sort_length+sizeof(char*)) <=
+ (my_off_t) memavl)
+ keys= idx+1;
+ else if ((sort_param->sort_info->param->testflag &
+ (T_FORCE_SORT_MEMORY | T_CREATE_MISSING_KEYS)) ==
+ T_FORCE_SORT_MEMORY)
{
- if ((my_off_t) (idx+1)*(sort_length+sizeof(char*)) <=
- (my_off_t) memavl)
- keys= idx+1;
- else if ((sort_param->sort_info->param->testflag &
- (T_FORCE_SORT_MEMORY | T_CREATE_MISSING_KEYS)) ==
- T_FORCE_SORT_MEMORY)
- {
- /*
- Use all of the given sort buffer for key data.
- Allocate 1000 buffers at a start for new data. More buffers
- will be allocated when needed.
- */
- keys= memavl / (sort_length+sizeof(char*));
- maxbuffer= (uint) MY_MIN((ulonglong) 1000, (idx / keys)+1);
- }
- else
- {
- uint maxbuffer_org;
- do
- {
- maxbuffer_org= maxbuffer;
- if (memavl < sizeof(BUFFPEK)*maxbuffer ||
- (keys=(memavl-sizeof(BUFFPEK)*maxbuffer)/
- (sort_length+sizeof(char*))) <= 1 ||
- keys < (uint) maxbuffer)
- {
- mi_check_print_error(sort_param->sort_info->param,
- "myisam_sort_buffer_size is too small. Current myisam_sort_buffer_size: %llu rows: %llu sort_length: %u",
- sortbuff_size, (ulonglong) idx, sort_length);
- goto err;
- }
- }
- while ((maxbuffer= (uint) (idx/(keys-1)+1)) != maxbuffer_org);
- }
- if ((sort_keys= (uchar**)
- my_malloc(keys*(sort_length+sizeof(char*))+
- ((sort_param->keyinfo->flag & HA_FULLTEXT) ?
- HA_FT_MAXBYTELEN : 0), MYF(0))))
+ /*
+ Use all of the given sort buffer for key data.
+        Allocate 1000 buffers at the start for new data. More buffers
+ will be allocated when needed.
+ */
+ keys= memavl / (sort_length+sizeof(char*));
+ maxbuffer= (uint) MY_MIN((ulonglong) 1000, (idx / keys)+1);
+ }
+ else
+ {
+ uint maxbuffer_org;
+ do
{
- if (my_init_dynamic_array(&sort_param->buffpek, sizeof(BUFFPEK),
- maxbuffer, MY_MIN(maxbuffer/2, 1000), MYF(0)))
+ maxbuffer_org= maxbuffer;
+ if (memavl < sizeof(BUFFPEK)*maxbuffer ||
+ (keys=(memavl-sizeof(BUFFPEK)*maxbuffer)/
+ (sort_length+sizeof(char*))) <= 1 ||
+ keys < (uint) maxbuffer)
{
- my_free(sort_keys);
- sort_keys= (uchar **) NULL; /* for err: label */
+ mi_check_print_error(sort_param->sort_info->param,
+ "myisam_sort_buffer_size is too small. Current myisam_sort_buffer_size: %llu rows: %llu sort_length: %u",
+ sortbuff_size, (ulonglong) idx, sort_length);
+ DBUG_RETURN(TRUE);
}
- else
- break;
}
- old_memavl= memavl;
- if ((memavl= memavl / 4 * 3) < MIN_SORT_BUFFER &&
- old_memavl > MIN_SORT_BUFFER)
- memavl= MIN_SORT_BUFFER;
+ while ((maxbuffer= (uint) (idx/(keys-1)+1)) != maxbuffer_org);
}
- if (memavl < MIN_SORT_BUFFER)
+ if ((sort_keys= (uchar**) my_malloc(keys * (sort_length + sizeof(char*)) +
+ ((sort_param->keyinfo->flag & HA_FULLTEXT) ?
+ HA_FT_MAXBYTELEN : 0), MYF(0))))
{
- /* purecov: begin inspected */
- mi_check_print_error(sort_param->sort_info->param,
- "myisam_sort_buffer_size is too small. Current myisam_sort_buffer_size: %llu rows: %llu sort_length: %u",
- sortbuff_size, (ulonglong) idx, sort_length);
- my_errno= ENOMEM;
- goto err;
- /* purecov: end inspected */
+ if (my_init_dynamic_array(&sort_param->buffpek, sizeof(BUFFPEK),
+ maxbuffer, MY_MIN(maxbuffer / 2, 1000), MYF(0)))
+ {
+ my_free(sort_keys);
+ sort_keys= NULL; /* Safety against double free on error. */
+ }
+ else
+ break;
}
+ old_memavl= memavl;
+ if ((memavl= memavl / 4 * 3) < MIN_SORT_BUFFER &&
+ old_memavl > MIN_SORT_BUFFER)
+ memavl= MIN_SORT_BUFFER;
+ }
+ if (memavl < MIN_SORT_BUFFER)
+ {
+ /* purecov: begin inspected */
+ mi_check_print_error(sort_param->sort_info->param,
+ "myisam_sort_buffer_size is too small. Current myisam_sort_buffer_size: %llu rows: %llu sort_length: %u",
+ sortbuff_size, (ulonglong) idx, sort_length);
+ my_errno= ENOMEM;
+ goto err;
+ /* purecov: end inspected */
+ }
- if (sort_param->sort_info->param->testflag & T_VERBOSE)
- my_fprintf(stdout,
- "Key %d - Allocating buffer for %llu keys\n",
- sort_param->key + 1, (ulonglong) keys);
- sort_param->sort_keys= sort_keys;
+ if (sort_param->sort_info->param->testflag & T_VERBOSE)
+ my_fprintf(stdout,
+ "Key %d - Allocating buffer for %llu keys\n",
+ sort_param->key + 1, (ulonglong) keys);
+ sort_param->sort_keys= sort_keys;
- idx= error= 0;
- sort_keys[0]= (uchar*) (sort_keys+keys);
+ idx= error= 0;
+ sort_keys[0]= (uchar*) (sort_keys+keys);
- DBUG_PRINT("info", ("reading keys"));
- while (!(error= sort_param->sort_info->got_error) &&
- !(error= (*sort_param->key_read)(sort_param, sort_keys[idx])))
+ DBUG_PRINT("info", ("reading keys"));
+ while (!(error= sort_param->sort_info->got_error) &&
+ !(error= (*sort_param->key_read)(sort_param, sort_keys[idx])))
+ {
+ if (sort_param->real_key_length > sort_param->key_length)
{
- if (sort_param->real_key_length > sort_param->key_length)
- {
- if (write_key(sort_param, sort_keys[idx],
- &sort_param->tempfile_for_exceptions))
- goto err;
- continue;
- }
-
- if (++idx == keys)
- {
- if (sort_param->write_keys(sort_param, sort_keys, idx - 1,
- (BUFFPEK*) alloc_dynamic(&sort_param->buffpek),
- &sort_param->tempfile))
- goto err;
- sort_keys[0]= (uchar*) (sort_keys+keys);
- memcpy(sort_keys[0], sort_keys[idx - 1],
- (size_t) sort_param->key_length);
- idx= 1;
- }
- sort_keys[idx]= sort_keys[idx - 1] + sort_param->key_length;
+ if (write_key(sort_param, sort_keys[idx],
+ &sort_param->tempfile_for_exceptions))
+ goto err;
+ continue;
}
- if (error > 0)
- goto err;
- if (sort_param->buffpek.elements)
+
+ if (++idx == keys)
{
- if (sort_param->write_keys(sort_param, sort_keys, idx,
+ if (sort_param->write_keys(sort_param, sort_keys, idx - 1,
(BUFFPEK*) alloc_dynamic(&sort_param->buffpek),
&sort_param->tempfile))
goto err;
- sort_param->keys= (sort_param->buffpek.elements - 1) * (keys - 1) + idx;
+ sort_keys[0]= (uchar*) (sort_keys+keys);
+ memcpy(sort_keys[0], sort_keys[idx - 1], (size_t) sort_param->key_length);
+ idx= 1;
}
- else
- sort_param->keys= idx;
+ sort_keys[idx]= sort_keys[idx - 1] + sort_param->key_length;
+ }
- goto ok;
+ if (error > 0)
+ goto err;
-err:
- DBUG_PRINT("error", ("got some error"));
- sort_param->sort_info->got_error= 1; /* no need to protect with a mutex */
- my_free(sort_keys);
- sort_param->sort_keys= 0;
- delete_dynamic(& sort_param->buffpek);
- close_cached_file(&sort_param->tempfile);
- close_cached_file(&sort_param->tempfile_for_exceptions);
-
-ok:
- free_root(&sort_param->wordroot, MYF(0));
- /*
- Detach from the share if the writer is involved. Avoid others to
- be blocked. This includes a flush of the write buffer. This will
- also indicate EOF to the readers.
- That means that a writer always gets here first and readers -
- only when they see EOF. But if a reader finishes prematurely
- because of an error it may reach this earlier - don't allow it
- to detach the writer thread.
- */
- if (sort_param->master && sort_param->sort_info->info->rec_cache.share)
- remove_io_thread(&sort_param->sort_info->info->rec_cache);
-
- /* Readers detach from the share if any. Avoid others to be blocked. */
- if (sort_param->read_cache.share)
- remove_io_thread(&sort_param->read_cache);
-
- mysql_mutex_lock(&sort_param->sort_info->mutex);
- if (!--sort_param->sort_info->threads_running)
- mysql_cond_signal(&sort_param->sort_info->cond);
- mysql_mutex_unlock(&sort_param->sort_info->mutex);
- DBUG_PRINT("exit", ("======== ending thread ========"));
- DBUG_LEAVE;
+ if (sort_param->buffpek.elements)
+ {
+ if (sort_param->write_keys(sort_param, sort_keys, idx,
+ (BUFFPEK*) alloc_dynamic(&sort_param->buffpek),
+ &sort_param->tempfile))
+ goto err;
+ sort_param->keys= (sort_param->buffpek.elements - 1) * (keys - 1) + idx;
}
+ else
+ sort_param->keys= idx;
+
+ DBUG_RETURN(FALSE);
+
+err:
+ DBUG_PRINT("error", ("got some error"));
+ sort_param->sort_info->got_error= 1; /* no need to protect with a mutex */
+ my_free(sort_keys);
+ sort_param->sort_keys= 0;
+ delete_dynamic(& sort_param->buffpek);
+ close_cached_file(&sort_param->tempfile);
+ close_cached_file(&sort_param->tempfile_for_exceptions);
+
+ DBUG_RETURN(TRUE);
+}
+
+/* Find all keys and place them in a temp. file */
+
+pthread_handler_t thr_find_all_keys(void *arg)
+{
+ MI_SORT_PARAM *sort_param= (MI_SORT_PARAM*) arg;
+ my_bool error= FALSE;
+  /* Treat a failed my_thread_init() or key scan as an error. */
+ if (my_thread_init() || thr_find_all_keys_exec(sort_param))
+ error= TRUE;
+
+ /*
+ Thread must clean up after itself.
+ */
+ free_root(&sort_param->wordroot, MYF(0));
+ /*
+    Detach from the share if the writer is involved, so that others are
+    not blocked. This includes a flush of the write buffer, which will
+    also indicate EOF to the readers.
+    That means that a writer always gets here first, and readers only
+    when they see EOF. But if a reader finishes prematurely because of
+    an error, it may reach this point earlier - don't allow it to detach
+    the writer thread.
+ */
+ if (sort_param->master && sort_param->sort_info->info->rec_cache.share)
+ remove_io_thread(&sort_param->sort_info->info->rec_cache);
+
+  /* Readers detach from the share, if any, so that others are not blocked. */
+ if (sort_param->read_cache.share)
+ remove_io_thread(&sort_param->read_cache);
+
+ mysql_mutex_lock(&sort_param->sort_info->mutex);
+ if (error)
+ sort_param->sort_info->got_error= 1;
+
+ if (!--sort_param->sort_info->threads_running)
+ mysql_cond_signal(&sort_param->sort_info->cond);
+ mysql_mutex_unlock(&sort_param->sort_info->mutex);
my_thread_end();
return NULL;
}
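
The sort.c hunk above is a pure refactoring: the body of thr_find_all_keys moves into thr_find_all_keys_exec, which can bail out with a plain DBUG_RETURN, while the thread entry point keeps a single exit path that owns cleanup and signalling. A minimal sketch of the pattern, using hypothetical names rather than the actual MyISAM API:

    struct worker_ctx
    {
      bool got_error;                    /* shared failure flag */
      /* ... shared sort state ... */
    };

    /* The worker body: every failure is a plain return; no cleanup here. */
    static bool worker_exec(worker_ctx *ctx)
    {
      if (ctx->got_error)
        return true;                     /* another thread already failed */
      /* ... read keys and write sorted runs, returning true on error ... */
      return false;
    }

    /* Thread entry: a single exit path owns all cleanup and signalling. */
    static void *worker_thread(void *arg)
    {
      worker_ctx *ctx= static_cast<worker_ctx*>(arg);
      bool error= worker_exec(ctx);
      /* detach from shared caches and free buffers - exactly once */
      if (error)
        ctx->got_error= true;            /* publish the failure to the master */
      return nullptr;
    }
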
diff --git a/storage/oqgraph/oqgraph_shim.h b/storage/oqgraph/oqgraph_shim.h
index 6ea9d10631f..af240b88ebd 100644
--- a/storage/oqgraph/oqgraph_shim.h
+++ b/storage/oqgraph/oqgraph_shim.h
@@ -254,7 +254,7 @@ namespace boost
typedef no_property type;
};
-#if BOOST_VERSION >= 104601
+#if BOOST_VERSION < 106000 && BOOST_VERSION >= 104601
template <>
struct graph_bundle_type<oqgraph3::graph>
{
diff --git a/storage/tokudb/CMakeLists.txt b/storage/tokudb/CMakeLists.txt
index a64974d7330..f9754639bd9 100644
--- a/storage/tokudb/CMakeLists.txt
+++ b/storage/tokudb/CMakeLists.txt
@@ -1,4 +1,4 @@
-SET(TOKUDB_VERSION 5.6.26-74.0)
+SET(TOKUDB_VERSION 5.6.30-76.3)
# PerconaFT only supports x86-64 and cmake-2.8.9+
IF(CMAKE_VERSION VERSION_LESS "2.8.9")
MESSAGE(STATUS "CMake 2.8.9 or higher is required by TokuDB")
@@ -16,7 +16,12 @@ IF(NOT TOKUDB_OK)
RETURN()
ENDIF()
-SET(TOKUDB_SOURCES ha_tokudb.cc)
+SET(TOKUDB_SOURCES
+ ha_tokudb.cc
+ tokudb_background.cc
+ tokudb_information_schema.cc
+ tokudb_sysvars.cc
+ tokudb_thread.cc)
MYSQL_ADD_PLUGIN(tokudb ${TOKUDB_SOURCES} STORAGE_ENGINE MODULE_ONLY)
IF(NOT TARGET tokudb)
@@ -27,7 +32,6 @@ IF(NOT LIBJEMALLOC)
MESSAGE(WARNING "TokuDB is enabled, but jemalloc is not. This configuration is not supported")
ENDIF()
-MY_CHECK_AND_SET_COMPILER_FLAG("-Wno-vla")
MY_CHECK_AND_SET_COMPILER_FLAG("-Wno-vla" DEBUG)
############################################
@@ -47,10 +51,16 @@ MARK_AS_ADVANCED(gcc_ar)
MARK_AS_ADVANCED(gcc_ranlib)
############################################
+# pick language dialect
+MY_CHECK_AND_SET_COMPILER_FLAG(-std=c++11)
+
SET(BUILD_TESTING OFF CACHE BOOL "")
SET(USE_VALGRIND OFF CACHE BOOL "")
SET(TOKU_DEBUG_PARANOID OFF CACHE BOOL "")
+# Enable TokuDB's TOKUDB_DEBUG in debug builds
+SET(CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} -DTOKUDB_DEBUG")
+
IF(NOT DEFINED TOKUDB_VERSION)
IF(DEFINED ENV{TOKUDB_VERSION})
SET(TOKUDB_VERSION $ENV{TOKUDB_VERSION})
@@ -73,10 +83,6 @@ IF(DEFINED TOKUDB_CHECK_JEMALLOC)
ADD_DEFINITIONS("-DTOKUDB_CHECK_JEMALLOC=${TOKUDB_CHECK_JEMALLOC}")
ENDIF()
-## adds a compiler flag if the compiler supports it
-include(CheckCCompilerFlag)
-include(CheckCXXCompilerFlag)
-
MY_CHECK_AND_SET_COMPILER_FLAG(-Wno-missing-field-initializers)
IF (EXISTS "${CMAKE_CURRENT_SOURCE_DIR}/PerconaFT/")
diff --git a/storage/tokudb/PerconaFT/CMakeLists.txt b/storage/tokudb/PerconaFT/CMakeLists.txt
index 843b4c9d0e8..0e283c13c61 100644
--- a/storage/tokudb/PerconaFT/CMakeLists.txt
+++ b/storage/tokudb/PerconaFT/CMakeLists.txt
@@ -1,3 +1,6 @@
+if (CMAKE_PROJECT_NAME STREQUAL TokuDB)
+ cmake_minimum_required(VERSION 2.8.8 FATAL_ERROR)
+endif()
set(CMAKE_MODULE_PATH "${CMAKE_CURRENT_SOURCE_DIR}/cmake_modules")
project(TokuDB)
diff --git a/storage/tokudb/PerconaFT/CTestCustom.cmake b/storage/tokudb/PerconaFT/CTestCustom.cmake.in
index 54170b2b903..54170b2b903 100644
--- a/storage/tokudb/PerconaFT/CTestCustom.cmake
+++ b/storage/tokudb/PerconaFT/CTestCustom.cmake.in
diff --git a/storage/tokudb/PerconaFT/README.md b/storage/tokudb/PerconaFT/README.md
index 7e30a558bc7..d53caf00190 100644
--- a/storage/tokudb/PerconaFT/README.md
+++ b/storage/tokudb/PerconaFT/README.md
@@ -113,11 +113,11 @@ All source code and test contributions must be provided under a [BSD 2-Clause][b
License
-------
-PerconaFT is available under the GPL version 2, and AGPL version 3, with slight modifications.
+PerconaFT is available under the GPL version 2, and AGPL version 3.
See [COPYING.AGPLv3][agpllicense],
[COPYING.GPLv2][gpllicense], and
[PATENTS][patents].
-[agpllicense]: http://github.com/Perona/PerconaFT/blob/master/COPYING.AGPLv3
-[gpllicense]: http://github.com/Perona/PerconaFT/blob/master/COPYING.GPLv2
-[patents]: http://github.com/Perona/PerconaFT/blob/master/PATENTS
+[agpllicense]: http://github.com/Percona/PerconaFT/blob/master/COPYING.AGPLv3
+[gpllicense]: http://github.com/Percona/PerconaFT/blob/master/COPYING.GPLv2
+[patents]: http://github.com/Percona/PerconaFT/blob/master/PATENTS
diff --git a/storage/tokudb/PerconaFT/buildheader/make_tdb.cc b/storage/tokudb/PerconaFT/buildheader/make_tdb.cc
index 958f00a8706..5c29209e19d 100644
--- a/storage/tokudb/PerconaFT/buildheader/make_tdb.cc
+++ b/storage/tokudb/PerconaFT/buildheader/make_tdb.cc
@@ -510,8 +510,9 @@ static void print_db_struct (void) {
"int (*update_broadcast)(DB *, DB_TXN*, const DBT *extra, uint32_t flags)",
"int (*get_fractal_tree_info64)(DB*,uint64_t*,uint64_t*,uint64_t*,uint64_t*)",
"int (*iterate_fractal_tree_block_map)(DB*,int(*)(uint64_t,int64_t,int64_t,int64_t,int64_t,void*),void*)",
- "const char *(*get_dname)(DB *db)",
- "int (*get_last_key)(DB *db, YDB_CALLBACK_FUNCTION func, void* extra)",
+ "const char *(*get_dname)(DB *db)",
+ "int (*get_last_key)(DB *db, YDB_CALLBACK_FUNCTION func, void* extra)",
+ "int (*recount_rows)(DB* db, int (*progress_callback)(uint64_t count, uint64_t deleted, void* progress_extra), void* progress_extra)",
NULL};
sort_and_dump_fields("db", true, extra);
}
diff --git a/storage/tokudb/PerconaFT/cmake_modules/TokuMergeLibs.cmake b/storage/tokudb/PerconaFT/cmake_modules/TokuMergeLibs.cmake
index 15066906831..e1da095fc00 100644
--- a/storage/tokudb/PerconaFT/cmake_modules/TokuMergeLibs.cmake
+++ b/storage/tokudb/PerconaFT/cmake_modules/TokuMergeLibs.cmake
@@ -48,7 +48,8 @@ MACRO(TOKU_MERGE_STATIC_LIBS TARGET OUTPUT_NAME LIBS_TO_MERGE)
ENDIF()
ENDFOREACH()
IF(OSLIBS)
- #LIST(REMOVE_DUPLICATES OSLIBS)
+    # REMOVE_DUPLICATES destroys the order of the libs, so it is disabled
+ # LIST(REMOVE_DUPLICATES OSLIBS)
TARGET_LINK_LIBRARIES(${TARGET} LINK_PUBLIC ${OSLIBS})
ENDIF()
diff --git a/storage/tokudb/PerconaFT/cmake_modules/TokuSetupCompiler.cmake b/storage/tokudb/PerconaFT/cmake_modules/TokuSetupCompiler.cmake
index 40ccbcc0aed..77f6d8f67b7 100644
--- a/storage/tokudb/PerconaFT/cmake_modules/TokuSetupCompiler.cmake
+++ b/storage/tokudb/PerconaFT/cmake_modules/TokuSetupCompiler.cmake
@@ -24,12 +24,12 @@ endif ()
## add TOKU_PTHREAD_DEBUG for debug builds
if (CMAKE_VERSION VERSION_LESS 3.0)
- set_property(DIRECTORY APPEND PROPERTY COMPILE_DEFINITIONS_DEBUG TOKU_PTHREAD_DEBUG=1)
- set_property(DIRECTORY APPEND PROPERTY COMPILE_DEFINITIONS_DRD TOKU_PTHREAD_DEBUG=1)
+ set_property(DIRECTORY APPEND PROPERTY COMPILE_DEFINITIONS_DEBUG TOKU_PTHREAD_DEBUG=1 TOKU_DEBUG_TXN_SYNC=1)
+ set_property(DIRECTORY APPEND PROPERTY COMPILE_DEFINITIONS_DRD TOKU_PTHREAD_DEBUG=1 TOKU_DEBUG_TXN_SYNC=1)
set_property(DIRECTORY APPEND PROPERTY COMPILE_DEFINITIONS_DRD _FORTIFY_SOURCE=2)
else ()
set_property(DIRECTORY APPEND PROPERTY COMPILE_DEFINITIONS
- $<$<OR:$<CONFIG:DEBUG>,$<CONFIG:DRD>>:TOKU_PTHREAD_DEBUG=1>
+ $<$<OR:$<CONFIG:DEBUG>,$<CONFIG:DRD>>:TOKU_PTHREAD_DEBUG=1 TOKU_DEBUG_TXN_SYNC=1>
$<$<CONFIG:DRD>:_FORTIFY_SOURCE=2>
)
endif ()
@@ -65,8 +65,10 @@ set_cflags_if_supported(
-Wno-error=missing-format-attribute
-Wno-error=address-of-array-temporary
-Wno-error=tautological-constant-out-of-range-compare
+ -Wno-error=maybe-uninitialized
-Wno-ignored-attributes
-Wno-error=extern-c-compat
+ -Wno-pointer-bool-conversion
-fno-rtti
-fno-exceptions
)
@@ -119,13 +121,18 @@ if (CMAKE_CXX_COMPILER_ID STREQUAL Clang)
set(CMAKE_C_FLAGS_RELEASE "-g -O3 ${CMAKE_C_FLAGS_RELEASE} -UNDEBUG")
set(CMAKE_CXX_FLAGS_RELEASE "-g -O3 ${CMAKE_CXX_FLAGS_RELEASE} -UNDEBUG")
else ()
+ if (APPLE)
+ set(FLTO_OPTS "-fwhole-program")
+ else ()
+ set(FLTO_OPTS "-fuse-linker-plugin")
+ endif()
# we overwrite this because the default passes -DNDEBUG and we don't want that
- set(CMAKE_C_FLAGS_RELWITHDEBINFO "-flto -fuse-linker-plugin ${CMAKE_C_FLAGS_RELWITHDEBINFO} -g -O3 -UNDEBUG")
- set(CMAKE_CXX_FLAGS_RELWITHDEBINFO "-flto -fuse-linker-plugin ${CMAKE_CXX_FLAGS_RELWITHDEBINFO} -g -O3 -UNDEBUG")
- set(CMAKE_C_FLAGS_RELEASE "-g -O3 -flto -fuse-linker-plugin ${CMAKE_C_FLAGS_RELEASE} -UNDEBUG")
- set(CMAKE_CXX_FLAGS_RELEASE "-g -O3 -flto -fuse-linker-plugin ${CMAKE_CXX_FLAGS_RELEASE} -UNDEBUG")
- set(CMAKE_EXE_LINKER_FLAGS "-g -fuse-linker-plugin ${CMAKE_EXE_LINKER_FLAGS}")
- set(CMAKE_SHARED_LINKER_FLAGS "-g -fuse-linker-plugin ${CMAKE_SHARED_LINKER_FLAGS}")
+ set(CMAKE_C_FLAGS_RELWITHDEBINFO "-flto ${FLTO_OPTS} ${CMAKE_C_FLAGS_RELWITHDEBINFO} -g -O3 -UNDEBUG")
+ set(CMAKE_CXX_FLAGS_RELWITHDEBINFO "-flto ${FLTO_OPTS} ${CMAKE_CXX_FLAGS_RELWITHDEBINFO} -g -O3 -UNDEBUG")
+ set(CMAKE_C_FLAGS_RELEASE "-g -O3 -flto ${FLTO_OPTS} ${CMAKE_C_FLAGS_RELEASE} -UNDEBUG")
+ set(CMAKE_CXX_FLAGS_RELEASE "-g -O3 -flto ${FLTO_OPTS} ${CMAKE_CXX_FLAGS_RELEASE} -UNDEBUG")
+ set(CMAKE_EXE_LINKER_FLAGS "-g ${FLTO_OPTS} ${CMAKE_EXE_LINKER_FLAGS}")
+ set(CMAKE_SHARED_LINKER_FLAGS "-g ${FLTO_OPTS} ${CMAKE_SHARED_LINKER_FLAGS}")
endif ()
## set warnings
@@ -159,15 +166,6 @@ endif ()
set(CMAKE_C_FLAGS "-Wall -Werror ${CMAKE_C_FLAGS}")
set(CMAKE_CXX_FLAGS "-Wall -Werror ${CMAKE_CXX_FLAGS}")
-## need to set -stdlib=libc++ to get real c++11 support on darwin
-if (APPLE)
- if (CMAKE_GENERATOR STREQUAL Xcode)
- set(CMAKE_XCODE_ATTRIBUTE_CLANG_CXX_LIBRARY "libc++")
- else ()
- add_definitions(-stdlib=libc++)
- endif ()
-endif ()
-
# pick language dialect
set(CMAKE_C_FLAGS "-std=c99 ${CMAKE_C_FLAGS}")
check_cxx_compiler_flag(-std=c++11 HAVE_STDCXX11)
diff --git a/storage/tokudb/PerconaFT/ft/CMakeLists.txt b/storage/tokudb/PerconaFT/ft/CMakeLists.txt
index 744b9c9a9e1..11091073ac2 100644
--- a/storage/tokudb/PerconaFT/ft/CMakeLists.txt
+++ b/storage/tokudb/PerconaFT/ft/CMakeLists.txt
@@ -36,6 +36,7 @@ set(FT_SOURCES
ft-flusher
ft-hot-flusher
ft-ops
+ ft-recount-rows
ft-status
ft-test-helpers
ft-verify
diff --git a/storage/tokudb/PerconaFT/ft/ft-flusher.cc b/storage/tokudb/PerconaFT/ft/ft-flusher.cc
index 530947fe868..fb456ea6a18 100644
--- a/storage/tokudb/PerconaFT/ft/ft-flusher.cc
+++ b/storage/tokudb/PerconaFT/ft/ft-flusher.cc
@@ -1572,6 +1572,7 @@ void toku_bnc_flush_to_child(FT ft, NONLEAF_CHILDINFO bnc, FTNODE child, TXNID p
txn_gc_info *gc_info;
STAT64INFO_S stats_delta;
+ int64_t logical_rows_delta = 0;
size_t remaining_memsize = bnc->msg_buffer.buffer_size_in_use();
flush_msg_fn(FT t, FTNODE n, NONLEAF_CHILDINFO nl, txn_gc_info *g) :
@@ -1599,8 +1600,8 @@ void toku_bnc_flush_to_child(FT ft, NONLEAF_CHILDINFO bnc, FTNODE child, TXNID p
is_fresh,
gc_info,
flow_deltas,
- &stats_delta
- );
+ &stats_delta,
+ &logical_rows_delta);
remaining_memsize -= memsize_in_buffer;
return 0;
}
@@ -1613,6 +1614,7 @@ void toku_bnc_flush_to_child(FT ft, NONLEAF_CHILDINFO bnc, FTNODE child, TXNID p
if (flush_fn.stats_delta.numbytes || flush_fn.stats_delta.numrows) {
toku_ft_update_stats(&ft->in_memory_stats, flush_fn.stats_delta);
}
+ toku_ft_adjust_logical_row_count(ft, flush_fn.logical_rows_delta);
if (do_garbage_collection) {
size_t buffsize = bnc->msg_buffer.buffer_size_in_use();
// may be misleading if there's a broadcast message in there
diff --git a/storage/tokudb/PerconaFT/ft/ft-internal.h b/storage/tokudb/PerconaFT/ft/ft-internal.h
index 6bf7029245b..eec591d1744 100644
--- a/storage/tokudb/PerconaFT/ft/ft-internal.h
+++ b/storage/tokudb/PerconaFT/ft/ft-internal.h
@@ -143,6 +143,10 @@ struct ft_header {
MSN msn_at_start_of_last_completed_optimize;
STAT64INFO_S on_disk_stats;
+
+  // This represents the balance of inserts minus deletes, and should be
+  // close to the true logical number of records in an index
+ uint64_t on_disk_logical_rows;
};
typedef struct ft_header *FT_HEADER;
@@ -176,6 +180,7 @@ struct ft {
// protected by atomic builtins
STAT64INFO_S in_memory_stats;
+ uint64_t in_memory_logical_rows;
// transient, not serialized to disk. updated when we do write to
// disk. tells us whether we can do partial eviction (we can't if
diff --git a/storage/tokudb/PerconaFT/ft/ft-ops.cc b/storage/tokudb/PerconaFT/ft/ft-ops.cc
index f5da82ee000..8f61bc67339 100644
--- a/storage/tokudb/PerconaFT/ft/ft-ops.cc
+++ b/storage/tokudb/PerconaFT/ft/ft-ops.cc
@@ -1371,7 +1371,8 @@ static void inject_message_in_locked_node(
ft_msg msg_with_msn(msg.kdbt(), msg.vdbt(), msg.type(), msg_msn, msg.xids());
paranoid_invariant(msg_with_msn.msn().msn > node->max_msn_applied_to_node_on_disk.msn);
- STAT64INFO_S stats_delta = {0,0};
+ STAT64INFO_S stats_delta = { 0,0 };
+ int64_t logical_rows_delta = 0;
toku_ftnode_put_msg(
ft->cmp,
ft->update_fun,
@@ -1381,11 +1382,12 @@ static void inject_message_in_locked_node(
true,
gc_info,
flow_deltas,
- &stats_delta
- );
+ &stats_delta,
+ &logical_rows_delta);
if (stats_delta.numbytes || stats_delta.numrows) {
toku_ft_update_stats(&ft->in_memory_stats, stats_delta);
}
+ toku_ft_adjust_logical_row_count(ft, logical_rows_delta);
//
// assumption is that toku_ftnode_put_msg will
// mark the node as dirty.
@@ -2169,6 +2171,7 @@ int toku_ft_insert_unique(FT_HANDLE ft_h, DBT *key, DBT *val, TOKUTXN txn, bool
if (r == 0) {
ft_txn_log_insert(ft_h->ft, key, val, txn, do_logging, FT_INSERT);
+ toku_ft_adjust_logical_row_count(ft_h->ft, 1);
}
return r;
}
@@ -2344,6 +2347,7 @@ void toku_ft_maybe_insert (FT_HANDLE ft_h, DBT *key, DBT *val, TOKUTXN txn, bool
if (r != 0) {
toku_ft_send_insert(ft_h, key, val, message_xids, type, &gc_info);
}
+ toku_ft_adjust_logical_row_count(ft_h->ft, 1);
}
}
@@ -2513,6 +2517,7 @@ void toku_ft_maybe_delete(FT_HANDLE ft_h, DBT *key, TOKUTXN txn, bool oplsn_vali
oldest_referenced_xid_estimate,
txn != nullptr ? !txn->for_recovery : false);
toku_ft_send_delete(ft_h, key, message_xids, &gc_info);
+ toku_ft_adjust_logical_row_count(ft_h->ft, -1);
}
}
diff --git a/storage/tokudb/PerconaFT/ft/ft-ops.h b/storage/tokudb/PerconaFT/ft/ft-ops.h
index 7d0b165b70c..313a74628ea 100644
--- a/storage/tokudb/PerconaFT/ft/ft-ops.h
+++ b/storage/tokudb/PerconaFT/ft/ft-ops.h
@@ -207,6 +207,15 @@ extern int toku_ft_debug_mode;
int toku_verify_ft (FT_HANDLE ft_h) __attribute__ ((warn_unused_result));
int toku_verify_ft_with_progress (FT_HANDLE ft_h, int (*progress_callback)(void *extra, float progress), void *extra, int verbose, int keep_going) __attribute__ ((warn_unused_result));
+int toku_ft_recount_rows(
+ FT_HANDLE ft,
+ int (*progress_callback)(
+ uint64_t count,
+ uint64_t deleted,
+ void* progress_extra),
+ void* progress_extra);
+
+
DICTIONARY_ID toku_ft_get_dictionary_id(FT_HANDLE);
enum ft_flags {
diff --git a/storage/tokudb/PerconaFT/ft/ft-recount-rows.cc b/storage/tokudb/PerconaFT/ft/ft-recount-rows.cc
new file mode 100644
index 00000000000..adac96f4882
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ft/ft-recount-rows.cc
@@ -0,0 +1,115 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "ft/serialize/block_table.h"
+#include "ft/ft.h"
+#include "ft/ft-internal.h"
+#include "ft/cursor.h"
+
+struct recount_rows_extra_t {
+ int (*_progress_callback)(
+ uint64_t count,
+ uint64_t deleted,
+ void* progress_extra);
+ void* _progress_extra;
+ uint64_t _keys;
+ bool _cancelled;
+};
+
+static int recount_rows_found(
+ uint32_t UU(keylen),
+ const void* key,
+ uint32_t UU(vallen),
+ const void* UU(val),
+ void* extra,
+ bool UU(lock_only)) {
+
+ recount_rows_extra_t* rre = (recount_rows_extra_t*)extra;
+
+ if (FT_LIKELY(key != nullptr)) {
+ rre->_keys++;
+ }
+ return rre->_cancelled
+ = rre->_progress_callback(rre->_keys, 0, rre->_progress_extra);
+}
+static bool recount_rows_interrupt(void* extra, uint64_t deleted_rows) {
+ recount_rows_extra_t* rre = (recount_rows_extra_t*)extra;
+
+ return rre->_cancelled =
+ rre->_progress_callback(rre->_keys, deleted_rows, rre->_progress_extra);
+}
+int toku_ft_recount_rows(
+ FT_HANDLE ft,
+ int (*progress_callback)(
+ uint64_t count,
+ uint64_t deleted,
+ void* progress_extra),
+ void* progress_extra) {
+
+ int ret = 0;
+ recount_rows_extra_t rre = {
+ progress_callback,
+ progress_extra,
+ 0,
+ false
+ };
+
+ ft_cursor c;
+ ret = toku_ft_cursor_create(ft, &c, nullptr, C_READ_ANY, false, false);
+ if (ret) return ret;
+
+ toku_ft_cursor_set_check_interrupt_cb(
+ &c,
+ recount_rows_interrupt,
+ &rre);
+
+ ret = toku_ft_cursor_first(&c, recount_rows_found, &rre);
+ while (FT_LIKELY(ret == 0)) {
+ ret = toku_ft_cursor_next(&c, recount_rows_found, &rre);
+ }
+
+ toku_ft_cursor_destroy(&c);
+
+ if (rre._cancelled == false) {
+ // update ft count
+ toku_unsafe_set(&ft->ft->in_memory_logical_rows, rre._keys);
+ ret = 0;
+ }
+
+ return ret;
+}
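
A sketch of how the new entry point might be driven (a hypothetical caller, assuming an open FT_HANDLE and the headers included above; a non-zero return from the progress callback is stored in _cancelled, cancels the recount, and leaves the old count untouched):

    #include <cstdint>
    #include <cstdio>

    struct recount_progress {
        uint64_t report_every;           // hypothetical reporting interval
    };

    static int report_progress(uint64_t count, uint64_t deleted, void* extra) {
        recount_progress* p = (recount_progress*)extra;
        if (p->report_every != 0 && count % p->report_every == 0)
            std::fprintf(stderr, "recount: %llu keys, %llu deleted\n",
                         (unsigned long long)count,
                         (unsigned long long)deleted);
        return 0;                        // non-zero would cancel the recount
    }

    int recount_index(FT_HANDLE h) {
        recount_progress p = { 100000 };
        // On success, in_memory_logical_rows is reset to the keys counted.
        return toku_ft_recount_rows(h, report_progress, &p);
    }
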
diff --git a/storage/tokudb/PerconaFT/ft/ft-status.cc b/storage/tokudb/PerconaFT/ft/ft-status.cc
index 982df1822c4..19a378c22bf 100644
--- a/storage/tokudb/PerconaFT/ft/ft-status.cc
+++ b/storage/tokudb/PerconaFT/ft/ft-status.cc
@@ -128,24 +128,24 @@ void CACHETABLE_STATUS_S::init() {
CT_STATUS_INIT(CT_LONG_WAIT_PRESSURE_COUNT, CACHETABLE_LONG_WAIT_PRESSURE_COUNT, UINT64, "number of long waits on cache pressure");
CT_STATUS_INIT(CT_LONG_WAIT_PRESSURE_TIME, CACHETABLE_LONG_WAIT_PRESSURE_TIME, UINT64, "long time waiting on cache pressure");
- CT_STATUS_INIT(CT_POOL_CLIENT_NUM_THREADS, CACHETABLE_POOL_CLIENT_NUM_THREADS, UINT64, "number of threads in pool");
- CT_STATUS_INIT(CT_POOL_CLIENT_NUM_THREADS_ACTIVE, CACHETABLE_POOL_CLIENT_NUM_THREADS_ACTIVE, UINT64, "number of currently active threads in pool");
- CT_STATUS_INIT(CT_POOL_CLIENT_QUEUE_SIZE, CACHETABLE_POOL_CLIENT_QUEUE_SIZE, UINT64, "number of currently queued work items");
- CT_STATUS_INIT(CT_POOL_CLIENT_MAX_QUEUE_SIZE, CACHETABLE_POOL_CLIENT_MAX_QUEUE_SIZE, UINT64, "largest number of queued work items");
- CT_STATUS_INIT(CT_POOL_CLIENT_TOTAL_ITEMS_PROCESSED, CACHETABLE_POOL_CLIENT_TOTAL_ITEMS_PROCESSED, UINT64, "total number of work items processed");
- CT_STATUS_INIT(CT_POOL_CLIENT_TOTAL_EXECUTION_TIME, CACHETABLE_POOL_CLIENT_TOTAL_EXECUTION_TIME, UINT64, "total execution time of processing work items");
- CT_STATUS_INIT(CT_POOL_CACHETABLE_NUM_THREADS, CACHETABLE_POOL_CACHETABLE_NUM_THREADS, UINT64, "number of threads in pool");
- CT_STATUS_INIT(CT_POOL_CACHETABLE_NUM_THREADS_ACTIVE, CACHETABLE_POOL_CACHETABLE_NUM_THREADS_ACTIVE, UINT64, "number of currently active threads in pool");
- CT_STATUS_INIT(CT_POOL_CACHETABLE_QUEUE_SIZE, CACHETABLE_POOL_CACHETABLE_QUEUE_SIZE, UINT64, "number of currently queued work items");
- CT_STATUS_INIT(CT_POOL_CACHETABLE_MAX_QUEUE_SIZE, CACHETABLE_POOL_CACHETABLE_MAX_QUEUE_SIZE, UINT64, "largest number of queued work items");
- CT_STATUS_INIT(CT_POOL_CACHETABLE_TOTAL_ITEMS_PROCESSED, CACHETABLE_POOL_CACHETABLE_TOTAL_ITEMS_PROCESSED, UINT64, "total number of work items processed");
- CT_STATUS_INIT(CT_POOL_CACHETABLE_TOTAL_EXECUTION_TIME, CACHETABLE_POOL_CACHETABLE_TOTAL_EXECUTION_TIME, UINT64, "total execution time of processing work items");
- CT_STATUS_INIT(CT_POOL_CHECKPOINT_NUM_THREADS, CACHETABLE_POOL_CHECKPOINT_NUM_THREADS, UINT64, "number of threads in pool");
- CT_STATUS_INIT(CT_POOL_CHECKPOINT_NUM_THREADS_ACTIVE, CACHETABLE_POOL_CHECKPOINT_NUM_THREADS_ACTIVE, UINT64, "number of currently active threads in pool");
- CT_STATUS_INIT(CT_POOL_CHECKPOINT_QUEUE_SIZE, CACHETABLE_POOL_CHECKPOINT_QUEUE_SIZE, UINT64, "number of currently queued work items");
- CT_STATUS_INIT(CT_POOL_CHECKPOINT_MAX_QUEUE_SIZE, CACHETABLE_POOL_CHECKPOINT_MAX_QUEUE_SIZE, UINT64, "largest number of queued work items");
- CT_STATUS_INIT(CT_POOL_CHECKPOINT_TOTAL_ITEMS_PROCESSED, CACHETABLE_POOL_CHECKPOINT_TOTAL_ITEMS_PROCESSED, UINT64, "total number of work items processed");
- CT_STATUS_INIT(CT_POOL_CHECKPOINT_TOTAL_EXECUTION_TIME, CACHETABLE_POOL_CHECKPOINT_TOTAL_EXECUTION_TIME, UINT64, "total execution time of processing work items");
+ CT_STATUS_INIT(CT_POOL_CLIENT_NUM_THREADS, CACHETABLE_POOL_CLIENT_NUM_THREADS, UINT64, "client pool: number of threads in pool");
+ CT_STATUS_INIT(CT_POOL_CLIENT_NUM_THREADS_ACTIVE, CACHETABLE_POOL_CLIENT_NUM_THREADS_ACTIVE, UINT64, "client pool: number of currently active threads in pool");
+ CT_STATUS_INIT(CT_POOL_CLIENT_QUEUE_SIZE, CACHETABLE_POOL_CLIENT_QUEUE_SIZE, UINT64, "client pool: number of currently queued work items");
+ CT_STATUS_INIT(CT_POOL_CLIENT_MAX_QUEUE_SIZE, CACHETABLE_POOL_CLIENT_MAX_QUEUE_SIZE, UINT64, "client pool: largest number of queued work items");
+ CT_STATUS_INIT(CT_POOL_CLIENT_TOTAL_ITEMS_PROCESSED, CACHETABLE_POOL_CLIENT_TOTAL_ITEMS_PROCESSED, UINT64, "client pool: total number of work items processed");
+ CT_STATUS_INIT(CT_POOL_CLIENT_TOTAL_EXECUTION_TIME, CACHETABLE_POOL_CLIENT_TOTAL_EXECUTION_TIME, UINT64, "client pool: total execution time of processing work items");
+ CT_STATUS_INIT(CT_POOL_CACHETABLE_NUM_THREADS, CACHETABLE_POOL_CACHETABLE_NUM_THREADS, UINT64, "cachetable pool: number of threads in pool");
+ CT_STATUS_INIT(CT_POOL_CACHETABLE_NUM_THREADS_ACTIVE, CACHETABLE_POOL_CACHETABLE_NUM_THREADS_ACTIVE, UINT64, "cachetable pool: number of currently active threads in pool");
+ CT_STATUS_INIT(CT_POOL_CACHETABLE_QUEUE_SIZE, CACHETABLE_POOL_CACHETABLE_QUEUE_SIZE, UINT64, "cachetable pool: number of currently queued work items");
+ CT_STATUS_INIT(CT_POOL_CACHETABLE_MAX_QUEUE_SIZE, CACHETABLE_POOL_CACHETABLE_MAX_QUEUE_SIZE, UINT64, "cachetable pool: largest number of queued work items");
+ CT_STATUS_INIT(CT_POOL_CACHETABLE_TOTAL_ITEMS_PROCESSED, CACHETABLE_POOL_CACHETABLE_TOTAL_ITEMS_PROCESSED, UINT64, "cachetable pool: total number of work items processed");
+ CT_STATUS_INIT(CT_POOL_CACHETABLE_TOTAL_EXECUTION_TIME, CACHETABLE_POOL_CACHETABLE_TOTAL_EXECUTION_TIME, UINT64, "cachetable pool: total execution time of processing work items");
+ CT_STATUS_INIT(CT_POOL_CHECKPOINT_NUM_THREADS, CACHETABLE_POOL_CHECKPOINT_NUM_THREADS, UINT64, "checkpoint pool: number of threads in pool");
+ CT_STATUS_INIT(CT_POOL_CHECKPOINT_NUM_THREADS_ACTIVE, CACHETABLE_POOL_CHECKPOINT_NUM_THREADS_ACTIVE, UINT64, "checkpoint pool: number of currently active threads in pool");
+ CT_STATUS_INIT(CT_POOL_CHECKPOINT_QUEUE_SIZE, CACHETABLE_POOL_CHECKPOINT_QUEUE_SIZE, UINT64, "checkpoint pool: number of currently queued work items");
+ CT_STATUS_INIT(CT_POOL_CHECKPOINT_MAX_QUEUE_SIZE, CACHETABLE_POOL_CHECKPOINT_MAX_QUEUE_SIZE, UINT64, "checkpoint pool: largest number of queued work items");
+ CT_STATUS_INIT(CT_POOL_CHECKPOINT_TOTAL_ITEMS_PROCESSED, CACHETABLE_POOL_CHECKPOINT_TOTAL_ITEMS_PROCESSED, UINT64, "checkpoint pool: total number of work items processed");
+ CT_STATUS_INIT(CT_POOL_CHECKPOINT_TOTAL_EXECUTION_TIME, CACHETABLE_POOL_CHECKPOINT_TOTAL_EXECUTION_TIME, UINT64, "checkpoint pool: total execution time of processing work items");
m_initialized = true;
#undef CT_STATUS_INIT
diff --git a/storage/tokudb/PerconaFT/ft/ft-test-helpers.cc b/storage/tokudb/PerconaFT/ft/ft-test-helpers.cc
index 7ca36c23780..6fcdbbdc9e3 100644
--- a/storage/tokudb/PerconaFT/ft/ft-test-helpers.cc
+++ b/storage/tokudb/PerconaFT/ft/ft-test-helpers.cc
@@ -172,21 +172,26 @@ int toku_testsetup_insert_to_leaf (FT_HANDLE ft_handle, BLOCKNUM blocknum, const
assert(node->height==0);
DBT kdbt, vdbt;
- ft_msg msg(toku_fill_dbt(&kdbt, key, keylen), toku_fill_dbt(&vdbt, val, vallen),
- FT_INSERT, next_dummymsn(), toku_xids_get_root_xids());
+ ft_msg msg(
+ toku_fill_dbt(&kdbt, key, keylen),
+ toku_fill_dbt(&vdbt, val, vallen),
+ FT_INSERT,
+ next_dummymsn(),
+ toku_xids_get_root_xids());
static size_t zero_flow_deltas[] = { 0, 0 };
txn_gc_info gc_info(nullptr, TXNID_NONE, TXNID_NONE, true);
- toku_ftnode_put_msg(ft_handle->ft->cmp,
- ft_handle->ft->update_fun,
- node,
- -1,
- msg,
- true,
- &gc_info,
- zero_flow_deltas,
- NULL
- );
+ toku_ftnode_put_msg(
+ ft_handle->ft->cmp,
+ ft_handle->ft->update_fun,
+ node,
+ -1,
+ msg,
+ true,
+ &gc_info,
+ zero_flow_deltas,
+ NULL,
+ NULL);
toku_verify_or_set_counts(node);
diff --git a/storage/tokudb/PerconaFT/ft/ft.cc b/storage/tokudb/PerconaFT/ft/ft.cc
index 2a0fb6f6800..93d21233bf7 100644
--- a/storage/tokudb/PerconaFT/ft/ft.cc
+++ b/storage/tokudb/PerconaFT/ft/ft.cc
@@ -198,6 +198,8 @@ static void ft_checkpoint (CACHEFILE cf, int fd, void *header_v) {
ch->time_of_last_modification = now;
ch->checkpoint_count++;
ft_hack_highest_unused_msn_for_upgrade_for_checkpoint(ft);
+ ch->on_disk_logical_rows =
+ ft->h->on_disk_logical_rows = ft->in_memory_logical_rows;
// write translation and header to disk (or at least to OS internal buffer)
toku_serialize_ft_to(fd, ch, &ft->blocktable, ft->cf);
@@ -383,7 +385,8 @@ ft_header_create(FT_OPTIONS options, BLOCKNUM root_blocknum, TXNID root_xid_that
.count_of_optimize_in_progress = 0,
.count_of_optimize_in_progress_read_from_disk = 0,
.msn_at_start_of_last_completed_optimize = ZERO_MSN,
- .on_disk_stats = ZEROSTATS
+ .on_disk_stats = ZEROSTATS,
+ .on_disk_logical_rows = 0
};
return (FT_HEADER) toku_xmemdup(&h, sizeof h);
}
@@ -802,7 +805,14 @@ toku_ft_stat64 (FT ft, struct ftstat64_s *s) {
s->fsize = toku_cachefile_size(ft->cf);
// just use the in memory stats from the header
// prevent appearance of negative numbers for numrows, numbytes
- int64_t n = ft->in_memory_stats.numrows;
+    // If the logical count was never properly re-counted on an upgrade,
+ // return the existing physical count instead.
+ int64_t n;
+ if (ft->in_memory_logical_rows == (uint64_t)-1) {
+ n = ft->in_memory_stats.numrows;
+ } else {
+ n = ft->in_memory_logical_rows;
+ }
if (n < 0) {
n = 0;
}
@@ -871,20 +881,38 @@ DESCRIPTOR toku_ft_get_cmp_descriptor(FT_HANDLE ft_handle) {
return &ft_handle->ft->cmp_descriptor;
}
-void
-toku_ft_update_stats(STAT64INFO headerstats, STAT64INFO_S delta) {
+void toku_ft_update_stats(STAT64INFO headerstats, STAT64INFO_S delta) {
(void) toku_sync_fetch_and_add(&(headerstats->numrows), delta.numrows);
(void) toku_sync_fetch_and_add(&(headerstats->numbytes), delta.numbytes);
}
-void
-toku_ft_decrease_stats(STAT64INFO headerstats, STAT64INFO_S delta) {
+void toku_ft_decrease_stats(STAT64INFO headerstats, STAT64INFO_S delta) {
(void) toku_sync_fetch_and_sub(&(headerstats->numrows), delta.numrows);
(void) toku_sync_fetch_and_sub(&(headerstats->numbytes), delta.numbytes);
}
-void
-toku_ft_remove_reference(FT ft, bool oplsn_valid, LSN oplsn, remove_ft_ref_callback remove_ref, void *extra) {
+void toku_ft_adjust_logical_row_count(FT ft, int64_t delta) {
+ // In order to make sure that the correct count is returned from
+ // toku_ft_stat64, the ft->(in_memory|on_disk)_logical_rows _MUST_NOT_ be
+    // modified anywhere other than here, with the exceptions of
+    // serializing to a header, initializing a new header, and analyzing
+    // an index for a logical row count.
+    // The gist is that on an index upgrade, all logical_rows values
+    // in the ft header are set to -1 until an analyze can reset them to an
+    // accurate value. Until then, the physical count from in_memory_stats
+    // must be returned by toku_ft_stat64.
+ if (delta != 0 && ft->in_memory_logical_rows != (uint64_t)-1) {
+ toku_sync_fetch_and_add(&(ft->in_memory_logical_rows), delta);
+ }
+}
+
+void toku_ft_remove_reference(
+ FT ft,
+ bool oplsn_valid,
+ LSN oplsn,
+ remove_ft_ref_callback remove_ref,
+ void *extra) {
+
toku_ft_grab_reflock(ft);
if (toku_ft_has_one_reference_unlocked(ft)) {
toku_ft_release_reflock(ft);
diff --git a/storage/tokudb/PerconaFT/ft/ft.h b/storage/tokudb/PerconaFT/ft/ft.h
index cc64bdfc6d3..d600e093bdc 100644
--- a/storage/tokudb/PerconaFT/ft/ft.h
+++ b/storage/tokudb/PerconaFT/ft/ft.h
@@ -127,13 +127,17 @@ DESCRIPTOR toku_ft_get_cmp_descriptor(FT_HANDLE ft_handle);
typedef struct {
// delta versions in basements could be negative
+ // These represent the physical leaf entries and do not account
+ // for pending deletes or other in-flight messages that have not been
+ // applied to a leaf entry.
int64_t numrows;
int64_t numbytes;
} STAT64INFO_S, *STAT64INFO;
-static const STAT64INFO_S ZEROSTATS = { .numrows = 0, .numbytes = 0};
+static const STAT64INFO_S ZEROSTATS = { .numrows = 0, .numbytes = 0 };
void toku_ft_update_stats(STAT64INFO headerstats, STAT64INFO_S delta);
void toku_ft_decrease_stats(STAT64INFO headerstats, STAT64INFO_S delta);
+void toku_ft_adjust_logical_row_count(FT ft, int64_t delta);
typedef void (*remove_ft_ref_callback)(FT ft, void *extra);
void toku_ft_remove_reference(FT ft,
diff --git a/storage/tokudb/PerconaFT/ft/leafentry.h b/storage/tokudb/PerconaFT/ft/leafentry.h
index 9cb81ef7cd6..7274a1480e2 100644
--- a/storage/tokudb/PerconaFT/ft/leafentry.h
+++ b/storage/tokudb/PerconaFT/ft/leafentry.h
@@ -180,43 +180,57 @@ uint64_t le_outermost_uncommitted_xid (LEAFENTRY le);
// r|r!=0&&r!=TOKUDB_ACCEPT: Quit early, return r, because something unexpected went wrong (error case)
typedef int(*LE_ITERATE_CALLBACK)(TXNID id, TOKUTXN context, bool is_provisional);
-int le_iterate_val(LEAFENTRY le, LE_ITERATE_CALLBACK f, void** valpp, uint32_t *vallenp, TOKUTXN context);
-
-void le_extract_val(LEAFENTRY le,
- // should we return the entire leafentry as the val?
- bool is_leaf_mode, enum cursor_read_type read_type,
- TOKUTXN ttxn, uint32_t *vallen, void **val);
-
-size_t
-leafentry_disksize_13(LEAFENTRY_13 le);
-
-int
-toku_le_upgrade_13_14(LEAFENTRY_13 old_leafentry, // NULL if there was no stored data.
- void** keyp,
- uint32_t* keylen,
- size_t *new_leafentry_memorysize,
- LEAFENTRY *new_leafentry_p);
+int le_iterate_val(
+ LEAFENTRY le,
+ LE_ITERATE_CALLBACK f,
+ void** valpp,
+ uint32_t* vallenp,
+ TOKUTXN context);
+
+void le_extract_val(
+ LEAFENTRY le,
+ // should we return the entire leafentry as the val?
+ bool is_leaf_mode,
+ enum cursor_read_type read_type,
+ TOKUTXN ttxn,
+ uint32_t* vallen,
+ void** val);
+
+size_t leafentry_disksize_13(LEAFENTRY_13 le);
+
+int toku_le_upgrade_13_14(
+ // NULL if there was no stored data.
+ LEAFENTRY_13 old_leafentry,
+ void** keyp,
+ uint32_t* keylen,
+ size_t* new_leafentry_memorysize,
+ LEAFENTRY *new_leafentry_p);
class bn_data;
-void
-toku_le_apply_msg(const ft_msg &msg,
- LEAFENTRY old_leafentry, // NULL if there was no stored data.
- bn_data* data_buffer, // bn_data storing leafentry, if NULL, means there is no bn_data
- uint32_t idx, // index in data_buffer where leafentry is stored (and should be replaced
- uint32_t old_keylen,
- txn_gc_info *gc_info,
- LEAFENTRY *new_leafentry_p,
- int64_t * numbytes_delta_p);
-
-bool toku_le_worth_running_garbage_collection(LEAFENTRY le, txn_gc_info *gc_info);
-
-void
-toku_le_garbage_collect(LEAFENTRY old_leaf_entry,
- bn_data* data_buffer,
- uint32_t idx,
- void* keyp,
- uint32_t keylen,
- txn_gc_info *gc_info,
- LEAFENTRY *new_leaf_entry,
- int64_t * numbytes_delta_p);
+int64_t toku_le_apply_msg(
+ const ft_msg &msg,
+ // NULL if there was no stored data.
+ LEAFENTRY old_leafentry,
+    // bn_data storing the leafentry; if NULL, there is no bn_data
+    bn_data* data_buffer,
+    // index in data_buffer where the leafentry is stored (and should be replaced)
+ uint32_t idx,
+ uint32_t old_keylen,
+ txn_gc_info* gc_info,
+ LEAFENTRY *new_leafentry_p,
+ int64_t* numbytes_delta_p);
+
+bool toku_le_worth_running_garbage_collection(
+ LEAFENTRY le,
+ txn_gc_info* gc_info);
+
+void toku_le_garbage_collect(
+ LEAFENTRY old_leaf_entry,
+ bn_data* data_buffer,
+ uint32_t idx,
+ void* keyp,
+ uint32_t keylen,
+ txn_gc_info* gc_info,
+ LEAFENTRY* new_leaf_entry,
+ int64_t* numbytes_delta_p);
diff --git a/storage/tokudb/PerconaFT/ft/loader/loader.cc b/storage/tokudb/PerconaFT/ft/loader/loader.cc
index 5ff0d69af46..20f9363da1e 100644
--- a/storage/tokudb/PerconaFT/ft/loader/loader.cc
+++ b/storage/tokudb/PerconaFT/ft/loader/loader.cc
@@ -2312,11 +2312,42 @@ static struct leaf_buf *start_leaf (struct dbout *out, const DESCRIPTOR UU(desc)
return lbuf;
}
-static void finish_leafnode (struct dbout *out, struct leaf_buf *lbuf, int progress_allocation, FTLOADER bl, uint32_t target_basementnodesize, enum toku_compression_method target_compression_method);
-static int write_nonleaves (FTLOADER bl, FIDX pivots_fidx, struct dbout *out, struct subtrees_info *sts, const DESCRIPTOR descriptor, uint32_t target_nodesize, uint32_t target_basementnodesize, enum toku_compression_method target_compression_method);
-static void add_pair_to_leafnode (struct leaf_buf *lbuf, unsigned char *key, int keylen, unsigned char *val, int vallen, int this_leafentry_size, STAT64INFO stats_to_update);
-static int write_translation_table (struct dbout *out, long long *off_of_translation_p);
-static int write_header (struct dbout *out, long long translation_location_on_disk, long long translation_size_on_disk);
+static void finish_leafnode(
+ struct dbout* out,
+ struct leaf_buf* lbuf,
+ int progress_allocation,
+ FTLOADER bl,
+ uint32_t target_basementnodesize,
+ enum toku_compression_method target_compression_method);
+
+static int write_nonleaves(
+ FTLOADER bl,
+ FIDX pivots_fidx,
+ struct dbout* out,
+ struct subtrees_info* sts,
+ const DESCRIPTOR descriptor,
+ uint32_t target_nodesize,
+ uint32_t target_basementnodesize,
+ enum toku_compression_method target_compression_method);
+
+static void add_pair_to_leafnode(
+ struct leaf_buf* lbuf,
+ unsigned char* key,
+ int keylen,
+ unsigned char* val,
+ int vallen,
+ int this_leafentry_size,
+ STAT64INFO stats_to_update,
+ int64_t* logical_rows_delta);
+
+static int write_translation_table(
+ struct dbout* out,
+ long long* off_of_translation_p);
+
+static int write_header(
+ struct dbout* out,
+ long long translation_location_on_disk,
+ long long translation_size_on_disk);
static void drain_writer_q(QUEUE q) {
void *item;
@@ -2448,6 +2479,12 @@ static int toku_loader_write_ft_from_q (FTLOADER bl,
DBT maxkey = make_dbt(0, 0); // keep track of the max key of the current node
STAT64INFO_S deltas = ZEROSTATS;
+    // This is just a placeholder and is not used by the loader; the real,
+    // accurate stats come out of 'deltas'. This loader does not push
+    // messages down into the top of a fractal tree, where the logical row
+    // counting is normally done; it creates leaf entries directly, so it
+    // must also take on the logical row counting itself.
+ int64_t logical_rows_delta = 0;
while (result == 0) {
void *item;
{
@@ -2506,7 +2543,15 @@ static int toku_loader_write_ft_from_q (FTLOADER bl,
lbuf = start_leaf(&out, descriptor, lblock, le_xid, target_nodesize);
}
- add_pair_to_leafnode(lbuf, (unsigned char *) key.data, key.size, (unsigned char *) val.data, val.size, this_leafentry_size, &deltas);
+ add_pair_to_leafnode(
+ lbuf,
+ (unsigned char*)key.data,
+ key.size,
+ (unsigned char*)val.data,
+ val.size,
+ this_leafentry_size,
+ &deltas,
+ &logical_rows_delta);
n_rows_remaining--;
update_maxkey(&maxkey, &key); // set the new maxkey to the current key
@@ -2526,6 +2571,13 @@ static int toku_loader_write_ft_from_q (FTLOADER bl,
toku_ft_update_stats(&ft.in_memory_stats, deltas);
}
+ // As noted above, the loader directly creates a tree structure without
+    // going through the higher-level ft API and thus bypasses the logical row
+    // counting performed at that level. So we must manually update the logical
+ // row count with the info we have from the physical delta that comes out of
+ // add_pair_to_leafnode.
+ toku_ft_adjust_logical_row_count(&ft, deltas.numrows);
+
cleanup_maxkey(&maxkey);
if (lbuf) {
@@ -2878,7 +2930,16 @@ int toku_ft_loader_get_error(FTLOADER bl, int *error) {
return 0;
}
-static void add_pair_to_leafnode (struct leaf_buf *lbuf, unsigned char *key, int keylen, unsigned char *val, int vallen, int this_leafentry_size, STAT64INFO stats_to_update) {
+static void add_pair_to_leafnode(
+ struct leaf_buf* lbuf,
+ unsigned char* key,
+ int keylen,
+ unsigned char* val,
+ int vallen,
+ int this_leafentry_size,
+ STAT64INFO stats_to_update,
+ int64_t* logical_rows_delta) {
+
lbuf->nkeys++;
lbuf->ndata++;
lbuf->dsize += keylen + vallen;
@@ -2890,11 +2951,25 @@ static void add_pair_to_leafnode (struct leaf_buf *lbuf, unsigned char *key, int
FTNODE leafnode = lbuf->node;
uint32_t idx = BLB_DATA(leafnode, 0)->num_klpairs();
DBT kdbt, vdbt;
- ft_msg msg(toku_fill_dbt(&kdbt, key, keylen), toku_fill_dbt(&vdbt, val, vallen), FT_INSERT, ZERO_MSN, lbuf->xids);
+ ft_msg msg(
+ toku_fill_dbt(&kdbt, key, keylen),
+ toku_fill_dbt(&vdbt, val, vallen),
+ FT_INSERT,
+ ZERO_MSN,
+ lbuf->xids);
uint64_t workdone = 0;
// there's no mvcc garbage in a bulk-loaded FT, so there's no need to pass useful gc info
txn_gc_info gc_info(nullptr, TXNID_NONE, TXNID_NONE, true);
- toku_ft_bn_apply_msg_once(BLB(leafnode,0), msg, idx, keylen, NULL, &gc_info, &workdone, stats_to_update);
+ toku_ft_bn_apply_msg_once(
+ BLB(leafnode, 0),
+ msg,
+ idx,
+ keylen,
+ NULL,
+ &gc_info,
+ &workdone,
+ stats_to_update,
+ logical_rows_delta);
}
static int write_literal(struct dbout *out, void*data, size_t len) {
@@ -2905,7 +2980,14 @@ static int write_literal(struct dbout *out, void*data, size_t len) {
return result;
}
-static void finish_leafnode (struct dbout *out, struct leaf_buf *lbuf, int progress_allocation, FTLOADER bl, uint32_t target_basementnodesize, enum toku_compression_method target_compression_method) {
+static void finish_leafnode(
+ struct dbout* out,
+ struct leaf_buf* lbuf,
+ int progress_allocation,
+ FTLOADER bl,
+ uint32_t target_basementnodesize,
+ enum toku_compression_method target_compression_method) {
+
int result = 0;
// serialize leaf to buffer
@@ -2913,7 +2995,16 @@ static void finish_leafnode (struct dbout *out, struct leaf_buf *lbuf, int progr
size_t uncompressed_serialized_leaf_size = 0;
char *serialized_leaf = NULL;
FTNODE_DISK_DATA ndd = NULL;
- result = toku_serialize_ftnode_to_memory(lbuf->node, &ndd, target_basementnodesize, target_compression_method, true, true, &serialized_leaf_size, &uncompressed_serialized_leaf_size, &serialized_leaf);
+ result = toku_serialize_ftnode_to_memory(
+ lbuf->node,
+ &ndd,
+ target_basementnodesize,
+ target_compression_method,
+ true,
+ true,
+ &serialized_leaf_size,
+ &uncompressed_serialized_leaf_size,
+ &serialized_leaf);
// write it out
if (result == 0) {
@@ -2979,8 +3070,11 @@ static int write_translation_table (struct dbout *out, long long *off_of_transla
return result;
}
-static int
-write_header (struct dbout *out, long long translation_location_on_disk, long long translation_size_on_disk) {
+static int write_header(
+ struct dbout* out,
+ long long translation_location_on_disk,
+ long long translation_size_on_disk) {
+
int result = 0;
size_t size = toku_serialize_ft_size(out->ft->h);
size_t alloced_size = roundup_to_multiple(512, size);
@@ -2991,6 +3085,7 @@ write_header (struct dbout *out, long long translation_location_on_disk, long lo
} else {
wbuf_init(&wbuf, buf, size);
out->ft->h->on_disk_stats = out->ft->in_memory_stats;
+ out->ft->h->on_disk_logical_rows = out->ft->in_memory_logical_rows;
toku_serialize_ft_to_wbuf(&wbuf, out->ft->h, translation_location_on_disk, translation_size_on_disk);
for (size_t i=size; i<alloced_size; i++) buf[i]=0; // initialize all those unused spots to zero
if (wbuf.ndone != size)
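
In outline, the loader-side accounting added in this hunk looks as sketched below (names taken from the patch, loop body elided): the physical per-pair deltas accumulate in 'deltas', and because no messages pass through the tree top where logical counting normally happens, the loader applies deltas.numrows as the logical adjustment once at the end.

    STAT64INFO_S deltas = ZEROSTATS;
    int64_t logical_rows_delta = 0;      // placeholder, per the comment above

    // for each (key, val) pair taken from the writer queue:
    //     add_pair_to_leafnode(lbuf, key, keylen, val, vallen,
    //                          this_leafentry_size, &deltas,
    //                          &logical_rows_delta);

    toku_ft_update_stats(&ft.in_memory_stats, deltas);
    toku_ft_adjust_logical_row_count(&ft, deltas.numrows);
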
diff --git a/storage/tokudb/PerconaFT/ft/logger/log_upgrade.cc b/storage/tokudb/PerconaFT/ft/logger/log_upgrade.cc
index 6dca8c25378..efaba49198d 100644
--- a/storage/tokudb/PerconaFT/ft/logger/log_upgrade.cc
+++ b/storage/tokudb/PerconaFT/ft/logger/log_upgrade.cc
@@ -265,8 +265,9 @@ toku_maybe_upgrade_log(const char *env_dir, const char *log_dir, LSN * lsn_of_cl
TXNID last_xid = TXNID_NONE;
r = verify_clean_shutdown_of_log_version(log_dir, version_of_logs_on_disk, &last_lsn, &last_xid);
if (r != 0) {
- if (TOKU_LOG_VERSION_25 <= version_of_logs_on_disk && version_of_logs_on_disk <= TOKU_LOG_VERSION_27
- && TOKU_LOG_VERSION_28 == TOKU_LOG_VERSION) {
+ if (version_of_logs_on_disk >= TOKU_LOG_VERSION_25 &&
+ version_of_logs_on_disk <= TOKU_LOG_VERSION_29 &&
+ TOKU_LOG_VERSION_29 == TOKU_LOG_VERSION) {
r = 0; // can do recovery on dirty shutdown
} else {
fprintf(stderr, "Cannot upgrade PerconaFT version %d database.", version_of_logs_on_disk);
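
Restated as a stand-alone predicate (illustrative only; the real check uses the enum constants added to logger.h below), the relaxed condition now admits dirty-shutdown recovery for any log version from 25 through 29, provided the running binary is exactly version 29:

    bool can_recover_dirty_shutdown(int disk_version, int current_version) {
        return disk_version >= 25 &&     /* TOKU_LOG_VERSION_25 */
               disk_version <= 29 &&     /* TOKU_LOG_VERSION_29 */
               current_version == 29;    /* TOKU_LOG_VERSION_29 */
    }
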
diff --git a/storage/tokudb/PerconaFT/ft/logger/logger.h b/storage/tokudb/PerconaFT/ft/logger/logger.h
index 1f15f59fb3f..d9595d71065 100644
--- a/storage/tokudb/PerconaFT/ft/logger/logger.h
+++ b/storage/tokudb/PerconaFT/ft/logger/logger.h
@@ -54,6 +54,7 @@ enum {
TOKU_LOG_VERSION_26 = 26, // no change from 25
TOKU_LOG_VERSION_27 = 27, // no change from 26
TOKU_LOG_VERSION_28 = 28, // no change from 27
+ TOKU_LOG_VERSION_29 = 29, // no change from 28
TOKU_LOG_VERSION = FT_LAYOUT_VERSION,
TOKU_LOG_MIN_SUPPORTED_VERSION = FT_LAYOUT_MIN_SUPPORTED_VERSION,
};
diff --git a/storage/tokudb/PerconaFT/ft/logger/recover.h b/storage/tokudb/PerconaFT/ft/logger/recover.h
index 0d216c11a8b..bdd44d562cd 100644
--- a/storage/tokudb/PerconaFT/ft/logger/recover.h
+++ b/storage/tokudb/PerconaFT/ft/logger/recover.h
@@ -67,7 +67,7 @@ int tokuft_recover(DB_ENV *env,
// Effect: Check the tokuft logs to determine whether or not we need to run recovery.
// If the log is empty or if there is a clean shutdown at the end of the log, then we
-// dont need to run recovery.
+// don't need to run recovery.
// Returns: true if we need recovery, otherwise false.
int tokuft_needs_recovery(const char *logdir, bool ignore_empty_log);
diff --git a/storage/tokudb/PerconaFT/ft/node.cc b/storage/tokudb/PerconaFT/ft/node.cc
index 44dbc73ba2b..58ba675eb7c 100644
--- a/storage/tokudb/PerconaFT/ft/node.cc
+++ b/storage/tokudb/PerconaFT/ft/node.cc
@@ -206,12 +206,20 @@ int msg_buffer_offset_msn_cmp(message_buffer &msg_buffer, const int32_t &ao, con
}
/**
- * Given a message buffer and and offset, apply the message with toku_ft_bn_apply_msg, or discard it,
+ * Given a message buffer and an offset, apply the message with
+ * toku_ft_bn_apply_msg, or discard it,
* based on its MSN and the MSN of the basement node.
*/
-static void
-do_bn_apply_msg(FT_HANDLE ft_handle, BASEMENTNODE bn, message_buffer *msg_buffer, int32_t offset,
- txn_gc_info *gc_info, uint64_t *workdone, STAT64INFO stats_to_update) {
+static void do_bn_apply_msg(
+ FT_HANDLE ft_handle,
+ BASEMENTNODE bn,
+ message_buffer* msg_buffer,
+ int32_t offset,
+ txn_gc_info* gc_info,
+ uint64_t* workdone,
+ STAT64INFO stats_to_update,
+ int64_t* logical_rows_delta) {
+
DBT k, v;
ft_msg msg = msg_buffer->get_message(offset, &k, &v);
@@ -227,16 +235,17 @@ do_bn_apply_msg(FT_HANDLE ft_handle, BASEMENTNODE bn, message_buffer *msg_buffer
msg,
gc_info,
workdone,
- stats_to_update
- );
+ stats_to_update,
+ logical_rows_delta);
} else {
toku_ft_status_note_msn_discard();
}
// We must always mark message as stale since it has been marked
// (using omt::iterate_and_mark_range)
- // It is possible to call do_bn_apply_msg even when it won't apply the message because
- // the node containing it could have been evicted and brought back in.
+ // It is possible to call do_bn_apply_msg even when it won't apply the
+ // message because the node containing it could have been evicted and
+ // brought back in.
msg_buffer->set_freshness(offset, false);
}
@@ -248,12 +257,29 @@ struct iterate_do_bn_apply_msg_extra {
txn_gc_info *gc_info;
uint64_t *workdone;
STAT64INFO stats_to_update;
+ int64_t *logical_rows_delta;
};
-int iterate_do_bn_apply_msg(const int32_t &offset, const uint32_t UU(idx), struct iterate_do_bn_apply_msg_extra *const e) __attribute__((nonnull(3)));
-int iterate_do_bn_apply_msg(const int32_t &offset, const uint32_t UU(idx), struct iterate_do_bn_apply_msg_extra *const e)
+int iterate_do_bn_apply_msg(
+ const int32_t &offset,
+ const uint32_t UU(idx),
+ struct iterate_do_bn_apply_msg_extra* const e)
+ __attribute__((nonnull(3)));
+
+int iterate_do_bn_apply_msg(
+ const int32_t &offset,
+ const uint32_t UU(idx),
+ struct iterate_do_bn_apply_msg_extra* const e)
{
- do_bn_apply_msg(e->t, e->bn, &e->bnc->msg_buffer, offset, e->gc_info, e->workdone, e->stats_to_update);
+ do_bn_apply_msg(
+ e->t,
+ e->bn,
+ &e->bnc->msg_buffer,
+ offset,
+ e->gc_info,
+ e->workdone,
+ e->stats_to_update,
+ e->logical_rows_delta);
return 0;
}
@@ -354,17 +380,15 @@ find_bounds_within_message_tree(
* or plus infinity respectively if they are NULL. Do not mark the node
* as dirty (preserve previous state of 'dirty' bit).
*/
-static void
-bnc_apply_messages_to_basement_node(
+static void bnc_apply_messages_to_basement_node(
FT_HANDLE t, // used for comparison function
BASEMENTNODE bn, // where to apply messages
FTNODE ancestor, // the ancestor node where we can find messages to apply
int childnum, // which child buffer of ancestor contains messages we want
const pivot_bounds &bounds, // contains pivot key bounds of this basement node
- txn_gc_info *gc_info,
- bool* msgs_applied
- )
-{
+ txn_gc_info* gc_info,
+ bool* msgs_applied) {
+
int r;
NONLEAF_CHILDINFO bnc = BNC(ancestor, childnum);
@@ -372,16 +396,29 @@ bnc_apply_messages_to_basement_node(
// apply messages from this buffer
STAT64INFO_S stats_delta = {0,0};
uint64_t workdone_this_ancestor = 0;
+ int64_t logical_rows_delta = 0;
uint32_t stale_lbi, stale_ube;
if (!bn->stale_ancestor_messages_applied) {
- find_bounds_within_message_tree(t->ft->cmp, bnc->stale_message_tree, &bnc->msg_buffer, bounds, &stale_lbi, &stale_ube);
+ find_bounds_within_message_tree(
+ t->ft->cmp,
+ bnc->stale_message_tree,
+ &bnc->msg_buffer,
+ bounds,
+ &stale_lbi,
+ &stale_ube);
} else {
stale_lbi = 0;
stale_ube = 0;
}
uint32_t fresh_lbi, fresh_ube;
- find_bounds_within_message_tree(t->ft->cmp, bnc->fresh_message_tree, &bnc->msg_buffer, bounds, &fresh_lbi, &fresh_ube);
+ find_bounds_within_message_tree(
+ t->ft->cmp,
+ bnc->fresh_message_tree,
+ &bnc->msg_buffer,
+ bounds,
+ &fresh_lbi,
+ &fresh_ube);
// We now know where all the messages we must apply are, so one of the
// following 4 cases will do the application, depending on which of
@@ -395,7 +432,9 @@ bnc_apply_messages_to_basement_node(
// We have messages in multiple trees, so we grab all
// the relevant messages' offsets and sort them by MSN, then apply
// them in MSN order.
- const int buffer_size = ((stale_ube - stale_lbi) + (fresh_ube - fresh_lbi) + bnc->broadcast_list.size());
+ const int buffer_size = ((stale_ube - stale_lbi) +
+ (fresh_ube - fresh_lbi) +
+ bnc->broadcast_list.size());
toku::scoped_malloc offsets_buf(buffer_size * sizeof(int32_t));
int32_t *offsets = reinterpret_cast<int32_t *>(offsets_buf.get());
struct store_msg_buffer_offset_extra sfo_extra = { .offsets = offsets, .i = 0 };
@@ -419,11 +458,27 @@ bnc_apply_messages_to_basement_node(
// Apply the messages in MSN order.
for (int i = 0; i < buffer_size; ++i) {
*msgs_applied = true;
- do_bn_apply_msg(t, bn, &bnc->msg_buffer, offsets[i], gc_info, &workdone_this_ancestor, &stats_delta);
+ do_bn_apply_msg(
+ t,
+ bn,
+ &bnc->msg_buffer,
+ offsets[i],
+ gc_info,
+ &workdone_this_ancestor,
+ &stats_delta,
+ &logical_rows_delta);
}
} else if (stale_lbi == stale_ube) {
// No stale messages to apply, we just apply fresh messages, and mark them to be moved to stale later.
- struct iterate_do_bn_apply_msg_extra iter_extra = { .t = t, .bn = bn, .bnc = bnc, .gc_info = gc_info, .workdone = &workdone_this_ancestor, .stats_to_update = &stats_delta };
+ struct iterate_do_bn_apply_msg_extra iter_extra = {
+ .t = t,
+ .bn = bn,
+ .bnc = bnc,
+ .gc_info = gc_info,
+ .workdone = &workdone_this_ancestor,
+ .stats_to_update = &stats_delta,
+ .logical_rows_delta = &logical_rows_delta
+ };
if (fresh_ube - fresh_lbi > 0) *msgs_applied = true;
r = bnc->fresh_message_tree.iterate_and_mark_range<struct iterate_do_bn_apply_msg_extra, iterate_do_bn_apply_msg>(fresh_lbi, fresh_ube, &iter_extra);
assert_zero(r);
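When both the stale and fresh trees (plus the broadcast list) hold relevant messages, the code above gathers all their buffer offsets into one array and sorts by MSN before applying. The shape of that step, assuming a hypothetical msn_of(offset) accessor into the message buffer:

    #include <algorithm>
    #include <cstdint>
    #include <vector>

    uint64_t msn_of(int32_t offset);   // assumed accessor, not in this patch

    static void apply_in_msn_order(std::vector<int32_t> offsets) {
        std::sort(offsets.begin(), offsets.end(),
                  [](int32_t a, int32_t b) { return msn_of(a) < msn_of(b); });
        for (int32_t off : offsets) {
            // do_bn_apply_msg(..., off, ...) would run here, in MSN order
        }
    }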
@@ -432,7 +487,15 @@ bnc_apply_messages_to_basement_node(
// No fresh messages to apply, we just apply stale messages.
if (stale_ube - stale_lbi > 0) *msgs_applied = true;
- struct iterate_do_bn_apply_msg_extra iter_extra = { .t = t, .bn = bn, .bnc = bnc, .gc_info = gc_info, .workdone = &workdone_this_ancestor, .stats_to_update = &stats_delta };
+ struct iterate_do_bn_apply_msg_extra iter_extra = {
+ .t = t,
+ .bn = bn,
+ .bnc = bnc,
+ .gc_info = gc_info,
+ .workdone = &workdone_this_ancestor,
+ .stats_to_update = &stats_delta,
+ .logical_rows_delta = &logical_rows_delta
+ };
r = bnc->stale_message_tree.iterate_on_range<struct iterate_do_bn_apply_msg_extra, iterate_do_bn_apply_msg>(stale_lbi, stale_ube, &iter_extra);
assert_zero(r);
@@ -446,6 +509,7 @@ bnc_apply_messages_to_basement_node(
if (stats_delta.numbytes || stats_delta.numrows) {
toku_ft_update_stats(&t->ft->in_memory_stats, stats_delta);
}
+ toku_ft_adjust_logical_row_count(t->ft, logical_rows_delta);
}
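Note the design choice: each message's row-count effect is summed into the local logical_rows_delta and folded into the ft header once per basement-node pass, rather than updating the shared counter per message. A sketch of that batching, with a hypothetical adjust_logical_row_count standing in for toku_ft_adjust_logical_row_count:

    #include <cstdint>

    void adjust_logical_row_count(int64_t delta);   // assumed ft-level call

    static void apply_batch(const int64_t *per_msg_delta, int n) {
        int64_t logical_rows_delta = 0;
        for (int i = 0; i < n; i++) {
            logical_rows_delta += per_msg_delta[i];   // cheap local adds
        }
        adjust_logical_row_count(logical_rows_delta); // one shared update
    }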
static void
@@ -1073,7 +1137,8 @@ toku_ft_bn_apply_msg_once (
LEAFENTRY le,
txn_gc_info *gc_info,
uint64_t *workdone,
- STAT64INFO stats_to_update
+ STAT64INFO stats_to_update,
+ int64_t *logical_rows_delta
)
// Effect: Apply msg to leafentry (msn is ignored)
// Calculate work done by message on leafentry and add it to caller's workdone counter.
@@ -1082,26 +1147,34 @@ toku_ft_bn_apply_msg_once (
{
size_t newsize=0, oldsize=0, workdone_this_le=0;
LEAFENTRY new_le=0;
- int64_t numbytes_delta = 0; // how many bytes of user data (not including overhead) were added or deleted from this row
- int64_t numrows_delta = 0; // will be +1 or -1 or 0 (if row was added or deleted or not)
+ // how many bytes of user data (not including overhead) were added or
+ // deleted from this row
+ int64_t numbytes_delta = 0;
+ // will be +1 or -1 or 0 (if row was added or deleted or not)
+ int64_t numrows_delta = 0;
+ // will be +1, -1 or 0 if a message that was accounted for logically has
+ // changed in meaning such as an insert changed to an update or a delete
+ // changed to a noop
+ int64_t logical_rows_delta_le = 0;
uint32_t key_storage_size = msg.kdbt()->size + sizeof(uint32_t);
if (le) {
oldsize = leafentry_memsize(le) + key_storage_size;
}
- // toku_le_apply_msg() may call bn_data::mempool_malloc_and_update_dmt() to allocate more space.
- // That means le is guaranteed to not cause a sigsegv but it may point to a mempool that is
- // no longer in use. We'll have to release the old mempool later.
- toku_le_apply_msg(
- msg,
+ // toku_le_apply_msg() may call bn_data::mempool_malloc_and_update_dmt()
+ // to allocate more space. That means le is guaranteed to not cause a
+ // sigsegv but it may point to a mempool that is no longer in use.
+ // We'll have to release the old mempool later.
+ logical_rows_delta_le = toku_le_apply_msg(
+ msg,
le,
&bn->data_buffer,
idx,
le_keylen,
- gc_info,
- &new_le,
- &numbytes_delta
- );
+ gc_info,
+ &new_le,
+ &numbytes_delta);
+
// at this point, we cannot trust cmd->u.id.key to be valid.
// The dmt may have realloced its mempool and freed the one containing key.
@@ -1121,37 +1194,42 @@ toku_ft_bn_apply_msg_once (
numrows_delta = 1;
}
}
- if (workdone) { // test programs may call with NULL
+ if (FT_LIKELY(workdone != NULL)) { // test programs may call with NULL
*workdone += workdone_this_le;
}
+ if (FT_LIKELY(logical_rows_delta != NULL)) {
+ *logical_rows_delta += logical_rows_delta_le;
+ }
// now update stat64 statistics
bn->stat64_delta.numrows += numrows_delta;
bn->stat64_delta.numbytes += numbytes_delta;
// the only reason stats_to_update may be null is for tests
- if (stats_to_update) {
+ if (FT_LIKELY(stats_to_update != NULL)) {
stats_to_update->numrows += numrows_delta;
stats_to_update->numbytes += numbytes_delta;
}
-
}
static const uint32_t setval_tag = 0xee0ccb99; // this was gotten by doing "cat /dev/random|head -c4|od -x" to get a random number. We want to make sure that the user actually passes us the setval_extra_s that we passed in.
struct setval_extra_s {
uint32_t tag;
bool did_set_val;
- int setval_r; // any error code that setval_fun wants to return goes here.
+ // any error code that setval_fun wants to return goes here.
+ int setval_r;
// need arguments for toku_ft_bn_apply_msg_once
BASEMENTNODE bn;
- MSN msn; // captured from original message, not currently used
+ // captured from original message, not currently used
+ MSN msn;
XIDS xids;
- const DBT *key;
+ const DBT* key;
uint32_t idx;
uint32_t le_keylen;
LEAFENTRY le;
- txn_gc_info *gc_info;
- uint64_t * workdone; // set by toku_ft_bn_apply_msg_once()
+ txn_gc_info* gc_info;
+ uint64_t* workdone; // set by toku_ft_bn_apply_msg_once()
STAT64INFO stats_to_update;
+ int64_t* logical_rows_delta;
};
/*
@@ -1170,29 +1248,45 @@ static void setval_fun (const DBT *new_val, void *svextra_v) {
// can't leave scope until toku_ft_bn_apply_msg_once if
// this is a delete
DBT val;
- ft_msg msg(svextra->key,
- new_val ? new_val : toku_init_dbt(&val),
- new_val ? FT_INSERT : FT_DELETE_ANY,
- svextra->msn, svextra->xids);
- toku_ft_bn_apply_msg_once(svextra->bn, msg,
- svextra->idx, svextra->le_keylen, svextra->le,
- svextra->gc_info,
- svextra->workdone, svextra->stats_to_update);
+ ft_msg msg(
+ svextra->key,
+ new_val ? new_val : toku_init_dbt(&val),
+ new_val ? FT_INSERT : FT_DELETE_ANY,
+ svextra->msn,
+ svextra->xids);
+ toku_ft_bn_apply_msg_once(
+ svextra->bn,
+ msg,
+ svextra->idx,
+ svextra->le_keylen,
+ svextra->le,
+ svextra->gc_info,
+ svextra->workdone,
+ svextra->stats_to_update,
+ svextra->logical_rows_delta);
svextra->setval_r = 0;
}
}
-// We are already past the msn filter (in toku_ft_bn_apply_msg(), which calls do_update()),
-// so capturing the msn in the setval_extra_s is not strictly required. The alternative
-// would be to put a dummy msn in the messages created by setval_fun(), but preserving
-// the original msn seems cleaner and it preserves accountability at a lower layer.
-static int do_update(ft_update_func update_fun, const DESCRIPTOR_S *desc, BASEMENTNODE bn, const ft_msg &msg, uint32_t idx,
- LEAFENTRY le,
- void* keydata,
- uint32_t keylen,
- txn_gc_info *gc_info,
- uint64_t * workdone,
- STAT64INFO stats_to_update) {
+// We are already past the msn filter (in toku_ft_bn_apply_msg(), which calls
+// do_update()), so capturing the msn in the setval_extra_s is not strictly
+// required. The alternative would be to put a dummy msn in the messages
+// created by setval_fun(), but preserving the original msn seems cleaner and
+// it preserves accountability at a lower layer.
+static int do_update(
+ ft_update_func update_fun,
+ const DESCRIPTOR_S* desc,
+ BASEMENTNODE bn,
+ const ft_msg &msg,
+ uint32_t idx,
+ LEAFENTRY le,
+ void* keydata,
+ uint32_t keylen,
+ txn_gc_info* gc_info,
+ uint64_t* workdone,
+ STAT64INFO stats_to_update,
+ int64_t* logical_rows_delta) {
+
LEAFENTRY le_for_update;
DBT key;
const DBT *keyp;
@@ -1232,39 +1326,52 @@ static int do_update(ft_update_func update_fun, const DESCRIPTOR_S *desc, BASEME
}
le_for_update = le;
- struct setval_extra_s setval_extra = {setval_tag, false, 0, bn, msg.msn(), msg.xids(),
- keyp, idx, keylen, le_for_update, gc_info,
- workdone, stats_to_update};
- // call handlerton's ft->update_fun(), which passes setval_extra to setval_fun()
+ struct setval_extra_s setval_extra = {
+ setval_tag,
+ false,
+ 0,
+ bn,
+ msg.msn(),
+ msg.xids(),
+ keyp,
+ idx,
+ keylen,
+ le_for_update,
+ gc_info,
+ workdone,
+ stats_to_update,
+ logical_rows_delta
+ };
+ // call handlerton's ft->update_fun(), which passes setval_extra
+ // to setval_fun()
FAKE_DB(db, desc);
int r = update_fun(
&db,
keyp,
vdbtp,
update_function_extra,
- setval_fun, &setval_extra
- );
+ setval_fun,
+ &setval_extra);
if (r == 0) { r = setval_extra.setval_r; }
return r;
}
// Should be renamed as something like "apply_msg_to_basement()."
-void
-toku_ft_bn_apply_msg (
- const toku::comparator &cmp,
+void toku_ft_bn_apply_msg(
+ const toku::comparator& cmp,
ft_update_func update_fun,
BASEMENTNODE bn,
- const ft_msg &msg,
- txn_gc_info *gc_info,
- uint64_t *workdone,
- STAT64INFO stats_to_update
- )
+ const ft_msg& msg,
+ txn_gc_info* gc_info,
+ uint64_t* workdone,
+ STAT64INFO stats_to_update,
+ int64_t* logical_rows_delta) {
// Effect:
// Put a msg into a leaf.
-// Calculate work done by message on leafnode and add it to caller's workdone counter.
+// Calculate work done by message on leafnode and add it to caller's
+// workdone counter.
// The leaf could end up "too big" or "too small". The caller must fix that up.
-{
LEAFENTRY storeddata;
void* key = NULL;
uint32_t keylen = 0;
@@ -1303,7 +1410,16 @@ toku_ft_bn_apply_msg (
} else {
assert_zero(r);
}
- toku_ft_bn_apply_msg_once(bn, msg, idx, keylen, storeddata, gc_info, workdone, stats_to_update);
+ toku_ft_bn_apply_msg_once(
+ bn,
+ msg,
+ idx,
+ keylen,
+ storeddata,
+ gc_info,
+ workdone,
+ stats_to_update,
+ logical_rows_delta);
// if the insertion point is within a window of the right edge of
// the leaf then it is sequential
@@ -1331,12 +1447,19 @@ toku_ft_bn_apply_msg (
&storeddata,
&key,
&keylen,
- &idx
- );
+ &idx);
if (r == DB_NOTFOUND) break;
assert_zero(r);
- toku_ft_bn_apply_msg_once(bn, msg, idx, keylen, storeddata, gc_info, workdone, stats_to_update);
-
+ toku_ft_bn_apply_msg_once(
+ bn,
+ msg,
+ idx,
+ keylen,
+ storeddata,
+ gc_info,
+ workdone,
+ stats_to_update,
+ logical_rows_delta);
break;
}
case FT_OPTIMIZE_FOR_UPGRADE:
@@ -1352,13 +1475,27 @@ toku_ft_bn_apply_msg (
assert_zero(r);
int deleted = 0;
if (!le_is_clean(storeddata)) { //If already clean, nothing to do.
- // message application code needs a key in order to determine how much
- // work was done by this message. since this is a broadcast message,
- // we have to create a new message whose key is the current le's key.
+ // message application code needs a key in order to determine
+ // how much work was done by this message. since this is a
+ // broadcast message, we have to create a new message whose
+ // key is the current le's key.
DBT curr_keydbt;
- ft_msg curr_msg(toku_fill_dbt(&curr_keydbt, curr_keyp, curr_keylen),
- msg.vdbt(), msg.type(), msg.msn(), msg.xids());
- toku_ft_bn_apply_msg_once(bn, curr_msg, idx, curr_keylen, storeddata, gc_info, workdone, stats_to_update);
+ ft_msg curr_msg(
+ toku_fill_dbt(&curr_keydbt, curr_keyp, curr_keylen),
+ msg.vdbt(),
+ msg.type(),
+ msg.msn(),
+ msg.xids());
+ toku_ft_bn_apply_msg_once(
+ bn,
+ curr_msg,
+ idx,
+ curr_keylen,
+ storeddata,
+ gc_info,
+ workdone,
+ stats_to_update,
+ logical_rows_delta);
// at this point, we cannot trust msg.kdbt to be valid.
uint32_t new_dmt_size = bn->data_buffer.num_klpairs();
if (new_dmt_size != num_klpairs) {
@@ -1386,13 +1523,27 @@ toku_ft_bn_apply_msg (
assert_zero(r);
int deleted = 0;
if (le_has_xids(storeddata, msg.xids())) {
- // message application code needs a key in order to determine how much
- // work was done by this message. since this is a broadcast message,
- // we have to create a new message whose key is the current le's key.
+ // message application code needs a key in order to determine
+ // how much work was done by this message. since this is a
+ // broadcast message, we have to create a new message whose key
+ // is the current le's key.
DBT curr_keydbt;
- ft_msg curr_msg(toku_fill_dbt(&curr_keydbt, curr_keyp, curr_keylen),
- msg.vdbt(), msg.type(), msg.msn(), msg.xids());
- toku_ft_bn_apply_msg_once(bn, curr_msg, idx, curr_keylen, storeddata, gc_info, workdone, stats_to_update);
+ ft_msg curr_msg(
+ toku_fill_dbt(&curr_keydbt, curr_keyp, curr_keylen),
+ msg.vdbt(),
+ msg.type(),
+ msg.msn(),
+ msg.xids());
+ toku_ft_bn_apply_msg_once(
+ bn,
+ curr_msg,
+ idx,
+ curr_keylen,
+ storeddata,
+ gc_info,
+ workdone,
+ stats_to_update,
+ logical_rows_delta);
uint32_t new_dmt_size = bn->data_buffer.num_klpairs();
if (new_dmt_size != num_klpairs) {
paranoid_invariant(new_dmt_size + 1 == num_klpairs);
@@ -1424,9 +1575,33 @@ toku_ft_bn_apply_msg (
key = msg.kdbt()->data;
keylen = msg.kdbt()->size;
}
- r = do_update(update_fun, cmp.get_descriptor(), bn, msg, idx, NULL, NULL, 0, gc_info, workdone, stats_to_update);
+ r = do_update(
+ update_fun,
+ cmp.get_descriptor(),
+ bn,
+ msg,
+ idx,
+ NULL,
+ NULL,
+ 0,
+ gc_info,
+ workdone,
+ stats_to_update,
+ logical_rows_delta);
} else if (r==0) {
- r = do_update(update_fun, cmp.get_descriptor(), bn, msg, idx, storeddata, key, keylen, gc_info, workdone, stats_to_update);
+ r = do_update(
+ update_fun,
+ cmp.get_descriptor(),
+ bn,
+ msg,
+ idx,
+ storeddata,
+ key,
+ keylen,
+ gc_info,
+ workdone,
+ stats_to_update,
+ logical_rows_delta);
} // otherwise, a worse error, just return it
break;
}
@@ -1434,6 +1609,12 @@ toku_ft_bn_apply_msg (
// apply to all leafentries.
uint32_t idx = 0;
uint32_t num_leafentries_before;
+ // This dummy accumulator keeps the logical row count from changing as
+ // this message is applied: the delta would otherwise come back as a
+ // negative number equal to the number of leaf entries visited and
+ // drive the ft header value to 0. The message does not change the
+ // number of rows, so the accumulated value is simply discarded.
+ int64_t temp_logical_rows_delta = 0;
while (idx < (num_leafentries_before = bn->data_buffer.num_klpairs())) {
void* curr_key = nullptr;
uint32_t curr_keylen = 0;
@@ -1449,7 +1630,19 @@ toku_ft_bn_apply_msg (
// This is broken below. Have a compilation error checked
// in as a reminder
- r = do_update(update_fun, cmp.get_descriptor(), bn, msg, idx, storeddata, curr_key, curr_keylen, gc_info, workdone, stats_to_update);
+ r = do_update(
+ update_fun,
+ cmp.get_descriptor(),
+ bn,
+ msg,
+ idx,
+ storeddata,
+ curr_key,
+ curr_keylen,
+ gc_info,
+ workdone,
+ stats_to_update,
+ &temp_logical_rows_delta);
assert_zero(r);
if (num_leafentries_before == bn->data_buffer.num_klpairs()) {
@@ -1810,24 +2003,22 @@ void toku_ftnode_leaf_run_gc(FT ft, FTNODE node) {
}
}
-void
-toku_ftnode_put_msg (
+void toku_ftnode_put_msg(
const toku::comparator &cmp,
ft_update_func update_fun,
FTNODE node,
int target_childnum,
const ft_msg &msg,
bool is_fresh,
- txn_gc_info *gc_info,
+ txn_gc_info* gc_info,
size_t flow_deltas[],
- STAT64INFO stats_to_update
- )
+ STAT64INFO stats_to_update,
+ int64_t* logical_rows_delta) {
// Effect: Push message into the subtree rooted at NODE.
// If NODE is a leaf, then
// put message into leaf, applying it to the leafentries
// If NODE is a nonleaf, then push the message into the message buffer(s) of the relevant child(ren).
// The node may become overfull. That's not our problem.
-{
toku_ftnode_assert_fully_in_memory(node);
//
// see comments in toku_ft_leaf_apply_msg
@@ -1836,26 +2027,40 @@ toku_ftnode_put_msg (
// and instead defer to these functions
//
if (node->height==0) {
- toku_ft_leaf_apply_msg(cmp, update_fun, node, target_childnum, msg, gc_info, nullptr, stats_to_update);
+ toku_ft_leaf_apply_msg(
+ cmp,
+ update_fun,
+ node,
+ target_childnum, msg,
+ gc_info,
+ nullptr,
+ stats_to_update,
+ logical_rows_delta);
} else {
- ft_nonleaf_put_msg(cmp, node, target_childnum, msg, is_fresh, flow_deltas);
+ ft_nonleaf_put_msg(
+ cmp,
+ node,
+ target_childnum,
+ msg,
+ is_fresh,
+ flow_deltas);
}
}
-// Effect: applies the message to the leaf if the appropriate basement node is in memory.
-// This function is called during message injection and/or flushing, so the entire
-// node MUST be in memory.
+// Effect: applies the message to the leaf if the appropriate basement node is
+// in memory. This function is called during message injection and/or
+// flushing, so the entire node MUST be in memory.
void toku_ft_leaf_apply_msg(
- const toku::comparator &cmp,
+ const toku::comparator& cmp,
ft_update_func update_fun,
FTNODE node,
int target_childnum, // which child to inject to, or -1 if unknown
- const ft_msg &msg,
- txn_gc_info *gc_info,
- uint64_t *workdone,
- STAT64INFO stats_to_update
- )
-{
+ const ft_msg& msg,
+ txn_gc_info* gc_info,
+ uint64_t* workdone,
+ STAT64INFO stats_to_update,
+ int64_t* logical_rows_delta) {
+
VERIFY_NODE(t, node);
toku_ftnode_assert_fully_in_memory(node);
@@ -1891,34 +2096,36 @@ void toku_ft_leaf_apply_msg(
BASEMENTNODE bn = BLB(node, childnum);
if (msg.msn().msn > bn->max_msn_applied.msn) {
bn->max_msn_applied = msg.msn();
- toku_ft_bn_apply_msg(cmp,
- update_fun,
- bn,
- msg,
- gc_info,
- workdone,
- stats_to_update);
+ toku_ft_bn_apply_msg(
+ cmp,
+ update_fun,
+ bn,
+ msg,
+ gc_info,
+ workdone,
+ stats_to_update,
+ logical_rows_delta);
} else {
toku_ft_status_note_msn_discard();
}
- }
- else if (ft_msg_type_applies_all(msg.type())) {
+ } else if (ft_msg_type_applies_all(msg.type())) {
for (int childnum=0; childnum<node->n_children; childnum++) {
if (msg.msn().msn > BLB(node, childnum)->max_msn_applied.msn) {
BLB(node, childnum)->max_msn_applied = msg.msn();
- toku_ft_bn_apply_msg(cmp,
- update_fun,
- BLB(node, childnum),
- msg,
- gc_info,
- workdone,
- stats_to_update);
+ toku_ft_bn_apply_msg(
+ cmp,
+ update_fun,
+ BLB(node, childnum),
+ msg,
+ gc_info,
+ workdone,
+ stats_to_update,
+ logical_rows_delta);
} else {
toku_ft_status_note_msn_discard();
}
}
- }
- else if (!ft_msg_type_does_nothing(msg.type())) {
+ } else if (!ft_msg_type_does_nothing(msg.type())) {
invariant(ft_msg_type_does_nothing(msg.type()));
}
VERIFY_NODE(t, node);
diff --git a/storage/tokudb/PerconaFT/ft/node.h b/storage/tokudb/PerconaFT/ft/node.h
index 9d910491682..ad0298e81c5 100644
--- a/storage/tokudb/PerconaFT/ft/node.h
+++ b/storage/tokudb/PerconaFT/ft/node.h
@@ -382,25 +382,54 @@ enum reactivity toku_ftnode_get_leaf_reactivity(FTNODE node, uint32_t nodesize);
* If k is equal to some pivot, then we return the next (to the right)
* childnum.
*/
-int toku_ftnode_hot_next_child(FTNODE node, const DBT *k, const toku::comparator &cmp);
-
-void toku_ftnode_put_msg(const toku::comparator &cmp, ft_update_func update_fun,
- FTNODE node, int target_childnum,
- const ft_msg &msg, bool is_fresh, txn_gc_info *gc_info,
- size_t flow_deltas[], STAT64INFO stats_to_update);
-
-void toku_ft_bn_apply_msg_once(BASEMENTNODE bn, const ft_msg &msg, uint32_t idx,
- uint32_t le_keylen, LEAFENTRY le, txn_gc_info *gc_info,
- uint64_t *workdonep, STAT64INFO stats_to_update);
-
-void toku_ft_bn_apply_msg(const toku::comparator &cmp, ft_update_func update_fun,
- BASEMENTNODE bn, const ft_msg &msg, txn_gc_info *gc_info,
- uint64_t *workdone, STAT64INFO stats_to_update);
-
-void toku_ft_leaf_apply_msg(const toku::comparator &cmp, ft_update_func update_fun,
- FTNODE node, int target_childnum,
- const ft_msg &msg, txn_gc_info *gc_info,
- uint64_t *workdone, STAT64INFO stats_to_update);
+int toku_ftnode_hot_next_child(
+ FTNODE node,
+ const DBT* k,
+ const toku::comparator &cmp);
+
+void toku_ftnode_put_msg(
+ const toku::comparator& cmp,
+ ft_update_func update_fun,
+ FTNODE node,
+ int target_childnum,
+ const ft_msg& msg,
+ bool is_fresh,
+ txn_gc_info* gc_info,
+ size_t flow_deltas[],
+ STAT64INFO stats_to_update,
+ int64_t* logical_rows_delta);
+
+void toku_ft_bn_apply_msg_once(
+ BASEMENTNODE bn,
+ const ft_msg& msg,
+ uint32_t idx,
+ uint32_t le_keylen,
+ LEAFENTRY le,
+ txn_gc_info* gc_info,
+ uint64_t* workdonep,
+ STAT64INFO stats_to_update,
+ int64_t* logical_rows_delta);
+
+void toku_ft_bn_apply_msg(
+ const toku::comparator& cmp,
+ ft_update_func update_fun,
+ BASEMENTNODE bn,
+ const ft_msg& msg,
+ txn_gc_info* gc_info,
+ uint64_t* workdone,
+ STAT64INFO stats_to_update,
+ int64_t* logical_rows_delta);
+
+void toku_ft_leaf_apply_msg(
+ const toku::comparator& cmp,
+ ft_update_func update_fun,
+ FTNODE node,
+ int target_childnum,
+ const ft_msg& msg,
+ txn_gc_info* gc_info,
+ uint64_t* workdone,
+ STAT64INFO stats_to_update,
+ int64_t* logical_rows_delta);
//
// Message management for orthopush
diff --git a/storage/tokudb/PerconaFT/ft/serialize/ft-serialize.cc b/storage/tokudb/PerconaFT/ft/serialize/ft-serialize.cc
index a7bc2949276..49d4368a3ab 100644
--- a/storage/tokudb/PerconaFT/ft/serialize/ft-serialize.cc
+++ b/storage/tokudb/PerconaFT/ft/serialize/ft-serialize.cc
@@ -323,6 +323,13 @@ int deserialize_ft_versioned(int fd, struct rbuf *rb, FT *ftp, uint32_t version)
fanout = rbuf_int(rb);
}
+ uint64_t on_disk_logical_rows;
+ on_disk_logical_rows = (uint64_t)-1;
+ if (ft->layout_version_read_from_disk >= FT_LAYOUT_VERSION_29) {
+ on_disk_logical_rows = rbuf_ulonglong(rb);
+ }
+ ft->in_memory_logical_rows = on_disk_logical_rows;
+
(void) rbuf_int(rb); //Read in checksum and ignore (already verified).
if (rb->ndone != rb->size) {
fprintf(stderr, "Header size did not match contents.\n");
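The read above follows the usual version-gated deserialization pattern: a field added in a newer layout version is read only when the on-disk version is at least that new, and otherwise defaults to a sentinel. A self-contained sketch of the pattern, with a hypothetical reader in place of struct rbuf:

    #include <cstdint>

    struct reader {
        int version;            // layout version read from disk
        uint64_t next_u64();    // assumed to consume 8 bytes of the header
    };

    static uint64_t read_logical_rows(reader &rb) {
        uint64_t logical_rows = (uint64_t)-1;   // sentinel: unknown
        if (rb.version >= 29) {                 // FT_LAYOUT_VERSION_29
            logical_rows = rb.next_u64();       // field exists on disk
        }
        return logical_rows;
    }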
@@ -357,7 +364,8 @@ int deserialize_ft_versioned(int fd, struct rbuf *rb, FT *ftp, uint32_t version)
.count_of_optimize_in_progress = count_of_optimize_in_progress,
.count_of_optimize_in_progress_read_from_disk = count_of_optimize_in_progress,
.msn_at_start_of_last_completed_optimize = msn_at_start_of_last_completed_optimize,
- .on_disk_stats = on_disk_stats
+ .on_disk_stats = on_disk_stats,
+ .on_disk_logical_rows = on_disk_logical_rows
};
XMEMDUP(ft->h, &h);
}
@@ -408,6 +416,8 @@ serialize_ft_min_size (uint32_t version) {
size_t size = 0;
switch(version) {
+ case FT_LAYOUT_VERSION_29:
+ size += sizeof(uint64_t); // logrows in ft
case FT_LAYOUT_VERSION_28:
size += sizeof(uint32_t); // fanout in ft
case FT_LAYOUT_VERSION_27:
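serialize_ft_min_size() accumulates by deliberate switch fallthrough: entering at version N adds the bytes introduced by N, then falls through to add every older version's contribution. The same shape in isolation (field sizes taken from the hunk above, older cases elided):

    #include <cstddef>
    #include <cstdint>

    static size_t min_size_sketch(uint32_t version) {
        size_t size = 0;
        switch (version) {
        case 29: size += sizeof(uint64_t);   // logical row count ("logrows")
            // fall through
        case 28: size += sizeof(uint32_t);   // fanout
            // fall through
        default: break;                      // older versions elided here
        }
        return size;
    }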
@@ -754,6 +764,7 @@ void toku_serialize_ft_to_wbuf (
wbuf_MSN(wbuf, h->highest_unused_msn_for_upgrade);
wbuf_MSN(wbuf, h->max_msn_in_ft);
wbuf_int(wbuf, h->fanout);
+ wbuf_ulonglong(wbuf, h->on_disk_logical_rows);
uint32_t checksum = toku_x1764_finish(&wbuf->checksum);
wbuf_int(wbuf, checksum);
lazy_assert(wbuf->ndone == wbuf->size);
diff --git a/storage/tokudb/PerconaFT/ft/serialize/ft_layout_version.h b/storage/tokudb/PerconaFT/ft/serialize/ft_layout_version.h
index 72b6882bc06..9407a568337 100644
--- a/storage/tokudb/PerconaFT/ft/serialize/ft_layout_version.h
+++ b/storage/tokudb/PerconaFT/ft/serialize/ft_layout_version.h
@@ -68,6 +68,7 @@ enum ft_layout_version_e {
FT_LAYOUT_VERSION_26 = 26, // Hojo: basements store key/vals separately on disk for fixed klpair length BNs
FT_LAYOUT_VERSION_27 = 27, // serialize message trees with nonleaf buffers to avoid key, msn sort on deserialize
FT_LAYOUT_VERSION_28 = 28, // Add fanout to ft_header
+ FT_LAYOUT_VERSION_29 = 29, // Add logrows to ft_header
FT_NEXT_VERSION, // the version after the current version
FT_LAYOUT_VERSION = FT_NEXT_VERSION-1, // A hack so I don't have to change this line.
FT_LAYOUT_MIN_SUPPORTED_VERSION = FT_LAYOUT_VERSION_13, // Minimum version supported
diff --git a/storage/tokudb/PerconaFT/ft/tests/CMakeLists.txt b/storage/tokudb/PerconaFT/ft/tests/CMakeLists.txt
index 0098b6091be..270ec97660a 100644
--- a/storage/tokudb/PerconaFT/ft/tests/CMakeLists.txt
+++ b/storage/tokudb/PerconaFT/ft/tests/CMakeLists.txt
@@ -112,11 +112,13 @@ if(BUILD_TESTING OR BUILD_FT_TESTS)
declare_custom_tests(test-upgrade-recovery-logs)
file(GLOB upgrade_tests "${TOKUDB_DATA}/upgrade-recovery-logs-??-clean")
+ file(GLOB upgrade_tests "${CMAKE_CURRENT_SOURCE_DIR}/upgrade.data/upgrade-recovery-logs-??-clean")
foreach(test ${upgrade_tests})
get_filename_component(test_basename "${test}" NAME)
add_ft_test_aux(test-${test_basename} test-upgrade-recovery-logs ${test})
endforeach(test)
file(GLOB upgrade_tests "${TOKUDB_DATA}/upgrade-recovery-logs-??-dirty")
+ file(GLOB upgrade_tests "${CMAKE_CURRENT_SOURCE_DIR}/upgrade.data/upgrade-recovery-logs-??-dirty")
foreach(test ${upgrade_tests})
get_filename_component(test_basename "${test}" NAME)
add_ft_test_aux(test-${test_basename} test-upgrade-recovery-logs ${test})
diff --git a/storage/tokudb/PerconaFT/ft/tests/make-tree.cc b/storage/tokudb/PerconaFT/ft/tests/make-tree.cc
index 663bbf3beb2..761d672539b 100644
--- a/storage/tokudb/PerconaFT/ft/tests/make-tree.cc
+++ b/storage/tokudb/PerconaFT/ft/tests/make-tree.cc
@@ -74,11 +74,20 @@ append_leaf(FTNODE leafnode, void *key, size_t keylen, void *val, size_t vallen)
// apply an insert to the leaf node
txn_gc_info gc_info(nullptr, TXNID_NONE, TXNID_NONE, false);
ft_msg msg(&thekey, &theval, FT_INSERT, msn, toku_xids_get_root_xids());
- toku_ft_bn_apply_msg_once(BLB(leafnode,0), msg, idx, keylen, NULL, &gc_info, NULL, NULL);
+ toku_ft_bn_apply_msg_once(
+ BLB(leafnode, 0),
+ msg,
+ idx,
+ keylen,
+ NULL,
+ &gc_info,
+ NULL,
+ NULL,
+ NULL);
leafnode->max_msn_applied_to_node_on_disk = msn;
- // dont forget to dirty the node
+ // don't forget to dirty the node
leafnode->dirty = 1;
}
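As in the other test call sites below, workdone, stats_to_update, and the new logical_rows_delta are all nullable out-parameters; the apply path guards them with FT_LIKELY(ptr != NULL) before accumulating. A minimal sketch of that pattern, assuming FT_LIKELY wraps __builtin_expect (its actual definition is not shown in this diff):

    #include <cstdint>

    #define LIKELY(x) __builtin_expect(!!(x), 1)

    static void accumulate(int64_t *acc, int64_t delta) {
        // test programs may pass nullptr when they don't care about the value
        if (LIKELY(acc != nullptr)) {
            *acc += delta;
        }
    }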
diff --git a/storage/tokudb/PerconaFT/ft/tests/msnfilter.cc b/storage/tokudb/PerconaFT/ft/tests/msnfilter.cc
index 737c3556ad6..c37dcd089f8 100644
--- a/storage/tokudb/PerconaFT/ft/tests/msnfilter.cc
+++ b/storage/tokudb/PerconaFT/ft/tests/msnfilter.cc
@@ -82,49 +82,85 @@ append_leaf(FT_HANDLE ft, FTNODE leafnode, void *key, uint32_t keylen, void *val
ft_msg msg(&thekey, &theval, FT_INSERT, msn, toku_xids_get_root_xids());
txn_gc_info gc_info(nullptr, TXNID_NONE, TXNID_NONE, false);
- toku_ft_leaf_apply_msg(ft->ft->cmp, ft->ft->update_fun, leafnode, -1, msg, &gc_info, nullptr, nullptr);
+ toku_ft_leaf_apply_msg(
+ ft->ft->cmp,
+ ft->ft->update_fun,
+ leafnode,
+ -1,
+ msg,
+ &gc_info,
+ nullptr,
+ nullptr,
+ nullptr);
{
- int r = toku_ft_lookup(ft, &thekey, lookup_checkf, &pair);
- assert(r==0);
- assert(pair.call_count==1);
+ int r = toku_ft_lookup(ft, &thekey, lookup_checkf, &pair);
+ assert(r==0);
+ assert(pair.call_count==1);
}
ft_msg badmsg(&thekey, &badval, FT_INSERT, msn, toku_xids_get_root_xids());
- toku_ft_leaf_apply_msg(ft->ft->cmp, ft->ft->update_fun, leafnode, -1, badmsg, &gc_info, nullptr, nullptr);
+ toku_ft_leaf_apply_msg(
+ ft->ft->cmp,
+ ft->ft->update_fun,
+ leafnode,
+ -1,
+ badmsg,
+ &gc_info,
+ nullptr,
+ nullptr,
+ nullptr);
// message should be rejected for duplicate msn, row should still have original val
{
- int r = toku_ft_lookup(ft, &thekey, lookup_checkf, &pair);
- assert(r==0);
- assert(pair.call_count==2);
+ int r = toku_ft_lookup(ft, &thekey, lookup_checkf, &pair);
+ assert(r==0);
+ assert(pair.call_count==2);
}
// now verify that message with proper msn gets through
msn = next_dummymsn();
ft->ft->h->max_msn_in_ft = msn;
ft_msg msg2(&thekey, &val2, FT_INSERT, msn, toku_xids_get_root_xids());
- toku_ft_leaf_apply_msg(ft->ft->cmp, ft->ft->update_fun, leafnode, -1, msg2, &gc_info, nullptr, nullptr);
+ toku_ft_leaf_apply_msg(
+ ft->ft->cmp,
+ ft->ft->update_fun,
+ leafnode,
+ -1,
+ msg2,
+ &gc_info,
+ nullptr,
+ nullptr,
+ nullptr);
// message should be accepted, val should have new value
{
- int r = toku_ft_lookup(ft, &thekey, lookup_checkf, &pair2);
- assert(r==0);
- assert(pair2.call_count==1);
+ int r = toku_ft_lookup(ft, &thekey, lookup_checkf, &pair2);
+ assert(r==0);
+ assert(pair2.call_count==1);
}
// now verify that message with lesser (older) msn is rejected
msn.msn = msn.msn - 10;
ft_msg msg3(&thekey, &badval, FT_INSERT, msn, toku_xids_get_root_xids());
- toku_ft_leaf_apply_msg(ft->ft->cmp, ft->ft->update_fun, leafnode, -1, msg3, &gc_info, nullptr, nullptr);
+ toku_ft_leaf_apply_msg(
+ ft->ft->cmp,
+ ft->ft->update_fun,
+ leafnode,
+ -1,
+ msg3,
+ &gc_info,
+ nullptr,
+ nullptr,
+ nullptr);
// message should be rejected, val should still have value in pair2
{
- int r = toku_ft_lookup(ft, &thekey, lookup_checkf, &pair2);
- assert(r==0);
- assert(pair2.call_count==2);
+ int r = toku_ft_lookup(ft, &thekey, lookup_checkf, &pair2);
+ assert(r==0);
+ assert(pair2.call_count==2);
}
- // dont forget to dirty the node
+ // don't forget to dirty the node
leafnode->dirty = 1;
}
diff --git a/storage/tokudb/PerconaFT/ft/tests/orthopush-flush.cc b/storage/tokudb/PerconaFT/ft/tests/orthopush-flush.cc
index 055a38e5f6d..393fb88ac2e 100644
--- a/storage/tokudb/PerconaFT/ft/tests/orthopush-flush.cc
+++ b/storage/tokudb/PerconaFT/ft/tests/orthopush-flush.cc
@@ -137,8 +137,24 @@ insert_random_message_to_bn(
*keyp = toku_xmemdup(keydbt->data, keydbt->size);
ft_msg msg(keydbt, valdbt, FT_INSERT, msn, xids);
int64_t numbytes;
- toku_le_apply_msg(msg, NULL, NULL, 0, keydbt->size, &non_mvcc_gc_info, save, &numbytes);
- toku_ft_bn_apply_msg(t->ft->cmp, t->ft->update_fun, blb, msg, &non_mvcc_gc_info, NULL, NULL);
+ toku_le_apply_msg(
+ msg,
+ NULL,
+ NULL,
+ 0,
+ keydbt->size,
+ &non_mvcc_gc_info,
+ save,
+ &numbytes);
+ toku_ft_bn_apply_msg(
+ t->ft->cmp,
+ t->ft->update_fun,
+ blb,
+ msg,
+ &non_mvcc_gc_info,
+ NULL,
+ NULL,
+ NULL);
if (msn.msn > blb->max_msn_applied.msn) {
blb->max_msn_applied = msn;
}
@@ -182,12 +198,36 @@ insert_same_message_to_bns(
*keyp = toku_xmemdup(keydbt->data, keydbt->size);
ft_msg msg(keydbt, valdbt, FT_INSERT, msn, xids);
int64_t numbytes;
- toku_le_apply_msg(msg, NULL, NULL, 0, keydbt->size, &non_mvcc_gc_info, save, &numbytes);
- toku_ft_bn_apply_msg(t->ft->cmp, t->ft->update_fun, blb1, msg, &non_mvcc_gc_info, NULL, NULL);
+ toku_le_apply_msg(
+ msg,
+ NULL,
+ NULL,
+ 0,
+ keydbt->size,
+ &non_mvcc_gc_info,
+ save,
+ &numbytes);
+ toku_ft_bn_apply_msg(
+ t->ft->cmp,
+ t->ft->update_fun,
+ blb1,
+ msg,
+ &non_mvcc_gc_info,
+ NULL,
+ NULL,
+ NULL);
if (msn.msn > blb1->max_msn_applied.msn) {
blb1->max_msn_applied = msn;
}
- toku_ft_bn_apply_msg(t->ft->cmp, t->ft->update_fun, blb2, msg, &non_mvcc_gc_info, NULL, NULL);
+ toku_ft_bn_apply_msg(
+ t->ft->cmp,
+ t->ft->update_fun,
+ blb2,
+ msg,
+ &non_mvcc_gc_info,
+ NULL,
+ NULL,
+ NULL);
if (msn.msn > blb2->max_msn_applied.msn) {
blb2->max_msn_applied = msn;
}
@@ -619,7 +659,16 @@ flush_to_leaf(FT_HANDLE t, bool make_leaf_up_to_date, bool use_flush) {
if (make_leaf_up_to_date) {
for (i = 0; i < num_parent_messages; ++i) {
if (!parent_messages_is_fresh[i]) {
- toku_ft_leaf_apply_msg(t->ft->cmp, t->ft->update_fun, child, -1, *parent_messages[i], &non_mvcc_gc_info, NULL, NULL);
+ toku_ft_leaf_apply_msg(
+ t->ft->cmp,
+ t->ft->update_fun,
+ child,
+ -1,
+ *parent_messages[i],
+ &non_mvcc_gc_info,
+ NULL,
+ NULL,
+ NULL);
}
}
for (i = 0; i < 8; ++i) {
@@ -842,7 +891,16 @@ flush_to_leaf_with_keyrange(FT_HANDLE t, bool make_leaf_up_to_date) {
for (i = 0; i < num_parent_messages; ++i) {
if (dummy_cmp(parent_messages[i]->kdbt(), &childkeys[7]) <= 0 &&
!parent_messages_is_fresh[i]) {
- toku_ft_leaf_apply_msg(t->ft->cmp, t->ft->update_fun, child, -1, *parent_messages[i], &non_mvcc_gc_info, NULL, NULL);
+ toku_ft_leaf_apply_msg(
+ t->ft->cmp,
+ t->ft->update_fun,
+ child,
+ -1,
+ *parent_messages[i],
+ &non_mvcc_gc_info,
+ NULL,
+ NULL,
+ NULL);
}
}
for (i = 0; i < 8; ++i) {
@@ -1045,8 +1103,26 @@ compare_apply_and_flush(FT_HANDLE t, bool make_leaf_up_to_date) {
if (make_leaf_up_to_date) {
for (i = 0; i < num_parent_messages; ++i) {
if (!parent_messages_is_fresh[i]) {
- toku_ft_leaf_apply_msg(t->ft->cmp, t->ft->update_fun, child1, -1, *parent_messages[i], &non_mvcc_gc_info, NULL, NULL);
- toku_ft_leaf_apply_msg(t->ft->cmp, t->ft->update_fun, child2, -1, *parent_messages[i], &non_mvcc_gc_info, NULL, NULL);
+ toku_ft_leaf_apply_msg(
+ t->ft->cmp,
+ t->ft->update_fun,
+ child1,
+ -1,
+ *parent_messages[i],
+ &non_mvcc_gc_info,
+ NULL,
+ NULL,
+ NULL);
+ toku_ft_leaf_apply_msg(
+ t->ft->cmp,
+ t->ft->update_fun,
+ child2,
+ -1,
+ *parent_messages[i],
+ &non_mvcc_gc_info,
+ NULL,
+ NULL,
+ NULL);
}
}
for (i = 0; i < 8; ++i) {
diff --git a/storage/tokudb/PerconaFT/ft/tests/test-upgrade-recovery-logs.cc b/storage/tokudb/PerconaFT/ft/tests/test-upgrade-recovery-logs.cc
index 8e006498d77..7691ffaac2b 100644
--- a/storage/tokudb/PerconaFT/ft/tests/test-upgrade-recovery-logs.cc
+++ b/storage/tokudb/PerconaFT/ft/tests/test-upgrade-recovery-logs.cc
@@ -81,7 +81,7 @@ static void run_recovery(const char *testdir) {
bool upgrade_in_progress;
r = toku_maybe_upgrade_log(testdir, testdir, &lsn_of_clean_shutdown, &upgrade_in_progress);
if (strcmp(shutdown, "dirty") == 0 && log_version <= 24) {
- CKERR2(r, TOKUDB_UPGRADE_FAILURE); // we dont support dirty upgrade from versions <= 24
+ CKERR2(r, TOKUDB_UPGRADE_FAILURE); // we don't support dirty upgrade from versions <= 24
return;
} else {
CKERR(r);
diff --git a/storage/tokudb/PerconaFT/ft/tests/upgrade.data/upgrade-recovery-logs-24-clean/log000000000000.tokulog24 b/storage/tokudb/PerconaFT/ft/tests/upgrade.data/upgrade-recovery-logs-24-clean/log000000000000.tokulog24
new file mode 100755
index 00000000000..9a56e83e627
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ft/tests/upgrade.data/upgrade-recovery-logs-24-clean/log000000000000.tokulog24
Binary files differ
diff --git a/storage/tokudb/PerconaFT/ft/tests/upgrade.data/upgrade-recovery-logs-24-dirty/log000000000000.tokulog24 b/storage/tokudb/PerconaFT/ft/tests/upgrade.data/upgrade-recovery-logs-24-dirty/log000000000000.tokulog24
new file mode 100755
index 00000000000..c552cda6673
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ft/tests/upgrade.data/upgrade-recovery-logs-24-dirty/log000000000000.tokulog24
Binary files differ
diff --git a/storage/tokudb/PerconaFT/ft/tests/upgrade.data/upgrade-recovery-logs-25-clean/log000000000000.tokulog25 b/storage/tokudb/PerconaFT/ft/tests/upgrade.data/upgrade-recovery-logs-25-clean/log000000000000.tokulog25
new file mode 100755
index 00000000000..26b8bcfbdcc
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ft/tests/upgrade.data/upgrade-recovery-logs-25-clean/log000000000000.tokulog25
Binary files differ
diff --git a/storage/tokudb/PerconaFT/ft/tests/upgrade.data/upgrade-recovery-logs-25-dirty/log000000000000.tokulog25 b/storage/tokudb/PerconaFT/ft/tests/upgrade.data/upgrade-recovery-logs-25-dirty/log000000000000.tokulog25
new file mode 100755
index 00000000000..04d3190c818
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ft/tests/upgrade.data/upgrade-recovery-logs-25-dirty/log000000000000.tokulog25
Binary files differ
diff --git a/storage/tokudb/PerconaFT/ft/tests/upgrade.data/upgrade-recovery-logs-26-clean/log000000000000.tokulog26 b/storage/tokudb/PerconaFT/ft/tests/upgrade.data/upgrade-recovery-logs-26-clean/log000000000000.tokulog26
new file mode 100755
index 00000000000..02047325aa6
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ft/tests/upgrade.data/upgrade-recovery-logs-26-clean/log000000000000.tokulog26
Binary files differ
diff --git a/storage/tokudb/PerconaFT/ft/tests/upgrade.data/upgrade-recovery-logs-26-dirty/log000000000000.tokulog26 b/storage/tokudb/PerconaFT/ft/tests/upgrade.data/upgrade-recovery-logs-26-dirty/log000000000000.tokulog26
new file mode 100755
index 00000000000..ce826b5608b
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ft/tests/upgrade.data/upgrade-recovery-logs-26-dirty/log000000000000.tokulog26
Binary files differ
diff --git a/storage/tokudb/PerconaFT/ft/tests/upgrade.data/upgrade-recovery-logs-27-clean/log000000000000.tokulog27 b/storage/tokudb/PerconaFT/ft/tests/upgrade.data/upgrade-recovery-logs-27-clean/log000000000000.tokulog27
new file mode 100755
index 00000000000..9849b977d73
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ft/tests/upgrade.data/upgrade-recovery-logs-27-clean/log000000000000.tokulog27
Binary files differ
diff --git a/storage/tokudb/PerconaFT/ft/tests/upgrade.data/upgrade-recovery-logs-27-dirty/log000000000000.tokulog27 b/storage/tokudb/PerconaFT/ft/tests/upgrade.data/upgrade-recovery-logs-27-dirty/log000000000000.tokulog27
new file mode 100755
index 00000000000..8b658ea4c0a
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ft/tests/upgrade.data/upgrade-recovery-logs-27-dirty/log000000000000.tokulog27
Binary files differ
diff --git a/storage/tokudb/PerconaFT/ft/tests/upgrade.data/upgrade-recovery-logs-28-clean/log000000000000.tokulog28 b/storage/tokudb/PerconaFT/ft/tests/upgrade.data/upgrade-recovery-logs-28-clean/log000000000000.tokulog28
new file mode 100644
index 00000000000..11fecfb94b2
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ft/tests/upgrade.data/upgrade-recovery-logs-28-clean/log000000000000.tokulog28
Binary files differ
diff --git a/storage/tokudb/PerconaFT/ft/tests/upgrade.data/upgrade-recovery-logs-28-dirty/log000000000000.tokulog28 b/storage/tokudb/PerconaFT/ft/tests/upgrade.data/upgrade-recovery-logs-28-dirty/log000000000000.tokulog28
new file mode 100644
index 00000000000..b7a9b03b583
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ft/tests/upgrade.data/upgrade-recovery-logs-28-dirty/log000000000000.tokulog28
Binary files differ
diff --git a/storage/tokudb/PerconaFT/ft/tests/upgrade.data/upgrade-recovery-logs-29-clean/log000000000000.tokulog29 b/storage/tokudb/PerconaFT/ft/tests/upgrade.data/upgrade-recovery-logs-29-clean/log000000000000.tokulog29
new file mode 100644
index 00000000000..a1f306f4a96
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ft/tests/upgrade.data/upgrade-recovery-logs-29-clean/log000000000000.tokulog29
Binary files differ
diff --git a/storage/tokudb/PerconaFT/ft/tests/upgrade.data/upgrade-recovery-logs-29-dirty/log000000000000.tokulog29 b/storage/tokudb/PerconaFT/ft/tests/upgrade.data/upgrade-recovery-logs-29-dirty/log000000000000.tokulog29
new file mode 100644
index 00000000000..b9e79eeb1c4
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ft/tests/upgrade.data/upgrade-recovery-logs-29-dirty/log000000000000.tokulog29
Binary files differ
diff --git a/storage/tokudb/PerconaFT/ft/tests/verify-bad-msn.cc b/storage/tokudb/PerconaFT/ft/tests/verify-bad-msn.cc
index 68fac0e6a9c..b10885c2e62 100644
--- a/storage/tokudb/PerconaFT/ft/tests/verify-bad-msn.cc
+++ b/storage/tokudb/PerconaFT/ft/tests/verify-bad-msn.cc
@@ -78,12 +78,21 @@ append_leaf(FTNODE leafnode, void *key, size_t keylen, void *val, size_t vallen)
// apply an insert to the leaf node
ft_msg msg(&thekey, &theval, FT_INSERT, msn, toku_xids_get_root_xids());
txn_gc_info gc_info(nullptr, TXNID_NONE, TXNID_NONE, false);
- toku_ft_bn_apply_msg_once(BLB(leafnode, 0), msg, idx, keylen, NULL, &gc_info, NULL, NULL);
+ toku_ft_bn_apply_msg_once(
+ BLB(leafnode, 0),
+ msg,
+ idx,
+ keylen,
+ NULL,
+ &gc_info,
+ NULL,
+ NULL,
+ NULL);
// Create bad tree (don't do following):
// leafnode->max_msn_applied_to_node = msn;
- // dont forget to dirty the node
+ // don't forget to dirty the node
leafnode->dirty = 1;
}
diff --git a/storage/tokudb/PerconaFT/ft/tests/verify-bad-pivots.cc b/storage/tokudb/PerconaFT/ft/tests/verify-bad-pivots.cc
index 49b2b8a6c21..c1d08ce41a6 100644
--- a/storage/tokudb/PerconaFT/ft/tests/verify-bad-pivots.cc
+++ b/storage/tokudb/PerconaFT/ft/tests/verify-bad-pivots.cc
@@ -65,9 +65,18 @@ append_leaf(FTNODE leafnode, void *key, size_t keylen, void *val, size_t vallen)
MSN msn = next_dummymsn();
ft_msg msg(&thekey, &theval, FT_INSERT, msn, toku_xids_get_root_xids());
txn_gc_info gc_info(nullptr, TXNID_NONE, TXNID_NONE, false);
- toku_ft_bn_apply_msg_once(BLB(leafnode, 0), msg, idx, keylen, NULL, &gc_info, NULL, NULL);
-
- // dont forget to dirty the node
+ toku_ft_bn_apply_msg_once(
+ BLB(leafnode, 0),
+ msg,
+ idx,
+ keylen,
+ NULL,
+ &gc_info,
+ NULL,
+ NULL,
+ NULL);
+
+ // don't forget to dirty the node
leafnode->dirty = 1;
}
diff --git a/storage/tokudb/PerconaFT/ft/tests/verify-dup-in-leaf.cc b/storage/tokudb/PerconaFT/ft/tests/verify-dup-in-leaf.cc
index 72c4063f51f..22a29c0ff69 100644
--- a/storage/tokudb/PerconaFT/ft/tests/verify-dup-in-leaf.cc
+++ b/storage/tokudb/PerconaFT/ft/tests/verify-dup-in-leaf.cc
@@ -66,9 +66,18 @@ append_leaf(FTNODE leafnode, void *key, size_t keylen, void *val, size_t vallen)
MSN msn = next_dummymsn();
ft_msg msg(&thekey, &theval, FT_INSERT, msn, toku_xids_get_root_xids());
txn_gc_info gc_info(nullptr, TXNID_NONE, TXNID_NONE, false);
- toku_ft_bn_apply_msg_once(BLB(leafnode, 0), msg, idx, keylen, NULL, &gc_info, NULL, NULL);
-
- // dont forget to dirty the node
+ toku_ft_bn_apply_msg_once(
+ BLB(leafnode, 0),
+ msg,
+ idx,
+ keylen,
+ NULL,
+ &gc_info,
+ NULL,
+ NULL,
+ NULL);
+
+ // don't forget to dirty the node
leafnode->dirty = 1;
}
diff --git a/storage/tokudb/PerconaFT/ft/tests/verify-dup-pivots.cc b/storage/tokudb/PerconaFT/ft/tests/verify-dup-pivots.cc
index f569f502dc8..80189dd9804 100644
--- a/storage/tokudb/PerconaFT/ft/tests/verify-dup-pivots.cc
+++ b/storage/tokudb/PerconaFT/ft/tests/verify-dup-pivots.cc
@@ -65,9 +65,18 @@ append_leaf(FTNODE leafnode, void *key, size_t keylen, void *val, size_t vallen)
MSN msn = next_dummymsn();
ft_msg msg(&thekey, &theval, FT_INSERT, msn, toku_xids_get_root_xids());
txn_gc_info gc_info(nullptr, TXNID_NONE, TXNID_NONE, false);
- toku_ft_bn_apply_msg_once(BLB(leafnode, 0), msg, idx, keylen, NULL, &gc_info, NULL, NULL);
-
- // dont forget to dirty the node
+ toku_ft_bn_apply_msg_once(
+ BLB(leafnode, 0),
+ msg,
+ idx,
+ keylen,
+ NULL,
+ &gc_info,
+ NULL,
+ NULL,
+ NULL);
+
+ // don't forget to dirty the node
leafnode->dirty = 1;
}
diff --git a/storage/tokudb/PerconaFT/ft/tests/verify-misrouted-msgs.cc b/storage/tokudb/PerconaFT/ft/tests/verify-misrouted-msgs.cc
index 3a6db8ee4de..a84aac1f063 100644
--- a/storage/tokudb/PerconaFT/ft/tests/verify-misrouted-msgs.cc
+++ b/storage/tokudb/PerconaFT/ft/tests/verify-misrouted-msgs.cc
@@ -66,9 +66,18 @@ append_leaf(FTNODE leafnode, void *key, size_t keylen, void *val, size_t vallen)
MSN msn = next_dummymsn();
ft_msg msg(&thekey, &theval, FT_INSERT, msn, toku_xids_get_root_xids());
txn_gc_info gc_info(nullptr, TXNID_NONE, TXNID_NONE, false);
- toku_ft_bn_apply_msg_once(BLB(leafnode,0), msg, idx, keylen, NULL, &gc_info, NULL, NULL);
-
- // dont forget to dirty the node
+ toku_ft_bn_apply_msg_once(
+ BLB(leafnode, 0),
+ msg,
+ idx,
+ keylen,
+ NULL,
+ &gc_info,
+ NULL,
+ NULL,
+ NULL);
+
+ // don't forget to dirty the node
leafnode->dirty = 1;
}
diff --git a/storage/tokudb/PerconaFT/ft/tests/verify-unsorted-leaf.cc b/storage/tokudb/PerconaFT/ft/tests/verify-unsorted-leaf.cc
index 4392887718f..ca413f52567 100644
--- a/storage/tokudb/PerconaFT/ft/tests/verify-unsorted-leaf.cc
+++ b/storage/tokudb/PerconaFT/ft/tests/verify-unsorted-leaf.cc
@@ -68,9 +68,18 @@ append_leaf(FTNODE leafnode, void *key, size_t keylen, void *val, size_t vallen)
MSN msn = next_dummymsn();
ft_msg msg(&thekey, &theval, FT_INSERT, msn, toku_xids_get_root_xids());
txn_gc_info gc_info(nullptr, TXNID_NONE, TXNID_NONE, false);
- toku_ft_bn_apply_msg_once(BLB(leafnode, 0), msg, idx, keylen, NULL, &gc_info, NULL, NULL);
-
- // dont forget to dirty the node
+ toku_ft_bn_apply_msg_once(
+ BLB(leafnode, 0),
+ msg,
+ idx,
+ keylen,
+ NULL,
+ &gc_info,
+ NULL,
+ NULL,
+ NULL);
+
+ // don't forget to dirty the node
leafnode->dirty = 1;
}
diff --git a/storage/tokudb/PerconaFT/ft/tests/verify-unsorted-pivots.cc b/storage/tokudb/PerconaFT/ft/tests/verify-unsorted-pivots.cc
index e3167bd3dc1..6efa06913c2 100644
--- a/storage/tokudb/PerconaFT/ft/tests/verify-unsorted-pivots.cc
+++ b/storage/tokudb/PerconaFT/ft/tests/verify-unsorted-pivots.cc
@@ -65,9 +65,18 @@ append_leaf(FTNODE leafnode, void *key, size_t keylen, void *val, size_t vallen)
MSN msn = next_dummymsn();
ft_msg msg(&thekey, &theval, FT_INSERT, msn, toku_xids_get_root_xids());
txn_gc_info gc_info(nullptr, TXNID_NONE, TXNID_NONE, false);
- toku_ft_bn_apply_msg_once(BLB(leafnode, 0), msg, idx, keylen, NULL, &gc_info, NULL, NULL);
-
- // dont forget to dirty the node
+ toku_ft_bn_apply_msg_once(
+ BLB(leafnode, 0),
+ msg,
+ idx,
+ keylen,
+ NULL,
+ &gc_info,
+ NULL,
+ NULL,
+ NULL);
+
+ // don't forget to dirty the node
leafnode->dirty = 1;
}
diff --git a/storage/tokudb/PerconaFT/ft/txn/rollback-apply.cc b/storage/tokudb/PerconaFT/ft/txn/rollback-apply.cc
index 6a8c0d45b45..df830afd0df 100644
--- a/storage/tokudb/PerconaFT/ft/txn/rollback-apply.cc
+++ b/storage/tokudb/PerconaFT/ft/txn/rollback-apply.cc
@@ -186,6 +186,7 @@ int toku_rollback_commit(TOKUTXN txn, LSN lsn) {
// Append the list to the front of the parent.
if (child_log->oldest_logentry) {
// There are some entries, so link them in.
+ parent_log->dirty = true;
child_log->oldest_logentry->prev = parent_log->newest_logentry;
if (!parent_log->oldest_logentry) {
parent_log->oldest_logentry = child_log->oldest_logentry;
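The one-line fix above marks the parent rollback log dirty as soon as child entries are spliced onto it, so the modified parent is written back. The splice itself, reduced to a generic intrusive list (the types here are illustrative, not the real rollback structures):

    struct entry { entry *prev; entry *next; };
    struct log   { entry *oldest; entry *newest; bool dirty; };

    static void splice_child_into_parent(log &parent, log &child) {
        if (child.oldest == nullptr) return;   // nothing to move
        parent.dirty = true;                   // parent now differs from disk
        child.oldest->prev = parent.newest;
        if (parent.newest != nullptr) parent.newest->next = child.oldest;
        if (parent.oldest == nullptr) parent.oldest = child.oldest;
        parent.newest = child.newest;
        child.oldest = child.newest = nullptr;
    }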
diff --git a/storage/tokudb/PerconaFT/ft/txn/txn.cc b/storage/tokudb/PerconaFT/ft/txn/txn.cc
index cd0585dbf6c..dd03073a3ec 100644
--- a/storage/tokudb/PerconaFT/ft/txn/txn.cc
+++ b/storage/tokudb/PerconaFT/ft/txn/txn.cc
@@ -248,11 +248,24 @@ static txn_child_manager tcm;
.xa_xid = {0, 0, 0, ""},
.progress_poll_fun = NULL,
.progress_poll_fun_extra = NULL,
- .txn_lock = ZERO_MUTEX_INITIALIZER,
+
+ // You cannot initialize txn_lock with TOKU_MUTEX_INITIALIZER, because
+ // we will initialize it in the code below, and it cannot already
+ // be initialized at that point. Also, in general, you don't
+ // get to use PTHREAD_MUTEX_INITIALIZER (which is what is inside
+ // TOKU_MUTEX_INITIALIZER) except in static variables, and this
+ // is initializing an auto variable.
+ //
+ // And we cannot simply avoid initializing these fields because,
+ // although that avoids -Wmissing-field-initializers errors under
+ // gcc, it triggers other errors about non-trivial designated
+ // initializers not being supported.
+
+ .txn_lock = ZERO_MUTEX_INITIALIZER, // Not TOKU_MUTEX_INITIALIZER
.open_fts = open_fts,
.roll_info = roll_info,
- .state_lock = ZERO_MUTEX_INITIALIZER,
- .state_cond = TOKU_COND_INITIALIZER,
+ .state_lock = ZERO_MUTEX_INITIALIZER, // Not TOKU_MUTEX_INITIALIZER
+ .state_cond = ZERO_COND_INITIALIZER, // Not TOKU_COND_INITIALIZER
.state = TOKUTXN_LIVE,
.num_pin = 0,
.client_id = 0,
diff --git a/storage/tokudb/PerconaFT/ft/txn/txn_manager.cc b/storage/tokudb/PerconaFT/ft/txn/txn_manager.cc
index 551fd32b8d5..88eca36a261 100644
--- a/storage/tokudb/PerconaFT/ft/txn/txn_manager.cc
+++ b/storage/tokudb/PerconaFT/ft/txn/txn_manager.cc
@@ -45,7 +45,15 @@ Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
#include "ft/txn/txn_manager.h"
#include "ft/txn/rollback.h"
#include "util/omt.h"
+// This is only for testing.
+static void (* test_txn_sync_callback) (pthread_t, void *) = NULL;
+static void * test_txn_sync_callback_extra = NULL;
+
+void set_test_txn_sync_callback(void (*cb) (pthread_t, void *), void *extra) {
+ test_txn_sync_callback = cb;
+ test_txn_sync_callback_extra = extra;
+}
bool garbage_collection_debug = false;
static bool txn_records_snapshot(TXN_SNAPSHOT_TYPE snapshot_type, struct tokutxn *parent) {
@@ -525,14 +533,19 @@ void toku_txn_manager_handle_snapshot_create_for_child_txn(
XMALLOC(txn->live_root_txn_list);
txn_manager_lock(txn_manager);
txn_manager_create_snapshot_unlocked(txn_manager, txn);
- txn_manager_unlock(txn_manager);
}
else {
inherit_snapshot_from_parent(txn);
}
- if (copies_snapshot) {
- setup_live_root_txn_list(&txn_manager->live_root_ids, txn->live_root_txn_list);
- }
+
+ toku_debug_txn_sync(pthread_self());
+
+ if (copies_snapshot) {
+ if (!records_snapshot)
+ txn_manager_lock(txn_manager);
+ setup_live_root_txn_list(&txn_manager->live_root_ids, txn->live_root_txn_list);
+ txn_manager_unlock(txn_manager);
+ }
}
void toku_txn_manager_handle_snapshot_destroy_for_child_txn(
diff --git a/storage/tokudb/PerconaFT/ft/txn/txn_manager.h b/storage/tokudb/PerconaFT/ft/txn/txn_manager.h
index 658c6f9aecd..7cdc52c4f43 100644
--- a/storage/tokudb/PerconaFT/ft/txn/txn_manager.h
+++ b/storage/tokudb/PerconaFT/ft/txn/txn_manager.h
@@ -43,6 +43,15 @@ Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
#include "ft/txn/txn.h"
+void set_test_txn_sync_callback(void (*) (pthread_t, void*), void*);
+#define toku_test_txn_sync_callback(a) ((test_txn_sync_callback) ? test_txn_sync_callback(a, test_txn_sync_callback_extra) : (void) 0)
+
+#if TOKU_DEBUG_TXN_SYNC
+#define toku_debug_txn_sync(a) toku_test_txn_sync_callback(a)
+#else
+#define toku_debug_txn_sync(a) ((void) 0)
+#endif
+
typedef struct txn_manager *TXN_MANAGER;
struct referenced_xid_tuple {
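A test would install the sync hook once during setup; when TOKU_DEBUG_TXN_SYNC is off, toku_debug_txn_sync() expands to a no-op and the hot path stays free. A sketch of the registration (the callback body is illustrative):

    #include <pthread.h>
    #include <cstdio>

    static void on_txn_sync(pthread_t tid, void *extra) {
        (void) tid;   // the thread id is available if the test wants it
        fprintf(stderr, "txn sync callback fired (extra=%p)\n", extra);
    }

    // in test setup:
    //     set_test_txn_sync_callback(on_txn_sync, nullptr);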
diff --git a/storage/tokudb/PerconaFT/ft/ule.cc b/storage/tokudb/PerconaFT/ft/ule.cc
index 573c4488f70..ac393fbf179 100644
--- a/storage/tokudb/PerconaFT/ft/ule.cc
+++ b/storage/tokudb/PerconaFT/ft/ule.cc
@@ -73,12 +73,11 @@ void toku_le_get_status(LE_STATUS statp) {
*statp = le_status;
}
-///////////////////////////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////////////
// Accessor functions used by outside world (e.g. indexer)
//
-ULEHANDLE
-toku_ule_create(LEAFENTRY le) {
+ULEHANDLE toku_ule_create(LEAFENTRY le) {
ULE XMALLOC(ule_p);
le_unpack(ule_p, le);
return (ULEHANDLE) ule_p;
@@ -89,7 +88,7 @@ void toku_ule_free(ULEHANDLE ule_p) {
toku_free(ule_p);
}
-///////////////////////////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////////////
//
// Question: Can any software outside this file modify or read a leafentry?
// If so, is it worthwhile to put it all here?
@@ -117,27 +116,43 @@ const UXR_S committed_delete = {
// Local functions:
-static void msg_init_empty_ule(ULE ule);
-static void msg_modify_ule(ULE ule, const ft_msg &msg);
-static void ule_init_empty_ule(ULE ule);
+static inline void msg_init_empty_ule(ULE ule);
+static int64_t msg_modify_ule(ULE ule, const ft_msg &msg);
+static inline void ule_init_empty_ule(ULE ule);
static void ule_do_implicit_promotions(ULE ule, XIDS xids);
-static void ule_try_promote_provisional_outermost(ULE ule, TXNID oldest_possible_live_xid);
+static void ule_try_promote_provisional_outermost(
+ ULE ule,
+ TXNID oldest_possible_live_xid);
static void ule_promote_provisional_innermost_to_index(ULE ule, uint32_t index);
static void ule_promote_provisional_innermost_to_committed(ULE ule);
-static void ule_apply_insert(ULE ule, XIDS xids, uint32_t vallen, void * valp);
-static void ule_apply_delete(ULE ule, XIDS xids);
-static void ule_prepare_for_new_uxr(ULE ule, XIDS xids);
-static void ule_apply_abort(ULE ule, XIDS xids);
+static inline int64_t ule_apply_insert_no_overwrite(
+ ULE ule,
+ XIDS xids,
+ uint32_t vallen,
+ void* valp);
+static inline int64_t ule_apply_insert(
+ ULE ule,
+ XIDS xids,
+ uint32_t vallen,
+ void* valp);
+static inline int64_t ule_apply_delete(ULE ule, XIDS xids);
+static inline void ule_prepare_for_new_uxr(ULE ule, XIDS xids);
+static inline int64_t ule_apply_abort(ULE ule, XIDS xids);
static void ule_apply_broadcast_commit_all (ULE ule);
static void ule_apply_commit(ULE ule, XIDS xids);
-static void ule_push_insert_uxr(ULE ule, bool is_committed, TXNID xid, uint32_t vallen, void * valp);
-static void ule_push_delete_uxr(ULE ule, bool is_committed, TXNID xid);
-static void ule_push_placeholder_uxr(ULE ule, TXNID xid);
-static UXR ule_get_innermost_uxr(ULE ule);
-static UXR ule_get_first_empty_uxr(ULE ule);
-static void ule_remove_innermost_uxr(ULE ule);
-static TXNID ule_get_innermost_xid(ULE ule);
-static TXNID ule_get_xid(ULE ule, uint32_t index);
+static inline void ule_push_insert_uxr(
+ ULE ule,
+ bool is_committed,
+ TXNID xid,
+ uint32_t vallen,
+ void* valp);
+static inline void ule_push_delete_uxr(ULE ule, bool is_committed, TXNID xid);
+static inline void ule_push_placeholder_uxr(ULE ule, TXNID xid);
+static inline UXR ule_get_innermost_uxr(ULE ule);
+static inline UXR ule_get_first_empty_uxr(ULE ule);
+static inline void ule_remove_innermost_uxr(ULE ule);
+static inline TXNID ule_get_innermost_xid(ULE ule);
+static inline TXNID ule_get_xid(ULE ule, uint32_t index);
static void ule_remove_innermost_placeholders(ULE ule);
static void ule_add_placeholders(ULE ule, XIDS xids);
static void ule_optimize(ULE ule, XIDS xids);
@@ -153,6 +168,30 @@ static inline size_t uxr_unpack_type_and_length(UXR uxr, uint8_t *p);
static inline size_t uxr_unpack_length_and_bit(UXR uxr, uint8_t *p);
static inline size_t uxr_unpack_data(UXR uxr, uint8_t *p);
+#if 0
+static void ule_print(ULE ule, const char* note) {
+ fprintf(stderr, "%s : ULE[0x%p]\n", note, ule);
+ fprintf(stderr, " num_puxrs[%u]\n", ule->num_puxrs);
+ fprintf(stderr, " num_cuxrs[%u]\n", ule->num_cuxrs);
+ fprintf(stderr, " innermost[%u]\n", ule->num_cuxrs + ule->num_puxrs - 1);
+ fprintf(stderr, " first_empty[%u]\n", ule->num_cuxrs + ule->num_puxrs);
+
+ uint32_t num_uxrs = ule->num_cuxrs + ule->num_puxrs - 1;
+ for (uint32_t uxr_num = 0; uxr_num <= num_uxrs; uxr_num++) {
+ UXR uxr = &(ule->uxrs[uxr_num]);
+ fprintf(stderr, " uxr[%u]\n", uxr_num);
+ switch (uxr->type) {
+ case 0: fprintf(stderr, " type[NONE]\n"); break;
+ case 1: fprintf(stderr, " type[INSERT]\n"); break;
+ case 2: fprintf(stderr, " type[DELETE]\n"); break;
+ case 3: fprintf(stderr, " type[PLACEHOLDER]\n"); break;
+ default: fprintf(stderr, " type[WHAT??]\n"); break;
+ }
+ fprintf(stderr, " xid[%lu]\n", uxr->xid);
+ }
+}
+#endif
+
static void get_space_for_le(
bn_data* data_buffer,
uint32_t idx,
@@ -162,21 +201,30 @@ static void get_space_for_le(
uint32_t old_le_size,
size_t size,
LEAFENTRY* new_le_space,
- void **const maybe_free
- )
-{
+ void** const maybe_free) {
+
if (data_buffer == nullptr) {
CAST_FROM_VOIDP(*new_le_space, toku_xmalloc(size));
- }
- else {
+ } else if (old_le_size > 0) {
// this means we are overwriting something
- if (old_le_size > 0) {
- data_buffer->get_space_for_overwrite(idx, keyp, keylen, old_keylen, old_le_size, size, new_le_space, maybe_free);
- }
+ data_buffer->get_space_for_overwrite(
+ idx,
+ keyp,
+ keylen,
+ old_keylen,
+ old_le_size,
+ size,
+ new_le_space,
+ maybe_free);
+ } else {
// this means we are inserting something new
- else {
- data_buffer->get_space_for_insert(idx, keyp, keylen, size, new_le_space, maybe_free);
- }
+ data_buffer->get_space_for_insert(
+ idx,
+ keyp,
+ keylen,
+ size,
+ new_le_space,
+ maybe_free);
}
}
@@ -185,15 +233,13 @@ static void get_space_for_le(
// Garbage collection related functions
//
-static TXNID
-get_next_older_txnid(TXNID xc, const xid_omt_t &omt) {
+static TXNID get_next_older_txnid(TXNID xc, const xid_omt_t &omt) {
int r;
TXNID xid;
r = omt.find<TXNID, toku_find_xid_by_xid>(xc, -1, &xid, nullptr);
if (r==0) {
invariant(xid < xc); //sanity check
- }
- else {
+ } else {
invariant(r==DB_NOTFOUND);
xid = TXNID_NONE;
}
@@ -201,17 +247,32 @@ get_next_older_txnid(TXNID xc, const xid_omt_t &omt) {
}
//
-// This function returns true if live transaction TL1 is allowed to read a value committed by
-// transaction xc, false otherwise.
+// This function returns true if live transaction TL1 is allowed to read a
+// value committed by transaction xc, false otherwise.
//
-static bool
-xid_reads_committed_xid(TXNID tl1, TXNID xc, const xid_omt_t &snapshot_txnids, const rx_omt_t &referenced_xids) {
+static bool xid_reads_committed_xid(
+ TXNID tl1,
+ TXNID xc,
+ const xid_omt_t& snapshot_txnids,
+ const rx_omt_t& referenced_xids) {
+
bool rval;
- if (tl1 < xc) rval = false; //cannot read a newer txn
- else {
- TXNID x = toku_get_youngest_live_list_txnid_for(xc, snapshot_txnids, referenced_xids);
- if (x == TXNID_NONE) rval = true; //Not in ANY live list, tl1 can read it.
- else rval = tl1 > x; //Newer than the 'newest one that has it in live list'
+ if (tl1 < xc) {
+ rval = false; //cannot read a newer txn
+ } else {
+ TXNID x =
+ toku_get_youngest_live_list_txnid_for(
+ xc,
+ snapshot_txnids,
+ referenced_xids);
+
+ if (x == TXNID_NONE) {
+ //Not in ANY live list, tl1 can read it.
+ rval = true;
+ } else {
+ //Newer than the 'newest one that has it in live list'
+ rval = tl1 > x;
+ }
// we know tl1 > xc
// we know x > xc
// if tl1 == x, then we do not read, because tl1 is in xc's live list
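// Worked example (hypothetical TXNIDs): suppose xc == 10 was committed
// while snapshot txn 15 still listed it, so the youngest live-list txnid
// for xc is 15. Then:
//   tl1 ==  8 -> false (8 < 10, xc is newer than the reader)
//   tl1 == 15 -> false (15 is exactly the youngest live reader of xc)
//   tl1 == 20 -> true  (20 began after 15, so it reads xc's value)
// With no live list at all (x == TXNID_NONE), any tl1 >= xc reads it.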
@@ -228,8 +289,7 @@ xid_reads_committed_xid(TXNID tl1, TXNID xc, const xid_omt_t &snapshot_txnids, c
// than oldest_referenced_xid. All elements below this entry are garbage,
// so we get rid of them.
//
-static void
-ule_simple_garbage_collection(ULE ule, txn_gc_info *gc_info) {
+static void ule_simple_garbage_collection(ULE ule, txn_gc_info *gc_info) {
if (ule->num_cuxrs == 1) {
return;
}
@@ -240,7 +300,8 @@ ule_simple_garbage_collection(ULE ule, txn_gc_info *gc_info) {
// uxr with a txnid that is less than oldest_referenced_xid
for (uint32_t i = 0; i < ule->num_cuxrs; i++) {
curr_index = ule->num_cuxrs - i - 1;
- if (ule->uxrs[curr_index].xid < gc_info->oldest_referenced_xid_for_simple_gc) {
+ if (ule->uxrs[curr_index].xid <
+ gc_info->oldest_referenced_xid_for_simple_gc) {
break;
}
}
@@ -250,12 +311,15 @@ ule_simple_garbage_collection(ULE ule, txn_gc_info *gc_info) {
curr_index = ule->num_cuxrs - 1;
}
- // curr_index is now set to the youngest uxr older than oldest_referenced_xid
- // so if it's not the bottom of the stack..
+ // curr_index is now set to the youngest uxr older than
+ // oldest_referenced_xid so if it's not the bottom of the stack..
if (curr_index != 0) {
// ..then we need to get rid of the entries below curr_index
uint32_t num_entries = ule->num_cuxrs + ule->num_puxrs - curr_index;
- memmove(&ule->uxrs[0], &ule->uxrs[curr_index], num_entries * sizeof(ule->uxrs[0]));
+ memmove(
+ &ule->uxrs[0],
+ &ule->uxrs[curr_index],
+ num_entries * sizeof(ule->uxrs[0]));
ule->uxrs[0].xid = TXNID_NONE; // New 'bottom of stack' loses its TXNID
ule->num_cuxrs -= curr_index;
}
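//
// Worked example (hypothetical xids): with committed stack
// {TXNID_NONE, 5, 9, 12} (outermost first, num_cuxrs == 4) and
// oldest_referenced_xid_for_simple_gc == 10, the scan stops at
// curr_index == 2 (xid 9, the youngest entry older than 10). The memmove
// drops indices 0 and 1, xid 9's value becomes the new bottom of the
// stack with xid TXNID_NONE, and num_cuxrs drops from 4 to 2, leaving
// {TXNID_NONE, 12}.
//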
@@ -264,8 +328,12 @@ ule_simple_garbage_collection(ULE ule, txn_gc_info *gc_info) {
// TODO: Clean this up
extern bool garbage_collection_debug;
-static void
-ule_garbage_collect(ULE ule, const xid_omt_t &snapshot_xids, const rx_omt_t &referenced_xids, const xid_omt_t &live_root_txns) {
+static void ule_garbage_collect(
+ ULE ule,
+ const xid_omt_t& snapshot_xids,
+ const rx_omt_t& referenced_xids,
+ const xid_omt_t& live_root_txns) {
+
if (ule->num_cuxrs == 1) {
return;
}
@@ -289,10 +357,12 @@ ule_garbage_collect(ULE ule, const xid_omt_t &snapshot_xids, const rx_omt_t &ref
// If we find that the committed transaction is in the live list,
// then xc is really in the process of being committed. It has not
// been fully committed. As a result, our assumption that transactions
- // newer than what is currently in these OMTs will read the top of the stack
- // is not necessarily accurate. Transactions may read what is just below xc.
- // As a result, we must mark what is just below xc as necessary and move on.
- // This issue was found while testing flusher threads, and was fixed for #3979
+ // newer than what is currently in these OMTs will read the top of the
+ // stack is not necessarily accurate. Transactions may read what is
+ // just below xc.
+ // As a result, we must mark what is just below xc as necessary and
+ // move on. This issue was found while testing flusher threads, and was
+ // fixed for #3979
//
bool is_xc_live = toku_is_txn_in_live_root_txn_list(live_root_txns, xc);
if (is_xc_live) {
@@ -300,13 +370,19 @@ ule_garbage_collect(ULE ule, const xid_omt_t &snapshot_xids, const rx_omt_t &ref
continue;
}
- tl1 = toku_get_youngest_live_list_txnid_for(xc, snapshot_xids, referenced_xids);
+ tl1 =
+ toku_get_youngest_live_list_txnid_for(
+ xc,
+ snapshot_xids,
+ referenced_xids);
- // if tl1 == xc, that means xc should be live and show up in live_root_txns, which we check above.
+ // if tl1 == xc, that means xc should be live and show up in
+ // live_root_txns, which we check above.
invariant(tl1 != xc);
if (tl1 == TXNID_NONE) {
- // set tl1 to youngest live transaction older than ule->uxrs[curr_committed_entry]->xid
+ // set tl1 to youngest live transaction older than
+ // ule->uxrs[curr_committed_entry]->xid
tl1 = get_next_older_txnid(xc, snapshot_xids);
if (tl1 == TXNID_NONE) {
// remainder is garbage, we're done
@@ -314,8 +390,13 @@ ule_garbage_collect(ULE ule, const xid_omt_t &snapshot_xids, const rx_omt_t &ref
}
}
if (garbage_collection_debug) {
- int r = snapshot_xids.find_zero<TXNID, toku_find_xid_by_xid>(tl1, nullptr, nullptr);
- invariant_zero(r); // make sure that the txn you are claiming is live is actually live
+ int r =
+ snapshot_xids.find_zero<TXNID, toku_find_xid_by_xid>(
+ tl1,
+ nullptr,
+ nullptr);
+ // make sure that the txn claimed to be live actually is live

+ invariant_zero(r);
}
//
// tl1 should now be set
@@ -323,7 +404,11 @@ ule_garbage_collect(ULE ule, const xid_omt_t &snapshot_xids, const rx_omt_t &ref
curr_committed_entry--;
while (curr_committed_entry > 0) {
xc = ule->uxrs[curr_committed_entry].xid;
- if (xid_reads_committed_xid(tl1, xc, snapshot_xids, referenced_xids)) {
+ if (xid_reads_committed_xid(
+ tl1,
+ xc,
+ snapshot_xids,
+ referenced_xids)) {
break;
}
curr_committed_entry--;
@@ -343,7 +428,10 @@ ule_garbage_collect(ULE ule, const xid_omt_t &snapshot_xids, const rx_omt_t &ref
ule->uxrs[0].xid = TXNID_NONE; //New 'bottom of stack' loses its TXNID
if (first_free != ule->num_cuxrs) {
// Shift provisional values
- memmove(&ule->uxrs[first_free], &ule->uxrs[ule->num_cuxrs], ule->num_puxrs * sizeof(ule->uxrs[0]));
+ memmove(
+ &ule->uxrs[first_free],
+ &ule->uxrs[ule->num_cuxrs],
+ ule->num_puxrs * sizeof(ule->uxrs[0]));
}
ule->num_cuxrs = saved;
}
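//
// Example of the final shift (hypothetical counts): if only saved == 2 of
// the original num_cuxrs == 4 committed entries were marked necessary and
// one provisional entry remains, then first_free == 2 and the provisional
// uxr is memmove'd from index 4 down to index 2 before num_cuxrs is reset
// to 2.
//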
@@ -367,29 +455,42 @@ enum {
ULE_MIN_MEMSIZE_TO_FORCE_GC = 1024 * 1024
};
-/////////////////////////////////////////////////////////////////////////////////
-// This is the big enchilada. (Bring Tums.) Note that this level of abstraction
-// has no knowledge of the inner structure of either leafentry or msg. It makes
-// calls into the next lower layer (msg_xxx) which handles messages.
+////////////////////////////////////////////////////////////////////////////////
+// This is the big enchilada. (Bring Tums.) Note that this level of
+// abstraction has no knowledge of the inner structure of either leafentry or
+// msg. It makes calls into the next lower layer (msg_xxx) which handles
+// messages.
//
// NOTE: This is the only function (at least in this body of code) that modifies
// a leafentry.
// NOTE: It is the responsibility of the caller to make sure that the key is set
// in the FT_MSG, as it will be used to store the data in the data_buffer
//
-// Return 0 on success.
-// If the leafentry is destroyed it sets *new_leafentry_p to NULL.
-// Otehrwise the new_leafentry_p points at the new leaf entry.
-// As of October 2011, this function always returns 0.
-void
-toku_le_apply_msg(const ft_msg &msg,
- LEAFENTRY old_leafentry, // NULL if there was no stored data.
- bn_data* data_buffer, // bn_data storing leafentry, if NULL, means there is no bn_data
- uint32_t idx, // index in data_buffer where leafentry is stored (and should be replaced
- uint32_t old_keylen, // length of the any key in data_buffer
- txn_gc_info *gc_info,
- LEAFENTRY *new_leafentry_p,
- int64_t * numbytes_delta_p) { // change in total size of key and val, not including any overhead
+// Returns -1, 0, or 1 identifying the change in logical row count needed
+// based on the result of applying the message: for example, a delete that
+// finds no logical leafentry, or an insert that finds a duplicate and is
+// converted to an update.
+//
+// old_leafentry - NULL if there was no stored data.
+// data_buffer - bn_data storing leafentry, if NULL, means there is no bn_data
+// idx - index in data_buffer where leafentry is stored
+// (and should be replaced)
+// old_keylen - length of the any key in data_buffer
+// new_leafentry_p - If the leafentry is destroyed it sets *new_leafentry_p
+// to NULL. Otherwise the new_leafentry_p points at the new
+// leaf entry.
+// numbytes_delta_p - change in total size of key and val, not including any
+// overhead
+int64_t toku_le_apply_msg(
+ const ft_msg& msg,
+ LEAFENTRY old_leafentry,
+ bn_data* data_buffer,
+ uint32_t idx,
+ uint32_t old_keylen,
+ txn_gc_info* gc_info,
+ LEAFENTRY* new_leafentry_p,
+ int64_t* numbytes_delta_p) {
+
invariant_notnull(gc_info);
paranoid_invariant_notnull(new_leafentry_p);
ULE_S ule;
@@ -397,6 +498,7 @@ toku_le_apply_msg(const ft_msg &msg,
int64_t newnumbytes = 0;
uint64_t oldmemsize = 0;
uint32_t keylen = msg.kdbt()->size;
+ int32_t rowcountdelta = 0;
if (old_leafentry == NULL) {
msg_init_empty_ule(&ule);
@@ -405,49 +507,62 @@ toku_le_apply_msg(const ft_msg &msg,
le_unpack(&ule, old_leafentry); // otherwise unpack leafentry
oldnumbytes = ule_get_innermost_numbytes(&ule, keylen);
}
- msg_modify_ule(&ule, msg); // modify unpacked leafentry
- // - we may be able to immediately promote the newly-apllied outermost provisonal uxr
- // - either way, run simple gc first, and then full gc if there are still some committed uxrs.
- ule_try_promote_provisional_outermost(&ule, gc_info->oldest_referenced_xid_for_implicit_promotion);
+ // modify unpacked leafentry
+ rowcountdelta = msg_modify_ule(&ule, msg);
+
+ // - we may be able to immediately promote the newly-applied outermost
+ // provisional uxr
+ // - either way, run simple gc first, and then full gc if there are still
+ // some committed uxrs.
+ ule_try_promote_provisional_outermost(
+ &ule,
+ gc_info->oldest_referenced_xid_for_implicit_promotion);
ule_simple_garbage_collection(&ule, gc_info);
txn_manager_state *txn_state_for_gc = gc_info->txn_state_for_gc;
size_t size_before_gc = 0;
- if (ule.num_cuxrs > 1 && txn_state_for_gc != nullptr && // there is garbage to clean, and our caller gave us state..
- // ..and either the state is pre-initialized, or the committed stack is large enough
- (txn_state_for_gc->initialized || ule.num_cuxrs >= ULE_MIN_STACK_SIZE_TO_FORCE_GC ||
- // ..or the ule's raw memsize is sufficiently large
- (size_before_gc = ule_packed_memsize(&ule)) >= ULE_MIN_MEMSIZE_TO_FORCE_GC)) {
- // ..then it's worth running gc, possibly initializing the txn manager state, if it isn't already
+ // there is garbage to clean, and our caller gave us state..
+ // ..and either the state is pre-initialized, or the committed stack is
+ // large enough
+ // ..or the ule's raw memsize is sufficiently large
+ // ..then it's worth running gc, possibly initializing the txn manager
+ // state, if it isn't already
+ if (ule.num_cuxrs > 1 && txn_state_for_gc != nullptr &&
+ (txn_state_for_gc->initialized ||
+ ule.num_cuxrs >= ULE_MIN_STACK_SIZE_TO_FORCE_GC ||
+ (size_before_gc = ule_packed_memsize(&ule)) >=
+ ULE_MIN_MEMSIZE_TO_FORCE_GC)) {
if (!txn_state_for_gc->initialized) {
txn_state_for_gc->init();
}
-
- size_before_gc = size_before_gc != 0 ? size_before_gc : // it's already been calculated above
- ule_packed_memsize(&ule);
- ule_garbage_collect(&ule,
- txn_state_for_gc->snapshot_xids,
- txn_state_for_gc->referenced_xids,
- txn_state_for_gc->live_root_txns
- );
+ // it's already been calculated above
+ size_before_gc =
+ size_before_gc != 0 ? size_before_gc : ule_packed_memsize(&ule);
+ ule_garbage_collect(
+ &ule,
+ txn_state_for_gc->snapshot_xids,
+ txn_state_for_gc->referenced_xids,
+ txn_state_for_gc->live_root_txns);
size_t size_after_gc = ule_packed_memsize(&ule);
LE_STATUS_INC(LE_APPLY_GC_BYTES_IN, size_before_gc);
LE_STATUS_INC(LE_APPLY_GC_BYTES_OUT, size_after_gc);
}
- void *maybe_free = nullptr;
- int r = le_pack(
- &ule, // create packed leafentry
- data_buffer,
- idx,
- msg.kdbt()->data, // contract of this function is caller has this set, always
- keylen, // contract of this function is caller has this set, always
- old_keylen,
- oldmemsize,
- new_leafentry_p,
- &maybe_free
- );
+ void* maybe_free = nullptr;
+ // create packed leafentry
+ // contract of this function is caller has keyp and keylen set, always
+ int r =
+ le_pack(
+ &ule,
+ data_buffer,
+ idx,
+ msg.kdbt()->data,
+ keylen,
+ old_keylen,
+ oldmemsize,
+ new_leafentry_p,
+ &maybe_free);
invariant_zero(r);
if (*new_leafentry_p) {
newnumbytes = ule_get_innermost_numbytes(&ule, keylen);
@@ -458,16 +573,22 @@ toku_le_apply_msg(const ft_msg &msg,
if (maybe_free != nullptr) {
toku_free(maybe_free);
}
+ return rowcountdelta;
}
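#if 0
// Caller sketch (disabled; 'logical_rows' is a hypothetical accumulator,
// not a name from this patch): the int64_t now returned is a correction
// for a logical row count kept by callers that count inserts/deletes
// optimistically: -1 when an insert found a duplicate and was effectively
// an update, +1 when a delete found no logical row, 0 in the common case.
static void apply_msg_and_count_sketch(
    const ft_msg& msg,
    LEAFENTRY old_le,
    bn_data* bn_buffer,
    uint32_t idx,
    uint32_t old_keylen,
    txn_gc_info* gc_info,
    int64_t* logical_rows) {

    LEAFENTRY new_le = nullptr;
    int64_t numbytes_delta = 0;
    *logical_rows +=
        toku_le_apply_msg(
            msg,
            old_le,
            bn_buffer,
            idx,
            old_keylen,
            gc_info,
            &new_le,
            &numbytes_delta);
}
#endif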
-bool toku_le_worth_running_garbage_collection(LEAFENTRY le, txn_gc_info *gc_info) {
-// Effect: Quickly determines if it's worth trying to run garbage collection on a leafentry
+bool toku_le_worth_running_garbage_collection(
+ LEAFENTRY le,
+ txn_gc_info* gc_info) {
+// Effect: Quickly determines if it's worth trying to run garbage collection
+// on a leafentry
// Return: True if it makes sense to try garbage collection, false otherwise.
// Rationale: Garbage collection is likely to clean up under two circumstances:
-// 1.) There are multiple committed entries. Some may never be read by new txns.
-// 2.) There is only one committed entry, but the outermost provisional entry
-// is older than the oldest known referenced xid, so it must have commited.
-// Therefor we can promote it to committed and get rid of the old commited entry.
+// 1.) There are multiple committed entries. Some may never be read
+// by new txns.
+// 2.) There is only one committed entry, but the outermost
+// provisional entry is older than the oldest known referenced
+// xid, so it must have committed. Therefore we can promote it to
+// committed and get rid of the old committed entry.
if (le->type != LE_MVCC) {
return false;
}
@@ -477,7 +598,8 @@ bool toku_le_worth_running_garbage_collection(LEAFENTRY le, txn_gc_info *gc_info
paranoid_invariant(le->u.mvcc.num_cxrs == 1);
}
return le->u.mvcc.num_pxrs > 0 &&
- le_outermost_uncommitted_xid(le) < gc_info->oldest_referenced_xid_for_implicit_promotion;
+ le_outermost_uncommitted_xid(le) <
+ gc_info->oldest_referenced_xid_for_implicit_promotion;
}
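#if 0
// Gating sketch (disabled; assumes the usual caller pattern): only pay
// for unpacking and full GC when the cheap check above says the
// leafentry can actually shrink.
static void maybe_gc_one_le_sketch(
    LEAFENTRY le,
    bn_data* bn_buffer,
    uint32_t idx,
    void* keyp,
    uint32_t keylen,
    txn_gc_info* gc_info) {

    if (toku_le_worth_running_garbage_collection(le, gc_info)) {
        LEAFENTRY new_le = nullptr;
        int64_t numbytes_delta = 0;
        toku_le_garbage_collect(
            le,
            bn_buffer,
            idx,
            keyp,
            keylen,
            gc_info,
            &new_le,
            &numbytes_delta);
    }
}
#endif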
// Garbage collect one leaf entry, using the given OMT's.
@@ -498,16 +620,18 @@ bool toku_le_worth_running_garbage_collection(LEAFENTRY le, txn_gc_info *gc_info
// -- referenced_xids : list of in memory active transactions.
// NOTE: it is not a good idea to garbage collect a leaf
// entry with only one committed value.
-void
-toku_le_garbage_collect(LEAFENTRY old_leaf_entry,
- bn_data* data_buffer,
- uint32_t idx,
- void* keyp,
- uint32_t keylen,
- txn_gc_info *gc_info,
- LEAFENTRY *new_leaf_entry,
- int64_t * numbytes_delta_p) {
- // We shouldn't want to run gc without having provided a snapshot of the txn system.
+void toku_le_garbage_collect(
+ LEAFENTRY old_leaf_entry,
+ bn_data* data_buffer,
+ uint32_t idx,
+ void* keyp,
+ uint32_t keylen,
+ txn_gc_info* gc_info,
+ LEAFENTRY* new_leaf_entry,
+ int64_t* numbytes_delta_p) {
+
+ // We shouldn't want to run gc without having provided a snapshot of the
+ // txn system.
invariant_notnull(gc_info);
invariant_notnull(gc_info->txn_state_for_gc);
paranoid_invariant_notnull(new_leaf_entry);
@@ -520,20 +644,24 @@ toku_le_garbage_collect(LEAFENTRY old_leaf_entry,
oldnumbytes = ule_get_innermost_numbytes(&ule, keylen);
uint32_t old_mem_size = leafentry_memsize(old_leaf_entry);
- // Before running garbage collection, try to promote the outermost provisional
- // entries to committed if its xid is older than the oldest possible live xid.
+ // Before running garbage collection, try to promote the outermost
+ // provisional entries to committed if its xid is older than the oldest
+ // possible live xid.
//
// The oldest known referenced xid is a lower bound on the oldest possible
// live xid, so we use that. It's usually close enough to get rid of most
// garbage in leafentries.
- ule_try_promote_provisional_outermost(&ule, gc_info->oldest_referenced_xid_for_implicit_promotion);
+ ule_try_promote_provisional_outermost(
+ &ule,
+ gc_info->oldest_referenced_xid_for_implicit_promotion);
// No need to run simple gc here if we're going straight for full gc.
if (ule.num_cuxrs > 1) {
size_t size_before_gc = ule_packed_memsize(&ule);
- ule_garbage_collect(&ule,
- gc_info->txn_state_for_gc->snapshot_xids,
- gc_info->txn_state_for_gc->referenced_xids,
- gc_info->txn_state_for_gc->live_root_txns);
+ ule_garbage_collect(
+ &ule,
+ gc_info->txn_state_for_gc->snapshot_xids,
+ gc_info->txn_state_for_gc->referenced_xids,
+ gc_info->txn_state_for_gc->live_root_txns);
size_t size_after_gc = ule_packed_memsize(&ule);
LE_STATUS_INC(LE_APPLY_GC_BYTES_IN, size_before_gc);
@@ -541,17 +669,18 @@ toku_le_garbage_collect(LEAFENTRY old_leaf_entry,
}
void *maybe_free = nullptr;
- int r = le_pack(
- &ule,
- data_buffer,
- idx,
- keyp,
- keylen,
- keylen, // old_keylen, same because the key isn't going to change for gc
- old_mem_size,
- new_leaf_entry,
- &maybe_free
- );
+ // old_keylen, same because the key isn't going to change for gc
+ int r =
+ le_pack(
+ &ule,
+ data_buffer,
+ idx,
+ keyp,
+ keylen,
+ keylen,
+ old_mem_size,
+ new_leaf_entry,
+ &maybe_free);
invariant_zero(r);
if (*new_leaf_entry) {
newnumbytes = ule_get_innermost_numbytes(&ule, keylen);
@@ -564,49 +693,54 @@ toku_le_garbage_collect(LEAFENTRY old_leaf_entry,
}
}
-/////////////////////////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////////////
// This layer of abstraction (msg_xxx)
// knows the accessors of msg, but not of leafentry or unpacked leaf entry.
// It makes calls into the lower layer (le_xxx) which handles leafentries.
// Purpose is to init the ule with given key and no transaction records
//
-static void
-msg_init_empty_ule(ULE ule) {
+static inline void msg_init_empty_ule(ULE ule) {
ule_init_empty_ule(ule);
}
// Purpose is to modify the unpacked leafentry in our private workspace.
//
-static void
-msg_modify_ule(ULE ule, const ft_msg &msg) {
+// Returns -1, 0, or 1 identifying the change in logical row count needed
+// based on the result of applying the message: for example, a delete that
+// finds no logical leafentry, or an insert that finds a duplicate and is
+// converted to an update.
+static int64_t msg_modify_ule(ULE ule, const ft_msg &msg) {
+ int64_t retval = 0;
XIDS xids = msg.xids();
invariant(toku_xids_get_num_xids(xids) < MAX_TRANSACTION_RECORDS);
enum ft_msg_type type = msg.type();
- if (type != FT_OPTIMIZE && type != FT_OPTIMIZE_FOR_UPGRADE) {
+ if (FT_LIKELY(type != FT_OPTIMIZE && type != FT_OPTIMIZE_FOR_UPGRADE)) {
ule_do_implicit_promotions(ule, xids);
}
switch (type) {
- case FT_INSERT_NO_OVERWRITE: {
- UXR old_innermost_uxr = ule_get_innermost_uxr(ule);
- //If something exists, quit (no overwrite).
- if (uxr_is_insert(old_innermost_uxr)) break;
- //else it is just an insert, so
- //fall through to FT_INSERT on purpose.
- }
- case FT_INSERT: {
- uint32_t vallen = msg.vdbt()->size;
- invariant(IS_VALID_LEN(vallen));
- void * valp = msg.vdbt()->data;
- ule_apply_insert(ule, xids, vallen, valp);
+ case FT_INSERT_NO_OVERWRITE:
+ retval =
+ ule_apply_insert_no_overwrite(
+ ule,
+ xids,
+ msg.vdbt()->size,
+ msg.vdbt()->data);
+ break;
+ case FT_INSERT:
+ retval =
+ ule_apply_insert(
+ ule,
+ xids,
+ msg.vdbt()->size,
+ msg.vdbt()->data);
break;
- }
case FT_DELETE_ANY:
- ule_apply_delete(ule, xids);
+ retval = ule_apply_delete(ule, xids);
break;
case FT_ABORT_ANY:
case FT_ABORT_BROADCAST_TXN:
- ule_apply_abort(ule, xids);
+ retval = ule_apply_abort(ule, xids);
break;
case FT_COMMIT_BROADCAST_ALL:
ule_apply_broadcast_commit_all(ule);
@@ -621,34 +755,40 @@ msg_modify_ule(ULE ule, const ft_msg &msg) {
break;
case FT_UPDATE:
case FT_UPDATE_BROADCAST_ALL:
- assert(false); // These messages don't get this far. Instead they get translated (in setval_fun in do_update) into FT_INSERT messages.
+ // These messages don't get this far. Instead they get translated (in
+ // setval_fun in do_update) into FT_INSERT messages.
+ assert(false);
break;
default:
- assert(false); /* illegal ft msg type */
+ // illegal ft msg type
+ assert(false);
break;
}
+ return retval;
}
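//
// Summary of the value msg_modify_ule threads back through
// toku_le_apply_msg (derived from the cases above):
//   FT_INSERT / FT_INSERT_NO_OVERWRITE : -1 if an insert already existed
//   FT_DELETE_ANY                      : +1 if already logically deleted
//   FT_ABORT_*                         : +1/-1 when the abort re-exposes
//                                        or re-hides a row, 0 otherwise
//   commits, FT_OPTIMIZE*              : 0 (no logical row count change)
//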
-void test_msg_modify_ule(ULE ule, const ft_msg &msg){
+void test_msg_modify_ule(ULE ule, const ft_msg &msg) {
msg_modify_ule(ule,msg);
}
static void ule_optimize(ULE ule, XIDS xids) {
if (ule->num_puxrs) {
- TXNID uncommitted = ule->uxrs[ule->num_cuxrs].xid; // outermost uncommitted
+ // outermost uncommitted
+ TXNID uncommitted = ule->uxrs[ule->num_cuxrs].xid;
TXNID oldest_living_xid = TXNID_NONE;
uint32_t num_xids = toku_xids_get_num_xids(xids);
if (num_xids > 0) {
invariant(num_xids==1);
oldest_living_xid = toku_xids_get_xid(xids, 0);
}
- if (oldest_living_xid == TXNID_NONE || uncommitted < oldest_living_xid) {
+ if (oldest_living_xid == TXNID_NONE ||
+ uncommitted < oldest_living_xid) {
ule_promote_provisional_innermost_to_committed(ule);
}
}
}
-/////////////////////////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////////////
// This layer of abstraction (le_xxx) understands the structure of the leafentry
// and of the unpacked leafentry. It is the only layer that understands the
// structure of leafentry. It has no knowledge of any other data structures.
@@ -657,8 +797,7 @@ static void ule_optimize(ULE ule, XIDS xids) {
//
// required for every le_unpack that is done
//
-void
-ule_cleanup(ULE ule) {
+void ule_cleanup(ULE ule) {
invariant(ule->uxrs);
if (ule->uxrs != ule->uxrs_static) {
toku_free(ule->uxrs);
@@ -668,8 +807,7 @@ ule_cleanup(ULE ule) {
// populate an unpacked leafentry using pointers into the given leafentry.
// thus, the memory referenced by 'le' must live as long as the ULE.
-void
-le_unpack(ULE ule, LEAFENTRY le) {
+void le_unpack(ULE ule, LEAFENTRY le) {
uint8_t type = le->type;
uint8_t *p;
uint32_t i;
@@ -694,9 +832,10 @@ le_unpack(ULE ule, LEAFENTRY le) {
//Dynamic memory
if (ule->num_cuxrs < MAX_TRANSACTION_RECORDS) {
ule->uxrs = ule->uxrs_static;
- }
- else {
- XMALLOC_N(ule->num_cuxrs + 1 + MAX_TRANSACTION_RECORDS, ule->uxrs);
+ } else {
+ XMALLOC_N(
+ ule->num_cuxrs + 1 + MAX_TRANSACTION_RECORDS,
+ ule->uxrs);
}
p = le->u.mvcc.xrs;
@@ -717,9 +856,12 @@ le_unpack(ULE ule, LEAFENTRY le) {
p += uxr_unpack_length_and_bit(innermost, p);
}
for (i = 0; i < ule->num_cuxrs; i++) {
- p += uxr_unpack_length_and_bit(ule->uxrs + ule->num_cuxrs - 1 - i, p);
+ p +=
+ uxr_unpack_length_and_bit(
+ ule->uxrs + ule->num_cuxrs - 1 - i,
+ p);
}
-
+
//unpack interesting values inner to outer
if (ule->num_puxrs!=0) {
UXR innermost = ule->uxrs + ule->num_cuxrs + ule->num_puxrs - 1;
@@ -761,14 +903,12 @@ le_unpack(ULE ule, LEAFENTRY le) {
#endif
}
-static inline size_t
-uxr_pack_txnid(UXR uxr, uint8_t *p) {
+static inline size_t uxr_pack_txnid(UXR uxr, uint8_t *p) {
*(TXNID*)p = toku_htod64(uxr->xid);
return sizeof(TXNID);
}
-static inline size_t
-uxr_pack_type_and_length(UXR uxr, uint8_t *p) {
+static inline size_t uxr_pack_type_and_length(UXR uxr, uint8_t *p) {
size_t rval = 1;
*p = uxr->type;
if (uxr_is_insert(uxr)) {
@@ -778,21 +918,18 @@ uxr_pack_type_and_length(UXR uxr, uint8_t *p) {
return rval;
}
-static inline size_t
-uxr_pack_length_and_bit(UXR uxr, uint8_t *p) {
+static inline size_t uxr_pack_length_and_bit(UXR uxr, uint8_t *p) {
uint32_t length_and_bit;
if (uxr_is_insert(uxr)) {
length_and_bit = INSERT_LENGTH(uxr->vallen);
- }
- else {
+ } else {
length_and_bit = DELETE_LENGTH(uxr->vallen);
}
*(uint32_t*)p = toku_htod32(length_and_bit);
return sizeof(uint32_t);
}
-static inline size_t
-uxr_pack_data(UXR uxr, uint8_t *p) {
+static inline size_t uxr_pack_data(UXR uxr, uint8_t *p) {
if (uxr_is_insert(uxr)) {
memcpy(p, uxr->valp, uxr->vallen);
return uxr->vallen;
@@ -800,14 +937,12 @@ uxr_pack_data(UXR uxr, uint8_t *p) {
return 0;
}
-static inline size_t
-uxr_unpack_txnid(UXR uxr, uint8_t *p) {
+static inline size_t uxr_unpack_txnid(UXR uxr, uint8_t *p) {
uxr->xid = toku_dtoh64(*(TXNID*)p);
return sizeof(TXNID);
}
-static inline size_t
-uxr_unpack_type_and_length(UXR uxr, uint8_t *p) {
+static inline size_t uxr_unpack_type_and_length(UXR uxr, uint8_t *p) {
size_t rval = 1;
uxr->type = *p;
if (uxr_is_insert(uxr)) {
@@ -817,22 +952,19 @@ uxr_unpack_type_and_length(UXR uxr, uint8_t *p) {
return rval;
}
-static inline size_t
-uxr_unpack_length_and_bit(UXR uxr, uint8_t *p) {
+static inline size_t uxr_unpack_length_and_bit(UXR uxr, uint8_t *p) {
uint32_t length_and_bit = toku_dtoh32(*(uint32_t*)p);
if (IS_INSERT(length_and_bit)) {
uxr->type = XR_INSERT;
uxr->vallen = GET_LENGTH(length_and_bit);
- }
- else {
+ } else {
uxr->type = XR_DELETE;
uxr->vallen = 0;
}
return sizeof(uint32_t);
}
-static inline size_t
-uxr_unpack_data(UXR uxr, uint8_t *p) {
+static inline size_t uxr_unpack_data(UXR uxr, uint8_t *p) {
if (uxr_is_insert(uxr)) {
uxr->valp = p;
return uxr->vallen;
@@ -841,8 +973,7 @@ uxr_unpack_data(UXR uxr, uint8_t *p) {
}
// executed too often to be worth making threadsafe
-static inline void
-update_le_status(ULE ule, size_t memsize) {
+static inline void update_le_status(ULE ule, size_t memsize) {
if (ule->num_cuxrs > LE_STATUS_VAL(LE_MAX_COMMITTED_XR))
LE_STATUS_VAL(LE_MAX_COMMITTED_XR) = ule->num_cuxrs;
if (ule->num_puxrs > LE_STATUS_VAL(LE_MAX_PROVISIONAL_XR))
@@ -856,21 +987,22 @@ update_le_status(ULE ule, size_t memsize) {
// Purpose is to return a newly allocated leaf entry in packed format, or
// return null if leaf entry should be destroyed (if no transaction records
// are for inserts).
-// Transaction records in packed le are stored inner to outer (first xr is innermost),
-// with some information extracted out of the transaction records into the header.
+// Transaction records in packed le are stored inner to outer (first xr is
+// innermost), with some information extracted out of the transaction records
+// into the header.
// Transaction records in ule are stored outer to inner (uxr[0] is outermost).
-int
-le_pack(ULE ule, // data to be packed into new leafentry
- bn_data* data_buffer,
- uint32_t idx,
- void* keyp,
- uint32_t keylen,
- uint32_t old_keylen,
- uint32_t old_le_size,
- LEAFENTRY * const new_leafentry_p, // this is what this function creates
- void **const maybe_free
- )
-{
+// Takes 'ule' and creates 'new_leafentry_p'.
+int le_pack(
+ ULE ule,
+ bn_data* data_buffer,
+ uint32_t idx,
+ void* keyp,
+ uint32_t keylen,
+ uint32_t old_keylen,
+ uint32_t old_le_size,
+ LEAFENTRY* const new_leafentry_p,
+ void** const maybe_free) {
+
invariant(ule->num_cuxrs > 0);
invariant(ule->uxrs[0].xid == TXNID_NONE);
int rval;
@@ -888,7 +1020,8 @@ le_pack(ULE ule, // data to be packed into new leafentry
}
}
if (data_buffer && old_le_size > 0) {
- // must pass old_keylen and old_le_size, since that's what is actually stored in data_buffer
+ // must pass old_keylen and old_le_size, since that's what is
+ // actually stored in data_buffer
data_buffer->delete_leafentry(idx, old_keylen, old_le_size);
}
*new_leafentry_p = NULL;
@@ -898,14 +1031,24 @@ le_pack(ULE ule, // data to be packed into new leafentry
found_insert:
memsize = le_memsize_from_ule(ule);
LEAFENTRY new_leafentry;
- get_space_for_le(data_buffer, idx, keyp, keylen, old_keylen, old_le_size, memsize, &new_leafentry, maybe_free);
+ get_space_for_le(
+ data_buffer,
+ idx,
+ keyp,
+ keylen,
+ old_keylen,
+ old_le_size,
+ memsize,
+ &new_leafentry,
+ maybe_free);
//p always points to first unused byte after leafentry we are packing
uint8_t *p;
invariant(ule->num_cuxrs>0);
//Type specific data
if (ule->num_cuxrs == 1 && ule->num_puxrs == 0) {
- //Pack a 'clean leafentry' (no uncommitted transactions, only one committed value)
+ //Pack a 'clean leafentry' (no uncommitted transactions, only one
+ //committed value)
new_leafentry->type = LE_CLEAN;
uint32_t vallen = ule->uxrs[0].vallen;
@@ -917,8 +1060,7 @@ found_insert:
//Set p to after leafentry
p = new_leafentry->u.clean.val + vallen;
- }
- else {
+ } else {
uint32_t i;
//Pack an 'mvcc leafentry'
new_leafentry->type = LE_MVCC;
@@ -969,7 +1111,9 @@ found_insert:
p += uxr_pack_data(outermost, p);
}
//pack txnid, length, bit, data for non-outermost, non-innermost
- for (i = ule->num_cuxrs + 1; i < ule->num_cuxrs + ule->num_puxrs - 1; i++) {
+ for (i = ule->num_cuxrs + 1;
+ i < ule->num_cuxrs + ule->num_puxrs - 1;
+ i++) {
UXR uxr = ule->uxrs + i;
p += uxr_pack_txnid(uxr, p);
p += uxr_pack_type_and_length(uxr, p);
@@ -1022,13 +1166,13 @@ cleanup:
return rval;
}
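//
// Layout example for the 'clean' case packed above (one committed insert,
// no provisional entries), consistent with le_memsize_from_ule below:
//   offset 0 : 1 byte   type == LE_CLEAN
//   offset 1 : 4 bytes  vallen
//   offset 5 : vallen bytes of value
// so the whole packed leafentry is 5 + vallen bytes.
//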
-//////////////////////////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////////////
// Following functions provide convenient access to a packed leafentry.
//Requires:
-// Leafentry that ule represents should not be destroyed (is not just all deletes)
-size_t
-le_memsize_from_ule (ULE ule) {
+// Leafentry that ule represents should not be destroyed (is not just all
+// deletes)
+size_t le_memsize_from_ule (ULE ule) {
invariant(ule->num_cuxrs);
size_t rval;
if (ule->num_cuxrs == 1 && ule->num_puxrs == 0) {
@@ -1037,13 +1181,13 @@ le_memsize_from_ule (ULE ule) {
rval = 1 //type
+4 //vallen
+committed->vallen; //actual val
- }
- else {
+ } else {
rval = 1 //type
+4 //num_cuxrs
+1 //num_puxrs
+4*(ule->num_cuxrs) //types+lengths for committed
- +8*(ule->num_cuxrs + ule->num_puxrs - 1); //txnids (excluding superroot)
+ +8*(ule->num_cuxrs + ule->num_puxrs - 1); //txnids (excluding
+ //superroot)
uint32_t i;
//Count data from committed uxrs and innermost puxr
for (i = 0; i < ule->num_cuxrs; i++) {
@@ -1072,8 +1216,11 @@ le_memsize_from_ule (ULE ule) {
}
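//
// Worked example (hypothetical entry): one committed insert plus one
// provisional delete packs as MVCC, so the fixed part alone is
//   1 (type) + 4 (num_cuxrs) + 1 (num_puxrs)
//   + 4*1 (type+length words for the committed entry)
//   + 8*1 (one non-superroot txnid)
//   = 18 bytes,
// before the value bytes and the per-provisional overhead counted above.
//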
// TODO: rename
-size_t
-leafentry_rest_memsize(uint32_t num_puxrs, uint32_t num_cuxrs, uint8_t* start) {
+size_t leafentry_rest_memsize(
+ uint32_t num_puxrs,
+ uint32_t num_cuxrs,
+ uint8_t* start) {
+
UXR_S uxr;
size_t lengths = 0;
uint8_t* p = start;
@@ -1122,8 +1269,7 @@ leafentry_rest_memsize(uint32_t num_puxrs, uint32_t num_cuxrs, uint8_t* start) {
return rval;
}
-size_t
-leafentry_memsize (LEAFENTRY le) {
+size_t leafentry_memsize (LEAFENTRY le) {
size_t rval = 0;
uint8_t type = le->type;
@@ -1162,13 +1308,11 @@ leafentry_memsize (LEAFENTRY le) {
return rval;
}
-size_t
-leafentry_disksize (LEAFENTRY le) {
+size_t leafentry_disksize (LEAFENTRY le) {
return leafentry_memsize(le);
}
-bool
-le_is_clean(LEAFENTRY le) {
+bool le_is_clean(LEAFENTRY le) {
uint8_t type = le->type;
uint32_t rval;
switch (type) {
@@ -1228,13 +1372,14 @@ int le_latest_is_del(LEAFENTRY le) {
//
-// returns true if the outermost provisional transaction id on the leafentry's stack matches
-// the outermost transaction id in xids
-// It is used to determine if a broadcast commit/abort message (look in ft-ops.c) should be applied to this leafentry
-// If the outermost transactions match, then the broadcast commit/abort should be applied
+// returns true if the outermost provisional transaction id on the leafentry's
+// stack matches the outermost transaction id in xids
+// It is used to determine if a broadcast commit/abort message (look in ft-ops.c)
+// should be applied to this leafentry
+// If the outermost transactions match, then the broadcast commit/abort should
+// be applied
//
-bool
-le_has_xids(LEAFENTRY le, XIDS xids) {
+bool le_has_xids(LEAFENTRY le, XIDS xids) {
//Read num_uxrs
uint32_t num_xids = toku_xids_get_num_xids(xids);
invariant(num_xids > 0); //Disallow checking for having TXNID_NONE
@@ -1245,8 +1390,7 @@ le_has_xids(LEAFENTRY le, XIDS xids) {
return rval;
}
-void*
-le_latest_val_and_len (LEAFENTRY le, uint32_t *len) {
+void* le_latest_val_and_len (LEAFENTRY le, uint32_t *len) {
uint8_t type = le->type;
void *valp;
@@ -1277,8 +1421,7 @@ le_latest_val_and_len (LEAFENTRY le, uint32_t *len) {
if (uxr_is_insert(&uxr)) {
*len = uxr.vallen;
valp = p + (num_cuxrs - 1 + (num_puxrs!=0))*sizeof(uint32_t);
- }
- else {
+ } else {
*len = 0;
valp = NULL;
}
@@ -1295,8 +1438,7 @@ le_latest_val_and_len (LEAFENTRY le, uint32_t *len) {
if (uxr_is_insert(uxr)) {
slow_valp = uxr->valp;
slow_len = uxr->vallen;
- }
- else {
+ } else {
slow_valp = NULL;
slow_len = 0;
}
@@ -1310,8 +1452,7 @@ le_latest_val_and_len (LEAFENTRY le, uint32_t *len) {
}
//DEBUG ONLY can be slow
-void*
-le_latest_val (LEAFENTRY le) {
+void* le_latest_val (LEAFENTRY le) {
ULE_S ule;
le_unpack(&ule, le);
UXR uxr = ule_get_innermost_uxr(&ule);
@@ -1325,8 +1466,7 @@ le_latest_val (LEAFENTRY le) {
}
//needed to be fast for statistics.
-uint32_t
-le_latest_vallen (LEAFENTRY le) {
+uint32_t le_latest_vallen (LEAFENTRY le) {
uint32_t rval;
uint8_t type = le->type;
uint8_t *p;
@@ -1354,8 +1494,7 @@ le_latest_vallen (LEAFENTRY le) {
uxr_unpack_length_and_bit(&uxr, p);
if (uxr_is_insert(&uxr)) {
rval = uxr.vallen;
- }
- else {
+ } else {
rval = 0;
}
break;
@@ -1377,8 +1516,7 @@ le_latest_vallen (LEAFENTRY le) {
return rval;
}
-uint64_t
-le_outermost_uncommitted_xid (LEAFENTRY le) {
+uint64_t le_outermost_uncommitted_xid (LEAFENTRY le) {
uint64_t rval = TXNID_NONE;
uint8_t type = le->type;
@@ -1412,8 +1550,7 @@ le_outermost_uncommitted_xid (LEAFENTRY le) {
//Optimization not required. This is a debug only function.
//Print a leafentry out in human-readable format
-int
-print_klpair (FILE *outf, const void* keyp, uint32_t keylen, LEAFENTRY le) {
+int print_klpair (FILE *outf, const void* keyp, uint32_t keylen, LEAFENTRY le) {
ULE_S ule;
le_unpack(&ule, le);
uint32_t i;
@@ -1444,23 +1581,21 @@ print_klpair (FILE *outf, const void* keyp, uint32_t keylen, LEAFENTRY le) {
return 0;
}
-/////////////////////////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////////////
// This layer of abstraction (ule_xxx) knows the structure of the unpacked
// leafentry and no other structure.
//
// ule constructor
// Note that transaction 0 is explicit in the ule
-static void
-ule_init_empty_ule(ULE ule) {
+static inline void ule_init_empty_ule(ULE ule) {
ule->num_cuxrs = 1;
ule->num_puxrs = 0;
ule->uxrs = ule->uxrs_static;
ule->uxrs[0] = committed_delete;
}
-static inline int32_t
-min_i32(int32_t a, int32_t b) {
+static inline int32_t min_i32(int32_t a, int32_t b) {
int32_t rval = a < b ? a : b;
return rval;
}
@@ -1470,8 +1605,8 @@ min_i32(int32_t a, int32_t b) {
//
// If the leafentry has already been promoted, there is nothing to do.
// We have two transaction stacks (one from message, one from leaf entry).
-// We want to implicitly promote transactions newer than (but not including)
-// the innermost common ancestor (ICA) of the two stacks of transaction ids. We
+// We want to implicitly promote transactions newer than (but not including)
+// the innermost common ancestor (ICA) of the two stacks of transaction ids. We
// know that this is the right thing to do because each transaction with an id
// greater (later) than the ICA must have been either committed or aborted.
// If it was aborted then we would have seen an abort message and removed the
@@ -1483,8 +1618,7 @@ min_i32(int32_t a, int32_t b) {
// record of ICA, keeping the transaction id of the ICA.
// Outermost xid is zero for both ule and xids<>
//
-static void
-ule_do_implicit_promotions(ULE ule, XIDS xids) {
+static void ule_do_implicit_promotions(ULE ule, XIDS xids) {
//Optimization for (most) common case.
//No commits necessary if everything is already committed.
if (ule->num_puxrs > 0) {
@@ -1506,17 +1640,16 @@ ule_do_implicit_promotions(ULE ule, XIDS xids) {
if (ica_index < ule->num_cuxrs) {
invariant(ica_index == ule->num_cuxrs - 1);
ule_promote_provisional_innermost_to_committed(ule);
- }
- else if (ica_index < ule->num_cuxrs + ule->num_puxrs - 1) {
- //If ica is the innermost uxr in the leafentry, no commits are necessary.
+ } else if (ica_index < ule->num_cuxrs + ule->num_puxrs - 1) {
+ //If ica is the innermost uxr in the leafentry, no commits are
+ //necessary.
ule_promote_provisional_innermost_to_index(ule, ica_index);
}
}
}
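//
// Worked example (hypothetical stacks): leafentry xids are
// {TXNID_NONE, 100, 120, 130} with num_cuxrs == 1 and the message carries
// xids {TXNID_NONE, 100, 140}. The innermost common ancestor is 100
// (ica_index == 1): transactions 120 and 130 must have resolved already,
// so the innermost value is promoted up to index 1, keeping xid 100 as a
// single provisional entry above the committed bottom.
//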
-static void
-ule_promote_provisional_innermost_to_committed(ULE ule) {
+static void ule_promote_provisional_innermost_to_committed(ULE ule) {
//Must be something to promote.
invariant(ule->num_puxrs);
//Take value (or delete flag) from innermost.
@@ -1532,8 +1665,7 @@ ule_promote_provisional_innermost_to_committed(ULE ule) {
ule->num_puxrs = 0; //Discard all provisional uxrs.
if (uxr_is_delete(old_innermost_uxr)) {
ule_push_delete_uxr(ule, true, old_outermost_uncommitted_uxr->xid);
- }
- else {
+ } else {
ule_push_insert_uxr(ule, true,
old_outermost_uncommitted_uxr->xid,
old_innermost_uxr->vallen,
@@ -1541,11 +1673,13 @@ ule_promote_provisional_innermost_to_committed(ULE ule) {
}
}
-static void
-ule_try_promote_provisional_outermost(ULE ule, TXNID oldest_possible_live_xid) {
+static void ule_try_promote_provisional_outermost(
+ ULE ule,
+ TXNID oldest_possible_live_xid) {
// Effect: If there is a provisional record whose outermost xid is older than
// the oldest known referenced_xid, promote it to committed.
- if (ule->num_puxrs > 0 && ule_get_xid(ule, ule->num_cuxrs) < oldest_possible_live_xid) {
+ if (ule->num_puxrs > 0 &&
+ ule_get_xid(ule, ule->num_cuxrs) < oldest_possible_live_xid) {
ule_promote_provisional_innermost_to_committed(ule);
}
}
@@ -1553,8 +1687,9 @@ ule_try_promote_provisional_outermost(ULE ule, TXNID oldest_possible_live_xid) {
// Purpose is to promote the value (and type) of the innermost transaction
// record to the uxr at the specified index (keeping the txnid of the uxr at
// specified index.)
-static void
-ule_promote_provisional_innermost_to_index(ULE ule, uint32_t index) {
+static void ule_promote_provisional_innermost_to_index(
+ ULE ule,
+ uint32_t index) {
//Must not promote to committed portion of stack.
invariant(index >= ule->num_cuxrs);
//Must actually be promoting.
@@ -1562,15 +1697,17 @@ ule_promote_provisional_innermost_to_index(ULE ule, uint32_t index) {
UXR old_innermost_uxr = ule_get_innermost_uxr(ule);
assert(!uxr_is_placeholder(old_innermost_uxr));
TXNID new_innermost_xid = ule->uxrs[index].xid;
- ule->num_puxrs = index - ule->num_cuxrs; //Discard old uxr at index (and everything inner)
+ //Discard old uxr at index (and everything inner)
+ ule->num_puxrs = index - ule->num_cuxrs;
if (uxr_is_delete(old_innermost_uxr)) {
ule_push_delete_uxr(ule, false, new_innermost_xid);
- }
- else {
- ule_push_insert_uxr(ule, false,
- new_innermost_xid,
- old_innermost_uxr->vallen,
- old_innermost_uxr->valp);
+ } else {
+ ule_push_insert_uxr(
+ ule,
+ false,
+ new_innermost_xid,
+ old_innermost_uxr->vallen,
+ old_innermost_uxr->valp);
}
}
@@ -1581,19 +1718,60 @@ ule_promote_provisional_innermost_to_index(ULE ule, uint32_t index) {
// Purpose is to apply an insert message to this leafentry:
-static void
-ule_apply_insert(ULE ule, XIDS xids, uint32_t vallen, void * valp) {
+static inline int64_t ule_apply_insert_no_overwrite(
+ ULE ule,
+ XIDS xids,
+ uint32_t vallen,
+ void* valp) {
+
+ invariant(IS_VALID_LEN(vallen));
+ int64_t retval = 0;
+ UXR old_innermost_uxr = ule_get_innermost_uxr(ule);
+ // If something exists, don't overwrite
+ if (uxr_is_insert(old_innermost_uxr)) {
+ retval = -1;
+ return retval;
+ }
ule_prepare_for_new_uxr(ule, xids);
- TXNID this_xid = toku_xids_get_innermost_xid(xids); // xid of transaction doing this insert
+ // xid of transaction doing this insert
+ TXNID this_xid = toku_xids_get_innermost_xid(xids);
ule_push_insert_uxr(ule, this_xid == TXNID_NONE, this_xid, vallen, valp);
+ return retval;
+}
+
+// Purpose is to apply an insert message to this leafentry:
+static inline int64_t ule_apply_insert(
+ ULE ule,
+ XIDS xids,
+ uint32_t vallen,
+ void* valp) {
+
+ invariant(IS_VALID_LEN(vallen));
+ int64_t retval = 0;
+ UXR old_innermost_uxr = ule_get_innermost_uxr(ule);
+ // If something exists, overwrite
+ if (uxr_is_insert(old_innermost_uxr)) {
+ retval = -1;
+ }
+ ule_prepare_for_new_uxr(ule, xids);
+ // xid of transaction doing this insert
+ TXNID this_xid = toku_xids_get_innermost_xid(xids);
+ ule_push_insert_uxr(ule, this_xid == TXNID_NONE, this_xid, vallen, valp);
+ return retval;
}
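//
// Return-value sketch for the two insert paths (assuming the caller
// counts an insert optimistically as +1 row):
//   fresh insert                 ->  0 (net +1 row)
//   insert over an existing row  -> -1 (net 0: it became an update)
//   no-overwrite over a row      -> -1 and the ule is left untouched
//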
// Purpose is to apply a delete message to this leafentry:
-static void
-ule_apply_delete(ULE ule, XIDS xids) {
+static inline int64_t ule_apply_delete(ULE ule, XIDS xids) {
+ int64_t retval = 0;
+ UXR old_innermost_uxr = ule_get_innermost_uxr(ule);
+ if (FT_UNLIKELY(uxr_is_delete(old_innermost_uxr))) {
+ retval = 1;
+ }
ule_prepare_for_new_uxr(ule, xids);
- TXNID this_xid = toku_xids_get_innermost_xid(xids); // xid of transaction doing this delete
+ // xid of transaction doing this delete
+ TXNID this_xid = toku_xids_get_innermost_xid(xids);
ule_push_delete_uxr(ule, this_xid == TXNID_NONE, this_xid);
+ return retval;
}
// First, discard anything done earlier by this transaction.
@@ -1601,20 +1779,18 @@ ule_apply_delete(ULE ule, XIDS xids) {
// outer transactions that are newer than the newest (innermost) transaction in
// the leafentry. If so, record those outer transactions in the leafentry
// with placeholders.
-static void
-ule_prepare_for_new_uxr(ULE ule, XIDS xids) {
+static inline void ule_prepare_for_new_uxr(ULE ule, XIDS xids) {
TXNID this_xid = toku_xids_get_innermost_xid(xids);
//This is for LOADER_USE_PUTS or transactionless environment
//where messages use XIDS of 0
if (this_xid == TXNID_NONE && ule_get_innermost_xid(ule) == TXNID_NONE) {
ule_remove_innermost_uxr(ule);
- }
- // case where we are transactional and xids stack matches ule stack
- else if (ule->num_puxrs > 0 && ule_get_innermost_xid(ule) == this_xid) {
+ } else if (ule->num_puxrs > 0 && ule_get_innermost_xid(ule) == this_xid) {
+ // case where we are transactional and xids stack matches ule stack
ule_remove_innermost_uxr(ule);
- }
- // case where we are transactional and xids stack does not match ule stack
- else {
+ } else {
+ // case where we are transactional and xids stack does not match ule
+ // stack
ule_add_placeholders(ule, xids);
}
}
@@ -1625,10 +1801,12 @@ ule_prepare_for_new_uxr(ULE ule, XIDS xids) {
// then there is nothing to be done.
// If this transaction did modify the leafentry, then undo whatever it did (by
// removing the transaction record (uxr) and any placeholders underneath.
-// Remember, the innermost uxr can only be an insert or a delete, not a placeholder.
-static void
-ule_apply_abort(ULE ule, XIDS xids) {
- TXNID this_xid = toku_xids_get_innermost_xid(xids); // xid of transaction doing this abort
+// Remember, the innermost uxr can only be an insert or a delete, not a
+// placeholder.
+static inline int64_t ule_apply_abort(ULE ule, XIDS xids) {
+ int64_t retval = 0;
+ // xid of transaction doing this abort
+ TXNID this_xid = toku_xids_get_innermost_xid(xids);
invariant(this_xid!=TXNID_NONE);
UXR innermost = ule_get_innermost_uxr(ule);
// need to check for provisional entries in ule, otherwise
@@ -1636,15 +1814,34 @@ ule_apply_abort(ULE ule, XIDS xids) {
// in a bug where the most recently committed has same xid
// as the XID's innermost
if (ule->num_puxrs > 0 && innermost->xid == this_xid) {
+ // if this is a rollback of a delete of a new ule, return 0
+ // (i.e. double delete)
+ if (uxr_is_delete(innermost)) {
+ if (ule->num_puxrs == 1 && ule->num_cuxrs == 1 &&
+ uxr_is_delete(&(ule->uxrs[0]))) {
+ retval = 0;
+ } else {
+ retval = 1;
+ }
+ } else if (uxr_is_insert(innermost)) {
+ if (ule->num_puxrs == 1 && ule->num_cuxrs == 1 &&
+ uxr_is_insert(&(ule->uxrs[0]))) {
+ retval = 0;
+ } else {
+ retval = -1;
+ }
+ }
+ // if this is a rollback of an insert of an existing ule, return 0
+ // (i.e. double insert)
invariant(ule->num_puxrs>0);
ule_remove_innermost_uxr(ule);
ule_remove_innermost_placeholders(ule);
}
invariant(ule->num_cuxrs > 0);
+ return retval;
}
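//
// Return-value sketch (hypothetical stacks): aborting xid 7 on
// {committed insert, provisional delete by 7} returns +1 (the delete is
// undone and the row is visible again); the same abort over a lone
// {committed delete} returns 0 (nothing was ever visible). The insert
// cases mirror this with -1 and 0.
//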
-static void
-ule_apply_broadcast_commit_all (ULE ule) {
+static void ule_apply_broadcast_commit_all (ULE ule) {
ule->uxrs[0] = ule->uxrs[ule->num_puxrs + ule->num_cuxrs - 1];
ule->uxrs[0].xid = TXNID_NONE;
ule->num_puxrs = 0;
@@ -1657,9 +1854,11 @@ ule_apply_broadcast_commit_all (ULE ule) {
// then there is nothing to be done.
// Also, if there are no uncommitted transaction records there is nothing to do.
// If this transaction did modify the leafentry, then promote whatever it did.
-// Remember, the innermost uxr can only be an insert or a delete, not a placeholder.
+// Remember, the innermost uxr can only be an insert or a delete, not a
+// placeholder.
void ule_apply_commit(ULE ule, XIDS xids) {
- TXNID this_xid = toku_xids_get_innermost_xid(xids); // xid of transaction committing
+ // xid of transaction committing
+ TXNID this_xid = toku_xids_get_innermost_xid(xids);
invariant(this_xid!=TXNID_NONE);
// need to check for provisional entries in ule, otherwise
// there is nothing to abort, not checking this may result
@@ -1668,16 +1867,19 @@ void ule_apply_commit(ULE ule, XIDS xids) {
if (ule->num_puxrs > 0 && ule_get_innermost_xid(ule) == this_xid) {
// 3 cases:
//1- it's already a committed value (do nothing) (num_puxrs==0)
- //2- it's provisional but root level (make a new committed value (num_puxrs==1)
+ //2- it's provisional but root level (make a new committed value
+ // (num_puxrs==1)
//3- it's provisional and not root (promote); (num_puxrs>1)
if (ule->num_puxrs == 1) { //new committed value
ule_promote_provisional_innermost_to_committed(ule);
- }
- else if (ule->num_puxrs > 1) {
- //ule->uxrs[ule->num_cuxrs+ule->num_puxrs-1] is the innermost (this transaction)
+ } else if (ule->num_puxrs > 1) {
+ //ule->uxrs[ule->num_cuxrs+ule->num_puxrs-1] is the innermost
+ // (this transaction)
//ule->uxrs[ule->num_cuxrs+ule->num_puxrs-2] is the 2nd innermost
//We want to promote the innermost uxr one level out.
- ule_promote_provisional_innermost_to_index(ule, ule->num_cuxrs+ule->num_puxrs-2);
+ ule_promote_provisional_innermost_to_index(
+ ule,
+ ule->num_cuxrs+ule->num_puxrs-2);
}
}
}
@@ -1687,14 +1889,17 @@ void ule_apply_commit(ULE ule, XIDS xids) {
//
// Purpose is to record an insert for this transaction (and set type correctly).
-static void
-ule_push_insert_uxr(ULE ule, bool is_committed, TXNID xid, uint32_t vallen, void * valp) {
- UXR uxr = ule_get_first_empty_uxr(ule);
+static inline void ule_push_insert_uxr(
+ ULE ule,
+ bool is_committed,
+ TXNID xid,
+ uint32_t vallen,
+ void* valp) {
+
+ UXR uxr = ule_get_first_empty_uxr(ule);
if (is_committed) {
invariant(ule->num_puxrs==0);
ule->num_cuxrs++;
- }
- else {
+ } else {
ule->num_puxrs++;
}
uxr->xid = xid;
@@ -1706,23 +1911,21 @@ ule_push_insert_uxr(ULE ule, bool is_committed, TXNID xid, uint32_t vallen, void
// Purpose is to record a delete for this transaction. If this transaction
// is the root transaction, then truly delete the leafentry by marking the
// ule as empty.
-static void
-ule_push_delete_uxr(ULE ule, bool is_committed, TXNID xid) {
+static inline void ule_push_delete_uxr(ULE ule, bool is_committed, TXNID xid) {
UXR uxr = ule_get_first_empty_uxr(ule);
if (is_committed) {
invariant(ule->num_puxrs==0);
ule->num_cuxrs++;
- }
- else {
+ } else {
ule->num_puxrs++;
}
uxr->xid = xid;
uxr->type = XR_DELETE;
}
-// Purpose is to push a placeholder on the top of the leafentry's transaction stack.
-static void
-ule_push_placeholder_uxr(ULE ule, TXNID xid) {
+// Purpose is to push a placeholder on the top of the leafentry's transaction
+// stack.
+static inline void ule_push_placeholder_uxr(ULE ule, TXNID xid) {
invariant(ule->num_cuxrs>0);
UXR uxr = ule_get_first_empty_uxr(ule);
uxr->xid = xid;
@@ -1731,16 +1934,14 @@ ule_push_placeholder_uxr(ULE ule, TXNID xid) {
}
// Return innermost transaction record.
-static UXR
-ule_get_innermost_uxr(ULE ule) {
+static inline UXR ule_get_innermost_uxr(ULE ule) {
invariant(ule->num_cuxrs > 0);
UXR rval = &(ule->uxrs[ule->num_cuxrs + ule->num_puxrs - 1]);
return rval;
}
// Return first empty transaction record
-static UXR
-ule_get_first_empty_uxr(ULE ule) {
+static inline UXR ule_get_first_empty_uxr(ULE ule) {
invariant(ule->num_puxrs < MAX_TRANSACTION_RECORDS-1);
UXR rval = &(ule->uxrs[ule->num_cuxrs+ule->num_puxrs]);
return rval;
@@ -1748,14 +1949,12 @@ ule_get_first_empty_uxr(ULE ule) {
// Remove the innermost transaction (pop the leafentry's stack), undoing
// whatever the innermost transaction did.
-static void
-ule_remove_innermost_uxr(ULE ule) {
+static inline void ule_remove_innermost_uxr(ULE ule) {
//It is possible to remove the committed delete at first insert.
invariant(ule->num_cuxrs > 0);
if (ule->num_puxrs) {
ule->num_puxrs--;
- }
- else {
+ } else {
//This is for LOADER_USE_PUTS or transactionless environment
//where messages use XIDS of 0
invariant(ule->num_cuxrs == 1);
@@ -1764,14 +1963,12 @@ ule_remove_innermost_uxr(ULE ule) {
}
}
-static TXNID
-ule_get_innermost_xid(ULE ule) {
+static inline TXNID ule_get_innermost_xid(ULE ule) {
TXNID rval = ule_get_xid(ule, ule->num_cuxrs + ule->num_puxrs - 1);
return rval;
}
-static TXNID
-ule_get_xid(ULE ule, uint32_t index) {
+static inline TXNID ule_get_xid(ULE ule, uint32_t index) {
invariant(index < ule->num_cuxrs + ule->num_puxrs);
TXNID rval = ule->uxrs[index].xid;
return rval;
@@ -1781,8 +1978,7 @@ ule_get_xid(ULE ule, uint32_t index) {
// innermost recorded transactions), if necessary. This function is idempotent.
// It makes no logical sense for a placeholder to be the innermost recorded
// transaction record, so placeholders at the top of the stack are not legal.
-static void
-ule_remove_innermost_placeholders(ULE ule) {
+static void ule_remove_innermost_placeholders(ULE ule) {
UXR uxr = ule_get_innermost_uxr(ule);
while (uxr_is_placeholder(uxr)) {
invariant(ule->num_puxrs>0);
@@ -1796,8 +1992,7 @@ ule_remove_innermost_placeholders(ULE ule) {
// Note, after placeholders are added, an insert or delete will be added. This
// function temporarily leaves the transaction stack in an illegal state (having
// placeholders on top).
-static void
-ule_add_placeholders(ULE ule, XIDS xids) {
+static void ule_add_placeholders(ULE ule, XIDS xids) {
//Placeholders can be placed on top of the committed uxr.
invariant(ule->num_cuxrs > 0);
@@ -1819,47 +2014,40 @@ ule_add_placeholders(ULE ule, XIDS xids) {
}
}
-uint64_t
-ule_num_uxrs(ULE ule) {
+uint64_t ule_num_uxrs(ULE ule) {
return ule->num_cuxrs + ule->num_puxrs;
}
-UXR
-ule_get_uxr(ULE ule, uint64_t ith) {
+UXR ule_get_uxr(ULE ule, uint64_t ith) {
invariant(ith < ule_num_uxrs(ule));
return &ule->uxrs[ith];
}
-uint32_t
-ule_get_num_committed(ULE ule) {
+uint32_t ule_get_num_committed(ULE ule) {
return ule->num_cuxrs;
}
-uint32_t
-ule_get_num_provisional(ULE ule) {
+uint32_t ule_get_num_provisional(ULE ule) {
return ule->num_puxrs;
}
-int
-ule_is_committed(ULE ule, uint64_t ith) {
+int ule_is_committed(ULE ule, uint64_t ith) {
invariant(ith < ule_num_uxrs(ule));
return ith < ule->num_cuxrs;
}
-int
-ule_is_provisional(ULE ule, uint64_t ith) {
+int ule_is_provisional(ULE ule, uint64_t ith) {
invariant(ith < ule_num_uxrs(ule));
return ith >= ule->num_cuxrs;
}
// return size of data for innermost uxr, the size of val
-uint32_t
-ule_get_innermost_numbytes(ULE ule, uint32_t keylen) {
+uint32_t ule_get_innermost_numbytes(ULE ule, uint32_t keylen) {
uint32_t rval;
UXR uxr = ule_get_innermost_uxr(ule);
- if (uxr_is_delete(uxr))
+ if (uxr_is_delete(uxr)) {
rval = 0;
- else {
+ } else {
rval = uxr_get_vallen(uxr) + keylen;
}
return rval;
@@ -1870,68 +2058,65 @@ ule_get_innermost_numbytes(ULE ule, uint32_t keylen) {
// This layer of abstraction (uxr_xxx) understands uxr and nothing else.
//
-static inline bool
-uxr_type_is_insert(uint8_t type) {
+static inline bool uxr_type_is_insert(uint8_t type) {
bool rval = (bool)(type == XR_INSERT);
return rval;
}
-bool
-uxr_is_insert(UXR uxr) {
+bool uxr_is_insert(UXR uxr) {
return uxr_type_is_insert(uxr->type);
}
-static inline bool
-uxr_type_is_delete(uint8_t type) {
+static inline bool uxr_type_is_delete(uint8_t type) {
bool rval = (bool)(type == XR_DELETE);
return rval;
}
-bool
-uxr_is_delete(UXR uxr) {
+bool uxr_is_delete(UXR uxr) {
return uxr_type_is_delete(uxr->type);
}
-static inline bool
-uxr_type_is_placeholder(uint8_t type) {
+static inline bool uxr_type_is_placeholder(uint8_t type) {
bool rval = (bool)(type == XR_PLACEHOLDER);
return rval;
}
-bool
-uxr_is_placeholder(UXR uxr) {
+bool uxr_is_placeholder(UXR uxr) {
return uxr_type_is_placeholder(uxr->type);
}
-void *
-uxr_get_val(UXR uxr) {
+void* uxr_get_val(UXR uxr) {
return uxr->valp;
}
-uint32_t
-uxr_get_vallen(UXR uxr) {
+uint32_t uxr_get_vallen(UXR uxr) {
return uxr->vallen;
}
-TXNID
-uxr_get_txnid(UXR uxr) {
+TXNID uxr_get_txnid(UXR uxr) {
return uxr->xid;
}
-static int
-le_iterate_get_accepted_index(TXNID *xids, uint32_t *index, uint32_t num_xids, LE_ITERATE_CALLBACK f, TOKUTXN context, bool top_is_provisional) {
+static int le_iterate_get_accepted_index(
+ TXNID* xids,
+ uint32_t* index,
+ uint32_t num_xids,
+ LE_ITERATE_CALLBACK f,
+ TOKUTXN context,
+ bool top_is_provisional) {
+
uint32_t i;
int r = 0;
- // if this for loop does not return anything, we return num_xids-1, which should map to T_0
+ // if this for loop does not return anything, we return num_xids-1, which
+ // should map to T_0
for (i = 0; i < num_xids - 1; i++) {
TXNID xid = toku_dtoh64(xids[i]);
r = f(xid, context, (i == 0 && top_is_provisional));
if (r==TOKUDB_ACCEPT) {
r = 0;
break; //or goto something
- }
- else if (r!=0) {
+ } else if (r!=0) {
break;
}
}
@@ -1940,8 +2125,7 @@ le_iterate_get_accepted_index(TXNID *xids, uint32_t *index, uint32_t num_xids, L
}
#if ULE_DEBUG
-static void
-ule_verify_xids(ULE ule, uint32_t interesting, TXNID *xids) {
+static void ule_verify_xids(ULE ule, uint32_t interesting, TXNID *xids) {
int has_p = (ule->num_puxrs != 0);
invariant(ule->num_cuxrs + has_p == interesting);
uint32_t i;
@@ -1953,21 +2137,29 @@ ule_verify_xids(ULE ule, uint32_t interesting, TXNID *xids) {
#endif
//
-// Iterates over "possible" TXNIDs in a leafentry's stack, until one is accepted by 'f'. If the value
-// associated with the accepted TXNID is not an insert, then set *is_emptyp to true, otherwise false
+// Iterates over "possible" TXNIDs in a leafentry's stack, until one is
+// accepted by 'f'. If the value associated with the accepted TXNID is not an
+// insert, then set *is_emptyp to true, otherwise false
// The "possible" TXNIDs are:
-// if provisionals exist, then the first possible TXNID is the outermost provisional.
-// The next possible TXNIDs are the committed TXNIDs, from most recently committed to T_0.
-// If provisionals exist, and the outermost provisional is accepted by 'f',
+// If provisionals exist, then the first possible TXNID is the outermost
+// provisional.
+// The next possible TXNIDs are the committed TXNIDs, from most recently
+// committed to T_0.
+// If provisionals exist, and the outermost provisional is accepted by 'f',
// the associated value checked is the innermost provisional's value.
// Parameters:
// le - leafentry to iterate over
-// f - callback function that checks if a TXNID in le is accepted, and its associated value should be examined.
+// f - callback function that checks if a TXNID in le is accepted, and its
+// associated value should be examined.
// is_delp - output parameter that returns answer
// context - parameter for f
//
-static int
-le_iterate_is_del(LEAFENTRY le, LE_ITERATE_CALLBACK f, bool *is_delp, TOKUTXN context) {
+static int le_iterate_is_del(
+ LEAFENTRY le,
+ LE_ITERATE_CALLBACK f,
+ bool* is_delp,
+ TOKUTXN context) {
+
#if ULE_DEBUG
ULE_S ule;
le_unpack(&ule, le);
@@ -2002,8 +2194,17 @@ le_iterate_is_del(LEAFENTRY le, LE_ITERATE_CALLBACK f, bool *is_delp, TOKUTXN co
#if ULE_DEBUG
ule_verify_xids(&ule, num_interesting, xids);
#endif
- r = le_iterate_get_accepted_index(xids, &index, num_interesting, f, context, (num_puxrs != 0));
- if (r!=0) goto cleanup;
+ r =
+ le_iterate_get_accepted_index(
+ xids,
+ &index,
+ num_interesting,
+ f,
+ context,
+ (num_puxrs != 0));
+ if (r != 0) {
+ goto cleanup;
+ }
invariant(index < num_interesting);
//Skip TXNIDs
@@ -2017,7 +2218,9 @@ le_iterate_is_del(LEAFENTRY le, LE_ITERATE_CALLBACK f, bool *is_delp, TOKUTXN co
#if ULE_DEBUG
{
uint32_t has_p = (ule.num_puxrs != 0);
- uint32_t ule_index = (index==0) ? ule.num_cuxrs + ule.num_puxrs - 1 : ule.num_cuxrs - 1 + has_p - index;
+ uint32_t ule_index = (index==0) ?
+ ule.num_cuxrs + ule.num_puxrs - 1 :
+ ule.num_cuxrs - 1 + has_p - index;
UXR uxr = ule.uxrs + ule_index;
invariant(uxr_is_delete(uxr) == is_del);
}
@@ -2034,7 +2237,11 @@ cleanup:
return r;
}
-static int le_iterate_read_committed_callback(TXNID txnid, TOKUTXN txn, bool is_provisional UU()) {
+static int le_iterate_read_committed_callback(
+ TXNID txnid,
+ TOKUTXN txn,
+ bool is_provisional UU()) {
+
if (is_provisional) {
return toku_txn_reads_txnid(txnid, txn, is_provisional);
}
@@ -2058,33 +2265,40 @@ int le_val_is_del(LEAFENTRY le, enum cursor_read_type read_type, TOKUTXN txn) {
txn
);
rval = is_del;
- }
- else if (read_type == C_READ_ANY) {
+ } else if (read_type == C_READ_ANY) {
rval = le_latest_is_del(le);
- }
- else {
+ } else {
invariant(false);
}
return rval;
}
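A hedged usage sketch of the dispatch above (the caller is hypothetical; C_READ_SNAPSHOT stands in for any snapshot-style read type):

    static bool row_hidden(LEAFENTRY le, TOKUTXN txn) {
        enum cursor_read_type rt = (txn != NULL) ? C_READ_SNAPSHOT : C_READ_ANY;
        return le_val_is_del(le, rt, txn) != 0;
    }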
//
-// Iterates over "possible" TXNIDs in a leafentry's stack, until one is accepted by 'f'. Set
-// valpp and vallenp to value and length associated with accepted TXNID
+// Iterates over "possible" TXNIDs in a leafentry's stack, until one is accepted
+// by 'f'. Set valpp and vallenp to value and length associated with accepted
+// TXNID
// The "possible" TXNIDs are:
-// if provisionals exist, then the first possible TXNID is the outermost provisional.
-// The next possible TXNIDs are the committed TXNIDs, from most recently committed to T_0.
-// If provisionals exist, and the outermost provisional is accepted by 'f',
+// If provisionals exist, then the first possible TXNID is the outermost
+// provisional.
+// The next possible TXNIDs are the committed TXNIDs, from most recently
+// committed to T_0.
+// If provisionals exist, and the outermost provisional is accepted by 'f',
// the associated length value is the innermost provisional's length and value.
// Parameters:
// le - leafentry to iterate over
-// f - callback function that checks if a TXNID in le is accepted, and its associated value should be examined.
+// f - callback function that checks if a TXNID in le is accepted, and its
+// associated value should be examined.
// valpp - output parameter that returns pointer to value
// vallenp - output parameter that returns length of value
// context - parameter for f
//
-int
-le_iterate_val(LEAFENTRY le, LE_ITERATE_CALLBACK f, void** valpp, uint32_t *vallenp, TOKUTXN context) {
+int le_iterate_val(
+ LEAFENTRY le,
+ LE_ITERATE_CALLBACK f,
+ void** valpp,
+ uint32_t* vallenp,
+ TOKUTXN context) {
+
#if ULE_DEBUG
ULE_S ule;
le_unpack(&ule, le);
@@ -2124,8 +2338,17 @@ le_iterate_val(LEAFENTRY le, LE_ITERATE_CALLBACK f, void** valpp, uint32_t *vall
#if ULE_DEBUG
ule_verify_xids(&ule, num_interesting, xids);
#endif
- r = le_iterate_get_accepted_index(xids, &index, num_interesting, f, context, (num_puxrs != 0));
- if (r!=0) goto cleanup;
+ r =
+ le_iterate_get_accepted_index(
+ xids,
+ &index,
+ num_interesting,
+ f,
+ context,
+ (num_puxrs != 0));
+ if (r != 0) {
+ goto cleanup;
+ }
invariant(index < num_interesting);
//Skip TXNIDs
@@ -2158,7 +2381,9 @@ le_iterate_val(LEAFENTRY le, LE_ITERATE_CALLBACK f, void** valpp, uint32_t *vall
#if ULE_DEBUG
{
uint32_t has_p = (ule.num_puxrs != 0);
- uint32_t ule_index = (index==0) ? ule.num_cuxrs + ule.num_puxrs - 1 : ule.num_cuxrs - 1 + has_p - index;
+ uint32_t ule_index = (index==0) ?
+ ule.num_cuxrs + ule.num_puxrs - 1 :
+ ule.num_cuxrs - 1 + has_p - index;
UXR uxr = ule.uxrs + ule_index;
invariant(uxr_is_insert(uxr));
invariant(uxr->vallen == vallen);
@@ -2188,10 +2413,15 @@ cleanup:
return r;
}
-void le_extract_val(LEAFENTRY le,
- // should we return the entire leafentry as the val?
- bool is_leaf_mode, enum cursor_read_type read_type,
- TOKUTXN ttxn, uint32_t *vallen, void **val) {
+void le_extract_val(
+ LEAFENTRY le,
+ // should we return the entire leafentry as the val?
+ bool is_leaf_mode,
+ enum cursor_read_type read_type,
+ TOKUTXN ttxn,
+ uint32_t* vallen,
+ void** val) {
+
if (is_leaf_mode) {
*val = le;
*vallen = leafentry_memsize(le);
@@ -2199,18 +2429,11 @@ void le_extract_val(LEAFENTRY le,
LE_ITERATE_CALLBACK f = (read_type == C_READ_SNAPSHOT) ?
toku_txn_reads_txnid :
le_iterate_read_committed_callback;
- int r = le_iterate_val(
- le,
- f,
- val,
- vallen,
- ttxn
- );
+ int r = le_iterate_val(le, f, val, vallen, ttxn);
lazy_assert_zero(r);
} else if (read_type == C_READ_ANY){
*val = le_latest_val_and_len(le, vallen);
- }
- else {
+ } else {
assert(false);
}
}
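To make the three branches above concrete, a hedged sketch of the call shapes (le and txn are assumed to be in scope):

    uint32_t vallen;
    void* val;
    le_extract_val(le, true, C_READ_ANY, NULL, &vallen, &val);       // whole leafentry
    le_extract_val(le, false, C_READ_SNAPSHOT, txn, &vallen, &val);  // snapshot-visible value
    le_extract_val(le, false, C_READ_ANY, NULL, &vallen, &val);      // latest value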
@@ -2244,9 +2467,9 @@ static_assert(18 == sizeof(leafentry_13), "wrong size");
static_assert(9 == __builtin_offsetof(leafentry_13, u), "wrong offset");
//Requires:
-// Leafentry that ule represents should not be destroyed (is not just all deletes)
-static size_t
-le_memsize_from_ule_13 (ULE ule, LEAFENTRY_13 le) {
+// Leafentry that ule represents should not be destroyed (is not just all
+// deletes)
+static size_t le_memsize_from_ule_13 (ULE ule, LEAFENTRY_13 le) {
uint32_t num_uxrs = ule->num_cuxrs + ule->num_puxrs;
assert(num_uxrs);
size_t rval;
@@ -2257,8 +2480,7 @@ le_memsize_from_ule_13 (ULE ule, LEAFENTRY_13 le) {
+4 //vallen
+le->keylen //actual key
+ule->uxrs[0].vallen; //actual val
- }
- else {
+ } else {
rval = 1 //num_uxrs
+4 //keylen
+le->keylen //actual key
@@ -2276,16 +2498,20 @@ le_memsize_from_ule_13 (ULE ule, LEAFENTRY_13 le) {
return rval;
}
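A worked example of the committed-only branch of the size formula above (the key and value lengths are mine):

    // keylen = 8, vallen = 200:
    //   1 (num_uxrs) + 4 (keylen) + 4 (vallen) + 8 (key) + 200 (val) = 217 bytes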
-//This function is mostly copied from 4.1.1 (which is version 12, same as 13 except that only 13 is upgradable).
-// Note, number of transaction records in version 13 has been replaced by separate counters in version 14 (MVCC),
-// one counter for committed transaction records and one counter for provisional transaction records. When
-// upgrading a version 13 le to version 14, the number of committed transaction records is always set to one (1)
-// and the number of provisional transaction records is set to the original number of transaction records
-// minus one. The bottom transaction record is assumed to be a committed value. (If there is no committed
-// value then the bottom transaction record of version 13 is a committed delete.)
-// This is the only change from the 4.1.1 code. The rest of the leafentry is read as is.
-static void
-le_unpack_13(ULE ule, LEAFENTRY_13 le) {
+// This function is mostly copied from 4.1.1 (which is version 12, same as 13
+// except that only 13 is upgradable).
+// Note: the number of transaction records in version 13 has been replaced by
+// separate counters in version 14 (MVCC), one counter for committed transaction
+// records and one counter for provisional transaction records. When upgrading
+// a version 13 le to version 14, the number of committed transaction records is
+// always set to one (1) and the number of provisional transaction records is
+// set to the original number of transaction records minus one. The bottom
+// transaction record is assumed to be a committed value. (If there is no
+// committed value then the bottom transaction record of version 13 is a
+// committed delete.)
+// This is the only change from the 4.1.1 code. The rest of the leafentry is
+// read as is.
+static void le_unpack_13(ULE ule, LEAFENTRY_13 le) {
//Read num_uxrs
uint8_t num_xrs = le->num_xrs;
assert(num_xrs > 0);
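A minimal sketch of the counter mapping the comment above describes (variable names are mine):

    // Given num_xrs transaction records in a version-13 leafentry:
    uint32_t num_cuxrs = 1;            // bottom record is treated as committed
    uint32_t num_puxrs = num_xrs - 1;  // the remaining records become provisional
    // e.g. num_xrs == 4 upgrades to 1 committed + 3 provisional records.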
@@ -2302,15 +2528,15 @@ le_unpack_13(ULE ule, LEAFENTRY_13 le) {
uint8_t *p;
if (num_xrs == 1) {
//Unpack a 'committed leafentry' (No uncommitted transactions exist)
- ule->uxrs[0].type = XR_INSERT; //Must be or the leafentry would not exist
+ //Must be or the leafentry would not exist
+ ule->uxrs[0].type = XR_INSERT;
ule->uxrs[0].vallen = vallen_of_innermost_insert;
ule->uxrs[0].valp = &le->u.comm.key_val[keylen];
ule->uxrs[0].xid = 0; //Required.
//Set p to immediately after leafentry
p = &le->u.comm.key_val[keylen + vallen_of_innermost_insert];
- }
- else {
+ } else {
//Unpack a 'provisional leafentry' (Uncommitted transactions exist)
//Read in type.
@@ -2337,8 +2563,7 @@ le_unpack_13(ULE ule, LEAFENTRY_13 le) {
//Not innermost, so load the type.
uxr->type = *p;
p += 1;
- }
- else {
+ } else {
//Innermost, load the type previously read from header
uxr->type = innermost_type;
}
@@ -2349,12 +2574,11 @@ le_unpack_13(ULE ule, LEAFENTRY_13 le) {
//Not committed nor outermost uncommitted, so load the xid.
uxr->xid = toku_dtoh64(*(TXNID*)p);
p += 8;
- }
- else if (i == 1) {
- //Outermost uncommitted, load the xid previously read from header
+ } else if (i == 1) {
+ //Outermost uncommitted, load the xid previously read from
+ //header
uxr->xid = xid_outermost_uncommitted;
- }
- else {
+ } else {
// i == 0, committed entry
uxr->xid = 0;
}
@@ -2367,9 +2591,9 @@ le_unpack_13(ULE ule, LEAFENTRY_13 le) {
uxr->valp = p;
p += uxr->vallen;
- }
- else {
- //Innermost insert, load the vallen/valp previously read from header
+ } else {
+ //Innermost insert, load the vallen/valp previously read
+ //from header
uxr->vallen = vallen_of_innermost_insert;
uxr->valp = valp_of_innermost_insert;
found_innermost_insert = true;
@@ -2384,8 +2608,7 @@ le_unpack_13(ULE ule, LEAFENTRY_13 le) {
#endif
}
-size_t
-leafentry_disksize_13(LEAFENTRY_13 le) {
+size_t leafentry_disksize_13(LEAFENTRY_13 le) {
ULE_S ule;
le_unpack_13(&ule, le);
size_t memsize = le_memsize_from_ule_13(&ule, le);
@@ -2393,13 +2616,13 @@ leafentry_disksize_13(LEAFENTRY_13 le) {
return memsize;
}
-int
-toku_le_upgrade_13_14(LEAFENTRY_13 old_leafentry,
- void** keyp,
- uint32_t* keylen,
- size_t *new_leafentry_memorysize,
- LEAFENTRY *new_leafentry_p
- ) {
+int toku_le_upgrade_13_14(
+ LEAFENTRY_13 old_leafentry,
+ void** keyp,
+ uint32_t* keylen,
+ size_t* new_leafentry_memorysize,
+ LEAFENTRY* new_leafentry_p) {
+
ULE_S ule;
int rval;
invariant(old_leafentry);
@@ -2408,23 +2631,23 @@ toku_le_upgrade_13_14(LEAFENTRY_13 old_leafentry,
*keylen = old_leafentry->keylen;
if (old_leafentry->num_xrs == 1) {
*keyp = old_leafentry->u.comm.key_val;
- }
- else {
+ } else {
*keyp = old_leafentry->u.prov.key_val_xrs;
}
// We used to pass NULL for omt and mempool, so that we would use
// malloc instead of a mempool. However after supporting upgrade,
// we need to use mempools and the OMT.
- rval = le_pack(&ule, // create packed leafentry
- nullptr,
- 0, //only matters if we are passing in a bn_data
- nullptr, //only matters if we are passing in a bn_data
- 0, //only matters if we are passing in a bn_data
- 0, //only matters if we are passing in a bn_data
- 0, //only matters if we are passing in a bn_data
- new_leafentry_p,
- nullptr //only matters if we are passing in a bn_data
- );
+ rval =
+ le_pack(
+ &ule, // create packed leafentry
+ nullptr,
+ 0, //only matters if we are passing in a bn_data
+ nullptr, //only matters if we are passing in a bn_data
+ 0, //only matters if we are passing in a bn_data
+ 0, //only matters if we are passing in a bn_data
+ 0, //only matters if we are passing in a bn_data
+ new_leafentry_p,
+ nullptr); //only matters if we are passing in a bn_data
ule_cleanup(&ule);
*new_leafentry_memorysize = leafentry_memsize(*new_leafentry_p);
return rval;
diff --git a/storage/tokudb/PerconaFT/ftcxx/tests/CMakeLists.txt b/storage/tokudb/PerconaFT/ftcxx/tests/CMakeLists.txt
index 8cea16c914d..6f9146ce5b2 100644
--- a/storage/tokudb/PerconaFT/ftcxx/tests/CMakeLists.txt
+++ b/storage/tokudb/PerconaFT/ftcxx/tests/CMakeLists.txt
@@ -2,6 +2,8 @@ include_directories(..)
include_directories(../../src)
include_directories(../../src/tests)
+find_library(JEMALLOC_STATIC_LIBRARY libjemalloc.a)
+
if (BUILD_TESTING)
## reference implementation with simple size-doubling buffer without
## jemalloc size tricks
@@ -24,15 +26,15 @@ if (BUILD_TESTING)
cursor_test
)
set(_testname ${impl}_${test})
- if (with_jemalloc)
+ if (with_jemalloc AND JEMALLOC_STATIC_LIBRARY)
set(_testname ${_testname}_j)
endif ()
add_executable(${_testname} ${test})
- if (with_jemalloc)
+ if (with_jemalloc AND JEMALLOC_STATIC_LIBRARY)
if (APPLE)
- target_link_libraries(${_testname} -Wl,-force_load jemalloc)
+ target_link_libraries(${_testname} -Wl,-force_load ${JEMALLOC_STATIC_LIBRARY})
else ()
- target_link_libraries(${_testname} -Wl,--whole-archive jemalloc -Wl,--no-whole-archive)
+ target_link_libraries(${_testname} -Wl,--whole-archive ${JEMALLOC_STATIC_LIBRARY} -Wl,--no-whole-archive)
endif ()
endif ()
target_link_libraries(${_testname} ${impl})
diff --git a/storage/tokudb/PerconaFT/portability/toku_pthread.h b/storage/tokudb/PerconaFT/portability/toku_pthread.h
index 25cf48dfd8c..84c27736201 100644
--- a/storage/tokudb/PerconaFT/portability/toku_pthread.h
+++ b/storage/tokudb/PerconaFT/portability/toku_pthread.h
@@ -72,15 +72,18 @@ typedef struct toku_mutex_aligned {
toku_mutex_t aligned_mutex __attribute__((__aligned__(64)));
} toku_mutex_aligned_t;
-// Different OSes implement mutexes as different amounts of nested structs.
-// C++ will fill out all missing values with zeroes if you provide at least one zero, but it needs the right amount of nesting.
-#if defined(__FreeBSD__)
-# define ZERO_MUTEX_INITIALIZER {0}
-#elif defined(__APPLE__)
-# define ZERO_MUTEX_INITIALIZER {{0}}
-#else // __linux__, at least
-# define ZERO_MUTEX_INITIALIZER {{{0}}}
-#endif
+// Initializing with {} will fill in a struct with all zeros.
+// But you may also need a pragma to suppress the warnings, as follows
+//
+// #pragma GCC diagnostic push
+// #pragma GCC diagnostic ignored "-Wmissing-field-initializers"
+// toku_mutex_t foo = ZERO_MUTEX_INITIALIZER;
+// #pragma GCC diagnostic pop
+//
+// In general it will be a lot of busy work to make this codebase compile
+// cleanly with -Wmissing-field-initializers
+
+# define ZERO_MUTEX_INITIALIZER {}
#if TOKU_PTHREAD_DEBUG
# define TOKU_MUTEX_INITIALIZER { .pmutex = PTHREAD_MUTEX_INITIALIZER, .owner = 0, .locked = false, .valid = true }
@@ -223,15 +226,9 @@ typedef struct toku_cond {
pthread_cond_t pcond;
} toku_cond_t;
-// Different OSes implement mutexes as different amounts of nested structs.
-// C++ will fill out all missing values with zeroes if you provide at least one zero, but it needs the right amount of nesting.
-#if defined(__FreeBSD__)
-# define ZERO_COND_INITIALIZER {0}
-#elif defined(__APPLE__)
-# define ZERO_COND_INITIALIZER {{0}}
-#else // __linux__, at least
-# define ZERO_COND_INITIALIZER {{{0}}}
-#endif
+// Same considerations as for ZERO_MUTEX_INITIALIZER apply
+#define ZERO_COND_INITIALIZER {}
+
#define TOKU_COND_INITIALIZER {PTHREAD_COND_INITIALIZER}
static inline void
diff --git a/storage/tokudb/PerconaFT/portability/toku_time.h b/storage/tokudb/PerconaFT/portability/toku_time.h
index c476b64a212..11a3f3aa2b9 100644
--- a/storage/tokudb/PerconaFT/portability/toku_time.h
+++ b/storage/tokudb/PerconaFT/portability/toku_time.h
@@ -108,3 +108,13 @@ static inline uint64_t toku_current_time_microsec(void) {
gettimeofday(&t, NULL);
return t.tv_sec * (1UL * 1000 * 1000) + t.tv_usec;
}
+
+// sleep for the given number of microseconds
+static inline void toku_sleep_microsec(uint64_t us) {
+    struct timeval t;
+
+    t.tv_sec = us / 1000000;
+    t.tv_usec = us % 1000000;
+
+    select(0, NULL, NULL, NULL, &t);
+}
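select() with empty fd sets is a portable way to block for sub-second intervals, which is why it is used here instead of sleep()/usleep(). A minimal usage sketch:

    toku_sleep_microsec(1500);  // blocks the calling thread for 1.5 milliseconds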
diff --git a/storage/tokudb/PerconaFT/scripts/run.stress-tests.py b/storage/tokudb/PerconaFT/scripts/run.stress-tests.py
index a8df83a3b55..e983fe8ccd9 100644
--- a/storage/tokudb/PerconaFT/scripts/run.stress-tests.py
+++ b/storage/tokudb/PerconaFT/scripts/run.stress-tests.py
@@ -521,14 +521,16 @@ Test output:
}))
def send_mail(toaddrs, subject, body):
- m = MIMEText(body)
- fromaddr = 'tim@tokutek.com'
- m['From'] = fromaddr
- m['To'] = ', '.join(toaddrs)
- m['Subject'] = subject
- s = SMTP('192.168.1.114')
- s.sendmail(fromaddr, toaddrs, str(m))
- s.quit()
+ # m = MIMEText(body)
+ # fromaddr = 'dev-private@percona.com'
+ # m['From'] = fromaddr
+ # m['To'] = ', '.join(toaddrs)
+ # m['Subject'] = subject
+ # s = SMTP('192.168.1.114')
+ # s.sendmail(fromaddr, toaddrs, str(m))
+ # s.quit()
+    info(subject)
+    info(body)
def update(tokudb):
info('Updating from git.')
@@ -554,12 +556,12 @@ def rebuild(tokudb, builddir, tokudb_data, cc, cxx, tests):
env=newenv,
cwd=builddir)
if r != 0:
- send_mail(['leif@tokutek.com'], 'Stress tests on %s failed to build.' % gethostname(), '')
+ send_mail(['dev-private@percona.com'], 'Stress tests on %s failed to build.' % gethostname(), '')
error('Building the tests failed.')
sys.exit(r)
r = call(['make', '-j8'], cwd=builddir)
if r != 0:
- send_mail(['leif@tokutek.com'], 'Stress tests on %s failed to build.' % gethostname(), '')
+ send_mail(['dev-private@percona.com'], 'Stress tests on %s failed to build.' % gethostname(), '')
error('Building the tests failed.')
sys.exit(r)
@@ -671,7 +673,7 @@ def main(opts):
sys.exit(0)
except Exception, e:
exception('Unhandled exception caught in main.')
- send_mail(['leif@tokutek.com'], 'Stress tests caught unhandled exception in main, on %s' % gethostname(), format_exc())
+ send_mail(['dev-private@percona.com'], 'Stress tests caught unhandled exception in main, on %s' % gethostname(), format_exc())
raise e
if __name__ == '__main__':
@@ -786,7 +788,7 @@ if __name__ == '__main__':
if not opts.send_emails:
opts.email = None
elif len(opts.email) == 0:
- opts.email.append('tokueng@tokutek.com')
+ opts.email.append('dev-private@percona.com')
if opts.debug:
logging.basicConfig(level=logging.DEBUG)
diff --git a/storage/tokudb/PerconaFT/src/export.map b/storage/tokudb/PerconaFT/src/export.map
index 3f2c7569ea4..fc2be5f41a5 100644
--- a/storage/tokudb/PerconaFT/src/export.map
+++ b/storage/tokudb/PerconaFT/src/export.map
@@ -82,6 +82,7 @@
toku_test_db_redirect_dictionary;
toku_test_get_latest_lsn;
toku_test_get_checkpointing_user_data_status;
+ toku_set_test_txn_sync_callback;
toku_indexer_set_test_only_flags;
toku_increase_last_xid;
diff --git a/storage/tokudb/PerconaFT/src/indexer-undo-do.cc b/storage/tokudb/PerconaFT/src/indexer-undo-do.cc
index b93429407eb..8d0b080b9fe 100644
--- a/storage/tokudb/PerconaFT/src/indexer-undo-do.cc
+++ b/storage/tokudb/PerconaFT/src/indexer-undo-do.cc
@@ -313,7 +313,7 @@ indexer_undo_do_provisional(DB_INDEXER *indexer, DB *hotdb, struct ule_prov_info
break;
if (outermost_xid_state != TOKUTXN_LIVE && xrindex > num_committed) {
- // if the outermost is not live, then the inner state must be retired. thats the way that the txn API works.
+ // If the outermost is not live, then the inner state must be retired. That's the way that the txn API works.
assert(this_xid_state == TOKUTXN_RETIRED);
}
diff --git a/storage/tokudb/PerconaFT/src/tests/CMakeLists.txt b/storage/tokudb/PerconaFT/src/tests/CMakeLists.txt
index 70977a9dfda..47f6aa44a75 100644
--- a/storage/tokudb/PerconaFT/src/tests/CMakeLists.txt
+++ b/storage/tokudb/PerconaFT/src/tests/CMakeLists.txt
@@ -53,7 +53,7 @@ if(BUILD_TESTING OR BUILD_SRC_TESTS)
target_link_libraries(test-5138.tdb ${LIBTOKUDB}_static z ${LIBTOKUPORTABILITY}_static ${CMAKE_THREAD_LIBS_INIT} ${EXTRA_SYSTEM_LIBS})
add_space_separated_property(TARGET test-5138.tdb COMPILE_FLAGS -fvisibility=hidden)
add_ydb_test(test-5138.tdb)
-
+ add_ydb_test(rollback-inconsistency.tdb)
foreach(bin ${tdb_bins})
get_filename_component(base ${bin} NAME_WE)
diff --git a/storage/tokudb/PerconaFT/src/tests/rollback-inconsistency.cc b/storage/tokudb/PerconaFT/src/tests/rollback-inconsistency.cc
new file mode 100644
index 00000000000..f8099c7a639
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/rollback-inconsistency.cc
@@ -0,0 +1,161 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+
+// Insert enough rows with a child txn and then force an eviction to verify that
+// the rollback log node is in a valid state.
+// The test fails without the fix (and of course passes with it).
+// The test basically simulates George's test script.
+
+
+static void
+populate_table(int start, int end, DB_TXN * parent, DB_ENV * env, DB * db) {
+ DB_TXN *txn = NULL;
+ int r = env->txn_begin(env, parent, &txn, 0); assert_zero(r);
+ for (int i = start; i < end; i++) {
+ int k = htonl(i);
+ char kk[4];
+ char str[220];
+ memset(kk, 0, sizeof kk);
+ memcpy(kk, &k, sizeof k);
+ memset(str,'a', sizeof str);
+ DBT key = { .data = kk, .size = sizeof kk };
+ DBT val = { .data = str, .size = sizeof str };
+ r = db->put(db, txn, &key, &val, 0);
+ assert_zero(r);
+ }
+ r = txn->commit(txn, 0);
+ assert_zero(r);
+}
+
+static void
+populate_and_test(DB_ENV *env, DB *db) {
+ int r;
+ DB_TXN *parent = NULL;
+ r = env->txn_begin(env, NULL, &parent, 0); assert_zero(r);
+
+ populate_table(0, 128, parent, env, db);
+
+    //We know the eviction is going to happen here: the rollback log node of the
+    //parent txn is evicted due to the extremely low cachesize.
+ populate_table(128, 256, parent, env, db);
+
+    //Again eviction due to memory pressure. 256 rows is the point at which the
+    //rollback log spills. The spilled node will be written back but will not be
+    //dirtied by including rollback nodes from the child txn (in which case the
+    //bug would be bypassed).
+ populate_table(256, 512, parent, env, db);
+
+ r = parent->abort(parent); assert_zero(r);
+
+    //try to search for a key in the lost range
+ int k = htonl(200);
+ char kk[4];
+ memset(kk, 0, sizeof kk);
+ memcpy(kk, &k, sizeof k);
+ DBT key = { .data = kk, .size = sizeof kk };
+ DBT val;
+ r = db->get(db, NULL, &key, &val, 0);
+ assert(r==DB_NOTFOUND);
+
+}
+
+static void
+run_test(void) {
+ int r;
+ DB_ENV *env = NULL;
+ r = db_env_create(&env, 0);
+ assert_zero(r);
+ env->set_errfile(env, stderr);
+
+    //set the cachetable size to 64k
+ uint32_t cachesize = 64*1024;
+ r = env->set_cachesize(env, 0, cachesize, 1);
+ assert_zero(r);
+
+    //set the log write block size to 4k so the rollback log nodes spill in
+    //accordance with the node size
+ r = env->set_lg_bsize(env, 4096);
+ assert_zero(r);
+
+ r = env->open(env, TOKU_TEST_FILENAME, DB_INIT_MPOOL|DB_CREATE|DB_THREAD |DB_INIT_LOCK|DB_INIT_LOG|DB_INIT_TXN|DB_PRIVATE, S_IRWXU+S_IRWXG+S_IRWXO);
+ assert_zero(r);
+
+ DB *db = NULL;
+ r = db_create(&db, env, 0);
+ assert_zero(r);
+
+ r = db->set_pagesize(db, 4096);
+ assert_zero(r);
+
+ r = db->set_readpagesize(db, 1024);
+ assert_zero(r);
+
+ r = db->open(db, NULL, "test.tdb", NULL, DB_BTREE, DB_AUTO_COMMIT+DB_CREATE, S_IRWXU+S_IRWXG+S_IRWXO);
+ assert_zero(r);
+
+ populate_and_test(env, db);
+
+ r = db->close(db, 0); assert_zero(r);
+
+ r = env->close(env, 0); assert_zero(r);
+}
+
+int
+test_main(int argc, char * const argv[]) {
+ int r;
+
+ // parse_args(argc, argv);
+ for (int i = 1; i < argc; i++) {
+ char * const arg = argv[i];
+ if (strcmp(arg, "-v") == 0) {
+ verbose++;
+ continue;
+ }
+ if (strcmp(arg, "-q") == 0) {
+ verbose = 0;
+ continue;
+ }
+ }
+
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ r = toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU+S_IRWXG+S_IRWXO); assert_zero(r);
+
+ run_test();
+
+ return 0;
+}
+
diff --git a/storage/tokudb/PerconaFT/src/tests/stat64-root-changes.cc b/storage/tokudb/PerconaFT/src/tests/stat64-root-changes.cc
index 0c70b0669ad..a2b48e443cd 100644
--- a/storage/tokudb/PerconaFT/src/tests/stat64-root-changes.cc
+++ b/storage/tokudb/PerconaFT/src/tests/stat64-root-changes.cc
@@ -166,7 +166,7 @@ run_test (void) {
DB_BTREE_STAT64 s;
r = db->stat64(db, NULL, &s); CKERR(r);
- assert(s.bt_nkeys == 1 && s.bt_dsize == sizeof key + sizeof val);
+ assert(s.bt_nkeys == 0);
r = db->close(db, 0); CKERR(r);
@@ -176,7 +176,7 @@ run_test (void) {
r = txn->commit(txn, 0); CKERR(r);
r = db->stat64(db, NULL, &s); CKERR(r);
- assert(s.bt_nkeys == 1 && s.bt_dsize == sizeof key + sizeof val);
+ assert(s.bt_nkeys == 0);
}
// verify update callback overwrites the row
diff --git a/storage/tokudb/PerconaFT/src/tests/test_db_rowcount.cc b/storage/tokudb/PerconaFT/src/tests/test_db_rowcount.cc
new file mode 100644
index 00000000000..c440bdc59e7
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/test_db_rowcount.cc
@@ -0,0 +1,523 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+#include <stdio.h>
+
+#include <sys/stat.h>
+#include <db.h>
+
+// Tests that the logical row counts are correct and not subject to variance
+// due to normal insert/delete messages within the tree, with a few exceptions:
+// 1) rollback messages not yet applied; 2) insert messages turned into
+// updates on apply; and 3) delete messages applied to missing leafentries.
+
+static DB_TXN* const null_txn = 0;
+static const uint64_t num_records = 4*1024;
+
+#define CHECK_NUM_ROWS(_expected, _stats) assert(_stats.bt_ndata == _expected)
+
+static DB* create_db(const char* fname, DB_ENV* env) {
+ int r;
+ DB* db;
+
+ r = db_create(&db, env, 0);
+ assert(r == 0);
+ db->set_errfile(db, stderr);
+
+ r = db->set_pagesize(db, 8192);
+ assert(r == 0);
+
+ r = db->set_readpagesize(db, 1024);
+ assert(r == 0);
+
+ r = db->set_fanout(db, 4);
+ assert(r == 0);
+
+ r = db->set_compression_method(db, TOKU_NO_COMPRESSION);
+ assert(r == 0);
+
+ r = db->open(db, null_txn, fname, "main", DB_BTREE, DB_CREATE,
+ 0666);
+ assert(r == 0);
+
+ return db;
+}
+static void add_records(DB* db, DB_TXN* txn, uint64_t start_id, uint64_t num) {
+ int r;
+ for (uint64_t i = 0, j=start_id; i < num; i++,j++) {
+ char key[100], val[256];
+ DBT k,v;
+ snprintf(key, 100, "%08" PRIu64, j);
+ snprintf(val, 256, "%*s", 200, key);
+ r =
+ db->put(
+ db,
+ txn,
+ dbt_init(&k, key, 1+strlen(key)),
+ dbt_init(&v, val, 1+strlen(val)),
+ 0);
+ assert(r == 0);
+ }
+}
+static void delete_records(
+ DB* db,
+ DB_TXN* txn,
+ uint64_t start_id,
+ uint64_t num) {
+
+ int r;
+ for (uint64_t i = 0, j=start_id; i < num; i++,j++) {
+ char key[100];
+ DBT k;
+ snprintf(key, 100, "%08" PRIu64, j);
+ r =
+ db->del(
+ db,
+ txn,
+ dbt_init(&k, key, 1+strlen(key)),
+ 0);
+ assert(r == 0);
+ }
+}
+static void full_optimize(DB* db) {
+ int r;
+ uint64_t loops_run = 0;
+
+ r = db->optimize(db);
+ assert(r == 0);
+
+ r = db->hot_optimize(db, NULL, NULL, NULL, NULL, &loops_run);
+ assert(r == 0);
+}
+static void test_insert_commit(DB_ENV* env) {
+ int r;
+ DB* db;
+ DB_TXN* txn;
+ DB_BTREE_STAT64 stats;
+
+ db = create_db(__FUNCTION__, env);
+
+ r = env->txn_begin(env, null_txn, &txn, 0);
+ assert(r == 0);
+
+ add_records(db, txn, 0, num_records);
+
+ r = db->stat64(db, null_txn, &stats);
+ assert(r == 0);
+
+ CHECK_NUM_ROWS(num_records, stats);
+ if (verbose)
+ printf("%s : before commit %" PRIu64 " rows\n", __FUNCTION__, stats.bt_ndata);
+
+ r = txn->commit(txn, 0);
+ assert(r == 0);
+
+ r = db->stat64(db, null_txn, &stats);
+ assert(r == 0);
+
+ CHECK_NUM_ROWS(num_records, stats);
+ if (verbose)
+ printf("%s : after commit %" PRIu64 " rows\n", __FUNCTION__, stats.bt_ndata);
+
+ db->close(db, 0);
+}
+static void test_insert_delete_commit(DB_ENV* env) {
+ int r;
+ DB* db;
+ DB_TXN* txn;
+ DB_BTREE_STAT64 stats;
+
+ db = create_db(__FUNCTION__, env);
+
+ r = env->txn_begin(env, null_txn, &txn, 0);
+ assert(r == 0);
+
+ add_records(db, txn, 0, num_records);
+
+ r = db->stat64(db, null_txn, &stats);
+ assert(r == 0);
+
+ CHECK_NUM_ROWS(num_records, stats);
+ if (verbose)
+ printf("%s : before delete %" PRIu64 " rows\n", __FUNCTION__, stats.bt_ndata);
+
+ delete_records(db, txn, 0, num_records);
+
+ r = db->stat64(db, null_txn, &stats);
+ assert(r == 0);
+
+ CHECK_NUM_ROWS(0, stats);
+ if (verbose)
+ printf("%s : after delete %" PRIu64 " rows\n", __FUNCTION__, stats.bt_ndata);
+
+ r = txn->commit(txn, 0);
+ assert(r == 0);
+
+ r = db->stat64(db, null_txn, &stats);
+ assert(r == 0);
+
+ CHECK_NUM_ROWS(0, stats);
+ if (verbose)
+ printf("%s : after commit %" PRIu64 " rows\n", __FUNCTION__, stats.bt_ndata);
+
+ db->close(db, 0);
+}
+static void test_insert_commit_delete_commit(DB_ENV* env) {
+ int r;
+ DB* db;
+ DB_TXN* txn;
+ DB_BTREE_STAT64 stats;
+
+ db = create_db(__FUNCTION__, env);
+
+ r = env->txn_begin(env, null_txn, &txn, 0);
+ assert(r == 0);
+
+ add_records(db, txn, 0, num_records);
+
+ r = db->stat64(db, null_txn, &stats);
+ assert(r == 0);
+
+ CHECK_NUM_ROWS(num_records, stats);
+ if (verbose)
+ printf(
+ "%s : before insert commit %" PRIu64 " rows\n",
+ __FUNCTION__,
+ stats.bt_ndata);
+
+ r = txn->commit(txn, 0);
+ assert(r == 0);
+
+ r = db->stat64(db, null_txn, &stats);
+ assert(r == 0);
+
+ CHECK_NUM_ROWS(num_records, stats);
+ if (verbose)
+ printf(
+ "%s : after insert commit %" PRIu64 " rows\n",
+ __FUNCTION__,
+ stats.bt_ndata);
+
+ r = env->txn_begin(env, null_txn, &txn, 0);
+ assert(r == 0);
+
+ delete_records(db, txn, 0, num_records);
+
+ r = db->stat64(db, null_txn, &stats);
+ assert(r == 0);
+
+ CHECK_NUM_ROWS(0, stats);
+ if (verbose)
+ printf("%s : after delete %" PRIu64 " rows\n", __FUNCTION__, stats.bt_ndata);
+
+ r = txn->commit(txn, 0);
+ assert(r == 0);
+
+ r = db->stat64(db, null_txn, &stats);
+ assert(r == 0);
+
+ CHECK_NUM_ROWS(0, stats);
+ if (verbose)
+ printf(
+ "%s : after delete commit %" PRIu64 " rows\n",
+ __FUNCTION__,
+ stats.bt_ndata);
+
+ db->close(db, 0);
+}
+static void test_insert_rollback(DB_ENV* env) {
+ int r;
+ DB* db;
+ DB_TXN* txn;
+ DB_BTREE_STAT64 stats;
+
+ db = create_db(__FUNCTION__, env);
+
+ r = env->txn_begin(env, null_txn, &txn, 0);
+ assert(r == 0);
+
+ add_records(db, txn, 0, num_records);
+
+ r = db->stat64(db, null_txn, &stats);
+ assert(r == 0);
+
+ CHECK_NUM_ROWS(num_records, stats);
+ if (verbose)
+ printf("%s : before rollback %" PRIu64 " rows\n", __FUNCTION__, stats.bt_ndata);
+
+ r = txn->abort(txn);
+ assert(r == 0);
+
+ r = db->stat64(db, null_txn, &stats);
+ assert(r == 0);
+
+    // CANNOT TEST stats HERE AS THEY ARE SOMEWHAT NON-DETERMINISTIC UNTIL
+    // optimize + hot_optimize HAVE BEEN RUN, BECAUSE ROLLBACK MESSAGES ARE
+    // STILL "IN-FLIGHT" IN THE TREE AND MUST BE APPLIED IN ORDER TO CORRECT
+    // THE RUNNING LOGICAL COUNT
+ if (verbose)
+ printf("%s : after rollback %" PRIu64 " rows\n", __FUNCTION__, stats.bt_ndata);
+
+ full_optimize(db);
+
+ r = db->stat64(db, null_txn, &stats);
+ assert(r == 0);
+
+ CHECK_NUM_ROWS(0, stats);
+ if (verbose)
+ printf(
+ "%s : after rollback optimize %" PRIu64 " rows\n",
+ __FUNCTION__,
+ stats.bt_ndata);
+
+ db->close(db, 0);
+}
+static void test_insert_delete_rollback(DB_ENV* env) {
+ int r;
+ DB* db;
+ DB_TXN* txn;
+ DB_BTREE_STAT64 stats;
+
+ db = create_db(__FUNCTION__, env);
+
+ r = env->txn_begin(env, null_txn, &txn, 0);
+ assert(r == 0);
+
+ add_records(db, txn, 0, num_records);
+
+ r = db->stat64(db, null_txn, &stats);
+ assert(r == 0);
+
+ CHECK_NUM_ROWS(num_records, stats);
+ if (verbose)
+ printf("%s : before delete %" PRIu64 " rows\n", __FUNCTION__, stats.bt_ndata);
+
+ delete_records(db, txn, 0, num_records);
+
+ r = db->stat64(db, null_txn, &stats);
+ assert(r == 0);
+
+ CHECK_NUM_ROWS(0, stats);
+ if (verbose)
+ printf("%s : after delete %" PRIu64 " rows\n", __FUNCTION__, stats.bt_ndata);
+
+ r = txn->abort(txn);
+ assert(r == 0);
+
+ r = db->stat64(db, null_txn, &stats);
+ assert(r == 0);
+
+ CHECK_NUM_ROWS(0, stats);
+ if (verbose)
+ printf("%s : after commit %" PRIu64 " rows\n", __FUNCTION__, stats.bt_ndata);
+
+ db->close(db, 0);
+}
+static void test_insert_commit_delete_rollback(DB_ENV* env) {
+ int r;
+ DB* db;
+ DB_TXN* txn;
+ DB_BTREE_STAT64 stats;
+
+ db = create_db(__FUNCTION__, env);
+
+ r = env->txn_begin(env, null_txn, &txn, 0);
+ assert(r == 0);
+
+ add_records(db, txn, 0, num_records);
+
+ r = db->stat64(db, null_txn, &stats);
+ assert(r == 0);
+
+ CHECK_NUM_ROWS(num_records, stats);
+ if (verbose)
+ printf(
+ "%s : before insert commit %" PRIu64 " rows\n",
+ __FUNCTION__,
+ stats.bt_ndata);
+
+ r = txn->commit(txn, 0);
+ assert(r == 0);
+
+ r = db->stat64(db, null_txn, &stats);
+ assert(r == 0);
+
+ CHECK_NUM_ROWS(num_records, stats);
+ if (verbose)
+ printf(
+ "%s : after insert commit %" PRIu64 " rows\n",
+ __FUNCTION__,
+ stats.bt_ndata);
+
+ r = env->txn_begin(env, null_txn, &txn, 0);
+ assert(r == 0);
+
+ delete_records(db, txn, 0, num_records);
+
+ r = db->stat64(db, null_txn, &stats);
+ assert(r == 0);
+
+ CHECK_NUM_ROWS(0, stats);
+ if (verbose)
+ printf("%s : after delete %" PRIu64 " rows\n", __FUNCTION__, stats.bt_ndata);
+
+ r = txn->abort(txn);
+ assert(r == 0);
+
+ r = db->stat64(db, null_txn, &stats);
+ assert(r == 0);
+
+    // CANNOT TEST stats HERE AS THEY ARE SOMEWHAT NON-DETERMINISTIC UNTIL
+    // optimize + hot_optimize HAVE BEEN RUN, BECAUSE ROLLBACK MESSAGES ARE
+    // STILL "IN-FLIGHT" IN THE TREE AND MUST BE APPLIED IN ORDER TO CORRECT
+    // THE RUNNING LOGICAL COUNT
+ if (verbose)
+ printf(
+ "%s : after delete rollback %" PRIu64 " rows\n",
+ __FUNCTION__,
+ stats.bt_ndata);
+
+ full_optimize(db);
+
+ r = db->stat64(db, null_txn, &stats);
+ assert(r == 0);
+
+ CHECK_NUM_ROWS(num_records, stats);
+ if (verbose)
+ printf(
+ "%s : after delete rollback optimize %" PRIu64 " rows\n",
+ __FUNCTION__,
+ stats.bt_ndata);
+
+ db->close(db, 0);
+}
+
+static int test_recount_insert_commit_progress(
+ uint64_t count,
+ uint64_t deleted,
+ void*) {
+
+ if (verbose)
+ printf(
+ "%s : count[%" PRIu64 "] deleted[%" PRIu64 "]\n",
+ __FUNCTION__,
+ count,
+ deleted);
+ return 0;
+}
+static int test_recount_cancel_progress(uint64_t, uint64_t, void*) {
+ return 1;
+}
+
+static void test_recount_insert_commit(DB_ENV* env) {
+ int r;
+ DB* db;
+ DB_TXN* txn;
+ DB_BTREE_STAT64 stats;
+
+ db = create_db(__FUNCTION__, env);
+
+ r = env->txn_begin(env, null_txn, &txn, 0);
+ assert(r == 0);
+
+ add_records(db, txn, 0, num_records);
+
+ r = db->stat64(db, null_txn, &stats);
+ assert(r == 0);
+
+ CHECK_NUM_ROWS(num_records, stats);
+ if (verbose)
+ printf(
+ "%s : before commit %" PRIu64 " rows\n",
+ __FUNCTION__,
+ stats.bt_ndata);
+
+ r = txn->commit(txn, 0);
+ assert(r == 0);
+
+ r = db->stat64(db, null_txn, &stats);
+ assert(r == 0);
+
+ CHECK_NUM_ROWS(num_records, stats);
+ if (verbose)
+ printf("%s : after commit %" PRIu64 " rows\n", __FUNCTION__, stats.bt_ndata);
+
+ // test that recount counted correct # of rows
+ r = db->recount_rows(db, test_recount_insert_commit_progress, NULL);
+ assert(r == 0);
+ CHECK_NUM_ROWS(num_records, stats);
+
+ // test that recount callback cancel returns
+ r = db->recount_rows(db, test_recount_cancel_progress, NULL);
+ assert(r == 1);
+ CHECK_NUM_ROWS(num_records, stats);
+
+ db->close(db, 0);
+}
+int test_main(int UU(argc), char UU(*const argv[])) {
+ int r;
+ DB_ENV* env;
+
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU + S_IRWXG + S_IRWXO);
+
+ r = db_env_create(&env, 0);
+ assert(r == 0);
+
+ r =
+ env->open(
+ env,
+ TOKU_TEST_FILENAME,
+ DB_INIT_MPOOL + DB_INIT_LOG + DB_INIT_TXN + DB_PRIVATE + DB_CREATE,
+ S_IRWXU + S_IRWXG + S_IRWXO);
+ assert(r == 0);
+
+ test_insert_commit(env);
+ test_insert_delete_commit(env);
+ test_insert_commit_delete_commit(env);
+ test_insert_rollback(env);
+ test_insert_delete_rollback(env);
+ test_insert_commit_delete_rollback(env);
+ test_recount_insert_commit(env);
+
+ r = env->close(env, 0);
+ assert(r == 0);
+
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/txn_manager_handle_snapshot_atomicity.cc b/storage/tokudb/PerconaFT/src/tests/txn_manager_handle_snapshot_atomicity.cc
new file mode 100644
index 00000000000..30cc16d73a7
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/txn_manager_handle_snapshot_atomicity.cc
@@ -0,0 +1,217 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+//In response to the read-committed crash bug seen in sysbench, this test checks
+//the atomicity of the txn manager when handling the child txn snapshot.
+//The test is supposed to fail before the read-committed fix.
+
+#include "test.h"
+#include "toku_pthread.h"
+#include "ydb.h"
+struct test_sync {
+ int state;
+ toku_mutex_t lock;
+ toku_cond_t cv;
+};
+
+static void test_sync_init(struct test_sync *UU(sync)) {
+#if TOKU_DEBUG_TXN_SYNC
+ sync->state = 0;
+ toku_mutex_init(&sync->lock, NULL);
+ toku_cond_init(&sync->cv, NULL);
+#endif
+}
+
+static void test_sync_destroy(struct test_sync *UU(sync)) {
+#if TOKU_DEBUG_TXN_SYNC
+ toku_mutex_destroy(&sync->lock);
+ toku_cond_destroy(&sync->cv);
+#endif
+}
+
+static void test_sync_sleep(struct test_sync *UU(sync), int UU(new_state)) {
+#if TOKU_DEBUG_TXN_SYNC
+ toku_mutex_lock(&sync->lock);
+ while (sync->state != new_state) {
+ toku_cond_wait(&sync->cv, &sync->lock);
+ }
+ toku_mutex_unlock(&sync->lock);
+#endif
+}
+
+static void test_sync_next_state(struct test_sync *UU(sync)) {
+#if TOKU_DEBUG_TXN_SYNC
+ toku_mutex_lock(&sync->lock);
+ sync->state++;
+ toku_cond_broadcast(&sync->cv);
+ toku_mutex_unlock(&sync->lock);
+#endif
+}
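A hedged sketch of the handshake these helpers enable when TOKU_DEBUG_TXN_SYNC is on (the thread bodies are illustrative, not from this test):

    static struct test_sync gate;
    static void* thread_a(void*) {
        test_sync_sleep(&gate, 1);    // blocks until gate.state == 1
        // ...A's step now runs strictly after B's...
        return NULL;
    }
    static void* thread_b(void*) {
        // ...B's setup step...
        test_sync_next_state(&gate);  // state 0 -> 1, wakes A
        return NULL;
    }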
+
+
+struct start_txn_arg {
+ DB_ENV *env;
+ DB *db;
+ DB_TXN * parent;
+};
+
+static struct test_sync sync_s;
+
+static void test_callback(pthread_t self_tid, void * extra) {
+ pthread_t **p = (pthread_t **) extra;
+ pthread_t tid_1 = *p[0];
+ pthread_t tid_2 = *p[1];
+ assert(pthread_equal(self_tid, tid_2));
+ printf("%s: the thread[%" PRIu64 "] is going to wait...\n", __func__, reinterpret_cast<uint64_t>(tid_1));
+ test_sync_next_state(&sync_s);
+ sleep(3);
+ //test_sync_sleep(&sync_s,3);
+    //Using the test_sync_sleep/test_sync_next_state pair would synchronize the
+    //threads more precisely; however, after the fix this might cause a deadlock,
+    //so a plain sleep suffices for this proof-of-concept test.
+ printf("%s: the thread[%" PRIu64 "] is resuming...\n", __func__, reinterpret_cast<uint64_t>(tid_1));
+ return;
+}
+
+static void * start_txn2(void * extra) {
+ struct start_txn_arg * args = (struct start_txn_arg *) extra;
+ DB_ENV * env = args -> env;
+ DB * db = args->db;
+ DB_TXN * parent = args->parent;
+ test_sync_sleep(&sync_s, 1);
+ printf("start %s [thread %" PRIu64 "]\n", __func__, reinterpret_cast<uint64_t>(pthread_self()));
+ DB_TXN *txn;
+ int r = env->txn_begin(env, parent, &txn, DB_READ_COMMITTED);
+ assert(r == 0);
+ //do some random things...
+ DBT key, data;
+ dbt_init(&key, "hello", 6);
+ dbt_init(&data, "world", 6);
+ db->put(db, txn, &key, &data, 0);
+ db->get(db, txn, &key, &data, 0);
+
+ r = txn->commit(txn, 0);
+ assert(r == 0);
+ printf("%s done[thread %" PRIu64 "]\n", __func__, reinterpret_cast<uint64_t>(pthread_self()));
+ return extra;
+}
+
+static void * start_txn1(void * extra) {
+ struct start_txn_arg * args = (struct start_txn_arg *) extra;
+ DB_ENV * env = args -> env;
+ DB * db = args->db;
+ printf("start %s: [thread %" PRIu64 "]\n", __func__, reinterpret_cast<uint64_t>(pthread_self()));
+ DB_TXN *txn;
+ int r = env->txn_begin(env, NULL, &txn, DB_READ_COMMITTED);
+ assert(r == 0);
+ printf("%s: txn began by [thread %" PRIu64 "], will wait\n", __func__, reinterpret_cast<uint64_t>(pthread_self()));
+ test_sync_next_state(&sync_s);
+ test_sync_sleep(&sync_s,2);
+ printf("%s: [thread %" PRIu64 "] resumed\n", __func__, reinterpret_cast<uint64_t>(pthread_self()));
+ //do some random things...
+ DBT key, data;
+ dbt_init(&key, "hello", 6);
+ dbt_init(&data, "world", 6);
+ db->put(db, txn, &key, &data, 0);
+ db->get(db, txn, &key, &data, 0);
+ r = txn->commit(txn, 0);
+ assert(r == 0);
+ printf("%s: done[thread %" PRIu64 "]\n", __func__, reinterpret_cast<uint64_t>(pthread_self()));
+ //test_sync_next_state(&sync_s);
+ return extra;
+}
+
+int test_main (int UU(argc), char * const UU(argv[])) {
+ int r;
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ r = toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU+S_IRWXG+S_IRWXO);
+ assert(r == 0);
+
+ DB_ENV *env;
+ r = db_env_create(&env, 0);
+ assert(r == 0);
+
+ r = env->open(env, TOKU_TEST_FILENAME, DB_INIT_MPOOL|DB_CREATE|DB_THREAD |DB_INIT_LOCK|DB_INIT_LOG|DB_INIT_TXN|DB_PRIVATE, S_IRWXU+S_IRWXG+S_IRWXO);
+ assert(r == 0);
+
+ DB *db = NULL;
+ r = db_create(&db, env, 0);
+ assert(r == 0);
+
+ r = db->open(db, NULL, "testit", NULL, DB_BTREE, DB_AUTO_COMMIT+DB_CREATE, S_IRWXU+S_IRWXG+S_IRWXO);
+ assert(r == 0);
+
+ DB_TXN * parent = NULL;
+ r = env->txn_begin(env, 0, &parent, DB_READ_COMMITTED);
+ assert(r == 0);
+
+ ZERO_STRUCT(sync_s);
+ test_sync_init(&sync_s);
+
+ pthread_t tid_1 = 0;
+ pthread_t tid_2 = 0;
+ pthread_t* callback_extra[2] = {&tid_1, &tid_2};
+ toku_set_test_txn_sync_callback(test_callback, callback_extra);
+
+ struct start_txn_arg args = {env, db, parent};
+
+ r = pthread_create(&tid_1, NULL, start_txn1, &args);
+ assert(r==0);
+
+ r= pthread_create(&tid_2, NULL, start_txn2, &args);
+ assert(r==0);
+
+ void * ret;
+ r = pthread_join(tid_1, &ret);
+ assert(r == 0);
+ r = pthread_join(tid_2, &ret);
+ assert(r == 0);
+
+ r = parent->commit(parent, 0);
+ assert(r ==0);
+
+ test_sync_destroy(&sync_s);
+ r = db->close(db, 0);
+ assert(r == 0);
+
+ r = env->close(env, 0);
+ assert(r == 0);
+
+ return 0;
+}
+
diff --git a/storage/tokudb/PerconaFT/src/ydb.cc b/storage/tokudb/PerconaFT/src/ydb.cc
index 88c6c86f214..55da418a0de 100644
--- a/storage/tokudb/PerconaFT/src/ydb.cc
+++ b/storage/tokudb/PerconaFT/src/ydb.cc
@@ -3148,6 +3148,10 @@ toku_test_get_latest_lsn(DB_ENV *env) {
return rval.lsn;
}
+void toku_set_test_txn_sync_callback(void (* cb) (pthread_t, void *), void * extra) {
+ set_test_txn_sync_callback(cb, extra);
+}
+
int
toku_test_get_checkpointing_user_data_status (void) {
return toku_cachetable_get_checkpointing_user_data_status();
diff --git a/storage/tokudb/PerconaFT/src/ydb.h b/storage/tokudb/PerconaFT/src/ydb.h
index 9d4e94c6f30..facbfdc9252 100644
--- a/storage/tokudb/PerconaFT/src/ydb.h
+++ b/storage/tokudb/PerconaFT/src/ydb.h
@@ -58,3 +58,6 @@ extern "C" uint64_t toku_test_get_latest_lsn(DB_ENV *env) __attribute__((__visib
// test-only function
extern "C" int toku_test_get_checkpointing_user_data_status(void) __attribute__((__visibility__("default")));
+
+// test-only function
+extern "C" void toku_set_test_txn_sync_callback(void (* ) (pthread_t, void *), void * extra) __attribute__((__visibility__("default")));
diff --git a/storage/tokudb/PerconaFT/src/ydb_db.cc b/storage/tokudb/PerconaFT/src/ydb_db.cc
index 25b24467684..e5bd4e7d089 100644
--- a/storage/tokudb/PerconaFT/src/ydb_db.cc
+++ b/storage/tokudb/PerconaFT/src/ydb_db.cc
@@ -1015,6 +1015,25 @@ toku_db_verify_with_progress(DB *db, int (*progress_callback)(void *extra, float
return r;
}
+
+static int
+toku_db_recount_rows(DB* db, int (*progress_callback)(uint64_t count,
+ uint64_t deleted,
+ void* progress_extra),
+ void* progress_extra) {
+
+ HANDLE_PANICKED_DB(db);
+ int r = 0;
+ r =
+ toku_ft_recount_rows(
+ db->i->ft_handle,
+ progress_callback,
+ progress_extra);
+
+ return r;
+}
+
+
int toku_setup_db_internal (DB **dbp, DB_ENV *env, uint32_t flags, FT_HANDLE ft_handle, bool is_open) {
if (flags || env == NULL)
return EINVAL;
@@ -1098,6 +1117,7 @@ toku_db_create(DB ** db, DB_ENV * env, uint32_t flags) {
USDB(dbt_pos_infty);
USDB(dbt_neg_infty);
USDB(get_fragmentation);
+ USDB(recount_rows);
#undef USDB
result->get_indexer = db_get_indexer;
result->del = autotxn_db_del;
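For context on the recount_rows method wired into the DB vtable above, a hedged usage sketch (the callback body is illustrative; as in test_db_rowcount.cc, a non-zero return cancels the recount):

    static int report_progress(uint64_t count, uint64_t deleted, void* extra) {
        (void)extra;
        printf("recount: %" PRIu64 " rows, %" PRIu64 " deleted\n", count, deleted);
        return 0;  // returning non-zero cancels the recount
    }
    // later, with an open DB* db:
    //   int r = db->recount_rows(db, report_progress, NULL);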
diff --git a/storage/tokudb/ha_tokudb.cc b/storage/tokudb/ha_tokudb.cc
index 6ff879c9f1a..672ae32f80a 100644
--- a/storage/tokudb/ha_tokudb.cc
+++ b/storage/tokudb/ha_tokudb.cc
@@ -23,60 +23,31 @@ Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
-#ifdef USE_PRAGMA_IMPLEMENTATION
-#pragma implementation // gcc: Class implementation
-#endif
-
-#include <my_config.h>
-extern "C" {
-#include "stdint.h"
-#define __STDC_FORMAT_MACROS
-#include "inttypes.h"
-#if defined(_WIN32)
-#include "misc.h"
-#endif
-}
-
-#define MYSQL_SERVER 1
-#include "mysql_version.h"
-#include "sql_table.h"
-#include "handler.h"
-#include "table.h"
-#include "log.h"
-#include "sql_class.h"
-#include "sql_show.h"
-#include "discover.h"
-
-#if (50600 <= MYSQL_VERSION_ID && MYSQL_VERSION_ID <= 50699) || (50700 <= MYSQL_VERSION_ID && MYSQL_VERSION_ID <= 50799)
-#include <binlog.h>
-#endif
-
-#include "db.h"
-#include "toku_os.h"
-#include "hatoku_defines.h"
+#include "hatoku_hton.h"
#include "hatoku_cmp.h"
+#include "tokudb_buffer.h"
+#include "tokudb_status.h"
+#include "tokudb_card.h"
+#include "ha_tokudb.h"
-static inline uint get_key_parts(const KEY *key);
-
-#undef PACKAGE
-#undef VERSION
-#undef HAVE_DTRACE
-#undef _DTRACE_VERSION
-/* We define DTRACE after mysql_priv.h in case it disabled dtrace in the main server */
-#ifdef HAVE_DTRACE
-#define _DTRACE_VERSION 1
+#if TOKU_INCLUDE_EXTENDED_KEYS
+static inline uint get_ext_key_parts(const KEY *key) {
+#if (50609 <= MYSQL_VERSION_ID && MYSQL_VERSION_ID <= 50699) || \
+ (50700 <= MYSQL_VERSION_ID && MYSQL_VERSION_ID <= 50799)
+ return key->actual_key_parts;
+#elif defined(MARIADB_BASE_VERSION)
+ return key->ext_key_parts;
#else
+#error
+#endif
+}
#endif
-#include "tokudb_buffer.h"
-#include "tokudb_status.h"
-#include "tokudb_card.h"
-#include "ha_tokudb.h"
-#include "hatoku_hton.h"
-#include <mysql/plugin.h>
+HASH TOKUDB_SHARE::_open_tables;
+tokudb::thread::mutex_t TOKUDB_SHARE::_open_tables_mutex;
-static const char *ha_tokudb_exts[] = {
+static const char* ha_tokudb_exts[] = {
ha_tokudb_ext,
NullS
};
@@ -84,10 +55,15 @@ static const char *ha_tokudb_exts[] = {
//
// This offset is calculated starting from AFTER the NULL bytes
//
-static inline uint32_t get_fixed_field_size(KEY_AND_COL_INFO* kc_info, TABLE_SHARE* table_share, uint keynr) {
+static inline uint32_t get_fixed_field_size(
+ KEY_AND_COL_INFO* kc_info,
+ TABLE_SHARE* table_share,
+ uint keynr) {
+
uint offset = 0;
for (uint i = 0; i < table_share->fields; i++) {
- if (is_fixed_field(kc_info, i) && !bitmap_is_set(&kc_info->key_filters[keynr],i)) {
+ if (is_fixed_field(kc_info, i) &&
+ !bitmap_is_set(&kc_info->key_filters[keynr], i)) {
offset += kc_info->field_lengths[i];
}
}
@@ -95,10 +71,15 @@ static inline uint32_t get_fixed_field_size(KEY_AND_COL_INFO* kc_info, TABLE_SHA
}
-static inline uint32_t get_len_of_offsets(KEY_AND_COL_INFO* kc_info, TABLE_SHARE* table_share, uint keynr) {
+static inline uint32_t get_len_of_offsets(
+ KEY_AND_COL_INFO* kc_info,
+ TABLE_SHARE* table_share,
+ uint keynr) {
+
uint len = 0;
for (uint i = 0; i < table_share->fields; i++) {
- if (is_variable_field(kc_info, i) && !bitmap_is_set(&kc_info->key_filters[keynr],i)) {
+ if (is_variable_field(kc_info, i) &&
+ !bitmap_is_set(&kc_info->key_filters[keynr], i)) {
len += kc_info->num_offset_bytes;
}
}
@@ -106,32 +87,36 @@ static inline uint32_t get_len_of_offsets(KEY_AND_COL_INFO* kc_info, TABLE_SHARE
}
-static int allocate_key_and_col_info ( TABLE_SHARE* table_share, KEY_AND_COL_INFO* kc_info) {
+static int allocate_key_and_col_info(
+ TABLE_SHARE* table_share,
+ KEY_AND_COL_INFO* kc_info) {
+
int error;
//
// initialize all of the bitmaps
//
for (uint i = 0; i < MAX_KEY + 1; i++) {
- error = bitmap_init(
- &kc_info->key_filters[i],
- NULL,
- table_share->fields,
- false
- );
+ error =
+ bitmap_init(
+ &kc_info->key_filters[i],
+ NULL,
+ table_share->fields,
+ false);
if (error) {
goto exit;
}
}
-
+
//
// create the field lengths
//
- kc_info->multi_ptr = tokudb_my_multi_malloc(MYF(MY_WME+MY_ZEROFILL),
- &kc_info->field_types, (uint)(table_share->fields * sizeof (uint8_t)),
- &kc_info->field_lengths, (uint)(table_share->fields * sizeof (uint16_t)),
- &kc_info->length_bytes, (uint)(table_share->fields * sizeof (uint8_t)),
- &kc_info->blob_fields, (uint)(table_share->fields * sizeof (uint32_t)),
- NullS);
+ kc_info->multi_ptr = tokudb::memory::multi_malloc(
+ MYF(MY_WME+MY_ZEROFILL),
+ &kc_info->field_types, (uint)(table_share->fields * sizeof (uint8_t)),
+ &kc_info->field_lengths, (uint)(table_share->fields * sizeof (uint16_t)),
+ &kc_info->length_bytes, (uint)(table_share->fields * sizeof (uint8_t)),
+ &kc_info->blob_fields, (uint)(table_share->fields * sizeof (uint32_t)),
+ NullS);
if (kc_info->multi_ptr == NULL) {
error = ENOMEM;
goto exit;
@@ -141,7 +126,7 @@ exit:
for (uint i = 0; MAX_KEY + 1; i++) {
bitmap_free(&kc_info->key_filters[i]);
}
- tokudb_my_free(kc_info->multi_ptr);
+ tokudb::memory::free(kc_info->multi_ptr);
}
return error;
}
@@ -150,136 +135,306 @@ static void free_key_and_col_info (KEY_AND_COL_INFO* kc_info) {
for (uint i = 0; i < MAX_KEY+1; i++) {
bitmap_free(&kc_info->key_filters[i]);
}
-
+
for (uint i = 0; i < MAX_KEY+1; i++) {
- tokudb_my_free(kc_info->cp_info[i]);
+ tokudb::memory::free(kc_info->cp_info[i]);
kc_info->cp_info[i] = NULL; // 3144
}
- tokudb_my_free(kc_info->multi_ptr);
+ tokudb::memory::free(kc_info->multi_ptr);
kc_info->field_types = NULL;
kc_info->field_lengths = NULL;
kc_info->length_bytes = NULL;
kc_info->blob_fields = NULL;
}
-void TOKUDB_SHARE::init(void) {
- use_count = 0;
- thr_lock_init(&lock);
- tokudb_pthread_mutex_init(&mutex, MY_MUTEX_INIT_FAST);
- my_rwlock_init(&num_DBs_lock, 0);
- tokudb_pthread_cond_init(&m_openclose_cond, NULL);
- m_state = CLOSED;
-}
-void TOKUDB_SHARE::destroy(void) {
- assert(m_state == CLOSED);
- thr_lock_delete(&lock);
- tokudb_pthread_mutex_destroy(&mutex);
- rwlock_destroy(&num_DBs_lock);
- tokudb_pthread_cond_destroy(&m_openclose_cond);
- tokudb_my_free(rec_per_key);
- rec_per_key = NULL;
-}
+uchar* TOKUDB_SHARE::hash_get_key(
+ TOKUDB_SHARE* share,
+ size_t* length,
+ TOKUDB_UNUSED(my_bool not_used)) {
-// MUST have tokudb_mutex locked on input
-static TOKUDB_SHARE *get_share(const char *table_name, TABLE_SHARE* table_share) {
- TOKUDB_SHARE *share = NULL;
+ *length = share->_full_table_name.length();
+ return (uchar *) share->_full_table_name.c_ptr();
+}
+void TOKUDB_SHARE::hash_free_element(TOKUDB_SHARE* share) {
+ share->destroy();
+ delete share;
+}
+void TOKUDB_SHARE::static_init() {
+ my_hash_init(
+ &_open_tables,
+ table_alias_charset,
+ 32,
+ 0,
+ 0,
+ (my_hash_get_key)hash_get_key,
+ (my_hash_free_key)hash_free_element, 0);
+}
+void TOKUDB_SHARE::static_destroy() {
+ my_hash_free(&_open_tables);
+}
+const char* TOKUDB_SHARE::get_state_string(share_state_t state) {
+ static const char* state_string[] = {
+ "CLOSED",
+ "OPENED",
+ "ERROR"
+ };
+ assert_always(state == CLOSED || state == OPENED || state == ERROR);
+ return state_string[state];
+}
+void* TOKUDB_SHARE::operator new(size_t sz) {
+ return tokudb::memory::malloc(sz, MYF(MY_WME|MY_ZEROFILL|MY_FAE));
+}
+void TOKUDB_SHARE::operator delete(void* p) {
+ tokudb::memory::free(p);
+}
+TOKUDB_SHARE::TOKUDB_SHARE() :
+ _num_DBs_lock(),
+ _mutex() {
+}
+void TOKUDB_SHARE::init(const char* table_name) {
+ _use_count = 0;
+ thr_lock_init(&_thr_lock);
+ _state = CLOSED;
+ _row_delta_activity = 0;
+ _allow_auto_analysis = true;
+
+ _full_table_name.append(table_name);
+
+ String tmp_dictionary_name;
+ tokudb_split_dname(
+ table_name,
+ _database_name,
+ _table_name,
+ tmp_dictionary_name);
+
+ TOKUDB_SHARE_DBUG_ENTER("file[%s]:state[%s]:use_count[%d]",
+ _full_table_name.ptr(),
+ get_state_string(_state),
+ _use_count);
+ TOKUDB_SHARE_DBUG_VOID_RETURN();
+}
+void TOKUDB_SHARE::destroy() {
+ TOKUDB_SHARE_DBUG_ENTER("file[%s]:state[%s]:use_count[%d]",
+ _full_table_name.ptr(),
+ get_state_string(_state),
+ _use_count);
+
+ assert_always(_use_count == 0);
+ assert_always(
+ _state == TOKUDB_SHARE::CLOSED || _state == TOKUDB_SHARE::ERROR);
+ thr_lock_delete(&_thr_lock);
+ TOKUDB_SHARE_DBUG_VOID_RETURN();
+}
+TOKUDB_SHARE* TOKUDB_SHARE::get_share(
+ const char* table_name,
+ TABLE_SHARE* table_share,
+ THR_LOCK_DATA* data,
+ bool create_new) {
+
+ _open_tables_mutex.lock();
int error = 0;
uint length = (uint) strlen(table_name);
- if (!(share = (TOKUDB_SHARE *) my_hash_search(&tokudb_open_tables, (uchar *) table_name, length))) {
- char *tmp_name;
-
+ TOKUDB_SHARE* share =
+ (TOKUDB_SHARE*)my_hash_search(
+ &_open_tables,
+ (uchar*)table_name,
+ length);
+
+ TOKUDB_TRACE_FOR_FLAGS(
+ TOKUDB_DEBUG_SHARE,
+ "existing share[%s] %s:share[%p]",
+ table_name,
+ share == NULL ? "not found" : "found",
+ share);
+
+ if (!share) {
+ if (create_new == false)
+ goto exit;
// create share and fill it with all zeroes
// hence, all pointers are initialized to NULL
- share = (TOKUDB_SHARE *) tokudb_my_multi_malloc(MYF(MY_WME | MY_ZEROFILL),
- &share, sizeof(*share),
- &tmp_name, length + 1,
- NullS
- );
- assert(share);
-
- share->init();
+ share = new TOKUDB_SHARE;
+ assert_always(share);
- share->table_name_length = length;
- share->table_name = tmp_name;
- strmov(share->table_name, table_name);
+ share->init(table_name);
- error = my_hash_insert(&tokudb_open_tables, (uchar *) share);
+ error = my_hash_insert(&_open_tables, (uchar*)share);
if (error) {
free_key_and_col_info(&share->kc_info);
+ share->destroy();
+ tokudb::memory::free((uchar*)share);
+ share = NULL;
goto exit;
}
}
+ share->addref();
+
+ if (data)
+ thr_lock_data_init(&(share->_thr_lock), data, NULL);
+
exit:
- if (error) {
- share->destroy();
- tokudb_my_free((uchar *) share);
- share = NULL;
- }
+ _open_tables_mutex.unlock();
return share;
}
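A hedged sketch of the share lifecycle under the refactored API above (the caller code is illustrative):

    // get_share() looks up or creates the share and takes a reference via
    // addref(); release() drops it and closes the key files on last release.
    TOKUDB_SHARE* share =
        TOKUDB_SHARE::get_share(full_name, table_share, &lock_data, true);
    if (share != NULL) {
        // ...use the share...
        int r = share->release();  // r reports any close error
        (void)r;
    }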
+void TOKUDB_SHARE::drop_share(TOKUDB_SHARE* share) {
+ TOKUDB_TRACE_FOR_FLAGS(
+ TOKUDB_DEBUG_SHARE,
+ "share[%p]:file[%s]:state[%s]:use_count[%d]",
+ share,
+ share->_full_table_name.ptr(),
+ get_state_string(share->_state),
+ share->_use_count);
+
+ _open_tables_mutex.lock();
+ my_hash_delete(&_open_tables, (uchar*)share);
+ _open_tables_mutex.unlock();
+}
+TOKUDB_SHARE::share_state_t TOKUDB_SHARE::addref() {
+ TOKUDB_SHARE_TRACE_FOR_FLAGS((TOKUDB_DEBUG_ENTER & TOKUDB_DEBUG_SHARE),
+ "file[%s]:state[%s]:use_count[%d]",
+ _full_table_name.ptr(),
+ get_state_string(_state),
+ _use_count);
+
+ lock();
+ _use_count++;
+
+ return _state;
+}
+int TOKUDB_SHARE::release() {
+ TOKUDB_SHARE_DBUG_ENTER("file[%s]:state[%s]:use_count[%d]",
+ _full_table_name.ptr(),
+ get_state_string(_state),
+ _use_count);
-static int free_share(TOKUDB_SHARE * share) {
int error, result = 0;
- tokudb_pthread_mutex_lock(&share->mutex);
- DBUG_PRINT("info", ("share->use_count %u", share->use_count));
- if (!--share->use_count) {
- share->m_state = TOKUDB_SHARE::CLOSING;
- tokudb_pthread_mutex_unlock(&share->mutex);
-
- //
- // number of open DB's may not be equal to number of keys we have because add_index
- // may have added some. So, we loop through entire array and close any non-NULL value
- // It is imperative that we reset a DB to NULL once we are done with it.
- //
- for (uint i = 0; i < sizeof(share->key_file)/sizeof(share->key_file[0]); i++) {
- if (share->key_file[i]) {
- if (tokudb_debug & TOKUDB_DEBUG_OPEN) {
- TOKUDB_TRACE("dbclose:%p", share->key_file[i]);
- }
- error = share->key_file[i]->close(share->key_file[i], 0);
- assert(error == 0);
+ _mutex.lock();
+ assert_always(_use_count != 0);
+ _use_count--;
+ if (_use_count == 0 && _state == TOKUDB_SHARE::OPENED) {
+ // the number of open DBs may not equal the number of keys we have
+ // because add_index may have added some. So, we loop through the entire
+ // array and close any non-NULL value. It is imperative that we reset
+ // a DB to NULL once we are done with it.
+ for (uint i = 0; i < sizeof(key_file)/sizeof(key_file[0]); i++) {
+ if (key_file[i]) {
+ TOKUDB_TRACE_FOR_FLAGS(
+ TOKUDB_DEBUG_OPEN,
+ "dbclose:%p",
+ key_file[i]);
+ error = key_file[i]->close(key_file[i], 0);
+ assert_always(error == 0);
if (error) {
result = error;
}
- if (share->key_file[i] == share->file)
- share->file = NULL;
- share->key_file[i] = NULL;
+ if (key_file[i] == file)
+ file = NULL;
+ key_file[i] = NULL;
}
}
- error = tokudb::close_status(&share->status_block);
- assert(error == 0);
+ error = tokudb::metadata::close(&status_block);
+ assert_always(error == 0);
- free_key_and_col_info(&share->kc_info);
+ free_key_and_col_info(&kc_info);
- tokudb_pthread_mutex_lock(&tokudb_mutex);
- tokudb_pthread_mutex_lock(&share->mutex);
- share->m_state = TOKUDB_SHARE::CLOSED;
- if (share->use_count > 0) {
- tokudb_pthread_cond_broadcast(&share->m_openclose_cond);
- tokudb_pthread_mutex_unlock(&share->mutex);
- tokudb_pthread_mutex_unlock(&tokudb_mutex);
- } else {
-
- my_hash_delete(&tokudb_open_tables, (uchar *) share);
-
- tokudb_pthread_mutex_unlock(&share->mutex);
- tokudb_pthread_mutex_unlock(&tokudb_mutex);
+ if (_rec_per_key) {
+ tokudb::memory::free(_rec_per_key);
+ _rec_per_key = NULL;
+ _rec_per_keys = 0;
+ }
- share->destroy();
- tokudb_my_free((uchar *) share);
+ for (uint i = 0; i < _keys; i++) {
+ tokudb::memory::free(_key_descriptors[i]._name);
}
- } else {
- tokudb_pthread_mutex_unlock(&share->mutex);
+ tokudb::memory::free(_key_descriptors);
+ _keys = _max_key_parts = 0;
+ _key_descriptors = NULL;
+
+ _state = TOKUDB_SHARE::CLOSED;
}
+ _mutex.unlock();
- return result;
+ TOKUDB_SHARE_DBUG_RETURN(result);
+}
+void TOKUDB_SHARE::update_row_count(
+ THD* thd,
+ uint64_t added,
+ uint64_t deleted,
+ uint64_t updated) {
+
+ uint64_t delta = added + deleted + updated;
+ lock();
+ if (deleted > added && _rows < (deleted - added)) {
+ _rows = 0;
+ } else {
+ _rows += added - deleted;
+ }
+ _row_delta_activity += delta;
+ if (_row_delta_activity == (uint64_t)~0)
+ _row_delta_activity = 1;
+
+ ulonglong auto_threshold = tokudb::sysvars::auto_analyze(thd);
+ if (delta && auto_threshold > 0 && _allow_auto_analysis) {
+ ulonglong pct_of_rows_changed_to_trigger;
+ pct_of_rows_changed_to_trigger = ((_rows * auto_threshold) / 100);
+ if (_row_delta_activity >= pct_of_rows_changed_to_trigger) {
+ char msg[200];
+ snprintf(
+ msg,
+ sizeof(msg),
+ "TokuDB: Auto %s background analysis for %s, delta_activity "
+ "%llu is greater than %llu percent of %llu rows.",
+ tokudb::sysvars::analyze_in_background(thd) > 0 ?
+ "scheduling" : "running",
+ full_table_name(),
+ _row_delta_activity,
+ auto_threshold,
+ (ulonglong)(_rows));
+
+ // analyze_standard will unlock _mutex regardless of success/failure
+ int ret = analyze_standard(thd, NULL);
+ if (ret == 0) {
+ sql_print_information("%s - succeeded.", msg);
+ } else {
+ sql_print_information(
+ "%s - failed, likely a job already running.",
+ msg);
+ }
+ }
+ }
+ unlock();
}
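A worked example of the trigger arithmetic above, assuming tokudb_auto_analyze is set to 30 (percent) on a table whose row estimate is 1,000,000: the threshold is 300,000 changed rows, so 310,000 inserts+deletes+updates since the last analysis would fire it.

    #include <cstdint>
    #include <cstdio>

    int main() {
        uint64_t rows = 1000000;
        uint64_t auto_threshold = 30;             // tokudb_auto_analyze, in percent
        uint64_t row_delta_activity = 310000;     // added + deleted + updated
        uint64_t trigger = (rows * auto_threshold) / 100;   // 300000
        std::printf("analyze fires: %s\n",
                    row_delta_activity >= trigger ? "yes" : "no");   // yes
        return 0;
    }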
+void TOKUDB_SHARE::set_cardinality_counts_in_table(TABLE* table) {
+ lock();
+ uint32_t next_key_part = 0;
+ for (uint32_t i = 0; i < table->s->keys; i++) {
+ KEY* key = &table->key_info[i];
+ bool is_unique_key =
+ (i == table->s->primary_key) || (key->flags & HA_NOSAME);
+ for (uint32_t j = 0; j < get_ext_key_parts(key); j++) {
+ if (j >= key->user_defined_key_parts) {
+ // MySQL 'hidden' key parts; the relationship between MySQL hidden
+ // keys and TokuDB hidden keys still needs deeper investigation
+ key->rec_per_key[j] = 1;
+ continue;
+ }
+
+ assert_always(next_key_part < _rec_per_keys);
+ ulong val = _rec_per_key[next_key_part++];
+ val = (val * tokudb::sysvars::cardinality_scale_percent) / 100;
+ if (val == 0 || _rows == 0 ||
+ (is_unique_key && j == get_ext_key_parts(key) - 1)) {
+ val = 1;
+ }
+ key->rec_per_key[j] = val;
+ }
+ }
+ unlock();
+}
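A worked example of the scaling above: with tokudb_cardinality_scale_percent = 50 and a stored rec_per_key of 8, the optimizer is told 4; a zero result, an empty table, or the last part of a unique key all clamp to 1.

    #include <cstdio>

    int main() {
        unsigned long stored = 8;
        unsigned long scale_percent = 50;         // tokudb_cardinality_scale_percent
        unsigned long rows = 1000;
        bool last_part_of_unique_key = false;

        unsigned long val = (stored * scale_percent) / 100;   // 4
        if (val == 0 || rows == 0 || last_part_of_unique_key)
            val = 1;
        std::printf("rec_per_key reported: %lu\n", val);
        return 0;
    }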
#define HANDLE_INVALID_CURSOR() \
if (cursor == NULL) { \
@@ -288,7 +443,6 @@ static int free_share(TOKUDB_SHARE * share) {
}
const char *ha_tokudb::table_type() const {
- extern const char *tokudb_hton_name;
return tokudb_hton_name;
}
@@ -296,7 +450,7 @@ const char *ha_tokudb::index_type(uint inx) {
return "BTREE";
}
-/*
+/*
* returns NULL terminated file extension string
*/
const char **ha_tokudb::bas_ext() const {
@@ -315,46 +469,23 @@ static inline bool is_replace_into(THD* thd) {
return thd->lex->duplicates == DUP_REPLACE;
}
-static inline bool do_ignore_flag_optimization(THD* thd, TABLE* table, bool opt_eligible) {
+static inline bool do_ignore_flag_optimization(
+ THD* thd,
+ TABLE* table,
+ bool opt_eligible) {
+
bool do_opt = false;
- if (opt_eligible) {
- if (is_replace_into(thd) || is_insert_ignore(thd)) {
- uint pk_insert_mode = get_pk_insert_mode(thd);
- if ((!table->triggers && pk_insert_mode < 2) || pk_insert_mode == 0) {
- if (mysql_bin_log.is_open() && thd->variables.binlog_format != BINLOG_FORMAT_STMT) {
- do_opt = false;
- } else {
- do_opt = true;
- }
- }
- }
+ if (opt_eligible &&
+ (is_replace_into(thd) || is_insert_ignore(thd)) &&
+ tokudb::sysvars::pk_insert_mode(thd) == 1 &&
+ !table->triggers &&
+ !(mysql_bin_log.is_open() &&
+ thd->variables.binlog_format != BINLOG_FORMAT_STMT)) {
+ do_opt = true;
}
return do_opt;
}
-static inline uint get_key_parts(const KEY *key) {
-#if (50609 <= MYSQL_VERSION_ID && MYSQL_VERSION_ID <= 50699) || \
- (50700 <= MYSQL_VERSION_ID && MYSQL_VERSION_ID <= 50799) || \
- (100009 <= MYSQL_VERSION_ID && MYSQL_VERSION_ID <= 100199)
- return key->user_defined_key_parts;
-#else
- return key->key_parts;
-#endif
-}
-
-#if TOKU_INCLUDE_EXTENDED_KEYS
-static inline uint get_ext_key_parts(const KEY *key) {
-#if (50609 <= MYSQL_VERSION_ID && MYSQL_VERSION_ID <= 50699) || \
- (50700 <= MYSQL_VERSION_ID && MYSQL_VERSION_ID <= 50799)
- return key->actual_key_parts;
-#elif defined(MARIADB_BASE_VERSION)
- return key->ext_key_parts;
-#else
-#error
-#endif
-}
-#endif
-
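The deleted helpers existed only to cope with servers where KEY lacked user_defined_key_parts; every version this code now builds against provides the field, so call sites use it directly. On MariaDB, ext_key_parts additionally counts the primary-key columns the engine appends to a secondary index, a distinction sketched below with an illustrative struct:

    // KEY (a, b) on a table with PRIMARY KEY (id):
    struct KeyModel {
        unsigned user_defined_key_parts;   // the declared columns: 2
        unsigned ext_key_parts;            // MariaDB: declared + appended PK: 3
    };

    // what the removed MariaDB branch of get_ext_key_parts() returned
    static unsigned get_ext_key_parts_model(const KeyModel* key) {
        return key->ext_key_parts;
    }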
ulonglong ha_tokudb::table_flags() const {
return int_table_flags | HA_BINLOG_ROW_CAPABLE | HA_BINLOG_STMT_CAPABLE;
}
@@ -365,11 +496,9 @@ ulonglong ha_tokudb::table_flags() const {
//
ulong ha_tokudb::index_flags(uint idx, uint part, bool all_parts) const {
TOKUDB_HANDLER_DBUG_ENTER("");
- assert(table_share);
- ulong flags = (HA_READ_NEXT | HA_READ_PREV | HA_READ_ORDER | HA_KEYREAD_ONLY | HA_READ_RANGE);
-#if defined(MARIADB_BASE_VERSION) || (50600 <= MYSQL_VERSION_ID && MYSQL_VERSION_ID <= 50699)
- flags |= HA_DO_INDEX_COND_PUSHDOWN;
-#endif
+ assert_always(table_share);
+ ulong flags = (HA_READ_NEXT | HA_READ_PREV | HA_READ_ORDER |
+ HA_KEYREAD_ONLY | HA_READ_RANGE | HA_DO_INDEX_COND_PUSHDOWN);
if (key_is_clustering(&table_share->key_info[idx])) {
flags |= HA_CLUSTERED_INDEX;
}
@@ -434,13 +563,13 @@ static int loader_poll_fun(void *extra, float progress) {
static void loader_ai_err_fun(DB *db, int i, int err, DBT *key, DBT *val, void *error_extra) {
LOADER_CONTEXT context = (LOADER_CONTEXT)error_extra;
- assert(context->ha);
+ assert_always(context->ha);
context->ha->set_loader_error(err);
}
static void loader_dup_fun(DB *db, int i, int err, DBT *key, DBT *val, void *error_extra) {
LOADER_CONTEXT context = (LOADER_CONTEXT)error_extra;
- assert(context->ha);
+ assert_always(context->ha);
context->ha->set_loader_error(err);
if (err == DB_KEYEXIST) {
context->ha->set_dup_value_for_pk(key);
@@ -620,8 +749,7 @@ static ulonglong retrieve_auto_increment(uint16 type, uint32 offset,const uchar
break;
default:
- DBUG_ASSERT(0);
- unsigned_autoinc = 0;
+ assert_unreachable();
}
if (signed_autoinc < 0) {
@@ -632,21 +760,6 @@ static ulonglong retrieve_auto_increment(uint16 type, uint32 offset,const uchar
unsigned_autoinc : (ulonglong) signed_autoinc;
}
-static inline bool
-is_null_field( TABLE* table, Field* field, const uchar* record) {
- uint null_offset;
- bool ret_val;
- if (!field->real_maybe_null()) {
- ret_val = false;
- goto exitpt;
- }
- null_offset = get_null_offset(table,field);
- ret_val = (record[null_offset] & field->null_bit) ? true: false;
-
-exitpt:
- return ret_val;
-}
-
static inline ulong field_offset(Field* field, TABLE* table) {
return((ulong) (field->ptr - table->record[0]));
}
@@ -692,29 +805,36 @@ static int filter_key_part_compare (const void* left, const void* right) {
// if key, table have proper info set. I had to verify by checking
// in the debugger.
//
-void set_key_filter(MY_BITMAP* key_filter, KEY* key, TABLE* table, bool get_offset_from_keypart) {
+void set_key_filter(
+ MY_BITMAP* key_filter,
+ KEY* key,
+ TABLE* table,
+ bool get_offset_from_keypart) {
+
FILTER_KEY_PART_INFO parts[MAX_REF_PARTS];
uint curr_skip_index = 0;
- for (uint i = 0; i < get_key_parts(key); i++) {
+ for (uint i = 0; i < key->user_defined_key_parts; i++) {
//
// horrendous hack due to bugs in mysql, basically
// we cannot always reliably get the offset from the same source
//
- parts[i].offset = get_offset_from_keypart ? key->key_part[i].offset : field_offset(key->key_part[i].field, table);
+ parts[i].offset =
+ get_offset_from_keypart ?
+ key->key_part[i].offset :
+ field_offset(key->key_part[i].field, table);
parts[i].part_index = i;
}
qsort(
parts, // start of array
- get_key_parts(key), //num elements
+ key->user_defined_key_parts, //num elements
sizeof(*parts), //size of each element
- filter_key_part_compare
- );
+ filter_key_part_compare);
for (uint i = 0; i < table->s->fields; i++) {
Field* field = table->field[i];
uint curr_field_offset = field_offset(field, table);
- if (curr_skip_index < get_key_parts(key)) {
+ if (curr_skip_index < key->user_defined_key_parts) {
uint curr_skip_offset = 0;
curr_skip_offset = parts[curr_skip_index].offset;
if (curr_skip_offset == curr_field_offset) {
@@ -830,7 +950,7 @@ static inline uchar* write_var_field(
int2store(to_tokudb_offset_ptr,offset);
break;
default:
- assert(false);
+ assert_unreachable();
break;
}
return to_tokudb_data + data_length;
@@ -850,8 +970,7 @@ static inline uint32_t get_var_data_length(
data_length = uint2korr(from_mysql);
break;
default:
- assert(false);
- break;
+ assert_unreachable();
}
return data_length;
}
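The one- and two-byte cases above encode a VARCHAR's length prefix: one byte when the column's maximum byte length is under 256, otherwise two bytes little-endian, which is exactly what int2store writes and uint2korr reads. A portable model of the decode side:

    #include <cassert>
    #include <cstdint>

    static uint32_t read_var_len(const unsigned char* p, int length_bytes) {
        switch (length_bytes) {
        case 1:
            return p[0];
        case 2:
            return (uint32_t)p[0] | ((uint32_t)p[1] << 8);   // uint2korr
        default:
            assert(!"unreachable");                          // assert_unreachable()
            return 0;
        }
    }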
@@ -894,8 +1013,7 @@ static inline void unpack_var_field(
int2store(to_mysql, from_tokudb_data_len);
break;
default:
- assert(false);
- break;
+ assert_unreachable();
}
//
// store the data
@@ -928,7 +1046,7 @@ static uchar* pack_toku_field_blob(
length = uint4korr(from_mysql);
break;
default:
- assert(false);
+ assert_unreachable();
}
if (length > 0) {
@@ -940,7 +1058,9 @@ static uchar* pack_toku_field_blob(
static int create_tokudb_trx_data_instance(tokudb_trx_data** out_trx) {
int error;
- tokudb_trx_data* trx = (tokudb_trx_data *) tokudb_my_malloc(sizeof(*trx), MYF(MY_ZEROFILL));
+ tokudb_trx_data* trx = (tokudb_trx_data *) tokudb::memory::malloc(
+ sizeof(*trx),
+ MYF(MY_ZEROFILL));
if (!trx) {
error = ENOMEM;
goto cleanup;
@@ -1011,32 +1131,27 @@ static inline int tokudb_generate_row(
void* old_ptr = dest_key->data;
void* new_ptr = NULL;
new_ptr = realloc(old_ptr, max_key_len);
- assert(new_ptr);
+ assert_always(new_ptr);
dest_key->data = new_ptr;
dest_key->ulen = max_key_len;
}
buff = (uchar *)dest_key->data;
- assert(buff != NULL && max_key_len > 0);
- }
- else {
- assert(false);
+ assert_always(buff != NULL && max_key_len > 0);
+ } else {
+ assert_unreachable();
}
- dest_key->size = pack_key_from_desc(
- buff,
- row_desc,
- desc_size,
- src_key,
- src_val
- );
- assert(dest_key->ulen >= dest_key->size);
- if (tokudb_debug & TOKUDB_DEBUG_CHECK_KEY && !max_key_len) {
+ dest_key->size = pack_key_from_desc(buff, row_desc, desc_size, src_key,
+ src_val);
+ assert_always(dest_key->ulen >= dest_key->size);
+ if (TOKUDB_UNLIKELY(TOKUDB_DEBUG_FLAGS(TOKUDB_DEBUG_CHECK_KEY)) &&
+ !max_key_len) {
max_key_len = max_key_size_from_desc(row_desc, desc_size);
max_key_len += src_key->size;
}
if (max_key_len) {
- assert(max_key_len >= dest_key->size);
+ assert_always(max_key_len >= dest_key->size);
}
row_desc += desc_size;
@@ -1045,8 +1160,7 @@ static inline int tokudb_generate_row(
if (dest_val != NULL) {
if (!is_key_clustering(row_desc, desc_size) || src_val->size == 0) {
dest_val->size = 0;
- }
- else {
+ } else {
uchar* buff = NULL;
if (dest_val->flags == 0) {
dest_val->ulen = 0;
@@ -1059,23 +1173,21 @@ static inline int tokudb_generate_row(
void* old_ptr = dest_val->data;
void* new_ptr = NULL;
new_ptr = realloc(old_ptr, src_val->size);
- assert(new_ptr);
+ assert_always(new_ptr);
dest_val->data = new_ptr;
dest_val->ulen = src_val->size;
}
buff = (uchar *)dest_val->data;
- assert(buff != NULL);
- }
- else {
- assert(false);
+ assert_always(buff != NULL);
+ } else {
+ assert_unreachable();
}
dest_val->size = pack_clustering_val_from_desc(
buff,
row_desc,
desc_size,
- src_val
- );
- assert(dest_val->ulen >= dest_val->size);
+ src_val);
+ assert_always(dest_val->ulen >= dest_val->size);
}
}
error = 0;
@@ -1143,6 +1255,7 @@ ha_tokudb::ha_tokudb(handlerton * hton, TABLE_SHARE * table_arg):handler(hton, t
read_key = false;
added_rows = 0;
deleted_rows = 0;
+ updated_rows = 0;
last_dup_key = UINT_MAX;
using_ignore = false;
using_ignore_no_key = false;
@@ -1224,41 +1337,42 @@ bool ha_tokudb::has_auto_increment_flag(uint* index) {
static int open_status_dictionary(DB** ptr, const char* name, DB_TXN* txn) {
int error;
char* newname = NULL;
- newname = (char *)tokudb_my_malloc(
- get_max_dict_name_path_length(name),
- MYF(MY_WME));
+ size_t newname_len = get_max_dict_name_path_length(name);
+ newname = (char*)tokudb::memory::malloc(newname_len, MYF(MY_WME));
if (newname == NULL) {
error = ENOMEM;
goto cleanup;
}
- make_name(newname, name, "status");
- if (tokudb_debug & TOKUDB_DEBUG_OPEN) {
- TOKUDB_TRACE("open:%s", newname);
- }
+ make_name(newname, newname_len, name, "status");
+ TOKUDB_TRACE_FOR_FLAGS(TOKUDB_DEBUG_OPEN, "open:%s", newname);
- error = tokudb::open_status(db_env, ptr, newname, txn);
+ error = tokudb::metadata::open(db_env, ptr, newname, txn);
cleanup:
- tokudb_my_free(newname);
+ tokudb::memory::free(newname);
return error;
}
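make_name() now receives the destination buffer's size alongside the pointer, turning an unchecked write into a bounded one. A sketch of the safer shape; the real helper's path format is internal, so the "%s-%s" layout here is illustrative only:

    #include <cstddef>
    #include <cstdio>

    static void make_name_model(char* out, std::size_t out_len,
                                const char* name, const char* suffix) {
        // snprintf never writes past out_len bytes, truncating instead
        std::snprintf(out, out_len, "%s-%s", name, suffix);
    }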
-int ha_tokudb::open_main_dictionary(const char* name, bool is_read_only, DB_TXN* txn) {
+int ha_tokudb::open_main_dictionary(
+ const char* name,
+ bool is_read_only,
+ DB_TXN* txn) {
+
int error;
char* newname = NULL;
+ size_t newname_len = 0;
uint open_flags = (is_read_only ? DB_RDONLY : 0) | DB_THREAD;
- assert(share->file == NULL);
- assert(share->key_file[primary_key] == NULL);
-
- newname = (char *)tokudb_my_malloc(
- get_max_dict_name_path_length(name),
- MYF(MY_WME|MY_ZEROFILL)
- );
+ assert_always(share->file == NULL);
+ assert_always(share->key_file[primary_key] == NULL);
+ newname_len = get_max_dict_name_path_length(name);
+ newname = (char*)tokudb::memory::malloc(
+ newname_len,
+ MYF(MY_WME|MY_ZEROFILL));
if (newname == NULL) {
error = ENOMEM;
goto exit;
}
- make_name(newname, name, "main");
+ make_name(newname, newname_len, name, "main");
error = db_create(&share->file, db_env, 0);
if (error) {
@@ -1266,14 +1380,24 @@ int ha_tokudb::open_main_dictionary(const char* name, bool is_read_only, DB_TXN*
}
share->key_file[primary_key] = share->file;
- error = share->file->open(share->file, txn, newname, NULL, DB_BTREE, open_flags, 0);
+ error =
+ share->file->open(
+ share->file,
+ txn,
+ newname,
+ NULL,
+ DB_BTREE,
+ open_flags,
+ 0);
if (error) {
goto exit;
}
-
- if (tokudb_debug & TOKUDB_DEBUG_OPEN) {
- TOKUDB_HANDLER_TRACE("open:%s:file=%p", newname, share->file);
- }
+
+ TOKUDB_HANDLER_TRACE_FOR_FLAGS(
+ TOKUDB_DEBUG_OPEN,
+ "open:%s:file=%p",
+ newname,
+ share->file);
error = 0;
exit:
@@ -1283,34 +1407,42 @@ exit:
share->file,
0
);
- assert(r==0);
+ assert_always(r==0);
share->file = NULL;
share->key_file[primary_key] = NULL;
}
}
- tokudb_my_free(newname);
+ tokudb::memory::free(newname);
return error;
}
//
-// Open a secondary table, the key will be a secondary index, the data will be a primary key
+// Open a secondary table, the key will be a secondary index, the data will
+// be a primary key
//
-int ha_tokudb::open_secondary_dictionary(DB** ptr, KEY* key_info, const char* name, bool is_read_only, DB_TXN* txn) {
+int ha_tokudb::open_secondary_dictionary(
+ DB** ptr,
+ KEY* key_info,
+ const char* name,
+ bool is_read_only,
+ DB_TXN* txn) {
+
int error = ENOSYS;
char dict_name[MAX_DICT_NAME_LEN];
uint open_flags = (is_read_only ? DB_RDONLY : 0) | DB_THREAD;
char* newname = NULL;
- uint newname_len = 0;
-
+ size_t newname_len = 0;
+
sprintf(dict_name, "key-%s", key_info->name);
newname_len = get_max_dict_name_path_length(name);
- newname = (char *)tokudb_my_malloc(newname_len, MYF(MY_WME|MY_ZEROFILL));
+ newname =
+ (char*)tokudb::memory::malloc(newname_len, MYF(MY_WME|MY_ZEROFILL));
if (newname == NULL) {
error = ENOMEM;
goto cleanup;
}
- make_name(newname, name, dict_name);
+ make_name(newname, newname_len, name, dict_name);
if ((error = db_create(ptr, db_env, 0))) {
@@ -1319,22 +1451,25 @@ int ha_tokudb::open_secondary_dictionary(DB** ptr, KEY* key_info, const char* na
}
- if ((error = (*ptr)->open(*ptr, txn, newname, NULL, DB_BTREE, open_flags, 0))) {
+ error = (*ptr)->open(*ptr, txn, newname, NULL, DB_BTREE, open_flags, 0);
+ if (error) {
my_errno = error;
goto cleanup;
}
- if (tokudb_debug & TOKUDB_DEBUG_OPEN) {
- TOKUDB_HANDLER_TRACE("open:%s:file=%p", newname, *ptr);
- }
+ TOKUDB_HANDLER_TRACE_FOR_FLAGS(
+ TOKUDB_DEBUG_OPEN,
+ "open:%s:file=%p",
+ newname,
+ *ptr);
cleanup:
if (error) {
if (*ptr) {
int r = (*ptr)->close(*ptr, 0);
- assert(r==0);
+ assert_always(r==0);
*ptr = NULL;
}
}
- tokudb_my_free(newname);
+ tokudb::memory::free(newname);
return error;
}
@@ -1343,11 +1478,10 @@ static int initialize_col_pack_info(KEY_AND_COL_INFO* kc_info, TABLE_SHARE* tabl
//
// set up the cp_info
//
- assert(kc_info->cp_info[keynr] == NULL);
- kc_info->cp_info[keynr] = (COL_PACK_INFO *)tokudb_my_malloc(
- table_share->fields*sizeof(COL_PACK_INFO),
- MYF(MY_WME | MY_ZEROFILL)
- );
+ assert_always(kc_info->cp_info[keynr] == NULL);
+ kc_info->cp_info[keynr] = (COL_PACK_INFO*)tokudb::memory::malloc(
+ table_share->fields * sizeof(COL_PACK_INFO),
+ MYF(MY_WME | MY_ZEROFILL));
if (kc_info->cp_info[keynr] == NULL) {
error = ENOMEM;
goto exit;
@@ -1396,12 +1530,18 @@ exit:
// reset the kc_info state at keynr
static void reset_key_and_col_info(KEY_AND_COL_INFO *kc_info, uint keynr) {
bitmap_clear_all(&kc_info->key_filters[keynr]);
- tokudb_my_free(kc_info->cp_info[keynr]);
+ tokudb::memory::free(kc_info->cp_info[keynr]);
kc_info->cp_info[keynr] = NULL;
kc_info->mcp_info[keynr] = (MULTI_COL_PACK_INFO) { 0, 0 };
}
-static int initialize_key_and_col_info(TABLE_SHARE* table_share, TABLE* table, KEY_AND_COL_INFO* kc_info, uint hidden_primary_key, uint primary_key) {
+static int initialize_key_and_col_info(
+ TABLE_SHARE* table_share,
+ TABLE* table,
+ KEY_AND_COL_INFO* kc_info,
+ uint hidden_primary_key,
+ uint primary_key) {
+
int error = 0;
uint32_t curr_blob_field_index = 0;
uint32_t max_var_bytes = 0;
@@ -1420,7 +1560,7 @@ static int initialize_key_and_col_info(TABLE_SHARE* table_share, TABLE* table, K
case toku_type_fixbinary:
case toku_type_fixstring:
pack_length = field->pack_length();
- assert(pack_length < 1<<16);
+ assert_always(pack_length < 1<<16);
kc_info->field_types[i] = KEY_AND_COL_INFO::TOKUDB_FIXED_FIELD;
kc_info->field_lengths[i] = (uint16_t)pack_length;
kc_info->length_bytes[i] = 0;
@@ -1436,11 +1576,12 @@ static int initialize_key_and_col_info(TABLE_SHARE* table_share, TABLE* table, K
case toku_type_varbinary:
kc_info->field_types[i] = KEY_AND_COL_INFO::TOKUDB_VARIABLE_FIELD;
kc_info->field_lengths[i] = 0;
- kc_info->length_bytes[i] = (uchar)((Field_varstring *)field)->length_bytes;
+ kc_info->length_bytes[i] =
+ (uchar)((Field_varstring*)field)->length_bytes;
max_var_bytes += field->field_length;
break;
default:
- assert(false);
+ assert_unreachable();
}
}
kc_info->num_blobs = curr_blob_field_index;
@@ -1452,54 +1593,54 @@ static int initialize_key_and_col_info(TABLE_SHARE* table_share, TABLE* table, K
//
if (max_var_bytes < 256) {
kc_info->num_offset_bytes = 1;
- }
- else {
+ } else {
kc_info->num_offset_bytes = 2;
}
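A worked example of the rule above: two VARCHAR(100) columns give max_var_bytes = 200 < 256, so every variable-field offset fits in one byte; a single column holding 300 bytes of variable data forces two-byte offsets.

    #include <cstdio>

    int main() {
        unsigned max_var_bytes = 100 + 100;   // two VARCHAR(100) columns
        unsigned num_offset_bytes = (max_var_bytes < 256) ? 1 : 2;
        std::printf("offset bytes: %u\n", num_offset_bytes);   // 1
        return 0;
    }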
- for (uint i = 0; i < table_share->keys + tokudb_test(hidden_primary_key); i++) {
+ for (uint i = 0;
+ i < table_share->keys + tokudb_test(hidden_primary_key);
+ i++) {
//
// do the cluster/primary key filtering calculations
//
- if (! (i==primary_key && hidden_primary_key) ){
- if ( i == primary_key ) {
+ if (!(i==primary_key && hidden_primary_key)) {
+ if (i == primary_key) {
set_key_filter(
&kc_info->key_filters[primary_key],
&table_share->key_info[primary_key],
table,
- true
- );
- }
- else {
+ true);
+ } else {
set_key_filter(
&kc_info->key_filters[i],
&table_share->key_info[i],
table,
- true
- );
+ true);
if (!hidden_primary_key) {
set_key_filter(
&kc_info->key_filters[i],
&table_share->key_info[primary_key],
table,
- true
- );
+ true);
}
}
}
if (i == primary_key || key_is_clustering(&table_share->key_info[i])) {
- error = initialize_col_pack_info(kc_info,table_share,i);
+ error = initialize_col_pack_info(kc_info, table_share, i);
if (error) {
goto exit;
}
}
-
}
exit:
return error;
}
-bool ha_tokudb::can_replace_into_be_fast(TABLE_SHARE* table_share, KEY_AND_COL_INFO* kc_info, uint pk) {
+bool ha_tokudb::can_replace_into_be_fast(
+ TABLE_SHARE* table_share,
+ KEY_AND_COL_INFO* kc_info,
+ uint pk) {
+
uint curr_num_DBs = table_share->keys + tokudb_test(hidden_primary_key);
bool ret_val;
if (curr_num_DBs == 1) {
@@ -1510,7 +1651,7 @@ bool ha_tokudb::can_replace_into_be_fast(TABLE_SHARE* table_share, KEY_AND_COL_I
for (uint curr_index = 0; curr_index < table_share->keys; curr_index++) {
if (curr_index == pk) continue;
KEY* curr_key_info = &table_share->key_info[curr_index];
- for (uint i = 0; i < get_key_parts(curr_key_info); i++) {
+ for (uint i = 0; i < curr_key_info->user_defined_key_parts; i++) {
uint16 curr_field_index = curr_key_info->key_part[i].field->field_index;
if (!bitmap_is_set(&kc_info->key_filters[curr_index],curr_field_index)) {
ret_val = false;
@@ -1544,8 +1685,6 @@ int ha_tokudb::initialize_share(const char* name, int mode) {
if (error) { goto exit; }
}
- DBUG_PRINT("info", ("share->use_count %u", share->use_count));
- share->m_initialize_count++;
error = get_status(txn);
if (error) {
@@ -1574,43 +1713,64 @@ int ha_tokudb::initialize_share(const char* name, int mode) {
goto exit;
#endif
- error = initialize_key_and_col_info(
- table_share,
- table,
- &share->kc_info,
- hidden_primary_key,
- primary_key
- );
+ error =
+ initialize_key_and_col_info(
+ table_share,
+ table,
+ &share->kc_info,
+ hidden_primary_key,
+ primary_key);
if (error) { goto exit; }
-
+
error = open_main_dictionary(name, mode == O_RDONLY, txn);
- if (error) { goto exit; }
+ if (error) {
+ goto exit;
+ }
share->has_unique_keys = false;
+ share->_keys = table_share->keys;
+ share->_max_key_parts = table_share->key_parts;
+ share->_key_descriptors =
+ (TOKUDB_SHARE::key_descriptor_t*)tokudb::memory::malloc(
+ sizeof(TOKUDB_SHARE::key_descriptor_t) * share->_keys,
+ MYF(MY_ZEROFILL));
+
/* Open other keys; these are part of the share structure */
for (uint i = 0; i < table_share->keys; i++) {
+ share->_key_descriptors[i]._parts =
+ table_share->key_info[i].user_defined_key_parts;
+ if (i == primary_key) {
+ share->_key_descriptors[i]._is_unique = true;
+ share->_key_descriptors[i]._name =
+ tokudb::memory::strdup("primary", 0);
+ } else {
+ share->_key_descriptors[i]._is_unique = false;
+ share->_key_descriptors[i]._name =
+ tokudb::memory::strdup(table_share->key_info[i].name, 0);
+ }
+
if (table_share->key_info[i].flags & HA_NOSAME) {
+ share->_key_descriptors[i]._is_unique = true;
share->has_unique_keys = true;
}
if (i != primary_key) {
- error = open_secondary_dictionary(
- &share->key_file[i],
- &table_share->key_info[i],
- name,
- mode == O_RDONLY,
- txn
- );
+ error =
+ open_secondary_dictionary(
+ &share->key_file[i],
+ &table_share->key_info[i],
+ name,
+ mode == O_RDONLY,
+ txn);
if (error) {
goto exit;
}
}
}
- share->replace_into_fast = can_replace_into_be_fast(
- table_share,
- &share->kc_info,
- primary_key
- );
-
+ share->replace_into_fast =
+ can_replace_into_be_fast(
+ table_share,
+ &share->kc_info,
+ primary_key);
+
share->pk_has_string = false;
if (!hidden_primary_key) {
//
@@ -1618,8 +1778,9 @@ int ha_tokudb::initialize_share(const char* name, int mode) {
// the "infinity byte" in keys, and for placing the DBT size in the first four bytes
//
ref_length = sizeof(uint32_t) + sizeof(uchar);
- KEY_PART_INFO *key_part = table->key_info[primary_key].key_part;
- KEY_PART_INFO *end = key_part + get_key_parts(&table->key_info[primary_key]);
+ KEY_PART_INFO* key_part = table->key_info[primary_key].key_part;
+ KEY_PART_INFO* end =
+ key_part + table->key_info[primary_key].user_defined_key_parts;
for (; key_part != end; key_part++) {
ref_length += key_part->field->max_packed_col_length(key_part->length);
TOKU_TYPE toku_type = mysql_to_toku_type(key_part->field);
@@ -1640,9 +1801,8 @@ int ha_tokudb::initialize_share(const char* name, int mode) {
// estimate_num_rows should not fail under normal conditions
//
if (error == 0) {
- share->rows = num_rows;
- }
- else {
+ share->set_row_count(num_rows, true);
+ } else {
goto exit;
}
//
@@ -1655,8 +1815,7 @@ int ha_tokudb::initialize_share(const char* name, int mode) {
if (may_table_be_empty(txn)) {
share->try_table_lock = true;
- }
- else {
+ } else {
share->try_table_lock = false;
}
@@ -1665,12 +1824,22 @@ int ha_tokudb::initialize_share(const char* name, int mode) {
init_hidden_prim_key_info(txn);
// initialize cardinality info from the status dictionary
- share->n_rec_per_key = tokudb::compute_total_key_parts(table_share);
- share->rec_per_key = (uint64_t *) tokudb_my_realloc(share->rec_per_key, share->n_rec_per_key * sizeof (uint64_t), MYF(MY_FAE + MY_ALLOW_ZERO_PTR));
- error = tokudb::get_card_from_status(share->status_block, txn, share->n_rec_per_key, share->rec_per_key);
- if (error) {
- for (uint i = 0; i < share->n_rec_per_key; i++)
- share->rec_per_key[i] = 0;
+ {
+ uint32_t rec_per_keys = tokudb::compute_total_key_parts(table_share);
+ uint64_t* rec_per_key =
+ (uint64_t*)tokudb::memory::malloc(
+ rec_per_keys * sizeof(uint64_t),
+ MYF(MY_FAE));
+ error =
+ tokudb::get_card_from_status(
+ share->status_block,
+ txn,
+ rec_per_keys,
+ rec_per_key);
+ if (error) {
+ memset(rec_per_key, 0, sizeof(ulonglong) * rec_per_keys);
+ }
+ share->init_cardinality_counts(rec_per_keys, rec_per_key);
}
error = 0;
@@ -1720,7 +1889,8 @@ int ha_tokudb::open(const char *name, int mode, uint test_if_locked) {
// the "+ 1" is for the first byte that states +/- infinity
// multiply everything by 2 to account for clustered keys having a key and primary key together
max_key_length = 2*(table_share->max_key_length + MAX_REF_PARTS * 3 + sizeof(uchar));
- alloc_ptr = tokudb_my_multi_malloc(MYF(MY_WME),
+ alloc_ptr = tokudb::memory::multi_malloc(
+ MYF(MY_WME),
&key_buff, max_key_length,
&key_buff2, max_key_length,
&key_buff3, max_key_length,
@@ -1730,81 +1900,81 @@ int ha_tokudb::open(const char *name, int mode, uint test_if_locked) {
&primary_key_buff, (hidden_primary_key ? 0 : max_key_length),
&fixed_cols_for_query, table_share->fields*sizeof(uint32_t),
&var_cols_for_query, table_share->fields*sizeof(uint32_t),
- NullS
- );
+ NullS);
if (alloc_ptr == NULL) {
ret_val = 1;
goto exit;
}
- size_range_query_buff = get_tokudb_read_buf_size(thd);
- range_query_buff = (uchar *)tokudb_my_malloc(size_range_query_buff, MYF(MY_WME));
+ size_range_query_buff = tokudb::sysvars::read_buf_size(thd);
+ range_query_buff =
+ (uchar*)tokudb::memory::malloc(size_range_query_buff, MYF(MY_WME));
if (range_query_buff == NULL) {
ret_val = 1;
goto exit;
}
- alloced_rec_buff_length = table_share->rec_buff_length + table_share->fields;
- rec_buff = (uchar *) tokudb_my_malloc(alloced_rec_buff_length, MYF(MY_WME));
+ alloced_rec_buff_length = table_share->rec_buff_length +
+ table_share->fields;
+ rec_buff = (uchar *) tokudb::memory::malloc(
+ alloced_rec_buff_length,
+ MYF(MY_WME));
if (rec_buff == NULL) {
ret_val = 1;
goto exit;
}
alloced_update_rec_buff_length = alloced_rec_buff_length;
- rec_update_buff = (uchar *) tokudb_my_malloc(alloced_update_rec_buff_length, MYF(MY_WME));
+ rec_update_buff = (uchar*)tokudb::memory::malloc(
+ alloced_update_rec_buff_length,
+ MYF(MY_WME));
if (rec_update_buff == NULL) {
ret_val = 1;
goto exit;
}
// lookup or create share
- tokudb_pthread_mutex_lock(&tokudb_mutex);
- share = get_share(name, table_share);
- assert(share);
+ share = TOKUDB_SHARE::get_share(name, table_share, &lock, true);
+ assert_always(share);
- thr_lock_data_init(&share->lock, &lock, NULL);
-
- tokudb_pthread_mutex_lock(&share->mutex);
- tokudb_pthread_mutex_unlock(&tokudb_mutex);
- share->use_count++;
- while (share->m_state == TOKUDB_SHARE::OPENING || share->m_state == TOKUDB_SHARE::CLOSING) {
- tokudb_pthread_cond_wait(&share->m_openclose_cond, &share->mutex);
- }
- if (share->m_state == TOKUDB_SHARE::CLOSED) {
- share->m_state = TOKUDB_SHARE::OPENING;
- tokudb_pthread_mutex_unlock(&share->mutex);
+ if (share->state() != TOKUDB_SHARE::OPENED) {
+ // not OPENED means we're responsible for the transition to OPENED, ERROR, or CLOSED
ret_val = allocate_key_and_col_info(table_share, &share->kc_info);
if (ret_val == 0) {
ret_val = initialize_share(name, mode);
}
- tokudb_pthread_mutex_lock(&share->mutex);
if (ret_val == 0) {
- share->m_state = TOKUDB_SHARE::OPENED;
+ share->set_state(TOKUDB_SHARE::OPENED);
} else {
- share->m_state = TOKUDB_SHARE::ERROR;
- share->m_error = ret_val;
+ free_key_and_col_info(&share->kc_info);
+ share->set_state(TOKUDB_SHARE::ERROR);
}
- tokudb_pthread_cond_broadcast(&share->m_openclose_cond);
+ share->unlock();
+ } else {
+ // got an already OPENED instance
+ share->unlock();
}
- if (share->m_state == TOKUDB_SHARE::ERROR) {
- ret_val = share->m_error;
- tokudb_pthread_mutex_unlock(&share->mutex);
- free_share(share);
+
+ if (share->state() == TOKUDB_SHARE::ERROR) {
+ share->release();
goto exit;
- } else {
- assert(share->m_state == TOKUDB_SHARE::OPENED);
- tokudb_pthread_mutex_unlock(&share->mutex);
}
+ assert_always(share->state() == TOKUDB_SHARE::OPENED);
+
ref_length = share->ref_length; // If second open
-
- if (tokudb_debug & TOKUDB_DEBUG_OPEN) {
- TOKUDB_HANDLER_TRACE("tokudbopen:%p:share=%p:file=%p:table=%p:table->s=%p:%d",
- this, share, share->file, table, table->s, share->use_count);
- }
+
+ TOKUDB_HANDLER_TRACE_FOR_FLAGS(
+ TOKUDB_DEBUG_OPEN,
+ "tokudbopen:%p:share=%p:file=%p:table=%p:table->s=%p:%d",
+ this,
+ share,
+ share->file,
+ table,
+ table->s,
+ share->use_count());
key_read = false;
stats.block_size = 1<<20; // QQQ Tokudb DB block size
@@ -1813,13 +1983,13 @@ int ha_tokudb::open(const char *name, int mode, uint test_if_locked) {
exit:
if (ret_val) {
- tokudb_my_free(range_query_buff);
+ tokudb::memory::free(range_query_buff);
range_query_buff = NULL;
- tokudb_my_free(alloc_ptr);
+ tokudb::memory::free(alloc_ptr);
alloc_ptr = NULL;
- tokudb_my_free(rec_buff);
+ tokudb::memory::free(rec_buff);
rec_buff = NULL;
- tokudb_my_free(rec_update_buff);
+ tokudb::memory::free(rec_update_buff);
rec_update_buff = NULL;
if (error) {
@@ -1982,7 +2152,7 @@ int ha_tokudb::write_frm_data(DB* db, DB_TXN* txn, const char* frm_name) {
error = 0;
cleanup:
- tokudb_my_free(frm_data);
+ tokudb::memory::free(frm_data);
TOKUDB_HANDLER_DBUG_RETURN(error);
}
@@ -1993,8 +2163,8 @@ int ha_tokudb::remove_frm_data(DB *db, DB_TXN *txn) {
static int smart_dbt_callback_verify_frm (DBT const *key, DBT const *row, void *context) {
DBT* stored_frm = (DBT *)context;
stored_frm->size = row->size;
- stored_frm->data = (uchar *)tokudb_my_malloc(row->size, MYF(MY_WME));
- assert(stored_frm->data);
+ stored_frm->data = (uchar *)tokudb::memory::malloc(row->size, MYF(MY_WME));
+ assert_always(stored_frm->data);
memcpy(stored_frm->data, row->data, row->size);
return 0;
}
@@ -2046,8 +2216,8 @@ int ha_tokudb::verify_frm_data(const char* frm_name, DB_TXN* txn) {
error = 0;
cleanup:
- tokudb_my_free(mysql_frm_data);
- tokudb_my_free(stored_frm.data);
+ tokudb::memory::free(mysql_frm_data);
+ tokudb::memory::free(stored_frm.data);
TOKUDB_HANDLER_DBUG_RETURN(error);
}
@@ -2083,7 +2253,7 @@ int ha_tokudb::write_auto_inc_create(DB* db, ulonglong val, DB_TXN* txn){
//
// Closes a handle to a table.
//
-int ha_tokudb::close(void) {
+int ha_tokudb::close() {
TOKUDB_HANDLER_DBUG_ENTER("");
int r = __close();
TOKUDB_HANDLER_DBUG_RETURN(r);
@@ -2091,13 +2261,12 @@ int ha_tokudb::close(void) {
int ha_tokudb::__close() {
TOKUDB_HANDLER_DBUG_ENTER("");
- if (tokudb_debug & TOKUDB_DEBUG_OPEN)
- TOKUDB_HANDLER_TRACE("close:%p", this);
- tokudb_my_free(rec_buff);
- tokudb_my_free(rec_update_buff);
- tokudb_my_free(blob_buff);
- tokudb_my_free(alloc_ptr);
- tokudb_my_free(range_query_buff);
+ TOKUDB_HANDLER_TRACE_FOR_FLAGS(TOKUDB_DEBUG_OPEN, "close:%p", this);
+ tokudb::memory::free(rec_buff);
+ tokudb::memory::free(rec_update_buff);
+ tokudb::memory::free(blob_buff);
+ tokudb::memory::free(alloc_ptr);
+ tokudb::memory::free(range_query_buff);
for (uint32_t i = 0; i < sizeof(mult_key_dbt_array)/sizeof(mult_key_dbt_array[0]); i++) {
toku_dbt_array_destroy(&mult_key_dbt_array[i]);
}
@@ -2108,7 +2277,7 @@ int ha_tokudb::__close() {
rec_update_buff = NULL;
alloc_ptr = NULL;
ha_tokudb::reset();
- int retval = free_share(share);
+ int retval = share->release();
TOKUDB_HANDLER_DBUG_RETURN(retval);
}
@@ -2120,8 +2289,11 @@ int ha_tokudb::__close() {
//
bool ha_tokudb::fix_rec_buff_for_blob(ulong length) {
if (!rec_buff || (length > alloced_rec_buff_length)) {
- uchar *newptr;
- if (!(newptr = (uchar *) tokudb_my_realloc((void *) rec_buff, length, MYF(MY_ALLOW_ZERO_PTR))))
+ uchar* newptr = (uchar*)tokudb::memory::realloc(
+ (void*)rec_buff,
+ length,
+ MYF(MY_ALLOW_ZERO_PTR));
+ if (!newptr)
return 1;
rec_buff = newptr;
alloced_rec_buff_length = length;
@@ -2137,8 +2309,11 @@ bool ha_tokudb::fix_rec_buff_for_blob(ulong length) {
//
bool ha_tokudb::fix_rec_update_buff_for_blob(ulong length) {
if (!rec_update_buff || (length > alloced_update_rec_buff_length)) {
- uchar *newptr;
- if (!(newptr = (uchar *) tokudb_my_realloc((void *) rec_update_buff, length, MYF(MY_ALLOW_ZERO_PTR))))
+ uchar* newptr = (uchar*)tokudb::memory::realloc(
+ (void*)rec_update_buff,
+ length,
+ MYF(MY_ALLOW_ZERO_PTR));
+ if (!newptr)
return 1;
rec_update_buff= newptr;
alloced_update_rec_buff_length = length;
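Both fix_*_buff_for_blob helpers follow the same grow-only pattern: reallocate only when the request exceeds the current capacity, and never shrink. A stand-alone model using std::realloc (MY_ALLOW_ZERO_PTR only permits a NULL input pointer, which std::realloc already allows):

    #include <cstdlib>

    static unsigned char* buf = nullptr;
    static unsigned long buf_len = 0;

    // returns true on allocation failure, matching the handler's convention
    static bool fix_buff_model(unsigned long length) {
        if (buf == nullptr || length > buf_len) {
            void* p = std::realloc(buf, length);
            if (p == nullptr)
                return true;
            buf = static_cast<unsigned char*>(p);
            buf_len = length;
        }
        return false;
    }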
@@ -2273,9 +2448,11 @@ int ha_tokudb::unpack_blobs(
//
// assert that num_bytes > 0 implies share->kc_info.num_blobs > 0
//
- assert( !((share->kc_info.num_blobs == 0) && (num_bytes > 0)) );
+ assert_always( !((share->kc_info.num_blobs == 0) && (num_bytes > 0)) );
if (num_bytes > num_blob_bytes) {
- ptr = (uchar *)tokudb_my_realloc((void *)blob_buff, num_bytes, MYF(MY_ALLOW_ZERO_PTR));
+ ptr = (uchar*)tokudb::memory::realloc(
+ (void*)blob_buff, num_bytes,
+ MYF(MY_ALLOW_ZERO_PTR));
if (ptr == NULL) {
error = ENOMEM;
goto exit;
@@ -2390,8 +2567,7 @@ int ha_tokudb::unpack_row(
data_end_offset = uint2korr(var_field_offset_ptr);
break;
default:
- assert(false);
- break;
+ assert_unreachable();
}
unpack_var_field(
record + field_offset(field, table),
@@ -2488,13 +2664,13 @@ exit:
}
uint32_t ha_tokudb::place_key_into_mysql_buff(
- KEY* key_info,
- uchar * record,
- uchar* data
- )
-{
- KEY_PART_INFO *key_part = key_info->key_part, *end = key_part + get_key_parts(key_info);
- uchar *pos = data;
+ KEY* key_info,
+ uchar* record,
+ uchar* data) {
+
+ KEY_PART_INFO* key_part = key_info->key_part;
+ KEY_PART_INFO* end = key_part + key_info->user_defined_key_parts;
+ uchar* pos = data;
for (; key_part != end; key_part++) {
if (key_part->field->null_bit) {
@@ -2513,7 +2689,7 @@ uint32_t ha_tokudb::place_key_into_mysql_buff(
//
// HOPEFULLY TEMPORARY
//
- assert(table->s->db_low_byte_first);
+ assert_always(table->s->db_low_byte_first);
#endif
pos = unpack_toku_key_field(
record + field_offset(key_part->field, table),
@@ -2554,15 +2730,14 @@ void ha_tokudb::unpack_key(uchar * record, DBT const *key, uint index) {
}
uint32_t ha_tokudb::place_key_into_dbt_buff(
- KEY* key_info,
- uchar * buff,
- const uchar * record,
- bool* has_null,
- int key_length
- )
-{
- KEY_PART_INFO *key_part = key_info->key_part;
- KEY_PART_INFO *end = key_part + get_key_parts(key_info);
+ KEY* key_info,
+ uchar* buff,
+ const uchar* record,
+ bool* has_null,
+ int key_length) {
+
+ KEY_PART_INFO* key_part = key_info->key_part;
+ KEY_PART_INFO* end = key_part + key_info->user_defined_key_parts;
uchar* curr_buff = buff;
*has_null = false;
for (; key_part != end && key_length > 0; key_part++) {
@@ -2585,7 +2760,7 @@ uint32_t ha_tokudb::place_key_into_dbt_buff(
//
// HOPEFULLY TEMPORARY
//
- assert(table->s->db_low_byte_first);
+ assert_always(table->s->db_low_byte_first);
#endif
//
// accessing field_offset(key_part->field) instead off key_part->offset
@@ -2742,25 +2917,29 @@ DBT* ha_tokudb::create_dbt_key_for_lookup(
// Returns:
// the parameter key
//
-DBT *ha_tokudb::pack_key(
- DBT * key,
- uint keynr,
- uchar * buff,
- const uchar * key_ptr,
- uint key_length,
- int8_t inf_byte
- )
-{
- TOKUDB_HANDLER_DBUG_ENTER("key %p %u:%2.2x inf=%d", key_ptr, key_length, key_length > 0 ? key_ptr[0] : 0, inf_byte);
+DBT* ha_tokudb::pack_key(
+ DBT* key,
+ uint keynr,
+ uchar* buff,
+ const uchar* key_ptr,
+ uint key_length,
+ int8_t inf_byte) {
+
+ TOKUDB_HANDLER_DBUG_ENTER(
+ "key %p %u:%2.2x inf=%d",
+ key_ptr,
+ key_length,
+ key_length > 0 ? key_ptr[0] : 0,
+ inf_byte);
#if TOKU_INCLUDE_EXTENDED_KEYS
if (keynr != primary_key && !tokudb_test(hidden_primary_key)) {
DBUG_RETURN(pack_ext_key(key, keynr, buff, key_ptr, key_length, inf_byte));
}
#endif
- KEY *key_info = &table->key_info[keynr];
- KEY_PART_INFO *key_part = key_info->key_part;
- KEY_PART_INFO *end = key_part + get_key_parts(key_info);
- my_bitmap_map *old_map = dbug_tmp_use_all_columns(table, table->write_set);
+ KEY* key_info = &table->key_info[keynr];
+ KEY_PART_INFO* key_part = key_info->key_part;
+ KEY_PART_INFO* end = key_part + key_info->user_defined_key_parts;
+ my_bitmap_map* old_map = dbug_tmp_use_all_columns(table, table->write_set);
memset((void *) key, 0, sizeof(*key));
key->data = buff;
@@ -2782,7 +2961,7 @@ DBT *ha_tokudb::pack_key(
offset = 1; // Data is at key_ptr+1
}
#if !defined(MARIADB_BASE_VERSION)
- assert(table->s->db_low_byte_first);
+ assert_always(table->s->db_low_byte_first);
#endif
buff = pack_key_toku_key_field(
buff,
@@ -2802,31 +2981,30 @@ DBT *ha_tokudb::pack_key(
}
#if TOKU_INCLUDE_EXTENDED_KEYS
-DBT *ha_tokudb::pack_ext_key(
- DBT * key,
- uint keynr,
- uchar * buff,
- const uchar * key_ptr,
- uint key_length,
- int8_t inf_byte
- )
-{
+DBT* ha_tokudb::pack_ext_key(
+ DBT* key,
+ uint keynr,
+ uchar* buff,
+ const uchar* key_ptr,
+ uint key_length,
+ int8_t inf_byte) {
+
TOKUDB_HANDLER_DBUG_ENTER("");
// build a list of PK parts that are in the SK. we will use this list to build the
// extended key if necessary.
- KEY *pk_key_info = &table->key_info[primary_key];
- uint pk_parts = get_key_parts(pk_key_info);
+ KEY* pk_key_info = &table->key_info[primary_key];
+ uint pk_parts = pk_key_info->user_defined_key_parts;
uint pk_next = 0;
struct {
const uchar *key_ptr;
KEY_PART_INFO *key_part;
} pk_info[pk_parts];
- KEY *key_info = &table->key_info[keynr];
- KEY_PART_INFO *key_part = key_info->key_part;
- KEY_PART_INFO *end = key_part + get_key_parts(key_info);
- my_bitmap_map *old_map = dbug_tmp_use_all_columns(table, table->write_set);
+ KEY* key_info = &table->key_info[keynr];
+ KEY_PART_INFO* key_part = key_info->key_part;
+ KEY_PART_INFO* end = key_part + key_info->user_defined_key_parts;
+ my_bitmap_map* old_map = dbug_tmp_use_all_columns(table, table->write_set);
memset((void *) key, 0, sizeof(*key));
key->data = buff;
@@ -2838,7 +3016,7 @@ DBT *ha_tokudb::pack_ext_key(
for (; key_part != end && (int) key_length > 0; key_part++) {
// if the SK part is part of the PK, then append it to the list.
if (key_part->field->part_of_key.is_set(primary_key)) {
- assert(pk_next < pk_parts);
+ assert_always(pk_next < pk_parts);
pk_info[pk_next].key_ptr = key_ptr;
pk_info[pk_next].key_part = key_part;
pk_next++;
@@ -2855,7 +3033,7 @@ DBT *ha_tokudb::pack_ext_key(
offset = 1; // Data is at key_ptr+1
}
#if !defined(MARIADB_BASE_VERSION)
- assert(table->s->db_low_byte_first);
+ assert_always(table->s->db_low_byte_first);
#endif
buff = pack_key_toku_key_field(
buff,
@@ -2869,22 +3047,33 @@ DBT *ha_tokudb::pack_ext_key(
}
if (key_length > 0) {
- assert(key_part == end);
+ assert_always(key_part == end);
end = key_info->key_part + get_ext_key_parts(key_info);
// pack PK in order of PK key parts
- for (uint pk_index = 0; key_part != end && (int) key_length > 0 && pk_index < pk_parts; pk_index++) {
+ for (uint pk_index = 0;
+ key_part != end && (int) key_length > 0 && pk_index < pk_parts;
+ pk_index++) {
uint i;
for (i = 0; i < pk_next; i++) {
- if (pk_info[i].key_part->fieldnr == pk_key_info->key_part[pk_index].fieldnr)
+ if (pk_info[i].key_part->fieldnr ==
+ pk_key_info->key_part[pk_index].fieldnr)
break;
}
if (i < pk_next) {
const uchar *this_key_ptr = pk_info[i].key_ptr;
KEY_PART_INFO *this_key_part = pk_info[i].key_part;
- buff = pack_key_toku_key_field(buff, (uchar *) this_key_ptr, this_key_part->field, this_key_part->length);
+ buff = pack_key_toku_key_field(
+ buff,
+ (uchar*)this_key_ptr,
+ this_key_part->field,
+ this_key_part->length);
} else {
- buff = pack_key_toku_key_field(buff, (uchar *) key_ptr, key_part->field, key_part->length);
+ buff = pack_key_toku_key_field(
+ buff,
+ (uchar*)key_ptr,
+ key_part->field,
+ key_part->length);
key_ptr += key_part->store_length;
key_length -= key_part->store_length;
key_part++;
@@ -2907,18 +3096,22 @@ void ha_tokudb::init_hidden_prim_key_info(DB_TXN *txn) {
if (!(share->status & STATUS_PRIMARY_KEY_INIT)) {
int error = 0;
DBC* c = NULL;
- error = share->key_file[primary_key]->cursor(share->key_file[primary_key], txn, &c, 0);
- assert(error == 0);
+ error = share->key_file[primary_key]->cursor(
+ share->key_file[primary_key],
+ txn,
+ &c,
+ 0);
+ assert_always(error == 0);
DBT key,val;
memset(&key, 0, sizeof(key));
memset(&val, 0, sizeof(val));
error = c->c_get(c, &key, &val, DB_LAST);
if (error == 0) {
- assert(key.size == TOKUDB_HIDDEN_PRIMARY_KEY_LENGTH);
+ assert_always(key.size == TOKUDB_HIDDEN_PRIMARY_KEY_LENGTH);
share->auto_ident = hpk_char_to_num((uchar *)key.data);
}
error = c->c_close(c);
- assert(error == 0);
+ assert_always(error == 0);
share->status |= STATUS_PRIMARY_KEY_INIT;
}
TOKUDB_HANDLER_DBUG_VOID_RETURN;
@@ -2939,11 +3132,11 @@ int ha_tokudb::get_status(DB_TXN* txn) {
// open status.tokudb
//
if (!share->status_block) {
- error = open_status_dictionary(
- &share->status_block,
- share->table_name,
- txn
- );
+ error =
+ open_status_dictionary(
+ &share->status_block,
+ share->full_table_name(),
+ txn);
if (error) {
goto cleanup;
}
@@ -2958,7 +3151,7 @@ int ha_tokudb::get_status(DB_TXN* txn) {
key.size = sizeof(curr_key);
value.flags = DB_DBT_USERMEM;
- assert(share->status_block);
+ assert_always(share->status_block);
//
// get version
//
@@ -3048,7 +3241,7 @@ cleanup:
*/
ha_rows ha_tokudb::estimate_rows_upper_bound() {
TOKUDB_HANDLER_DBUG_ENTER("");
- DBUG_RETURN(share->rows + HA_TOKUDB_EXTRA_ROWS);
+ DBUG_RETURN(share->row_count() + HA_TOKUDB_EXTRA_ROWS);
}
//
@@ -3112,8 +3305,8 @@ bool ha_tokudb::may_table_be_empty(DB_TXN *txn) {
DBC* tmp_cursor = NULL;
DB_TXN* tmp_txn = NULL;
- const int empty_scan = THDVAR(ha_thd(), empty_scan);
- if (empty_scan == TOKUDB_EMPTY_SCAN_DISABLED)
+ const int empty_scan = tokudb::sysvars::empty_scan(ha_thd());
+ if (empty_scan == tokudb::sysvars::TOKUDB_EMPTY_SCAN_DISABLED)
goto cleanup;
if (txn == NULL) {
@@ -3128,7 +3321,7 @@ bool ha_tokudb::may_table_be_empty(DB_TXN *txn) {
if (error)
goto cleanup;
tmp_cursor->c_set_check_interrupt_callback(tmp_cursor, tokudb_killed_thd_callback, ha_thd());
- if (empty_scan == TOKUDB_EMPTY_SCAN_LR)
+ if (empty_scan == tokudb::sysvars::TOKUDB_EMPTY_SCAN_LR)
error = tmp_cursor->c_getf_next(tmp_cursor, 0, smart_dbt_do_nothing, NULL);
else
error = tmp_cursor->c_getf_prev(tmp_cursor, 0, smart_dbt_do_nothing, NULL);
@@ -3142,7 +3335,7 @@ bool ha_tokudb::may_table_be_empty(DB_TXN *txn) {
cleanup:
if (tmp_cursor) {
int r = tmp_cursor->c_close(tmp_cursor);
- assert(r == 0);
+ assert_always(r == 0);
tmp_cursor = NULL;
}
if (tmp_txn) {
@@ -3165,23 +3358,24 @@ void ha_tokudb::start_bulk_insert(ha_rows rows) {
ai_metadata_update_required = false;
abort_loader = false;
- rw_rdlock(&share->num_DBs_lock);
+ share->_num_DBs_lock.lock_read();
uint curr_num_DBs = table->s->keys + tokudb_test(hidden_primary_key);
num_DBs_locked_in_bulk = true;
lock_count = 0;
if ((rows == 0 || rows > 1) && share->try_table_lock) {
- if (get_prelock_empty(thd) && may_table_be_empty(transaction) && transaction != NULL) {
+ if (tokudb::sysvars::prelock_empty(thd) &&
+ may_table_be_empty(transaction) &&
+ transaction != NULL) {
if (using_ignore || is_insert_ignore(thd) || thd->lex->duplicates != DUP_ERROR
|| table->s->next_number_key_offset) {
acquire_table_lock(transaction, lock_write);
- }
- else {
+ } else {
mult_dbt_flags[primary_key] = 0;
if (!thd_test_options(thd, OPTION_RELAXED_UNIQUE_CHECKS) && !hidden_primary_key) {
mult_put_flags[primary_key] = DB_NOOVERWRITE;
}
- uint32_t loader_flags = (get_load_save_space(thd)) ?
+ uint32_t loader_flags = (tokudb::sysvars::load_save_space(thd)) ?
LOADER_COMPRESS_INTERMEDIATES : 0;
int error = db_env->create_loader(
@@ -3196,7 +3390,7 @@ void ha_tokudb::start_bulk_insert(ha_rows rows) {
loader_flags
);
if (error) {
- assert(loader == NULL);
+ assert_always(loader == NULL);
goto exit_try_table_lock;
}
@@ -3204,18 +3398,18 @@ void ha_tokudb::start_bulk_insert(ha_rows rows) {
lc.ha = this;
error = loader->set_poll_function(loader, loader_poll_fun, &lc);
- assert(!error);
+ assert_always(!error);
error = loader->set_error_callback(loader, loader_dup_fun, &lc);
- assert(!error);
+ assert_always(!error);
trx->stmt_progress.using_loader = true;
}
}
exit_try_table_lock:
- tokudb_pthread_mutex_lock(&share->mutex);
+ share->lock();
share->try_table_lock = false;
- tokudb_pthread_mutex_unlock(&share->mutex);
+ share->unlock();
}
TOKUDB_HANDLER_DBUG_VOID_RETURN;
}
@@ -3232,9 +3426,9 @@ int ha_tokudb::end_bulk_insert(bool abort) {
tokudb_trx_data* trx = (tokudb_trx_data *) thd_get_ha_data(thd, tokudb_hton);
bool using_loader = (loader != NULL);
if (ai_metadata_update_required) {
- tokudb_pthread_mutex_lock(&share->mutex);
+ share->lock();
error = update_max_auto_inc(share->status_block, share->last_auto_increment);
- tokudb_pthread_mutex_unlock(&share->mutex);
+ share->unlock();
if (error) { goto cleanup; }
}
delay_updating_ai_metadata = false;
@@ -3285,7 +3479,7 @@ int ha_tokudb::end_bulk_insert(bool abort) {
cleanup:
if (num_DBs_locked_in_bulk) {
- rw_unlock(&share->num_DBs_lock);
+ share->_num_DBs_lock.unlock();
}
num_DBs_locked_in_bulk = false;
lock_count = 0;
@@ -3380,10 +3574,10 @@ int ha_tokudb::is_index_unique(bool* is_unique, DB_TXN* txn, DB* db, KEY* key_in
cnt++;
if ((cnt % 10000) == 0) {
sprintf(
- status_msg,
- "Verifying index uniqueness: Checked %llu of %llu rows in key-%s.",
- (long long unsigned) cnt,
- share->rows,
+ status_msg,
+ "Verifying index uniqueness: Checked %llu of %llu rows in key-%s.",
+ (long long unsigned) cnt,
+ share->row_count(),
key_info->name);
thd_proc_info(thd, status_msg);
if (thd_killed(thd)) {
@@ -3467,7 +3661,7 @@ int ha_tokudb::is_val_unique(bool* is_unique, uchar* record, KEY* key_info, uint
cleanup:
if (tmp_cursor) {
int r = tmp_cursor->c_close(tmp_cursor);
- assert(r==0);
+ assert_always(r==0);
tmp_cursor = NULL;
}
return error;
@@ -3475,21 +3669,25 @@ cleanup:
static void maybe_do_unique_checks_delay(THD *thd) {
if (thd->slave_thread) {
- uint64_t delay_ms = THDVAR(thd, rpl_unique_checks_delay);
+ uint64_t delay_ms = tokudb::sysvars::rpl_unique_checks_delay(thd);
if (delay_ms)
usleep(delay_ms * 1000);
}
}
static bool need_read_only(THD *thd) {
- return opt_readonly || !THDVAR(thd, rpl_check_readonly);
-}
+ return opt_readonly || !tokudb::sysvars::rpl_check_readonly(thd);
+}
static bool do_unique_checks(THD *thd, bool do_rpl_event) {
- if (do_rpl_event && thd->slave_thread && need_read_only(thd) && !THDVAR(thd, rpl_unique_checks))
+ if (do_rpl_event &&
+ thd->slave_thread &&
+ need_read_only(thd) &&
+ !tokudb::sysvars::rpl_unique_checks(thd)) {
return false;
- else
+ } else {
return !thd_test_options(thd, OPTION_RELAXED_UNIQUE_CHECKS);
+ }
}
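Taken together, the two helpers above let a replica skip unique checks only when every condition lines up: a replication event, on a slave thread, with need_read_only() holding (the server is read-only, or tokudb_rpl_check_readonly is off) and tokudb_rpl_unique_checks off; everything else falls back to the session's unique_checks setting. A small model with the decision inlined:

    #include <cstdio>

    static bool do_unique_checks_model(bool rpl_event, bool slave_thread,
                                       bool need_read_only, bool rpl_unique_checks,
                                       bool relaxed_unique_checks) {
        if (rpl_event && slave_thread && need_read_only && !rpl_unique_checks)
            return false;                      // replica fast path: skip checks
        return !relaxed_unique_checks;         // honor SET unique_checks=0 otherwise
    }

    int main() {
        // replica applying a row event with checks disabled -> skipped (0)
        std::printf("%d\n", do_unique_checks_model(true, true, true, false, false));
        // ordinary client session -> checks enforced (1)
        std::printf("%d\n", do_unique_checks_model(false, false, false, true, false));
        return 0;
    }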
int ha_tokudb::do_uniqueness_checks(uchar* record, DB_TXN* txn, THD* thd) {
@@ -3552,10 +3750,10 @@ void ha_tokudb::test_row_packing(uchar* record, DBT* pk_key, DBT* pk_val) {
//
//use for testing the packing of keys
//
- tmp_pk_key_data = (uchar *)tokudb_my_malloc(pk_key->size, MYF(MY_WME));
- assert(tmp_pk_key_data);
- tmp_pk_val_data = (uchar *)tokudb_my_malloc(pk_val->size, MYF(MY_WME));
- assert(tmp_pk_val_data);
+ tmp_pk_key_data = (uchar*)tokudb::memory::malloc(pk_key->size, MYF(MY_WME));
+ assert_always(tmp_pk_key_data);
+ tmp_pk_val_data = (uchar*)tokudb::memory::malloc(pk_val->size, MYF(MY_WME));
+ assert_always(tmp_pk_val_data);
memcpy(tmp_pk_key_data, pk_key->data, pk_key->size);
memcpy(tmp_pk_val_data, pk_val->data, pk_val->size);
tmp_pk_key.data = tmp_pk_key_data;
@@ -3588,19 +3786,21 @@ void ha_tokudb::test_row_packing(uchar* record, DBT* pk_key, DBT* pk_val) {
&tmp_pk_key,
&tmp_pk_val
);
- assert(tmp_num_bytes == key.size);
+ assert_always(tmp_num_bytes == key.size);
cmp = memcmp(key_buff3,key_buff2,tmp_num_bytes);
- assert(cmp == 0);
+ assert_always(cmp == 0);
//
// test key packing of clustering keys
//
if (key_is_clustering(&table->key_info[keynr])) {
error = pack_row(&row, (const uchar *) record, keynr);
- assert(error == 0);
+ assert_always(error == 0);
uchar* tmp_buff = NULL;
- tmp_buff = (uchar *)tokudb_my_malloc(alloced_rec_buff_length,MYF(MY_WME));
- assert(tmp_buff);
+ tmp_buff = (uchar*)tokudb::memory::malloc(
+ alloced_rec_buff_length,
+ MYF(MY_WME));
+ assert_always(tmp_buff);
row_desc = (uchar *)share->key_file[keynr]->descriptor->dbt.data;
row_desc += (*(uint32_t *)row_desc);
row_desc += (*(uint32_t *)row_desc);
@@ -3612,10 +3812,10 @@ void ha_tokudb::test_row_packing(uchar* record, DBT* pk_key, DBT* pk_val) {
desc_size,
&tmp_pk_val
);
- assert(tmp_num_bytes == row.size);
+ assert_always(tmp_num_bytes == row.size);
cmp = memcmp(tmp_buff,rec_buff,tmp_num_bytes);
- assert(cmp == 0);
- tokudb_my_free(tmp_buff);
+ assert_always(cmp == 0);
+ tokudb::memory::free(tmp_buff);
}
}
@@ -3623,12 +3823,12 @@ void ha_tokudb::test_row_packing(uchar* record, DBT* pk_key, DBT* pk_val) {
// copy stuff back out
//
error = pack_row(pk_val, (const uchar *) record, primary_key);
- assert(pk_val->size == tmp_pk_val.size);
+ assert_always(pk_val->size == tmp_pk_val.size);
cmp = memcmp(pk_val->data, tmp_pk_val_data, pk_val->size);
- assert( cmp == 0);
+ assert_always( cmp == 0);
- tokudb_my_free(tmp_pk_key_data);
- tokudb_my_free(tmp_pk_val_data);
+ tokudb::memory::free(tmp_pk_key_data);
+ tokudb::memory::free(tmp_pk_val_data);
}
// set the put flags for the main dictionary
@@ -3675,7 +3875,7 @@ void ha_tokudb::set_main_dict_put_flags(THD* thd, bool opt_eligible, uint32_t* p
int ha_tokudb::insert_row_to_main_dictionary(uchar* record, DBT* pk_key, DBT* pk_val, DB_TXN* txn) {
int error = 0;
uint curr_num_DBs = table->s->keys + tokudb_test(hidden_primary_key);
- assert(curr_num_DBs == 1);
+ assert_always(curr_num_DBs == 1);
uint32_t put_flags = mult_put_flags[primary_key];
THD *thd = ha_thd();
@@ -3807,33 +4007,36 @@ int ha_tokudb::write_row(uchar * record) {
// of the auto inc field.
//
if (share->has_auto_inc && record == table->record[0]) {
- tokudb_pthread_mutex_lock(&share->mutex);
+ share->lock();
ulonglong curr_auto_inc = retrieve_auto_increment(
- table->field[share->ai_field_index]->key_type(), field_offset(table->field[share->ai_field_index], table), record);
+ table->field[share->ai_field_index]->key_type(),
+ field_offset(table->field[share->ai_field_index], table),
+ record);
if (curr_auto_inc > share->last_auto_increment) {
share->last_auto_increment = curr_auto_inc;
if (delay_updating_ai_metadata) {
ai_metadata_update_required = true;
- }
- else {
- update_max_auto_inc(share->status_block, share->last_auto_increment);
+ } else {
+ update_max_auto_inc(
+ share->status_block,
+ share->last_auto_increment);
}
}
- tokudb_pthread_mutex_unlock(&share->mutex);
+ share->unlock();
}
//
// grab reader lock on numDBs_lock
//
if (!num_DBs_locked_in_bulk) {
- rw_rdlock(&share->num_DBs_lock);
+ share->_num_DBs_lock.lock_read();
num_DBs_locked = true;
}
else {
lock_count++;
if (lock_count >= 2000) {
- rw_unlock(&share->num_DBs_lock);
- rw_rdlock(&share->num_DBs_lock);
+ share->_num_DBs_lock.unlock();
+ share->_num_DBs_lock.lock_read();
lock_count = 0;
}
}
@@ -3863,10 +4066,8 @@ int ha_tokudb::write_row(uchar * record) {
}
}
txn = create_sub_trans ? sub_trans : transaction;
- if (tokudb_debug & TOKUDB_DEBUG_TXN) {
- TOKUDB_HANDLER_TRACE("txn %p", txn);
- }
- if (tokudb_debug & TOKUDB_DEBUG_CHECK_KEY) {
+ TOKUDB_HANDLER_TRACE_FOR_FLAGS(TOKUDB_DEBUG_TXN, "txn %p", txn);
+ if (TOKUDB_UNLIKELY(TOKUDB_DEBUG_FLAGS(TOKUDB_DEBUG_CHECK_KEY))) {
test_row_packing(record,&prim_key,&row);
}
if (loader) {
@@ -3875,8 +4076,7 @@ int ha_tokudb::write_row(uchar * record) {
abort_loader = true;
goto cleanup;
}
- }
- else {
+ } else {
error = do_uniqueness_checks(record, txn, thd);
if (error) {
// for #4633
@@ -3889,8 +4089,7 @@ int ha_tokudb::write_row(uchar * record) {
// was found and this is a duplicate key,
// so we set last_dup_key
last_dup_key = primary_key;
- }
- else if (r != DB_NOTFOUND) {
+ } else if (r != DB_NOTFOUND) {
// if some other error is returned, return that to the user.
error = r;
}
@@ -3900,8 +4099,7 @@ int ha_tokudb::write_row(uchar * record) {
if (curr_num_DBs == 1) {
error = insert_row_to_main_dictionary(record,&prim_key, &row, txn);
if (error) { goto cleanup; }
- }
- else {
+ } else {
error = insert_rows_to_dictionaries_mult(&prim_key, &row, txn, thd);
if (error) { goto cleanup; }
}
@@ -3919,7 +4117,7 @@ int ha_tokudb::write_row(uchar * record) {
}
cleanup:
if (num_DBs_locked) {
- rw_unlock(&share->num_DBs_lock);
+ share->_num_DBs_lock.unlock();
}
if (error == DB_KEYEXIST) {
error = HA_ERR_FOUND_DUPP_KEY;
@@ -3989,7 +4187,7 @@ int ha_tokudb::update_row(const uchar * old_row, uchar * new_row) {
// of the auto inc field.
//
if (share->has_auto_inc && new_row == table->record[0]) {
- tokudb_pthread_mutex_lock(&share->mutex);
+ share->lock();
ulonglong curr_auto_inc = retrieve_auto_increment(
table->field[share->ai_field_index]->key_type(),
field_offset(table->field[share->ai_field_index], table),
@@ -4001,7 +4199,7 @@ int ha_tokudb::update_row(const uchar * old_row, uchar * new_row) {
share->last_auto_increment = curr_auto_inc;
}
}
- tokudb_pthread_mutex_unlock(&share->mutex);
+ share->unlock();
}
//
@@ -4009,7 +4207,7 @@ int ha_tokudb::update_row(const uchar * old_row, uchar * new_row) {
//
bool num_DBs_locked = false;
if (!num_DBs_locked_in_bulk) {
- rw_rdlock(&share->num_DBs_lock);
+ share->_num_DBs_lock.lock_read();
num_DBs_locked = true;
}
curr_num_DBs = share->num_DBs;
@@ -4100,6 +4298,7 @@ int ha_tokudb::update_row(const uchar * old_row, uchar * new_row) {
last_dup_key = primary_key;
}
else if (!error) {
+ updated_rows++;
trx->stmt_progress.updated++;
track_progress(thd);
}
@@ -4107,7 +4306,7 @@ int ha_tokudb::update_row(const uchar * old_row, uchar * new_row) {
cleanup:
if (num_DBs_locked) {
- rw_unlock(&share->num_DBs_lock);
+ share->_num_DBs_lock.unlock();
}
if (error == DB_KEYEXIST) {
error = HA_ERR_FOUND_DUPP_KEY;
@@ -4150,7 +4349,7 @@ int ha_tokudb::delete_row(const uchar * record) {
//
bool num_DBs_locked = false;
if (!num_DBs_locked_in_bulk) {
- rw_rdlock(&share->num_DBs_lock);
+ share->_num_DBs_lock.lock_read();
num_DBs_locked = true;
}
curr_num_DBs = share->num_DBs;
@@ -4166,33 +4365,36 @@ int ha_tokudb::delete_row(const uchar * record) {
goto cleanup;
}
- if (tokudb_debug & TOKUDB_DEBUG_TXN) {
- TOKUDB_HANDLER_TRACE("all %p stmt %p sub_sp_level %p transaction %p", trx->all, trx->stmt, trx->sub_sp_level, transaction);
- }
+ TOKUDB_HANDLER_TRACE_FOR_FLAGS(
+ TOKUDB_DEBUG_TXN,
+ "all %p stmt %p sub_sp_level %p transaction %p",
+ trx->all,
+ trx->stmt,
+ trx->sub_sp_level,
+ transaction);
- error = db_env->del_multiple(
- db_env,
- share->key_file[primary_key],
- transaction,
- &prim_key,
- &row,
- curr_num_DBs,
- share->key_file,
- mult_key_dbt_array,
- mult_del_flags
- );
+ error =
+ db_env->del_multiple(
+ db_env,
+ share->key_file[primary_key],
+ transaction,
+ &prim_key,
+ &row,
+ curr_num_DBs,
+ share->key_file,
+ mult_key_dbt_array,
+ mult_del_flags);
if (error) {
DBUG_PRINT("error", ("Got error %d", error));
- }
- else {
+ } else {
deleted_rows++;
trx->stmt_progress.deleted++;
track_progress(thd);
}
cleanup:
if (num_DBs_locked) {
- rw_unlock(&share->num_DBs_lock);
+ share->_num_DBs_lock.unlock();
}
TOKUDB_HANDLER_DBUG_RETURN(error);
}
@@ -4287,11 +4489,16 @@ cleanup:
TOKUDB_HANDLER_DBUG_RETURN(error);
}
-static bool index_key_is_null(TABLE *table, uint keynr, const uchar *key, uint key_len) {
+static bool index_key_is_null(
+ TABLE* table,
+ uint keynr,
+ const uchar* key,
+ uint key_len) {
+
bool key_can_be_null = false;
- KEY *key_info = &table->key_info[keynr];
- KEY_PART_INFO *key_part = key_info->key_part;
- KEY_PART_INFO *end = key_part + get_key_parts(key_info);
+ KEY* key_info = &table->key_info[keynr];
+ KEY_PART_INFO* key_part = key_info->key_part;
+ KEY_PART_INFO* end = key_part + key_info->user_defined_key_parts;
for (; key_part != end; key_part++) {
if (key_part->null_bit) {
key_can_be_null = true;
@@ -4309,7 +4516,7 @@ static bool tokudb_do_bulk_fetch(THD *thd) {
case SQLCOM_INSERT_SELECT:
case SQLCOM_REPLACE_SELECT:
case SQLCOM_DELETE:
- return THDVAR(thd, bulk_fetch) != 0;
+ return tokudb::sysvars::bulk_fetch(thd) != 0;
default:
return false;
}
@@ -4361,7 +4568,7 @@ cleanup:
//
if (cursor) {
int r = cursor->c_close(cursor);
- assert(r==0);
+ assert_always(r==0);
cursor = NULL;
remove_from_trx_handler_list();
}
@@ -4404,7 +4611,7 @@ int ha_tokudb::index_init(uint keynr, bool sorted) {
if (cursor) {
DBUG_PRINT("note", ("Closing active cursor"));
int r = cursor->c_close(cursor);
- assert(r==0);
+ assert_always(r==0);
remove_from_trx_handler_list();
}
active_index = keynr;
@@ -4430,10 +4637,12 @@ int ha_tokudb::index_init(uint keynr, bool sorted) {
if (use_write_locks) {
cursor_flags |= DB_RMW;
}
- if (get_disable_prefetching(thd)) {
+ if (tokudb::sysvars::disable_prefetching(thd)) {
cursor_flags |= DBC_DISABLE_PREFETCHING;
}
- if ((error = share->key_file[keynr]->cursor(share->key_file[keynr], transaction, &cursor, cursor_flags))) {
+ if ((error = share->key_file[keynr]->cursor(share->key_file[keynr],
+ transaction, &cursor,
+ cursor_flags))) {
if (error == TOKUDB_MVCC_DICTIONARY_TOO_NEW) {
error = HA_ERR_TABLE_DEF_CHANGED;
my_error(ER_TABLE_DEF_CHANGED, MYF(0));
@@ -4478,7 +4687,7 @@ int ha_tokudb::index_end() {
if (cursor) {
DBUG_PRINT("enter", ("table: '%s'", table_share->table_name.str));
int r = cursor->c_close(cursor);
- assert(r==0);
+ assert_always(r==0);
cursor = NULL;
remove_from_trx_handler_list();
last_cursor_error = 0;
@@ -4546,7 +4755,7 @@ void ha_tokudb::extract_hidden_primary_key(uint keynr, DBT const *found_key) {
int ha_tokudb::read_row_callback (uchar * buf, uint keynr, DBT const *row, DBT const *found_key) {
- assert(keynr == primary_key);
+ assert_always(keynr == primary_key);
return unpack_row(buf, row,found_key, keynr);
}
@@ -4672,7 +4881,7 @@ int ha_tokudb::read_full_row(uchar * buf) {
// HA_ERR_END_OF_FILE if not found
// error otherwise
//
-int ha_tokudb::index_next_same(uchar * buf, const uchar * key, uint keylen) {
+int ha_tokudb::index_next_same(uchar* buf, const uchar* key, uint keylen) {
TOKUDB_HANDLER_DBUG_ENTER("");
ha_statistic_increment(&SSV::ha_read_next_count);
@@ -4690,8 +4899,16 @@ int ha_tokudb::index_next_same(uchar * buf, const uchar * key, uint keylen) {
//
// now do the comparison
//
- create_dbt_key_from_table(&found_key,tokudb_active_index,key_buff3,buf,&has_null);
- cmp = tokudb_prefix_cmp_dbt_key(share->key_file[tokudb_active_index], &curr_key, &found_key);
+ create_dbt_key_from_table(
+ &found_key,
+ tokudb_active_index,
+        key_buff3, buf,
+ &has_null);
+ cmp =
+ tokudb_prefix_cmp_dbt_key(
+ share->key_file[tokudb_active_index],
+ &curr_key,
+ &found_key);
if (cmp) {
error = HA_ERR_END_OF_FILE;
}
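
index_next_same() above rebuilds a key from the row it just fetched and
prefix-compares it against the lookup key; any nonzero result means the run
of equal keys is over and HA_ERR_END_OF_FILE is returned. A hedged byte-wise
sketch of that contract (real keys are compared with the dictionary's
comparison descriptor, not memcmp):

#include <cstring>
#include <cstddef>

// Byte-wise stand-in for tokudb_prefix_cmp_dbt_key: only the leading
// prefix_len bytes decide whether the cursor is still on a matching key.
static int prefix_cmp(const void* a, const void* b, size_t prefix_len) {
    return memcmp(a, b, prefix_len);
}

// index_next_same() keeps returning rows while this stays false.
static bool scan_of_equal_keys_done(const void* curr_key,
                                    const void* lookup_key,
                                    size_t prefix_len) {
    return prefix_cmp(curr_key, lookup_key, prefix_len) != 0;
}
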
@@ -4703,7 +4920,7 @@ cleanup:
//
-// According to InnoDB handlerton: Positions an index cursor to the index
+// According to InnoDB handlerton: Positions an index cursor to the index
// specified in keynr. Fetches the row if any
// Parameters:
// [out] buf - buffer for the returned row
@@ -4719,10 +4936,20 @@ cleanup:
// TODO: investigate this for correctness
// error otherwise
//
-int ha_tokudb::index_read(uchar * buf, const uchar * key, uint key_len, enum ha_rkey_function find_flag) {
- TOKUDB_HANDLER_DBUG_ENTER("key %p %u:%2.2x find=%u", key, key_len, key ? key[0] : 0, find_flag);
+int ha_tokudb::index_read(
+ uchar* buf,
+ const uchar* key,
+ uint key_len,
+ enum ha_rkey_function find_flag) {
+
+ TOKUDB_HANDLER_DBUG_ENTER(
+ "key %p %u:%2.2x find=%u",
+ key,
+ key_len,
+ key ? key[0] : 0,
+ find_flag);
invalidate_bulk_fetch();
- if (tokudb_debug & TOKUDB_DEBUG_INDEX_KEY) {
+ if (TOKUDB_UNLIKELY(TOKUDB_DEBUG_FLAGS(TOKUDB_DEBUG_INDEX_KEY))) {
TOKUDB_DBUG_DUMP("mysql key=", key, key_len);
}
DBT row;
@@ -4730,14 +4957,17 @@ int ha_tokudb::index_read(uchar * buf, const uchar * key, uint key_len, enum ha_
int error = 0;
uint32_t flags = 0;
THD* thd = ha_thd();
- tokudb_trx_data* trx = (tokudb_trx_data *) thd_get_ha_data(thd, tokudb_hton);;
+ tokudb_trx_data* trx = (tokudb_trx_data*)thd_get_ha_data(thd, tokudb_hton);
struct smart_dbt_info info;
struct index_read_info ir_info;
HANDLE_INVALID_CURSOR();
- // if we locked a non-null key range and we now have a null key, then remove the bounds from the cursor
- if (range_lock_grabbed && !range_lock_grabbed_null && index_key_is_null(table, tokudb_active_index, key, key_len)) {
+ // if we locked a non-null key range and we now have a null key, then
+ // remove the bounds from the cursor
+ if (range_lock_grabbed &&
+ !range_lock_grabbed_null &&
+ index_key_is_null(table, tokudb_active_index, key, key_len)) {
range_lock_grabbed = range_lock_grabbed_null = false;
cursor->c_remove_restriction(cursor);
}
@@ -4758,7 +4988,7 @@ int ha_tokudb::index_read(uchar * buf, const uchar * key, uint key_len, enum ha_
pack_key(&lookup_key, tokudb_active_index, key_buff3, key, key_len, COL_NEG_INF);
DBT lookup_bound;
pack_key(&lookup_bound, tokudb_active_index, key_buff4, key, key_len, COL_POS_INF);
- if (tokudb_debug & TOKUDB_DEBUG_INDEX_KEY) {
+ if (TOKUDB_UNLIKELY(TOKUDB_DEBUG_FLAGS(TOKUDB_DEBUG_INDEX_KEY))) {
TOKUDB_DBUG_DUMP("tokudb key=", lookup_key.data, lookup_key.size);
}
ir_info.orig_key = &lookup_key;
@@ -4815,8 +5045,8 @@ int ha_tokudb::index_read(uchar * buf, const uchar * key, uint key_len, enum ha_
if (!error && !key_read && tokudb_active_index != primary_key && !key_is_clustering(&table->key_info[tokudb_active_index])) {
error = read_full_row(buf);
}
-
- if (error && (tokudb_debug & TOKUDB_DEBUG_ERROR)) {
+
+ if (TOKUDB_UNLIKELY(error && TOKUDB_DEBUG_FLAGS(TOKUDB_DEBUG_ERROR))) {
TOKUDB_HANDLER_TRACE("error:%d:%d", error, find_flag);
}
trx->stmt_progress.queried++;
@@ -4845,7 +5075,7 @@ int ha_tokudb::read_data_from_range_query_buff(uchar* buf, bool need_val, bool d
// if this is a covering index, this is all we need
if (do_key_read) {
- assert(!need_val);
+ assert_always(!need_val);
extract_hidden_primary_key(tokudb_active_index, &curr_key);
read_key_only(buf, tokudb_active_index, &curr_key);
error = 0;
@@ -4937,17 +5167,27 @@ exit:
return error;
}
-static int
-smart_dbt_bf_callback(DBT const *key, DBT const *row, void *context) {
+static int smart_dbt_bf_callback(
+ DBT const* key,
+ DBT const* row,
+ void* context) {
SMART_DBT_BF_INFO info = (SMART_DBT_BF_INFO)context;
- return info->ha->fill_range_query_buf(info->need_val, key, row, info->direction, info->thd, info->buf, info->key_to_compare);
+ return
+ info->ha->fill_range_query_buf(
+ info->need_val,
+ key,
+ row,
+ info->direction,
+ info->thd,
+ info->buf,
+ info->key_to_compare);
}
-#if defined(MARIADB_BASE_VERSION) || (50600 <= MYSQL_VERSION_ID && MYSQL_VERSION_ID <= 50699)
-enum icp_result ha_tokudb::toku_handler_index_cond_check(Item* pushed_idx_cond)
-{
+enum icp_result ha_tokudb::toku_handler_index_cond_check(
+ Item* pushed_idx_cond) {
+
enum icp_result res;
- if (end_range ) {
+ if (end_range) {
int cmp;
#ifdef MARIADB_BASE_VERSION
cmp = compare_key2(end_range);
@@ -4957,37 +5197,36 @@ enum icp_result ha_tokudb::toku_handler_index_cond_check(Item* pushed_idx_cond)
if (cmp > 0) {
return ICP_OUT_OF_RANGE;
}
- }
+ }
res = pushed_idx_cond->val_int() ? ICP_MATCH : ICP_NO_MATCH;
return res;
}
-#endif
// fill in the range query buf for bulk fetch
int ha_tokudb::fill_range_query_buf(
bool need_val,
- DBT const *key,
- DBT const *row,
+ DBT const* key,
+ DBT const* row,
int direction,
THD* thd,
uchar* buf,
- DBT* key_to_compare
- ) {
+ DBT* key_to_compare) {
+
int error;
//
// first put the value into range_query_buf
//
- uint32_t size_remaining = size_range_query_buff - bytes_used_in_range_query_buff;
+ uint32_t size_remaining =
+ size_range_query_buff - bytes_used_in_range_query_buff;
uint32_t size_needed;
- uint32_t user_defined_size = get_tokudb_read_buf_size(thd);
+ uint32_t user_defined_size = tokudb::sysvars::read_buf_size(thd);
uchar* curr_pos = NULL;
if (key_to_compare) {
int cmp = tokudb_prefix_cmp_dbt_key(
share->key_file[tokudb_active_index],
key_to_compare,
- key
- );
+ key);
if (cmp) {
icp_went_out_of_range = true;
error = 0;
@@ -4995,26 +5234,38 @@ int ha_tokudb::fill_range_query_buf(
}
}
-#if defined(MARIADB_BASE_VERSION) || (50600 <= MYSQL_VERSION_ID && MYSQL_VERSION_ID <= 50699)
// if we have an index condition pushed down, we check it
- if (toku_pushed_idx_cond && (tokudb_active_index == toku_pushed_idx_cond_keyno)) {
+ if (toku_pushed_idx_cond &&
+ (tokudb_active_index == toku_pushed_idx_cond_keyno)) {
unpack_key(buf, key, tokudb_active_index);
- enum icp_result result = toku_handler_index_cond_check(toku_pushed_idx_cond);
+ enum icp_result result =
+ toku_handler_index_cond_check(toku_pushed_idx_cond);
+
// If we have reason to stop, we set icp_went_out_of_range and get out
+ // otherwise, if we simply see that the current key is no match,
+ // we tell the cursor to continue and don't store
+ // the key locally
if (result == ICP_OUT_OF_RANGE || thd_killed(thd)) {
icp_went_out_of_range = true;
error = 0;
+ DEBUG_SYNC(ha_thd(), "tokudb_icp_asc_scan_out_of_range");
goto cleanup;
- }
- // otherwise, if we simply see that the current key is no match,
- // we tell the cursor to continue and don't store
- // the key locally
- else if (result == ICP_NO_MATCH) {
+ } else if (result == ICP_NO_MATCH) {
+                // if we are performing a DESC ICP scan and have no end_range
+                // to compare to, stop using ICP filtering, as there isn't much
+                // more we can do without going through contortions with
+                // remembering and comparing key parts.
+ if (!end_range &&
+ direction < 0) {
+
+ cancel_pushed_idx_cond();
+ DEBUG_SYNC(ha_thd(), "tokudb_icp_desc_scan_invalidate");
+ }
+
error = TOKUDB_CURSOR_CONTINUE;
goto cleanup;
}
}
-#endif
// at this point, if ICP is on, we have verified that the key is one
// we are interested in, so we proceed with placing the data
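
The ICP block above has three outcomes: out of range (or a killed thread)
stops the bulk fetch, no match tells the cursor to keep scanning without
buffering the key, and a match falls through to buffering; the new DESC-scan
branch additionally cancels ICP when there is no end_range to compare
against. A compact sketch of that dispatch, with the constant and state
struct as stand-ins for the handler's own:

enum icp_result { ICP_NO_MATCH, ICP_MATCH, ICP_OUT_OF_RANGE };

// Stand-ins for the handler's constants and state; values are illustrative.
static const int TOKUDB_CURSOR_CONTINUE_SKETCH = 2;

struct icp_state {
    bool icp_went_out_of_range = false;
    bool have_end_range = true;  // whether an end_range was supplied
};

// Returns 0 to stop fetching, TOKUDB_CURSOR_CONTINUE_SKETCH to skip this
// key, or -1 ("fall through") to buffer the row.
int dispatch_icp(icp_result r, int direction, bool killed, icp_state& s) {
    if (r == ICP_OUT_OF_RANGE || killed) {
        s.icp_went_out_of_range = true;  // caller maps this to EOF later
        return 0;
    }
    if (r == ICP_NO_MATCH) {
        // DESC scan without an end_range: give up on ICP filtering rather
        // than remembering and comparing key parts ourselves.
        if (!s.have_end_range && direction < 0) {
            // cancel_pushed_idx_cond() in the real handler
        }
        return TOKUDB_CURSOR_CONTINUE_SKETCH;
    }
    return -1;  // ICP_MATCH: proceed to place the row in the buffer
}
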
@@ -5023,57 +5274,63 @@ int ha_tokudb::fill_range_query_buf(
if (need_val) {
if (unpack_entire_row) {
size_needed = 2*sizeof(uint32_t) + key->size + row->size;
- }
- else {
+ } else {
// this is an upper bound
- size_needed = sizeof(uint32_t) + // size of key length
- key->size + row->size + //key and row
- num_var_cols_for_query*(sizeof(uint32_t)) + //lengths of varchars stored
- sizeof(uint32_t); //length of blobs
+ size_needed =
+ // size of key length
+ sizeof(uint32_t) +
+ // key and row
+ key->size + row->size +
+ // lengths of varchars stored
+ num_var_cols_for_query * (sizeof(uint32_t)) +
+ // length of blobs
+ sizeof(uint32_t);
}
- }
- else {
+ } else {
size_needed = sizeof(uint32_t) + key->size;
}
if (size_remaining < size_needed) {
- range_query_buff = (uchar *)tokudb_my_realloc(
- (void *)range_query_buff,
- bytes_used_in_range_query_buff+size_needed,
- MYF(MY_WME)
- );
+ range_query_buff =
+ static_cast<uchar*>(tokudb::memory::realloc(
+ static_cast<void*>(range_query_buff),
+ bytes_used_in_range_query_buff + size_needed,
+ MYF(MY_WME)));
if (range_query_buff == NULL) {
error = ENOMEM;
invalidate_bulk_fetch();
goto cleanup;
}
- size_range_query_buff = bytes_used_in_range_query_buff+size_needed;
+ size_range_query_buff = bytes_used_in_range_query_buff + size_needed;
}
//
// now we know we have the size, let's fill the buffer, starting with the key
//
curr_pos = range_query_buff + bytes_used_in_range_query_buff;
- *(uint32_t *)curr_pos = key->size;
+ *reinterpret_cast<uint32_t*>(curr_pos) = key->size;
curr_pos += sizeof(uint32_t);
memcpy(curr_pos, key->data, key->size);
curr_pos += key->size;
if (need_val) {
if (unpack_entire_row) {
- *(uint32_t *)curr_pos = row->size;
+ *reinterpret_cast<uint32_t*>(curr_pos) = row->size;
curr_pos += sizeof(uint32_t);
memcpy(curr_pos, row->data, row->size);
curr_pos += row->size;
- }
- else {
+ } else {
// need to unpack just the data we care about
- const uchar* fixed_field_ptr = (const uchar *) row->data;
+ const uchar* fixed_field_ptr = static_cast<const uchar*>(row->data);
fixed_field_ptr += table_share->null_bytes;
const uchar* var_field_offset_ptr = NULL;
const uchar* var_field_data_ptr = NULL;
- var_field_offset_ptr = fixed_field_ptr + share->kc_info.mcp_info[tokudb_active_index].fixed_field_size;
- var_field_data_ptr = var_field_offset_ptr + share->kc_info.mcp_info[tokudb_active_index].len_of_offsets;
+ var_field_offset_ptr =
+ fixed_field_ptr +
+ share->kc_info.mcp_info[tokudb_active_index].fixed_field_size;
+ var_field_data_ptr =
+ var_field_offset_ptr +
+ share->kc_info.mcp_info[tokudb_active_index].len_of_offsets;
// first the null bytes
memcpy(curr_pos, row->data, table_share->null_bytes);
@@ -5087,8 +5344,7 @@ int ha_tokudb::fill_range_query_buf(
memcpy(
curr_pos,
fixed_field_ptr + share->kc_info.cp_info[tokudb_active_index][field_index].col_pack_val,
- share->kc_info.field_lengths[field_index]
- );
+ share->kc_info.field_lengths[field_index]);
curr_pos += share->kc_info.field_lengths[field_index];
}
@@ -5097,7 +5353,8 @@ int ha_tokudb::fill_range_query_buf(
//
for (uint32_t i = 0; i < num_var_cols_for_query; i++) {
uint field_index = var_cols_for_query[i];
- uint32_t var_field_index = share->kc_info.cp_info[tokudb_active_index][field_index].col_pack_val;
+ uint32_t var_field_index =
+ share->kc_info.cp_info[tokudb_active_index][field_index].col_pack_val;
uint32_t data_start_offset;
uint32_t field_len;
@@ -5106,11 +5363,13 @@ int ha_tokudb::fill_range_query_buf(
&data_start_offset,
var_field_index,
var_field_offset_ptr,
- share->kc_info.num_offset_bytes
- );
+ share->kc_info.num_offset_bytes);
memcpy(curr_pos, &field_len, sizeof(field_len));
curr_pos += sizeof(field_len);
- memcpy(curr_pos, var_field_data_ptr + data_start_offset, field_len);
+ memcpy(
+ curr_pos,
+ var_field_data_ptr + data_start_offset,
+ field_len);
curr_pos += field_len;
}
@@ -5124,9 +5383,12 @@ int ha_tokudb::fill_range_query_buf(
&blob_offset,
share->kc_info.mcp_info[tokudb_active_index].len_of_offsets,
var_field_data_ptr,
- share->kc_info.num_offset_bytes
- );
- data_size = row->size - blob_offset - (uint32_t)(var_field_data_ptr - (const uchar *)row->data);
+ share->kc_info.num_offset_bytes);
+ data_size =
+ row->size -
+ blob_offset -
+ static_cast<uint32_t>((var_field_data_ptr -
+ static_cast<const uchar*>(row->data)));
memcpy(curr_pos, &data_size, sizeof(data_size));
curr_pos += sizeof(data_size);
memcpy(curr_pos, var_field_data_ptr + blob_offset, data_size);
@@ -5136,7 +5398,7 @@ int ha_tokudb::fill_range_query_buf(
}
bytes_used_in_range_query_buff = curr_pos - range_query_buff;
- assert(bytes_used_in_range_query_buff <= size_range_query_buff);
+ assert_always(bytes_used_in_range_query_buff <= size_range_query_buff);
//
// now determine if we should continue with the bulk fetch
@@ -5153,14 +5415,16 @@ int ha_tokudb::fill_range_query_buf(
// row fetch upper bound.
if (bulk_fetch_iteration < HA_TOKU_BULK_FETCH_ITERATION_MAX) {
uint64_t row_fetch_upper_bound = 1LLU << bulk_fetch_iteration;
- assert(row_fetch_upper_bound > 0);
+ assert_always(row_fetch_upper_bound > 0);
if (rows_fetched_using_bulk_fetch >= row_fetch_upper_bound) {
error = 0;
goto cleanup;
}
}
- if (bytes_used_in_range_query_buff + table_share->rec_buff_length > user_defined_size) {
+ if (bytes_used_in_range_query_buff +
+ table_share->rec_buff_length >
+ user_defined_size) {
error = 0;
goto cleanup;
}
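
Two stopping rules govern how full the buffer gets: a per-iteration row
budget that doubles each time (1LLU << bulk_fetch_iteration, capped at
HA_TOKU_BULK_FETCH_ITERATION_MAX) and a byte limit that stops buffering once
one more maximum-size record could exceed the user-defined read buffer size.
A sketch of both rules together (the cap value here is illustrative):

#include <cstdint>

static const uint32_t kIterationMaxSketch = 20;  // stand-in cap

// Returns true when fill_range_query_buf should stop buffering: either the
// per-iteration row budget (which doubles every iteration) is spent, or one
// more max-size record might overflow the user-sized buffer.
bool should_stop_buffering(uint32_t iteration,
                           uint64_t rows_fetched,
                           uint64_t bytes_used,
                           uint64_t rec_buff_length,
                           uint64_t user_defined_size) {
    if (iteration < kIterationMaxSketch) {
        uint64_t budget = uint64_t(1) << iteration;  // 1, 2, 4, 8, ... rows
        if (rows_fetched >= budget)
            return true;
    }
    return bytes_used + rec_buff_length > user_defined_size;
}
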
@@ -5178,11 +5442,9 @@ int ha_tokudb::fill_range_query_buf(
int cmp = tokudb_cmp_dbt_key(
share->key_file[tokudb_active_index],
key,
- &right_range
- );
+ &right_range);
error = (cmp > 0) ? 0 : TOKUDB_CURSOR_CONTINUE;
- }
- else {
+ } else {
// compare what we got to the left endpoint of prelocked range
// because we are searching keys in descending order
if (prelocked_left_range_size == 0) {
@@ -5196,15 +5458,19 @@ int ha_tokudb::fill_range_query_buf(
int cmp = tokudb_cmp_dbt_key(
share->key_file[tokudb_active_index],
key,
- &left_range
- );
+ &left_range);
error = (cmp < 0) ? 0 : TOKUDB_CURSOR_CONTINUE;
}
cleanup:
return error;
}
-int ha_tokudb::get_next(uchar* buf, int direction, DBT* key_to_compare, bool do_key_read) {
+int ha_tokudb::get_next(
+ uchar* buf,
+ int direction,
+ DBT* key_to_compare,
+ bool do_key_read) {
+
int error = 0;
HANDLE_INVALID_CURSOR();
@@ -5221,17 +5487,18 @@ int ha_tokudb::get_next(uchar* buf, int direction, DBT* key_to_compare, bool do_
// we need to read the val of what we retrieve if
// we do NOT have a covering index AND we are using a clustering secondary
// key
- bool need_val = (do_key_read == 0) &&
- (tokudb_active_index == primary_key || key_is_clustering(&table->key_info[tokudb_active_index]));
+ bool need_val =
+ (do_key_read == 0) &&
+ (tokudb_active_index == primary_key ||
+ key_is_clustering(&table->key_info[tokudb_active_index]));
- if ((bytes_used_in_range_query_buff - curr_range_query_buff_offset) > 0) {
+ if ((bytes_used_in_range_query_buff -
+ curr_range_query_buff_offset) > 0) {
error = read_data_from_range_query_buff(buf, need_val, do_key_read);
- }
- else if (icp_went_out_of_range) {
+ } else if (icp_went_out_of_range) {
icp_went_out_of_range = false;
error = HA_ERR_END_OF_FILE;
- }
- else {
+ } else {
invalidate_bulk_fetch();
if (doing_bulk_fetch) {
struct smart_dbt_bf_info bf_info;
@@ -5252,16 +5519,28 @@ int ha_tokudb::get_next(uchar* buf, int direction, DBT* key_to_compare, bool do_
                // this while loop. icp_went_out_of_range will be set if we hit
                // a row that the index condition states is out of our range.
                // When that happens, we know all the data in the buffer is the
                // last data we will retrieve
- while (bytes_used_in_range_query_buff == 0 && !icp_went_out_of_range && error == 0) {
+ while (bytes_used_in_range_query_buff == 0 &&
+ !icp_went_out_of_range && error == 0) {
if (direction > 0) {
- error = cursor->c_getf_next(cursor, flags, smart_dbt_bf_callback, &bf_info);
+ error =
+ cursor->c_getf_next(
+ cursor,
+ flags,
+ smart_dbt_bf_callback,
+ &bf_info);
} else {
- error = cursor->c_getf_prev(cursor, flags, smart_dbt_bf_callback, &bf_info);
+ error =
+ cursor->c_getf_prev(
+ cursor,
+ flags,
+ smart_dbt_bf_callback,
+ &bf_info);
}
}
// if there is no data set and we went out of range,
// then there is nothing to return
- if (bytes_used_in_range_query_buff == 0 && icp_went_out_of_range) {
+ if (bytes_used_in_range_query_buff == 0 &&
+ icp_went_out_of_range) {
icp_went_out_of_range = false;
error = HA_ERR_END_OF_FILE;
}
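
get_next() serves rows out of range_query_buff first and only goes back to
the engine when the buffer is drained, letting a single c_getf_next or
c_getf_prev call refill many rows through the bulk-fetch callback. A
condensed sketch of that drain-then-refill flow; the buffer struct, error
value, and callbacks are stand-ins for the handler's bookkeeping:

#include <cstdint>

struct RangeBuffer {
    uint64_t bytes_used = 0;    // bytes_used_in_range_query_buff
    uint64_t offset = 0;        // curr_range_query_buff_offset
    bool out_of_range = false;  // icp_went_out_of_range
    bool has_data() const { return bytes_used - offset > 0; }
};

static const int kEndOfFileSketch = 137;  // stand-in for HA_ERR_END_OF_FILE

template <class ReadRow, class FetchForward, class FetchBackward>
int get_next_sketch(RangeBuffer& buf, int direction, ReadRow read_row,
                    FetchForward fetch_next, FetchBackward fetch_prev) {
    if (buf.has_data())
        return read_row(buf);      // serve from the buffer: no engine call
    if (buf.out_of_range) {
        buf.out_of_range = false;  // ICP already told us we are done
        return kEndOfFileSketch;
    }
    // buffer drained: one engine call refills it through the callback
    int error = 0;
    while (buf.bytes_used == 0 && !buf.out_of_range && error == 0)
        error = (direction > 0) ? fetch_next(buf) : fetch_prev(buf);
    if (buf.bytes_used == 0 && buf.out_of_range) {
        buf.out_of_range = false;
        return kEndOfFileSketch;   // went out of range with nothing buffered
    }
    return error ? error : read_row(buf);
}
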
@@ -5269,26 +5548,46 @@ int ha_tokudb::get_next(uchar* buf, int direction, DBT* key_to_compare, bool do_
bulk_fetch_iteration++;
}
- error = handle_cursor_error(error, HA_ERR_END_OF_FILE,tokudb_active_index);
- if (error) { goto cleanup; }
+ error =
+ handle_cursor_error(
+ error,
+ HA_ERR_END_OF_FILE,
+ tokudb_active_index);
+ if (error) {
+ goto cleanup;
+ }
//
// now that range_query_buff is filled, read an element
//
- error = read_data_from_range_query_buff(buf, need_val, do_key_read);
- }
- else {
+ error =
+ read_data_from_range_query_buff(buf, need_val, do_key_read);
+ } else {
struct smart_dbt_info info;
info.ha = this;
info.buf = buf;
info.keynr = tokudb_active_index;
if (direction > 0) {
- error = cursor->c_getf_next(cursor, flags, SMART_DBT_CALLBACK(do_key_read), &info);
+ error =
+ cursor->c_getf_next(
+ cursor,
+ flags,
+ SMART_DBT_CALLBACK(do_key_read),
+ &info);
} else {
- error = cursor->c_getf_prev(cursor, flags, SMART_DBT_CALLBACK(do_key_read), &info);
+ error =
+ cursor->c_getf_prev(
+ cursor,
+ flags,
+ SMART_DBT_CALLBACK(do_key_read),
+ &info);
}
- error = handle_cursor_error(error, HA_ERR_END_OF_FILE, tokudb_active_index);
+ error =
+ handle_cursor_error(
+ error,
+ HA_ERR_END_OF_FILE,
+ tokudb_active_index);
}
}
}
@@ -5301,13 +5600,17 @@ int ha_tokudb::get_next(uchar* buf, int direction, DBT* key_to_compare, bool do_
// read the full row by doing a point query into the
// main table.
//
- if (!error && !do_key_read && (tokudb_active_index != primary_key) && !key_is_clustering(&table->key_info[tokudb_active_index])) {
+ if (!error &&
+ !do_key_read &&
+ (tokudb_active_index != primary_key) &&
+ !key_is_clustering(&table->key_info[tokudb_active_index])) {
error = read_full_row(buf);
}
if (!error) {
THD *thd = ha_thd();
- tokudb_trx_data* trx = (tokudb_trx_data *) thd_get_ha_data(thd, tokudb_hton);
+ tokudb_trx_data* trx =
+ static_cast<tokudb_trx_data*>(thd_get_ha_data(thd, tokudb_hton));
trx->stmt_progress.queried++;
track_progress(thd);
if (thd_killed(thd))
@@ -5509,40 +5812,69 @@ int ha_tokudb::rnd_next(uchar * buf) {
void ha_tokudb::track_progress(THD* thd) {
tokudb_trx_data* trx = (tokudb_trx_data *) thd_get_ha_data(thd, tokudb_hton);
if (trx) {
- ulonglong num_written = trx->stmt_progress.inserted + trx->stmt_progress.updated + trx->stmt_progress.deleted;
+ ulonglong num_written = trx->stmt_progress.inserted +
+ trx->stmt_progress.updated +
+ trx->stmt_progress.deleted;
bool update_status =
- (trx->stmt_progress.queried && tokudb_read_status_frequency && (trx->stmt_progress.queried % tokudb_read_status_frequency) == 0) ||
- (num_written && tokudb_write_status_frequency && (num_written % tokudb_write_status_frequency) == 0);
+ (trx->stmt_progress.queried &&
+ tokudb::sysvars::read_status_frequency &&
+ (trx->stmt_progress.queried %
+ tokudb::sysvars::read_status_frequency) == 0) ||
+ (num_written && tokudb::sysvars::write_status_frequency &&
+ (num_written % tokudb::sysvars::write_status_frequency) == 0);
if (update_status) {
char *next_status = write_status_msg;
bool first = true;
int r;
if (trx->stmt_progress.queried) {
- r = sprintf(next_status, "Queried about %llu row%s", trx->stmt_progress.queried, trx->stmt_progress.queried == 1 ? "" : "s");
- assert(r >= 0);
+ r = sprintf(
+ next_status,
+ "Queried about %llu row%s",
+ trx->stmt_progress.queried,
+ trx->stmt_progress.queried == 1 ? "" : "s");
+ assert_always(r >= 0);
next_status += r;
first = false;
}
if (trx->stmt_progress.inserted) {
if (trx->stmt_progress.using_loader) {
- r = sprintf(next_status, "%sFetched about %llu row%s, loading data still remains", first ? "" : ", ", trx->stmt_progress.inserted, trx->stmt_progress.inserted == 1 ? "" : "s");
- }
- else {
- r = sprintf(next_status, "%sInserted about %llu row%s", first ? "" : ", ", trx->stmt_progress.inserted, trx->stmt_progress.inserted == 1 ? "" : "s");
+ r = sprintf(
+ next_status,
+ "%sFetched about %llu row%s, loading data still remains",
+ first ? "" : ", ",
+ trx->stmt_progress.inserted,
+ trx->stmt_progress.inserted == 1 ? "" : "s");
+ } else {
+ r = sprintf(
+ next_status,
+ "%sInserted about %llu row%s",
+ first ? "" : ", ",
+ trx->stmt_progress.inserted,
+ trx->stmt_progress.inserted == 1 ? "" : "s");
}
- assert(r >= 0);
+ assert_always(r >= 0);
next_status += r;
first = false;
}
if (trx->stmt_progress.updated) {
- r = sprintf(next_status, "%sUpdated about %llu row%s", first ? "" : ", ", trx->stmt_progress.updated, trx->stmt_progress.updated == 1 ? "" : "s");
- assert(r >= 0);
+ r = sprintf(
+ next_status,
+ "%sUpdated about %llu row%s",
+ first ? "" : ", ",
+ trx->stmt_progress.updated,
+ trx->stmt_progress.updated == 1 ? "" : "s");
+ assert_always(r >= 0);
next_status += r;
first = false;
}
if (trx->stmt_progress.deleted) {
- r = sprintf(next_status, "%sDeleted about %llu row%s", first ? "" : ", ", trx->stmt_progress.deleted, trx->stmt_progress.deleted == 1 ? "" : "s");
- assert(r >= 0);
+ r = sprintf(
+ next_status,
+ "%sDeleted about %llu row%s",
+ first ? "" : ", ",
+ trx->stmt_progress.deleted,
+ trx->stmt_progress.deleted == 1 ? "" : "s");
+ assert_always(r >= 0);
next_status += r;
first = false;
}
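
track_progress() assembles its status line incrementally: each nonzero
counter sprintf's into write_status_msg, the write pointer advances by the
return value, and a first flag decides whether to prepend a comma separator.
A standalone sketch of the append pattern (counter names are illustrative,
and buffer sizing is the caller's responsibility here as in the real code):

#include <cstdio>
#include <cassert>

void build_status_sketch(char* buf, unsigned long long queried,
                         unsigned long long inserted) {
    char* next = buf;
    bool first = true;
    if (queried) {
        int r = sprintf(next, "Queried about %llu row%s",
                        queried, queried == 1 ? "" : "s");
        assert(r >= 0);
        next += r;        // advance past what was just written
        first = false;
    }
    if (inserted) {
        int r = sprintf(next, "%sInserted about %llu row%s",
                        first ? "" : ", ",  // separator only after a field
                        inserted, inserted == 1 ? "" : "s");
        assert(r >= 0);
        next += r;
        first = false;
    }
}
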
@@ -5583,7 +5915,7 @@ int ha_tokudb::rnd_pos(uchar * buf, uchar * pos) {
// test rpl slave by inducing a delay before the point query
THD *thd = ha_thd();
if (thd->slave_thread && (in_rpl_delete_rows || in_rpl_update_rows)) {
- uint64_t delay_ms = THDVAR(thd, rpl_lookup_rows_delay);
+ uint64_t delay_ms = tokudb::sysvars::rpl_lookup_rows_delay(thd);
if (delay_ms)
usleep(delay_ms * 1000);
}
@@ -5664,7 +5996,7 @@ int ha_tokudb::prelock_range(const key_range *start_key, const key_range *end_ke
//
if (cursor) {
int r = cursor->c_close(cursor);
- assert(r==0);
+ assert_always(r==0);
cursor = NULL;
remove_from_trx_handler_list();
}
@@ -5783,8 +6115,7 @@ int ha_tokudb::info(uint flag) {
#endif
DB_TXN* txn = NULL;
if (flag & HA_STATUS_VARIABLE) {
- // Just to get optimizations right
- stats.records = share->rows + share->rows_from_locked_table;
+ stats.records = share->row_count() + share->rows_from_locked_table;
stats.deleted = 0;
if (!(flag & HA_STATUS_NO_LOCK)) {
uint64_t num_rows = 0;
@@ -5792,27 +6123,32 @@ int ha_tokudb::info(uint flag) {
memset(&frag_info, 0, sizeof frag_info);
error = txn_begin(db_env, NULL, &txn, DB_READ_UNCOMMITTED, ha_thd());
- if (error) { goto cleanup; }
+ if (error) {
+ goto cleanup;
+ }
// we should always have a primary key
- assert(share->file != NULL);
+ assert_always(share->file != NULL);
error = estimate_num_rows(share->file,&num_rows, txn);
if (error == 0) {
- share->rows = num_rows;
+ share->set_row_count(num_rows, false);
stats.records = num_rows;
- }
- else {
+ } else {
goto cleanup;
}
error = share->file->get_fragmentation(share->file, &frag_info);
- if (error) { goto cleanup; }
+ if (error) {
+ goto cleanup;
+ }
stats.delete_length = frag_info.unused_bytes;
DB_BTREE_STAT64 dict_stats;
error = share->file->stat64(share->file, txn, &dict_stats);
- if (error) { goto cleanup; }
-
+ if (error) {
+ goto cleanup;
+ }
+
stats.create_time = dict_stats.bt_create_time_sec;
stats.update_time = dict_stats.bt_modify_time_sec;
stats.check_time = dict_stats.bt_verify_time_sec;
@@ -5822,18 +6158,24 @@ int ha_tokudb::info(uint flag) {
// in this case, we have a hidden primary key, do not
// want to report space taken up by the hidden primary key to the user
//
- uint64_t hpk_space = TOKUDB_HIDDEN_PRIMARY_KEY_LENGTH*dict_stats.bt_ndata;
- stats.data_file_length = (hpk_space > stats.data_file_length) ? 0 : stats.data_file_length - hpk_space;
- }
- else {
+ uint64_t hpk_space =
+ TOKUDB_HIDDEN_PRIMARY_KEY_LENGTH * dict_stats.bt_ndata;
+ stats.data_file_length =
+ (hpk_space > stats.data_file_length) ?
+ 0 : stats.data_file_length - hpk_space;
+ } else {
//
// one infinity byte per key needs to be subtracted
//
uint64_t inf_byte_space = dict_stats.bt_ndata;
- stats.data_file_length = (inf_byte_space > stats.data_file_length) ? 0 : stats.data_file_length - inf_byte_space;
+ stats.data_file_length =
+ (inf_byte_space > stats.data_file_length) ?
+ 0 : stats.data_file_length - inf_byte_space;
}
- stats.mean_rec_length = stats.records ? (ulong)(stats.data_file_length/stats.records) : 0;
+ stats.mean_rec_length =
+ stats.records ?
+ (ulong)(stats.data_file_length/stats.records) : 0;
stats.index_file_length = 0;
// curr_num_DBs is the number of keys we have, according
// to the mysql layer. if drop index is running concurrently
@@ -5852,37 +6194,58 @@ int ha_tokudb::info(uint flag) {
if (i == primary_key || share->key_file[i] == NULL) {
continue;
}
- error = share->key_file[i]->stat64(
- share->key_file[i],
- txn,
- &dict_stats
- );
- if (error) { goto cleanup; }
+ error =
+ share->key_file[i]->stat64(
+ share->key_file[i],
+ txn,
+ &dict_stats);
+ if (error) {
+ goto cleanup;
+ }
stats.index_file_length += dict_stats.bt_dsize;
- error = share->file->get_fragmentation(
- share->file,
- &frag_info
- );
- if (error) { goto cleanup; }
+ error =
+ share->file->get_fragmentation(
+ share->file,
+ &frag_info);
+ if (error) {
+ goto cleanup;
+ }
stats.delete_length += frag_info.unused_bytes;
}
}
+
+ /*
+ The following comment and logic has been taken from InnoDB and
+        The following comment and logic have been taken from InnoDB, and
+        an old hack that forced stats.records to always be > 0 was removed.
+ The MySQL optimizer seems to assume in a left join that n_rows
+ is an accurate estimate if it is zero. Of course, it is not,
+ since we do not have any locks on the rows yet at this phase.
+ Since SHOW TABLE STATUS seems to call this function with the
+ HA_STATUS_TIME flag set, while the left join optimizer does not
+ set that flag, we add one to a zero value if the flag is not
+ set. That way SHOW TABLE STATUS will show the best estimate,
+ while the optimizer never sees the table empty. */
+ if (stats.records == 0 && !(flag & HA_STATUS_TIME)) {
+ stats.records++;
+ }
}
if ((flag & HA_STATUS_CONST)) {
- stats.max_data_file_length= 9223372036854775807ULL;
- tokudb::set_card_in_key_info(table, share->n_rec_per_key, share->rec_per_key);
+ stats.max_data_file_length = 9223372036854775807ULL;
+ share->set_cardinality_counts_in_table(table);
}
/* Don't return key if we got an error for the internal primary key */
if (flag & HA_STATUS_ERRKEY && last_dup_key < table_share->keys) {
errkey = last_dup_key;
- }
+ }
- if (flag & HA_STATUS_AUTO && table->found_next_number_field) {
- THD *thd= table->in_use;
- struct system_variables *variables= &thd->variables;
- stats.auto_increment_value = share->last_auto_increment + variables->auto_increment_increment;
+ if (flag & HA_STATUS_AUTO && table->found_next_number_field) {
+ THD* thd = table->in_use;
+ struct system_variables* variables = &thd->variables;
+ stats.auto_increment_value =
+ share->last_auto_increment + variables->auto_increment_increment;
}
error = 0;
cleanup:
@@ -5929,7 +6292,7 @@ int ha_tokudb::extra(enum ha_extra_function operation) {
TOKUDB_HANDLER_DBUG_RETURN(0);
}
-int ha_tokudb::reset(void) {
+int ha_tokudb::reset() {
TOKUDB_HANDLER_DBUG_ENTER("");
key_read = false;
using_ignore = false;
@@ -5953,14 +6316,13 @@ int ha_tokudb::acquire_table_lock (DB_TXN* trans, TABLE_LOCK_TYPE lt) {
TOKUDB_HANDLER_DBUG_ENTER("%p %s", trans, lt == lock_read ? "r" : "w");
int error = ENOSYS;
if (!num_DBs_locked_in_bulk) {
- rw_rdlock(&share->num_DBs_lock);
+ share->_num_DBs_lock.lock_read();
}
uint curr_num_DBs = share->num_DBs;
if (lt == lock_read) {
error = 0;
goto cleanup;
- }
- else if (lt == lock_write) {
+ } else if (lt == lock_write) {
for (uint i = 0; i < curr_num_DBs; i++) {
DB* db = share->key_file[i];
error = db->pre_acquire_table_lock(db, trans);
@@ -5968,11 +6330,9 @@ int ha_tokudb::acquire_table_lock (DB_TXN* trans, TABLE_LOCK_TYPE lt) {
TOKUDB_HANDLER_TRACE("%d db=%p trans=%p", i, db, trans);
if (error) break;
}
- if (tokudb_debug & TOKUDB_DEBUG_LOCK)
- TOKUDB_HANDLER_TRACE("error=%d", error);
+ TOKUDB_HANDLER_TRACE_FOR_FLAGS(TOKUDB_DEBUG_LOCK, "error=%d", error);
if (error) goto cleanup;
- }
- else {
+ } else {
error = ENOSYS;
goto cleanup;
}
@@ -5980,7 +6340,7 @@ int ha_tokudb::acquire_table_lock (DB_TXN* trans, TABLE_LOCK_TYPE lt) {
error = 0;
cleanup:
if (!num_DBs_locked_in_bulk) {
- rw_unlock(&share->num_DBs_lock);
+ share->_num_DBs_lock.unlock();
}
TOKUDB_HANDLER_DBUG_RETURN(error);
}
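
acquire_table_lock() takes the num_DBs read lock, then for a write lock
calls pre_acquire_table_lock on every dictionary and bails out on the first
failure, releasing the read lock in cleanup. A sketch of the
lock-all-or-fail-fast loop, with DB_stub standing in for the TokuDB DB
handle:

struct DB_stub {
    int (*pre_acquire_table_lock)(DB_stub*, void* txn);
};

int lock_all_dictionaries(DB_stub** key_file, unsigned n, void* txn) {
    for (unsigned i = 0; i < n; i++) {
        DB_stub* db = key_file[i];
        int error = db->pre_acquire_table_lock(db, txn);
        if (error)
            return error;  // caller releases the num_DBs lock in cleanup
    }
    return 0;
}
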
@@ -6011,17 +6371,19 @@ int ha_tokudb::create_txn(THD* thd, tokudb_trx_data* trx) {
if ((error = txn_begin(db_env, NULL, &trx->all, txn_begin_flags, thd))) {
goto cleanup;
}
- if (tokudb_debug & TOKUDB_DEBUG_TXN) {
- TOKUDB_HANDLER_TRACE("created master %p", trx->all);
- }
+ TOKUDB_HANDLER_TRACE_FOR_FLAGS(
+ TOKUDB_DEBUG_TXN,
+ "created master %p",
+ trx->all);
trx->sp_level = trx->all;
trans_register_ha(thd, true, tokudb_hton);
}
DBUG_PRINT("trans", ("starting transaction stmt"));
if (trx->stmt) {
- if (tokudb_debug & TOKUDB_DEBUG_TXN) {
- TOKUDB_HANDLER_TRACE("warning:stmt=%p", trx->stmt);
- }
+ TOKUDB_HANDLER_TRACE_FOR_FLAGS(
+ TOKUDB_DEBUG_TXN,
+ "warning:stmt=%p",
+ trx->stmt);
}
uint32_t txn_begin_flags;
if (trx->all == NULL) {
@@ -6036,21 +6398,25 @@ int ha_tokudb::create_txn(THD* thd, tokudb_trx_data* trx) {
if (txn_begin_flags == 0 && is_autocommit && thd_sql_command(thd) == SQLCOM_SELECT) {
txn_begin_flags = DB_TXN_SNAPSHOT;
}
- if (is_autocommit && thd_sql_command(thd) == SQLCOM_SELECT && !thd->in_sub_stmt && lock.type <= TL_READ_NO_INSERT && !thd->lex->uses_stored_routines()) {
+ if (is_autocommit && thd_sql_command(thd) == SQLCOM_SELECT &&
+ !thd->in_sub_stmt && lock.type <= TL_READ_NO_INSERT &&
+ !thd->lex->uses_stored_routines()) {
txn_begin_flags |= DB_TXN_READ_ONLY;
}
- }
- else {
+ } else {
txn_begin_flags = DB_INHERIT_ISOLATION;
}
- if ((error = txn_begin(db_env, trx->sp_level, &trx->stmt, txn_begin_flags, thd))) {
+ error = txn_begin(db_env, trx->sp_level, &trx->stmt, txn_begin_flags, thd);
+ if (error) {
/* We leave the possible master transaction open */
goto cleanup;
}
trx->sub_sp_level = trx->stmt;
- if (tokudb_debug & TOKUDB_DEBUG_TXN) {
- TOKUDB_HANDLER_TRACE("created stmt %p sp_level %p", trx->sp_level, trx->stmt);
- }
+ TOKUDB_HANDLER_TRACE_FOR_FLAGS(
+ TOKUDB_DEBUG_TXN,
+ "created stmt %p sp_level %p",
+ trx->sp_level,
+ trx->stmt);
reset_stmt_progress(&trx->stmt_progress);
trans_register_ha(thd, false, tokudb_hton);
cleanup:
@@ -6081,26 +6447,40 @@ static const char *lock_type_str(int lock_type) {
// error otherwise
//
int ha_tokudb::external_lock(THD * thd, int lock_type) {
- TOKUDB_HANDLER_DBUG_ENTER("cmd %d lock %d %s %s", thd_sql_command(thd), lock_type, lock_type_str(lock_type), share->table_name);
- if (!(tokudb_debug & TOKUDB_DEBUG_ENTER) && (tokudb_debug & TOKUDB_DEBUG_LOCK)) {
- TOKUDB_HANDLER_TRACE("cmd %d lock %d %s %s", thd_sql_command(thd), lock_type, lock_type_str(lock_type), share->table_name);
- }
- if (tokudb_debug & TOKUDB_DEBUG_LOCK) {
- TOKUDB_HANDLER_TRACE("q %s", thd->query());
- }
+ TOKUDB_HANDLER_DBUG_ENTER(
+ "cmd %d lock %d %s %s",
+ thd_sql_command(thd),
+ lock_type,
+ lock_type_str(lock_type),
+ share->full_table_name());
+ if (TOKUDB_UNLIKELY(!TOKUDB_DEBUG_FLAGS(TOKUDB_DEBUG_ENTER) &&
+ TOKUDB_DEBUG_FLAGS(TOKUDB_DEBUG_LOCK))) {
+ TOKUDB_HANDLER_TRACE(
+ "cmd %d lock %d %s %s",
+ thd_sql_command(thd),
+ lock_type,
+ lock_type_str(lock_type),
+ share->full_table_name());
+ }
+ TOKUDB_HANDLER_TRACE_FOR_FLAGS(TOKUDB_DEBUG_LOCK, "q %s", thd->query());
int error = 0;
- tokudb_trx_data *trx = (tokudb_trx_data *) thd_get_ha_data(thd, tokudb_hton);
+ tokudb_trx_data* trx = (tokudb_trx_data*)thd_get_ha_data(thd, tokudb_hton);
if (!trx) {
error = create_tokudb_trx_data_instance(&trx);
if (error) { goto cleanup; }
thd_set_ha_data(thd, tokudb_hton, trx);
}
- if (tokudb_debug & TOKUDB_DEBUG_TXN) {
- TOKUDB_HANDLER_TRACE("trx %p %p %p %p %u %u", trx->all, trx->stmt, trx->sp_level, trx->sub_sp_level,
- trx->tokudb_lock_count, trx->create_lock_count);
- }
+ TOKUDB_HANDLER_TRACE_FOR_FLAGS(
+ TOKUDB_DEBUG_TXN,
+ "trx %p %p %p %p %u %u",
+ trx->all,
+ trx->stmt,
+ trx->sp_level,
+ trx->sub_sp_level,
+ trx->tokudb_lock_count,
+ trx->create_lock_count);
if (trx->all == NULL) {
trx->sp_level = NULL;
@@ -6120,19 +6500,11 @@ int ha_tokudb::external_lock(THD * thd, int lock_type) {
}
transaction = trx->sub_sp_level;
trx->tokudb_lock_count++;
- }
- else {
- tokudb_pthread_mutex_lock(&share->mutex);
- // hate dealing with comparison of signed vs unsigned, so doing this
- if (deleted_rows > added_rows && share->rows < (deleted_rows - added_rows)) {
- share->rows = 0;
- }
- else {
- share->rows += (added_rows - deleted_rows);
- }
- tokudb_pthread_mutex_unlock(&share->mutex);
+ } else {
+ share->update_row_count(thd, added_rows, deleted_rows, updated_rows);
added_rows = 0;
deleted_rows = 0;
+ updated_rows = 0;
share->rows_from_locked_table = 0;
if (trx->tokudb_lock_count > 0) {
if (--trx->tokudb_lock_count <= trx->create_lock_count) {
@@ -6154,8 +6526,7 @@ int ha_tokudb::external_lock(THD * thd, int lock_type) {
}
}
cleanup:
- if (tokudb_debug & TOKUDB_DEBUG_LOCK)
- TOKUDB_HANDLER_TRACE("error=%d", error);
+ TOKUDB_HANDLER_TRACE_FOR_FLAGS(TOKUDB_DEBUG_LOCK, "error=%d", error);
TOKUDB_HANDLER_DBUG_RETURN(error);
}
@@ -6164,24 +6535,32 @@ cleanup:
TABLE LOCK is done.
  Under LOCK TABLES, each used table will force a call to start_stmt.
*/
-int ha_tokudb::start_stmt(THD * thd, thr_lock_type lock_type) {
- TOKUDB_HANDLER_DBUG_ENTER("cmd %d lock %d %s", thd_sql_command(thd), lock_type, share->table_name);
- if (tokudb_debug & TOKUDB_DEBUG_LOCK) {
- TOKUDB_HANDLER_TRACE("q %s", thd->query());
- }
+int ha_tokudb::start_stmt(THD* thd, thr_lock_type lock_type) {
+ TOKUDB_HANDLER_DBUG_ENTER(
+ "cmd %d lock %d %s",
+ thd_sql_command(thd),
+ lock_type,
+ share->full_table_name());
+
+ TOKUDB_HANDLER_TRACE_FOR_FLAGS(TOKUDB_DEBUG_LOCK, "q %s", thd->query());
int error = 0;
- tokudb_trx_data *trx = (tokudb_trx_data *) thd_get_ha_data(thd, tokudb_hton);
+ tokudb_trx_data* trx = (tokudb_trx_data*)thd_get_ha_data(thd, tokudb_hton);
if (!trx) {
error = create_tokudb_trx_data_instance(&trx);
if (error) { goto cleanup; }
thd_set_ha_data(thd, tokudb_hton, trx);
}
- if (tokudb_debug & TOKUDB_DEBUG_TXN) {
- TOKUDB_HANDLER_TRACE("trx %p %p %p %p %u %u", trx->all, trx->stmt, trx->sp_level, trx->sub_sp_level,
- trx->tokudb_lock_count, trx->create_lock_count);
- }
+ TOKUDB_HANDLER_TRACE_FOR_FLAGS(
+ TOKUDB_DEBUG_TXN,
+ "trx %p %p %p %p %u %u",
+ trx->all,
+ trx->stmt,
+ trx->sp_level,
+ trx->sub_sp_level,
+ trx->tokudb_lock_count,
+ trx->create_lock_count);
/*
note that trx->stmt may have been already initialized as start_stmt()
@@ -6194,11 +6573,11 @@ int ha_tokudb::start_stmt(THD * thd, thr_lock_type lock_type) {
goto cleanup;
}
trx->create_lock_count = trx->tokudb_lock_count;
- }
- else {
- if (tokudb_debug & TOKUDB_DEBUG_TXN) {
- TOKUDB_HANDLER_TRACE("trx->stmt %p already existed", trx->stmt);
- }
+ } else {
+ TOKUDB_HANDLER_TRACE_FOR_FLAGS(
+ TOKUDB_DEBUG_TXN,
+ "trx->stmt %p already existed",
+ trx->stmt);
}
if (added_rows > deleted_rows) {
share->rows_from_locked_table = added_rows - deleted_rows;
@@ -6272,27 +6651,40 @@ uint32_t ha_tokudb::get_cursor_isolation_flags(enum thr_lock_type lock_type, THD
time). In the future we will probably try to remove this.
*/
-THR_LOCK_DATA **ha_tokudb::store_lock(THD * thd, THR_LOCK_DATA ** to, enum thr_lock_type lock_type) {
- TOKUDB_HANDLER_DBUG_ENTER("lock_type=%d cmd=%d", lock_type, thd_sql_command(thd));
- if (tokudb_debug & TOKUDB_DEBUG_LOCK) {
- TOKUDB_HANDLER_TRACE("lock_type=%d cmd=%d", lock_type, thd_sql_command(thd));
- }
+THR_LOCK_DATA** ha_tokudb::store_lock(
+ THD* thd,
+ THR_LOCK_DATA** to,
+ enum thr_lock_type lock_type) {
+
+ TOKUDB_HANDLER_DBUG_ENTER(
+ "lock_type=%d cmd=%d",
+ lock_type,
+ thd_sql_command(thd));
+ TOKUDB_HANDLER_TRACE_FOR_FLAGS(
+ TOKUDB_DEBUG_LOCK,
+ "lock_type=%d cmd=%d",
+ lock_type,
+ thd_sql_command(thd));
if (lock_type != TL_IGNORE && lock.type == TL_UNLOCK) {
enum_sql_command sql_command = (enum_sql_command) thd_sql_command(thd);
if (!thd->in_lock_tables) {
- if (sql_command == SQLCOM_CREATE_INDEX && get_create_index_online(thd)) {
+ if (sql_command == SQLCOM_CREATE_INDEX &&
+ tokudb::sysvars::create_index_online(thd)) {
// hot indexing
- rw_rdlock(&share->num_DBs_lock);
+ share->_num_DBs_lock.lock_read();
if (share->num_DBs == (table->s->keys + tokudb_test(hidden_primary_key))) {
lock_type = TL_WRITE_ALLOW_WRITE;
}
- rw_unlock(&share->num_DBs_lock);
- } else if ((lock_type >= TL_WRITE_CONCURRENT_INSERT && lock_type <= TL_WRITE) &&
- sql_command != SQLCOM_TRUNCATE && !thd_tablespace_op(thd)) {
+ share->_num_DBs_lock.unlock();
+ } else if ((lock_type >= TL_WRITE_CONCURRENT_INSERT &&
+ lock_type <= TL_WRITE) &&
+ sql_command != SQLCOM_TRUNCATE &&
+ !thd_tablespace_op(thd)) {
// allow concurrent writes
lock_type = TL_WRITE_ALLOW_WRITE;
- } else if (sql_command == SQLCOM_OPTIMIZE && lock_type == TL_READ_NO_INSERT) {
+ } else if (sql_command == SQLCOM_OPTIMIZE &&
+ lock_type == TL_READ_NO_INSERT) {
// hot optimize table
lock_type = TL_READ;
}
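
store_lock() rewrites the requested lock type in three situations: online
CREATE INDEX gets TL_WRITE_ALLOW_WRITE once every dictionary is present,
ordinary writes (excluding TRUNCATE and tablespace operations) are downgraded
to TL_WRITE_ALLOW_WRITE to allow concurrency, and hot OPTIMIZE turns
TL_READ_NO_INSERT into TL_READ. A decision-table sketch with stand-in enum
values; the tablespace check is omitted for brevity:

// Stand-ins for MySQL's thr_lock_type constants, in their relative order.
enum lock_t { TL_READ, TL_READ_NO_INSERT, TL_WRITE_CONCURRENT_INSERT,
              TL_WRITE_ALLOW_WRITE, TL_WRITE };

lock_t downgrade(lock_t requested, bool online_index_ready,
                 bool is_truncate, bool is_optimize) {
    if (online_index_ready)
        return TL_WRITE_ALLOW_WRITE;               // hot indexing
    if (requested >= TL_WRITE_CONCURRENT_INSERT &&
        requested <= TL_WRITE && !is_truncate)
        return TL_WRITE_ALLOW_WRITE;               // concurrent writes
    if (is_optimize && requested == TL_READ_NO_INSERT)
        return TL_READ;                            // hot OPTIMIZE TABLE
    return requested;
}
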
@@ -6300,88 +6692,130 @@ THR_LOCK_DATA **ha_tokudb::store_lock(THD * thd, THR_LOCK_DATA ** to, enum thr_l
lock.type = lock_type;
}
*to++ = &lock;
- if (tokudb_debug & TOKUDB_DEBUG_LOCK)
- TOKUDB_HANDLER_TRACE("lock_type=%d", lock_type);
- DBUG_RETURN(to);
+ TOKUDB_HANDLER_TRACE_FOR_FLAGS(
+ TOKUDB_DEBUG_LOCK,
+ "lock_type=%d",
+ lock_type);
+ TOKUDB_HANDLER_DBUG_RETURN_PTR(to);
}
-static toku_compression_method get_compression_method(DB *file) {
+static toku_compression_method get_compression_method(DB* file) {
enum toku_compression_method method;
int r = file->get_compression_method(file, &method);
- assert(r == 0);
+ assert_always(r == 0);
return method;
}
#if TOKU_INCLUDE_ROW_TYPE_COMPRESSION
-enum row_type ha_tokudb::get_row_type(void) const {
+enum row_type ha_tokudb::get_row_type() const {
toku_compression_method compression_method = get_compression_method(share->file);
return toku_compression_method_to_row_type(compression_method);
}
#endif
static int create_sub_table(
- const char *table_name,
- DBT* row_descriptor,
- DB_TXN* txn,
- uint32_t block_size,
+ const char* table_name,
+ DBT* row_descriptor,
+ DB_TXN* txn,
+ uint32_t block_size,
uint32_t read_block_size,
toku_compression_method compression_method,
bool is_hot_index,
- uint32_t fanout
- )
-{
+ uint32_t fanout) {
+
TOKUDB_DBUG_ENTER("");
int error;
DB *file = NULL;
uint32_t create_flags;
-
-
+
+
error = db_create(&file, db_env, 0);
if (error) {
DBUG_PRINT("error", ("Got error: %d when creating table", error));
my_errno = error;
goto exit;
}
-
+
if (block_size != 0) {
error = file->set_pagesize(file, block_size);
if (error != 0) {
- DBUG_PRINT("error", ("Got error: %d when setting block size %u for table '%s'", error, block_size, table_name));
+ DBUG_PRINT(
+ "error",
+ ("Got error: %d when setting block size %u for table '%s'",
+ error,
+ block_size,
+ table_name));
goto exit;
}
}
if (read_block_size != 0) {
error = file->set_readpagesize(file, read_block_size);
if (error != 0) {
- DBUG_PRINT("error", ("Got error: %d when setting read block size %u for table '%s'", error, read_block_size, table_name));
+ DBUG_PRINT(
+ "error",
+ ("Got error: %d when setting read block size %u for table '%s'",
+ error,
+ read_block_size,
+ table_name));
goto exit;
}
}
if (fanout != 0) {
error = file->set_fanout(file, fanout);
if (error != 0) {
- DBUG_PRINT("error", ("Got error: %d when setting fanout %u for table '%s'",
- error, fanout, table_name));
+ DBUG_PRINT(
+ "error",
+ ("Got error: %d when setting fanout %u for table '%s'",
+ error,
+ fanout,
+ table_name));
goto exit;
}
}
error = file->set_compression_method(file, compression_method);
if (error != 0) {
- DBUG_PRINT("error", ("Got error: %d when setting compression type %u for table '%s'", error, compression_method, table_name));
+ DBUG_PRINT(
+ "error",
+ ("Got error: %d when setting compression type %u for table '%s'",
+ error,
+ compression_method,
+ table_name));
goto exit;
}
- create_flags = DB_THREAD | DB_CREATE | DB_EXCL | (is_hot_index ? DB_IS_HOT_INDEX : 0);
- error = file->open(file, txn, table_name, NULL, DB_BTREE, create_flags, my_umask);
+ create_flags =
+ DB_THREAD | DB_CREATE | DB_EXCL | (is_hot_index ? DB_IS_HOT_INDEX : 0);
+ error =
+ file->open(
+ file,
+ txn,
+ table_name,
+ NULL,
+ DB_BTREE,
+ create_flags,
+ my_umask);
if (error) {
- DBUG_PRINT("error", ("Got error: %d when opening table '%s'", error, table_name));
+ DBUG_PRINT(
+ "error",
+ ("Got error: %d when opening table '%s'", error, table_name));
goto exit;
}
- error = file->change_descriptor(file, txn, row_descriptor, (is_hot_index ? DB_IS_HOT_INDEX | DB_UPDATE_CMP_DESCRIPTOR : DB_UPDATE_CMP_DESCRIPTOR));
+ error =
+ file->change_descriptor(
+ file,
+ txn,
+ row_descriptor,
+ (is_hot_index ? DB_IS_HOT_INDEX |
+ DB_UPDATE_CMP_DESCRIPTOR :
+ DB_UPDATE_CMP_DESCRIPTOR));
if (error) {
- DBUG_PRINT("error", ("Got error: %d when setting row descriptor for table '%s'", error, table_name));
+ DBUG_PRINT(
+ "error",
+ ("Got error: %d when setting row descriptor for table '%s'",
+ error,
+ table_name));
goto exit;
}
@@ -6389,7 +6823,7 @@ static int create_sub_table(
exit:
if (file) {
int r = file->close(file, 0);
- assert(r==0);
+ assert_always(r==0);
}
TOKUDB_DBUG_RETURN(error);
}
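
create_sub_table() is a straight-line configure-then-open sequence: create
the handle, optionally set page size, read page size and fanout, set the
compression method, open with DB_CREATE|DB_EXCL (plus DB_IS_HOT_INDEX for
hot indexes), install the row descriptor, and always close the handle at
exit. A condensed sketch of that shape, with inline stubs standing in for
the DB vtable and placeholder flag bits:

struct Handle {
    int set_pagesize(unsigned) { return 0; }        // 0 on success
    int set_compression(int) { return 0; }
    int open(const char*, unsigned) { return 0; }
    int change_descriptor(const void*) { return 0; }
    void close() {}
};

int create_dictionary(Handle& h, const char* name, unsigned block_size,
                      int method, const void* descriptor, bool hot_index) {
    int error = 0;
    const unsigned kCreateExcl = 1u;  // placeholder for DB_CREATE|DB_EXCL
    const unsigned kHot = 2u;         // placeholder for DB_IS_HOT_INDEX
    if (block_size != 0 && (error = h.set_pagesize(block_size)) != 0)
        goto exit;
    if ((error = h.set_compression(method)) != 0)
        goto exit;
    if ((error = h.open(name, kCreateExcl | (hot_index ? kHot : 0))) != 0)
        goto exit;
    error = h.change_descriptor(descriptor);
exit:
    h.close();  // the real code closes the dictionary handle at exit, too
    return error;
}
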
@@ -6407,7 +6841,8 @@ void ha_tokudb::update_create_info(HA_CREATE_INFO* create_info) {
// show create table asks us to update this create_info, this makes it
// so we'll always show what compression type we're using
create_info->row_type = get_row_type();
- if (create_info->row_type == ROW_TYPE_TOKU_ZLIB && THDVAR(ha_thd(), hide_default_row_format) != 0) {
+ if (create_info->row_type == ROW_TYPE_TOKU_ZLIB &&
+ tokudb::sysvars::hide_default_row_format(ha_thd()) != 0) {
create_info->row_type = ROW_TYPE_DEFAULT;
}
}
@@ -6471,27 +6906,43 @@ int ha_tokudb::write_key_name_to_status(DB* status_block, char* key_name, DB_TXN
}
//
-// some tracing moved out of ha_tokudb::create, because ::create was getting cluttered
+// some tracing moved out of ha_tokudb::create, because ::create was
+// getting cluttered
//
void ha_tokudb::trace_create_table_info(const char *name, TABLE * form) {
uint i;
//
// tracing information about what type of table we are creating
//
- if (tokudb_debug & TOKUDB_DEBUG_OPEN) {
+ if (TOKUDB_UNLIKELY(TOKUDB_DEBUG_FLAGS(TOKUDB_DEBUG_OPEN))) {
for (i = 0; i < form->s->fields; i++) {
Field *field = form->s->field[i];
- TOKUDB_HANDLER_TRACE("field:%d:%s:type=%d:flags=%x", i, field->field_name, field->type(), field->flags);
+ TOKUDB_HANDLER_TRACE(
+ "field:%d:%s:type=%d:flags=%x",
+ i,
+ field->field_name,
+ field->type(),
+ field->flags);
}
for (i = 0; i < form->s->keys; i++) {
KEY *key = &form->s->key_info[i];
- TOKUDB_HANDLER_TRACE("key:%d:%s:%d", i, key->name, get_key_parts(key));
+ TOKUDB_HANDLER_TRACE(
+ "key:%d:%s:%d",
+ i,
+ key->name,
+ key->user_defined_key_parts);
uint p;
- for (p = 0; p < get_key_parts(key); p++) {
- KEY_PART_INFO *key_part = &key->key_part[p];
- Field *field = key_part->field;
- TOKUDB_HANDLER_TRACE("key:%d:%d:length=%d:%s:type=%d:flags=%x",
- i, p, key_part->length, field->field_name, field->type(), field->flags);
+ for (p = 0; p < key->user_defined_key_parts; p++) {
+ KEY_PART_INFO* key_part = &key->key_part[p];
+ Field* field = key_part->field;
+ TOKUDB_HANDLER_TRACE(
+ "key:%d:%d:length=%d:%s:type=%d:flags=%x",
+ i,
+ p,
+ key_part->length,
+ field->field_name,
+ field->type(),
+ field->flags);
}
}
}
@@ -6499,9 +6950,12 @@ void ha_tokudb::trace_create_table_info(const char *name, TABLE * form) {
static uint32_t get_max_desc_size(KEY_AND_COL_INFO* kc_info, TABLE* form) {
uint32_t max_row_desc_buff_size;
- max_row_desc_buff_size = 2*(form->s->fields * 6)+10; // upper bound of key comparison descriptor
- max_row_desc_buff_size += get_max_secondary_key_pack_desc_size(kc_info); // upper bound for sec. key part
- max_row_desc_buff_size += get_max_clustering_val_pack_desc_size(form->s); // upper bound for clustering val part
+ // upper bound of key comparison descriptor
+ max_row_desc_buff_size = 2*(form->s->fields * 6)+10;
+ // upper bound for sec. key part
+ max_row_desc_buff_size += get_max_secondary_key_pack_desc_size(kc_info);
+ // upper bound for clustering val part
+ max_row_desc_buff_size += get_max_clustering_val_pack_desc_size(form->s);
return max_row_desc_buff_size;
}
@@ -6513,9 +6967,8 @@ static uint32_t create_secondary_key_descriptor(
TABLE* form,
uint primary_key,
uint32_t keynr,
- KEY_AND_COL_INFO* kc_info
- )
-{
+ KEY_AND_COL_INFO* kc_info) {
+
uchar* ptr = NULL;
ptr = buf;
@@ -6554,23 +7007,25 @@ static uint32_t create_secondary_key_descriptor(
// creates dictionary for secondary index, with key description key_info, all using txn
//
int ha_tokudb::create_secondary_dictionary(
- const char* name, TABLE* form,
- KEY* key_info,
- DB_TXN* txn,
- KEY_AND_COL_INFO* kc_info,
+ const char* name,
+ TABLE* form,
+ KEY* key_info,
+ DB_TXN* txn,
+ KEY_AND_COL_INFO* kc_info,
uint32_t keynr,
bool is_hot_index,
- toku_compression_method compression_method
- )
-{
+ toku_compression_method compression_method) {
+
int error;
DBT row_descriptor;
uchar* row_desc_buff = NULL;
char* newname = NULL;
+ size_t newname_len = 0;
KEY* prim_key = NULL;
char dict_name[MAX_DICT_NAME_LEN];
uint32_t max_row_desc_buff_size;
- uint hpk= (form->s->primary_key >= MAX_KEY) ? TOKUDB_HIDDEN_PRIMARY_KEY_LENGTH : 0;
+ uint hpk= (form->s->primary_key >= MAX_KEY) ?
+ TOKUDB_HIDDEN_PRIMARY_KEY_LENGTH : 0;
uint32_t block_size;
uint32_t read_block_size;
uint32_t fanout;
@@ -6580,14 +7035,23 @@ int ha_tokudb::create_secondary_dictionary(
max_row_desc_buff_size = get_max_desc_size(kc_info,form);
- row_desc_buff = (uchar *)tokudb_my_malloc(max_row_desc_buff_size, MYF(MY_WME));
- if (row_desc_buff == NULL){ error = ENOMEM; goto cleanup;}
+ row_desc_buff = (uchar*)tokudb::memory::malloc(
+ max_row_desc_buff_size,
+ MYF(MY_WME));
+ if (row_desc_buff == NULL) {
+ error = ENOMEM;
+ goto cleanup;
+ }
- newname = (char *)tokudb_my_malloc(get_max_dict_name_path_length(name),MYF(MY_WME));
- if (newname == NULL){ error = ENOMEM; goto cleanup;}
+ newname_len = get_max_dict_name_path_length(name);
+ newname = (char*)tokudb::memory::malloc(newname_len, MYF(MY_WME));
+ if (newname == NULL) {
+ error = ENOMEM;
+ goto cleanup;
+ }
sprintf(dict_name, "key-%s", key_info->name);
- make_name(newname, name, dict_name);
+ make_name(newname, newname_len, name, dict_name);
prim_key = (hpk) ? NULL : &form->s->key_info[primary_key];
@@ -6606,20 +7070,25 @@ int ha_tokudb::create_secondary_dictionary(
form,
primary_key,
keynr,
- kc_info
- );
- assert(row_descriptor.size <= max_row_desc_buff_size);
+ kc_info);
+ assert_always(row_descriptor.size <= max_row_desc_buff_size);
- block_size = get_tokudb_block_size(thd);
- read_block_size = get_tokudb_read_block_size(thd);
- fanout = get_tokudb_fanout(thd);
+ block_size = tokudb::sysvars::block_size(thd);
+ read_block_size = tokudb::sysvars::read_block_size(thd);
+ fanout = tokudb::sysvars::fanout(thd);
- error = create_sub_table(newname, &row_descriptor, txn, block_size,
- read_block_size, compression_method, is_hot_index,
- fanout);
+ error = create_sub_table(
+ newname,
+ &row_descriptor,
+ txn,
+ block_size,
+ read_block_size,
+ compression_method,
+ is_hot_index,
+ fanout);
cleanup:
- tokudb_my_free(newname);
- tokudb_my_free(row_desc_buff);
+ tokudb::memory::free(newname);
+ tokudb::memory::free(row_desc_buff);
return error;
}
@@ -6630,21 +7099,17 @@ static uint32_t create_main_key_descriptor(
uint hpk,
uint primary_key,
TABLE* form,
- KEY_AND_COL_INFO* kc_info
- )
-{
+ KEY_AND_COL_INFO* kc_info) {
+
uchar* ptr = buf;
ptr += create_toku_key_descriptor(
ptr,
hpk,
prim_key,
false,
- NULL
- );
+ NULL);
- ptr += create_toku_main_key_pack_descriptor(
- ptr
- );
+ ptr += create_toku_main_key_pack_descriptor(ptr);
ptr += create_toku_clustering_val_pack_descriptor(
ptr,
@@ -6652,8 +7117,7 @@ static uint32_t create_main_key_descriptor(
form->s,
kc_info,
primary_key,
- false
- );
+ false);
return ptr - buf;
}
@@ -6661,14 +7125,21 @@ static uint32_t create_main_key_descriptor(
// create and close the main dictionary named "name" using table form, all within
// transaction txn.
//
-int ha_tokudb::create_main_dictionary(const char* name, TABLE* form, DB_TXN* txn, KEY_AND_COL_INFO* kc_info, toku_compression_method compression_method) {
+int ha_tokudb::create_main_dictionary(
+ const char* name,
+ TABLE* form,
+ DB_TXN* txn,
+ KEY_AND_COL_INFO* kc_info,
+ toku_compression_method compression_method) {
+
int error;
DBT row_descriptor;
uchar* row_desc_buff = NULL;
char* newname = NULL;
+ size_t newname_len = 0;
KEY* prim_key = NULL;
uint32_t max_row_desc_buff_size;
- uint hpk= (form->s->primary_key >= MAX_KEY) ? TOKUDB_HIDDEN_PRIMARY_KEY_LENGTH : 0;
+ uint hpk = (form->s->primary_key >= MAX_KEY) ? TOKUDB_HIDDEN_PRIMARY_KEY_LENGTH : 0;
uint32_t block_size;
uint32_t read_block_size;
uint32_t fanout;
@@ -6677,13 +7148,22 @@ int ha_tokudb::create_main_dictionary(const char* name, TABLE* form, DB_TXN* txn
memset(&row_descriptor, 0, sizeof(row_descriptor));
max_row_desc_buff_size = get_max_desc_size(kc_info, form);
- row_desc_buff = (uchar *)tokudb_my_malloc(max_row_desc_buff_size, MYF(MY_WME));
- if (row_desc_buff == NULL){ error = ENOMEM; goto cleanup;}
+ row_desc_buff = (uchar*)tokudb::memory::malloc(
+ max_row_desc_buff_size,
+ MYF(MY_WME));
+ if (row_desc_buff == NULL) {
+ error = ENOMEM;
+ goto cleanup;
+ }
- newname = (char *)tokudb_my_malloc(get_max_dict_name_path_length(name),MYF(MY_WME));
- if (newname == NULL){ error = ENOMEM; goto cleanup;}
+ newname_len = get_max_dict_name_path_length(name);
+ newname = (char*)tokudb::memory::malloc(newname_len, MYF(MY_WME));
+ if (newname == NULL) {
+ error = ENOMEM;
+ goto cleanup;
+ }
- make_name(newname, name, "main");
+ make_name(newname, newname_len, name, "main");
prim_key = (hpk) ? NULL : &form->s->key_info[primary_key];
@@ -6700,21 +7180,26 @@ int ha_tokudb::create_main_dictionary(const char* name, TABLE* form, DB_TXN* txn
hpk,
primary_key,
form,
- kc_info
- );
- assert(row_descriptor.size <= max_row_desc_buff_size);
+ kc_info);
+ assert_always(row_descriptor.size <= max_row_desc_buff_size);
- block_size = get_tokudb_block_size(thd);
- read_block_size = get_tokudb_read_block_size(thd);
- fanout = get_tokudb_fanout(thd);
+ block_size = tokudb::sysvars::block_size(thd);
+ read_block_size = tokudb::sysvars::read_block_size(thd);
+ fanout = tokudb::sysvars::fanout(thd);
/* Create the main table that will hold the real rows */
- error = create_sub_table(newname, &row_descriptor, txn, block_size,
- read_block_size, compression_method, false,
- fanout);
+ error = create_sub_table(
+ newname,
+ &row_descriptor,
+ txn,
+ block_size,
+ read_block_size,
+ compression_method,
+ false,
+ fanout);
cleanup:
- tokudb_my_free(newname);
- tokudb_my_free(row_desc_buff);
+ tokudb::memory::free(newname);
+ tokudb::memory::free(row_desc_buff);
return error;
}
@@ -6728,7 +7213,11 @@ cleanup:
// 0 on success
// error otherwise
//
-int ha_tokudb::create(const char *name, TABLE * form, HA_CREATE_INFO * create_info) {
+int ha_tokudb::create(
+ const char* name,
+ TABLE* form,
+ HA_CREATE_INFO* create_info) {
+
TOKUDB_HANDLER_DBUG_ENTER("%s", name);
int error;
@@ -6738,6 +7227,7 @@ int ha_tokudb::create(const char *name, TABLE * form, HA_CREATE_INFO * create_in
DB_TXN* txn = NULL;
bool do_commit = false;
char* newname = NULL;
+ size_t newname_len = 0;
KEY_AND_COL_INFO kc_info;
tokudb_trx_data *trx = NULL;
THD* thd = ha_thd();
@@ -6753,15 +7243,18 @@ int ha_tokudb::create(const char *name, TABLE * form, HA_CREATE_INFO * create_in
#endif
#if TOKU_INCLUDE_OPTION_STRUCTS
- const srv_row_format_t row_format = (srv_row_format_t) form->s->option_struct->row_format;
+ const tokudb::sysvars::row_format_t row_format =
+ (tokudb::sysvars::row_format_t)form->s->option_struct->row_format;
#else
- const srv_row_format_t row_format = (create_info->used_fields & HA_CREATE_USED_ROW_FORMAT)
+ const tokudb::sysvars::row_format_t row_format =
+ (create_info->used_fields & HA_CREATE_USED_ROW_FORMAT)
? row_type_to_row_format(create_info->row_type)
- : get_row_format(thd);
+ : tokudb::sysvars::row_format(thd);
#endif
- const toku_compression_method compression_method = row_format_to_toku_compression_method(row_format);
+ const toku_compression_method compression_method =
+ row_format_to_toku_compression_method(row_format);
- bool create_from_engine= (create_info->table_options & HA_OPTION_CREATE_FROM_ENGINE);
+ bool create_from_engine = (create_info->table_options & HA_OPTION_CREATE_FROM_ENGINE);
if (create_from_engine) {
// table already exists, nothing to do
error = 0;
@@ -6786,17 +7279,23 @@ int ha_tokudb::create(const char *name, TABLE * form, HA_CREATE_INFO * create_in
}
}
- newname = (char *)tokudb_my_malloc(get_max_dict_name_path_length(name),MYF(MY_WME));
- if (newname == NULL){ error = ENOMEM; goto cleanup;}
+ newname_len = get_max_dict_name_path_length(name);
+ newname = (char*)tokudb::memory::malloc(newname_len, MYF(MY_WME));
+ if (newname == NULL) {
+ error = ENOMEM;
+ goto cleanup;
+ }
trx = (tokudb_trx_data *) thd_get_ha_data(ha_thd(), tokudb_hton);
- if (trx && trx->sub_sp_level && thd_sql_command(thd) == SQLCOM_CREATE_TABLE) {
+ if (trx && trx->sub_sp_level &&
+ thd_sql_command(thd) == SQLCOM_CREATE_TABLE) {
txn = trx->sub_sp_level;
- }
- else {
+ } else {
do_commit = true;
error = txn_begin(db_env, 0, &txn, 0, thd);
- if (error) { goto cleanup; }
+ if (error) {
+ goto cleanup;
+ }
}
primary_key = form->s->primary_key;
@@ -6809,45 +7308,76 @@ int ha_tokudb::create(const char *name, TABLE * form, HA_CREATE_INFO * create_in
trace_create_table_info(name,form);
/* Create status.tokudb and save relevant metadata */
- make_name(newname, name, "status");
+ make_name(newname, newname_len, name, "status");
- error = tokudb::create_status(db_env, &status_block, newname, txn);
+ error = tokudb::metadata::create(db_env, &status_block, newname, txn);
if (error) { goto cleanup; }
version = HA_TOKU_VERSION;
- error = write_to_status(status_block, hatoku_new_version,&version,sizeof(version), txn);
- if (error) { goto cleanup; }
+ error = write_to_status(
+ status_block,
+ hatoku_new_version,
+ &version,
+ sizeof(version),
+ txn);
+ if (error) {
+ goto cleanup;
+ }
capabilities = HA_TOKU_CAP;
- error = write_to_status(status_block, hatoku_capabilities,&capabilities,sizeof(capabilities), txn);
- if (error) { goto cleanup; }
+ error = write_to_status(
+ status_block,
+ hatoku_capabilities,
+ &capabilities,
+ sizeof(capabilities),
+ txn);
+ if (error) {
+ goto cleanup;
+ }
- error = write_auto_inc_create(status_block, create_info->auto_increment_value, txn);
- if (error) { goto cleanup; }
+ error = write_auto_inc_create(
+ status_block,
+ create_info->auto_increment_value,
+ txn);
+ if (error) {
+ goto cleanup;
+ }
#if WITH_PARTITION_STORAGE_ENGINE
if (TOKU_PARTITION_WRITE_FRM_DATA || form->part_info == NULL) {
error = write_frm_data(status_block, txn, form->s->path.str);
- if (error) { goto cleanup; }
+ if (error) {
+ goto cleanup;
+ }
}
#else
error = write_frm_data(status_block, txn, form->s->path.str);
- if (error) { goto cleanup; }
+ if (error) {
+ goto cleanup;
+ }
#endif
error = allocate_key_and_col_info(form->s, &kc_info);
- if (error) { goto cleanup; }
+ if (error) {
+ goto cleanup;
+ }
error = initialize_key_and_col_info(
- form->s,
+ form->s,
form,
&kc_info,
hidden_primary_key,
- primary_key
- );
- if (error) { goto cleanup; }
+ primary_key);
+ if (error) {
+ goto cleanup;
+ }
- error = create_main_dictionary(name, form, txn, &kc_info, compression_method);
+ error = create_main_dictionary(
+ name,
+ form,
+ txn,
+ &kc_info,
+ compression_method);
if (error) {
goto cleanup;
}
@@ -6855,32 +7385,44 @@ int ha_tokudb::create(const char *name, TABLE * form, HA_CREATE_INFO * create_in
for (uint i = 0; i < form->s->keys; i++) {
if (i != primary_key) {
- error = create_secondary_dictionary(name, form, &form->key_info[i], txn, &kc_info, i, false, compression_method);
+ error = create_secondary_dictionary(
+ name,
+ form,
+ &form->key_info[i],
+ txn,
+ &kc_info,
+ i,
+ false,
+ compression_method);
if (error) {
goto cleanup;
}
- error = write_key_name_to_status(status_block, form->s->key_info[i].name, txn);
- if (error) { goto cleanup; }
+ error = write_key_name_to_status(
+ status_block,
+ form->s->key_info[i].name,
+ txn);
+ if (error) {
+ goto cleanup;
+ }
}
}
error = 0;
cleanup:
if (status_block != NULL) {
- int r = tokudb::close_status(&status_block);
- assert(r==0);
+ int r = tokudb::metadata::close(&status_block);
+ assert_always(r==0);
}
free_key_and_col_info(&kc_info);
if (do_commit && txn) {
if (error) {
abort_txn(txn);
- }
- else {
+ } else {
commit_txn(txn,0);
}
}
- tokudb_my_free(newname);
+ tokudb::memory::free(newname);
TOKUDB_HANDLER_DBUG_RETURN(error);
}
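
ha_tokudb::create() either joins an enclosing sub-transaction (CREATE TABLE issued from inside a stored routine) or opens its own, remembering via do_commit whether it must commit or abort, and every failing step funnels to one cleanup label. The control flow reduced to a self-contained sketch — txn_begin/commit_txn/abort_txn and create_dictionaries are stand-ins here, not the engine's real signatures:

    struct DB_TXN {};  // stand-in for the engine's opaque transaction handle

    static int  txn_begin(DB_TXN** txn) { *txn = new DB_TXN(); return 0; }
    static void commit_txn(DB_TXN* t)   { delete t; }
    static void abort_txn(DB_TXN* t)    { delete t; }
    static int  create_dictionaries(DB_TXN*) { return 0; }

    int create_flow(DB_TXN* enclosing) {
        int error;
        bool do_commit = false;
        DB_TXN* txn = enclosing;
        if (txn == nullptr) {
            do_commit = true;               // we own this transaction
            error = txn_begin(&txn);
            if (error) {
                goto cleanup;
            }
        }
        error = create_dictionaries(txn);   // status, main, then secondaries
        if (error) {
            goto cleanup;
        }
        error = 0;
    cleanup:
        if (do_commit && txn) {
            if (error) {
                abort_txn(txn);             // roll the whole CREATE back
            } else {
                commit_txn(txn);
            }
        }
        return error;
    }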
@@ -6903,27 +7445,36 @@ int ha_tokudb::discard_or_import_tablespace(my_bool discard) {
// is_key specifies if it is a secondary index (and hence a "key-" needs to be prepended) or
// if it is not a secondary index
//
-int ha_tokudb::delete_or_rename_dictionary( const char* from_name, const char* to_name, const char* secondary_name, bool is_key, DB_TXN* txn, bool is_delete) {
+int ha_tokudb::delete_or_rename_dictionary(
+ const char* from_name,
+ const char* to_name,
+ const char* secondary_name,
+ bool is_key,
+ DB_TXN* txn,
+ bool is_delete) {
+
int error;
char dict_name[MAX_DICT_NAME_LEN];
char* new_from_name = NULL;
+ size_t new_from_name_len = 0;
char* new_to_name = NULL;
- assert(txn);
+ size_t new_to_name_len = 0;
+ assert_always(txn);
- new_from_name = (char *)tokudb_my_malloc(
- get_max_dict_name_path_length(from_name),
- MYF(MY_WME)
- );
+ new_from_name_len = get_max_dict_name_path_length(from_name);
+ new_from_name = (char*)tokudb::memory::malloc(
+ new_from_name_len,
+ MYF(MY_WME));
if (new_from_name == NULL) {
error = ENOMEM;
goto cleanup;
}
if (!is_delete) {
- assert(to_name);
- new_to_name = (char *)tokudb_my_malloc(
- get_max_dict_name_path_length(to_name),
- MYF(MY_WME)
- );
+ assert_always(to_name);
+ new_to_name_len = get_max_dict_name_path_length(to_name);
+ new_to_name = (char*)tokudb::memory::malloc(
+ new_to_name_len,
+ MYF(MY_WME));
if (new_to_name == NULL) {
error = ENOMEM;
goto cleanup;
@@ -6932,32 +7483,37 @@ int ha_tokudb::delete_or_rename_dictionary( const char* from_name, const char* t
if (is_key) {
sprintf(dict_name, "key-%s", secondary_name);
- make_name(new_from_name, from_name, dict_name);
- }
- else {
- make_name(new_from_name, from_name, secondary_name);
+ make_name(new_from_name, new_from_name_len, from_name, dict_name);
+ } else {
+ make_name(new_from_name, new_from_name_len, from_name, secondary_name);
}
if (!is_delete) {
if (is_key) {
sprintf(dict_name, "key-%s", secondary_name);
- make_name(new_to_name, to_name, dict_name);
- }
- else {
- make_name(new_to_name, to_name, secondary_name);
+ make_name(new_to_name, new_to_name_len, to_name, dict_name);
+ } else {
+ make_name(new_to_name, new_to_name_len, to_name, secondary_name);
}
}
if (is_delete) {
error = db_env->dbremove(db_env, txn, new_from_name, NULL, 0);
+ } else {
+ error = db_env->dbrename(
+ db_env,
+ txn,
+ new_from_name,
+ NULL,
+ new_to_name,
+ 0);
}
- else {
- error = db_env->dbrename(db_env, txn, new_from_name, NULL, new_to_name, 0);
+ if (error) {
+ goto cleanup;
}
- if (error) { goto cleanup; }
cleanup:
- tokudb_my_free(new_from_name);
- tokudb_my_free(new_to_name);
+ tokudb::memory::free(new_from_name);
+ tokudb::memory::free(new_to_name);
return error;
}
@@ -7023,12 +7579,12 @@ int ha_tokudb::delete_or_rename_table (const char* from_name, const char* to_nam
if (error) { goto cleanup; }
error = status_cursor->c_close(status_cursor);
- assert(error==0);
+ assert_always(error==0);
status_cursor = NULL;
if (error) { goto cleanup; }
error = status_db->close(status_db, 0);
- assert(error == 0);
+ assert_always(error == 0);
status_db = NULL;
//
@@ -7041,11 +7597,11 @@ int ha_tokudb::delete_or_rename_table (const char* from_name, const char* to_nam
cleanup:
if (status_cursor) {
int r = status_cursor->c_close(status_cursor);
- assert(r==0);
+ assert_always(r==0);
}
if (status_db) {
int r = status_db->close(status_db, 0);
- assert(r==0);
+ assert_always(r==0);
}
if (txn) {
if (error) {
@@ -7069,12 +7625,25 @@ cleanup:
//
int ha_tokudb::delete_table(const char *name) {
TOKUDB_HANDLER_DBUG_ENTER("%s", name);
+ TOKUDB_SHARE* share = TOKUDB_SHARE::get_share(name, NULL, NULL, false);
+ if (share) {
+ share->unlock();
+ share->release();
+ // this should be enough to handle locking as the higher level MDL
+ // on this table should prevent any new analyze tasks.
+ share->cancel_background_jobs();
+ TOKUDB_SHARE::drop_share(share);
+ }
+
int error;
error = delete_or_rename_table(name, NULL, true);
- if (error == DB_LOCK_NOTGRANTED && ((tokudb_debug & TOKUDB_DEBUG_HIDE_DDL_LOCK_ERRORS) == 0)) {
- sql_print_error("Could not delete table %s because \
-another transaction has accessed the table. \
-To drop the table, make sure no transactions touch the table.", name);
+ if (TOKUDB_LIKELY(TOKUDB_DEBUG_FLAGS(TOKUDB_DEBUG_HIDE_DDL_LOCK_ERRORS) == 0) &&
+ error == DB_LOCK_NOTGRANTED) {
+ sql_print_error(
+ "Could not delete table %s because another transaction has "
+ "accessed the table. To drop the table, make sure no "
+ "transactions touch the table.",
+ name);
}
TOKUDB_HANDLER_DBUG_RETURN(error);
}
@@ -7091,12 +7660,25 @@ To drop the table, make sure no transactions touch the table.", name);
//
int ha_tokudb::rename_table(const char *from, const char *to) {
TOKUDB_HANDLER_DBUG_ENTER("%s %s", from, to);
+ TOKUDB_SHARE* share = TOKUDB_SHARE::get_share(from, NULL, NULL, false);
+ if (share) {
+ share->unlock();
+ share->release();
+ // this should be enough to handle locking as the higher level MDL
+ // on this table should prevent any new analyze tasks.
+ share->cancel_background_jobs();
+ TOKUDB_SHARE::drop_share(share);
+ }
int error;
error = delete_or_rename_table(from, to, false);
- if (error == DB_LOCK_NOTGRANTED && ((tokudb_debug & TOKUDB_DEBUG_HIDE_DDL_LOCK_ERRORS) == 0)) {
- sql_print_error("Could not rename table from %s to %s because \
-another transaction has accessed the table. \
-To rename the table, make sure no transactions touch the table.", from, to);
+ if (TOKUDB_LIKELY(TOKUDB_DEBUG_FLAGS(TOKUDB_DEBUG_HIDE_DDL_LOCK_ERRORS) == 0) &&
+ error == DB_LOCK_NOTGRANTED) {
+ sql_print_error(
+ "Could not rename table from %s to %s because another transaction "
+ "has accessed the table. To rename the table, make sure no "
+ "transactions touch the table.",
+ from,
+ to);
}
TOKUDB_HANDLER_DBUG_RETURN(error);
}
@@ -7111,9 +7693,11 @@ To rename the table, make sure no transactions touch the table.", from, to);
double ha_tokudb::scan_time() {
TOKUDB_HANDLER_DBUG_ENTER("");
double ret_val = (double)stats.records / 3;
- if (tokudb_debug & TOKUDB_DEBUG_RETURN) {
- TOKUDB_HANDLER_TRACE("return %" PRIu64 " %f", (uint64_t) stats.records, ret_val);
- }
+ TOKUDB_HANDLER_TRACE_FOR_FLAGS(
+ TOKUDB_DEBUG_RETURN,
+ "return %" PRIu64 " %f",
+ (uint64_t)stats.records,
+ ret_val);
DBUG_RETURN(ret_val);
}
@@ -7137,10 +7721,7 @@ double ha_tokudb::keyread_time(uint index, uint ranges, ha_rows rows)
(table->key_info[index].key_length +
ref_length) + 1);
ret_val = (rows + keys_per_block - 1)/ keys_per_block;
- if (tokudb_debug & TOKUDB_DEBUG_RETURN) {
- TOKUDB_HANDLER_TRACE("return %f", ret_val);
- }
- DBUG_RETURN(ret_val);
+ TOKUDB_HANDLER_DBUG_RETURN_DOUBLE(ret_val);
}
//
@@ -7202,19 +7783,13 @@ double ha_tokudb::read_time(
ret_val = is_clustering ? ret_val + 0.00001 : ret_val;
cleanup:
- if (tokudb_debug & TOKUDB_DEBUG_RETURN) {
- TOKUDB_HANDLER_TRACE("return %f", ret_val);
- }
- DBUG_RETURN(ret_val);
+ TOKUDB_HANDLER_DBUG_RETURN_DOUBLE(ret_val);
}
double ha_tokudb::index_only_read_time(uint keynr, double records) {
TOKUDB_HANDLER_DBUG_ENTER("%u %f", keynr, records);
double ret_val = keyread_time(keynr, 1, (ha_rows)records);
- if (tokudb_debug & TOKUDB_DEBUG_RETURN) {
- TOKUDB_HANDLER_TRACE("return %f", ret_val);
- }
- DBUG_RETURN(ret_val);
+ TOKUDB_HANDLER_DBUG_RETURN_DOUBLE(ret_val);
}
//
@@ -7287,9 +7862,11 @@ ha_rows ha_tokudb::records_in_range(uint keynr, key_range* start_key, key_range*
ret_val = (ha_rows) (rows <= 1 ? 1 : rows);
cleanup:
- if (tokudb_debug & TOKUDB_DEBUG_RETURN) {
- TOKUDB_HANDLER_TRACE("return %" PRIu64 " %" PRIu64, (uint64_t) ret_val, rows);
- }
+ TOKUDB_HANDLER_TRACE_FOR_FLAGS(
+ TOKUDB_DEBUG_RETURN,
+ "return %" PRIu64 " %" PRIu64,
+ (uint64_t)ret_val,
+ rows);
DBUG_RETURN(ret_val);
}
@@ -7345,12 +7922,19 @@ void ha_tokudb::init_auto_increment() {
commit_txn(txn, 0);
}
- if (tokudb_debug & TOKUDB_DEBUG_AUTO_INCREMENT) {
- TOKUDB_HANDLER_TRACE("init auto increment:%lld", share->last_auto_increment);
- }
+ TOKUDB_HANDLER_TRACE_FOR_FLAGS(
+ TOKUDB_DEBUG_AUTO_INCREMENT,
+ "init auto increment:%lld",
+ share->last_auto_increment);
}
-void ha_tokudb::get_auto_increment(ulonglong offset, ulonglong increment, ulonglong nb_desired_values, ulonglong * first_value, ulonglong * nb_reserved_values) {
+void ha_tokudb::get_auto_increment(
+ ulonglong offset,
+ ulonglong increment,
+ ulonglong nb_desired_values,
+ ulonglong* first_value,
+ ulonglong* nb_reserved_values) {
+
TOKUDB_HANDLER_DBUG_ENTER("");
ulonglong nr;
bool over;
@@ -7361,14 +7945,13 @@ void ha_tokudb::get_auto_increment(ulonglong offset, ulonglong increment, ulongl
DBUG_VOID_RETURN;
}
- tokudb_pthread_mutex_lock(&share->mutex);
+ share->lock();
if (share->auto_inc_create_value > share->last_auto_increment) {
nr = share->auto_inc_create_value;
over = false;
share->last_auto_increment = share->auto_inc_create_value;
- }
- else {
+ } else {
nr = share->last_auto_increment + increment;
over = nr < share->last_auto_increment;
if (over)
@@ -7378,19 +7961,23 @@ void ha_tokudb::get_auto_increment(ulonglong offset, ulonglong increment, ulongl
share->last_auto_increment = nr + (nb_desired_values - 1)*increment;
if (delay_updating_ai_metadata) {
ai_metadata_update_required = true;
- }
- else {
- update_max_auto_inc(share->status_block, share->last_auto_increment);
- }
- }
-
- if (tokudb_debug & TOKUDB_DEBUG_AUTO_INCREMENT) {
- TOKUDB_HANDLER_TRACE("get_auto_increment(%lld,%lld,%lld):got:%lld:%lld",
- offset, increment, nb_desired_values, nr, nb_desired_values);
- }
+ } else {
+ update_max_auto_inc(
+ share->status_block,
+ share->last_auto_increment);
+ }
+ }
+ TOKUDB_HANDLER_TRACE_FOR_FLAGS(
+ TOKUDB_DEBUG_AUTO_INCREMENT,
+ "get_auto_increment(%lld,%lld,%lld): got:%lld:%lld",
+ offset,
+ increment,
+ nb_desired_values,
+ nr,
+ nb_desired_values);
*first_value = nr;
*nb_reserved_values = nb_desired_values;
- tokudb_pthread_mutex_unlock(&share->mutex);
+ share->unlock();
TOKUDB_HANDLER_DBUG_VOID_RETURN;
}
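
The reservation above is plain arithmetic once the share lock is held: the first handed-out value is last + increment (or the CREATE-time seed), and the high-water mark advances by (nb_desired_values - 1) * increment. The same computation in isolation, with the unsigned-wraparound check the code performs — a sketch that omits the offset parameter and the auto_inc_create_value branch:

    #include <climits>

    typedef unsigned long long ulonglong;

    // reserve a block of nb auto-increment values; returns the first value and
    // advances *last to the final value of the block (mirrors the locked
    // section of get_auto_increment, simplified)
    static ulonglong reserve_block(ulonglong* last, ulonglong increment,
                                   ulonglong nb_desired_values) {
        ulonglong nr = *last + increment;
        if (nr < *last) {           // unsigned wraparound: range exhausted
            nr = ULLONG_MAX;
        }
        *last = nr + (nb_desired_values - 1) * increment;
        return nr;
    }
    // example: last = 10, increment = 5, nb = 3 -> returns 15, last becomes 25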
@@ -7419,16 +8006,15 @@ bool ha_tokudb::is_auto_inc_singleton(){
// 0 on success, error otherwise
//
int ha_tokudb::tokudb_add_index(
- TABLE *table_arg,
- KEY *key_info,
- uint num_of_keys,
- DB_TXN* txn,
+ TABLE* table_arg,
+ KEY* key_info,
+ uint num_of_keys,
+ DB_TXN* txn,
bool* inc_num_DBs,
- bool* modified_DBs
- )
-{
+ bool* modified_DBs) {
+
TOKUDB_HANDLER_DBUG_ENTER("");
- assert(txn);
+ assert_always(txn);
int error;
uint curr_index = 0;
@@ -7438,7 +8024,7 @@ int ha_tokudb::tokudb_add_index(
THD* thd = ha_thd();
DB_LOADER* loader = NULL;
DB_INDEXER* indexer = NULL;
- bool loader_save_space = get_load_save_space(thd);
+ bool loader_save_space = tokudb::sysvars::load_save_space(thd);
bool use_hot_index = (lock.type == TL_WRITE_ALLOW_WRITE);
uint32_t loader_flags = loader_save_space ? LOADER_COMPRESS_INTERMEDIATES : 0;
uint32_t indexer_flags = 0;
@@ -7468,14 +8054,17 @@ int ha_tokudb::tokudb_add_index(
//
// get the row type to use for the indexes we're adding
//
- toku_compression_method compression_method = get_compression_method(share->file);
+ toku_compression_method compression_method =
+ get_compression_method(share->file);
//
// status message to be shown in "show process list"
//
const char *orig_proc_info = tokudb_thd_get_proc_info(thd);
- char status_msg[MAX_ALIAS_NAME + 200]; //buffer of 200 should be a good upper bound.
- ulonglong num_processed = 0; //variable that stores number of elements inserted thus far
+ // buffer of 200 should be a good upper bound.
+ char status_msg[MAX_ALIAS_NAME + 200];
+ // variable that stores number of elements inserted thus far
+ ulonglong num_processed = 0;
thd_proc_info(thd, "Adding indexes");
//
@@ -7500,13 +8089,15 @@ int ha_tokudb::tokudb_add_index(
}
}
- rw_wrlock(&share->num_DBs_lock);
+ share->_num_DBs_lock.lock_write();
rw_lock_taken = true;
//
// open all the DB files and set the appropriate variables in share
// they go to the end of share->key_file
//
- creating_hot_index = use_hot_index && num_of_keys == 1 && (key_info[0].flags & HA_NOSAME) == 0;
+ creating_hot_index =
+ use_hot_index && num_of_keys == 1 &&
+ (key_info[0].flags & HA_NOSAME) == 0;
if (use_hot_index && (share->num_DBs > curr_num_DBs)) {
//
// already have hot index in progress, get out
@@ -7522,35 +8113,47 @@ int ha_tokudb::tokudb_add_index(
&share->kc_info.key_filters[curr_index],
&key_info[i],
table_arg,
- false
- );
+ false);
if (!hidden_primary_key) {
set_key_filter(
&share->kc_info.key_filters[curr_index],
&table_arg->key_info[primary_key],
table_arg,
- false
- );
+ false);
}
- error = initialize_col_pack_info(&share->kc_info,table_arg->s,curr_index);
+ error = initialize_col_pack_info(
+ &share->kc_info,
+ table_arg->s,
+ curr_index);
if (error) {
goto cleanup;
}
}
- error = create_secondary_dictionary(share->table_name, table_arg, &key_info[i], txn, &share->kc_info, curr_index, creating_hot_index, compression_method);
- if (error) { goto cleanup; }
+ error = create_secondary_dictionary(
+ share->full_table_name(),
+ table_arg,
+ &key_info[i],
+ txn,
+ &share->kc_info,
+ curr_index,
+ creating_hot_index,
+ compression_method);
+ if (error) {
+ goto cleanup;
+ }
error = open_secondary_dictionary(
- &share->key_file[curr_index],
+ &share->key_file[curr_index],
&key_info[i],
- share->table_name,
+ share->full_table_name(),
false,
- txn
- );
- if (error) { goto cleanup; }
+ txn);
+ if (error) {
+ goto cleanup;
+ }
}
if (creating_hot_index) {
@@ -7564,17 +8167,22 @@ int ha_tokudb::tokudb_add_index(
num_of_keys,
&share->key_file[curr_num_DBs],
mult_db_flags,
- indexer_flags
- );
- if (error) { goto cleanup; }
+ indexer_flags);
+ if (error) {
+ goto cleanup;
+ }
error = indexer->set_poll_function(indexer, ai_poll_fun, &lc);
- if (error) { goto cleanup; }
+ if (error) {
+ goto cleanup;
+ }
error = indexer->set_error_callback(indexer, loader_ai_err_fun, &lc);
- if (error) { goto cleanup; }
+ if (error) {
+ goto cleanup;
+ }
- rw_unlock(&share->num_DBs_lock);
+ share->_num_DBs_lock.unlock();
rw_lock_taken = false;
#ifdef HA_TOKUDB_HAS_THD_PROGRESS
@@ -7585,17 +8193,20 @@ int ha_tokudb::tokudb_add_index(
error = indexer->build(indexer);
- if (error) { goto cleanup; }
+ if (error) {
+ goto cleanup;
+ }
- rw_wrlock(&share->num_DBs_lock);
+ share->_num_DBs_lock.lock_write();
error = indexer->close(indexer);
- rw_unlock(&share->num_DBs_lock);
- if (error) { goto cleanup; }
+ share->_num_DBs_lock.unlock();
+ if (error) {
+ goto cleanup;
+ }
indexer = NULL;
- }
- else {
+ } else {
DBUG_ASSERT(table->mdl_ticket->get_type() >= MDL_SHARED_NO_WRITE);
- rw_unlock(&share->num_DBs_lock);
+ share->_num_DBs_lock.unlock();
rw_lock_taken = false;
prelocked_right_range_size = 0;
prelocked_left_range_size = 0;
@@ -7608,27 +8219,37 @@ int ha_tokudb::tokudb_add_index(
bf_info.key_to_compare = NULL;
error = db_env->create_loader(
- db_env,
- txn,
- &loader,
+ db_env,
+ txn,
+ &loader,
NULL, // no src_db needed
- num_of_keys,
- &share->key_file[curr_num_DBs],
+ num_of_keys,
+ &share->key_file[curr_num_DBs],
mult_put_flags,
mult_dbt_flags,
- loader_flags
- );
- if (error) { goto cleanup; }
+ loader_flags);
+ if (error) {
+ goto cleanup;
+ }
error = loader->set_poll_function(loader, loader_poll_fun, &lc);
- if (error) { goto cleanup; }
+ if (error) {
+ goto cleanup;
+ }
error = loader->set_error_callback(loader, loader_ai_err_fun, &lc);
- if (error) { goto cleanup; }
+ if (error) {
+ goto cleanup;
+ }
//
// scan primary table, create each secondary key, add to each DB
//
- if ((error = share->file->cursor(share->file, txn, &tmp_cursor, DB_SERIALIZABLE))) {
+ error = share->file->cursor(
+ share->file,
+ txn,
+ &tmp_cursor,
+ DB_SERIALIZABLE);
+ if (error) {
tmp_cursor = NULL; // Safety
goto cleanup;
}
@@ -7643,16 +8264,21 @@ int ha_tokudb::tokudb_add_index(
share->file->dbt_neg_infty(),
share->file->dbt_pos_infty(),
true,
- 0
- );
- if (error) { goto cleanup; }
+ 0);
+ if (error) {
+ goto cleanup;
+ }
// set the bulk fetch iteration to its max so that adding an
// index fills the bulk fetch buffer every time. we do not
// want it to grow exponentially fast.
rows_fetched_using_bulk_fetch = 0;
bulk_fetch_iteration = HA_TOKU_BULK_FETCH_ITERATION_MAX;
- cursor_ret_val = tmp_cursor->c_getf_next(tmp_cursor, DB_PRELOCKED,smart_dbt_bf_callback, &bf_info);
+ cursor_ret_val = tmp_cursor->c_getf_next(
+ tmp_cursor,
+ DB_PRELOCKED,
+ smart_dbt_bf_callback,
+ &bf_info);
#ifdef HA_TOKUDB_HAS_THD_PROGRESS
// initialize a two phase progress report.
@@ -7660,21 +8286,30 @@ int ha_tokudb::tokudb_add_index(
thd_progress_init(thd, 2);
#endif
- while (cursor_ret_val != DB_NOTFOUND || ((bytes_used_in_range_query_buff - curr_range_query_buff_offset) > 0)) {
- if ((bytes_used_in_range_query_buff - curr_range_query_buff_offset) == 0) {
+ while (cursor_ret_val != DB_NOTFOUND ||
+ ((bytes_used_in_range_query_buff -
+ curr_range_query_buff_offset) > 0)) {
+ if ((bytes_used_in_range_query_buff -
+ curr_range_query_buff_offset) == 0) {
invalidate_bulk_fetch(); // reset the buffers
- cursor_ret_val = tmp_cursor->c_getf_next(tmp_cursor, DB_PRELOCKED, smart_dbt_bf_callback, &bf_info);
+ cursor_ret_val = tmp_cursor->c_getf_next(
+ tmp_cursor,
+ DB_PRELOCKED,
+ smart_dbt_bf_callback,
+ &bf_info);
if (cursor_ret_val != DB_NOTFOUND && cursor_ret_val != 0) {
error = cursor_ret_val;
goto cleanup;
}
}
- // do this check in case the the c_getf_next did not put anything into the buffer because
- // there was no more data
- if ((bytes_used_in_range_query_buff - curr_range_query_buff_offset) == 0) {
+        // do this check in case the c_getf_next did not put anything
+ // into the buffer because there was no more data
+ if ((bytes_used_in_range_query_buff -
+ curr_range_query_buff_offset) == 0) {
break;
}
- // at this point, we know the range query buffer has at least one key/val pair
+ // at this point, we know the range query buffer has at least one
+ // key/val pair
uchar* curr_pos = range_query_buff+curr_range_query_buff_offset;
uint32_t key_size = *(uint32_t *)curr_pos;
@@ -7694,17 +8329,26 @@ int ha_tokudb::tokudb_add_index(
curr_range_query_buff_offset = curr_pos - range_query_buff;
error = loader->put(loader, &curr_pk_key, &curr_pk_val);
- if (error) { goto cleanup; }
+ if (error) {
+ goto cleanup;
+ }
num_processed++;
if ((num_processed % 1000) == 0) {
- sprintf(status_msg, "Adding indexes: Fetched %llu of about %llu rows, loading of data still remains.",
- num_processed, (long long unsigned) share->rows);
+ sprintf(
+ status_msg,
+ "Adding indexes: Fetched %llu of about %llu rows, loading "
+ "of data still remains.",
+ num_processed,
+ (long long unsigned)share->row_count());
thd_proc_info(thd, status_msg);
#ifdef HA_TOKUDB_HAS_THD_PROGRESS
- thd_progress_report(thd, num_processed, (long long unsigned) share->rows);
+ thd_progress_report(
+ thd,
+ num_processed,
+ (long long unsigned)share->row_count());
#endif
if (thd_killed(thd)) {
@@ -7714,7 +8358,7 @@ int ha_tokudb::tokudb_add_index(
}
}
error = tmp_cursor->c_close(tmp_cursor);
- assert(error==0);
+ assert_always(error==0);
tmp_cursor = NULL;
#ifdef HA_TOKUDB_HAS_THD_PROGRESS
@@ -7732,9 +8376,14 @@ int ha_tokudb::tokudb_add_index(
for (uint i = 0; i < num_of_keys; i++, curr_index++) {
if (key_info[i].flags & HA_NOSAME) {
bool is_unique;
- error = is_index_unique(&is_unique, txn, share->key_file[curr_index], &key_info[i],
- creating_hot_index ? 0 : DB_PRELOCKED_WRITE);
- if (error) goto cleanup;
+ error = is_index_unique(
+ &is_unique,
+ txn,
+ share->key_file[curr_index],
+ &key_info[i],
+ creating_hot_index ? 0 : DB_PRELOCKED_WRITE);
+ if (error)
+ goto cleanup;
if (!is_unique) {
error = HA_ERR_FOUND_DUPP_KEY;
last_dup_key = i;
@@ -7743,22 +8392,20 @@ int ha_tokudb::tokudb_add_index(
}
}
+ share->lock();
//
// We have an accurate row count, might as well update share->rows
//
if(!creating_hot_index) {
- tokudb_pthread_mutex_lock(&share->mutex);
- share->rows = num_processed;
- tokudb_pthread_mutex_unlock(&share->mutex);
+ share->set_row_count(num_processed, true);
}
//
// now write stuff to status.tokudb
//
- tokudb_pthread_mutex_lock(&share->mutex);
for (uint i = 0; i < num_of_keys; i++) {
write_key_name_to_status(share->status_block, key_info[i].name, txn);
}
- tokudb_pthread_mutex_unlock(&share->mutex);
+ share->unlock();
error = 0;
cleanup:
@@ -7766,12 +8413,12 @@ cleanup:
thd_progress_end(thd);
#endif
if (rw_lock_taken) {
- rw_unlock(&share->num_DBs_lock);
+ share->_num_DBs_lock.unlock();
rw_lock_taken = false;
}
if (tmp_cursor) {
int r = tmp_cursor->c_close(tmp_cursor);
- assert(r==0);
+ assert_always(r==0);
tmp_cursor = NULL;
}
if (loader != NULL) {
@@ -7782,14 +8429,17 @@ cleanup:
if (indexer != NULL) {
sprintf(status_msg, "aborting creation of indexes.");
thd_proc_info(thd, status_msg);
- rw_wrlock(&share->num_DBs_lock);
+ share->_num_DBs_lock.lock_write();
indexer->abort(indexer);
- rw_unlock(&share->num_DBs_lock);
+ share->_num_DBs_lock.unlock();
}
- if (error == DB_LOCK_NOTGRANTED && ((tokudb_debug & TOKUDB_DEBUG_HIDE_DDL_LOCK_ERRORS) == 0)) {
- sql_print_error("Could not add indexes to table %s because \
-another transaction has accessed the table. \
-To add indexes, make sure no transactions touch the table.", share->table_name);
+ if (TOKUDB_LIKELY(TOKUDB_DEBUG_FLAGS(TOKUDB_DEBUG_HIDE_DDL_LOCK_ERRORS) == 0) &&
+ error == DB_LOCK_NOTGRANTED) {
+ sql_print_error(
+ "Could not add indexes to table %s because another transaction has "
+ "accessed the table. To add indexes, make sure no transactions "
+ "touch the table.",
+ share->full_table_name());
}
thd_proc_info(thd, orig_proc_info);
TOKUDB_HANDLER_DBUG_RETURN(error ? error : loader_error);
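
The bulk-fetch loop above drains range_query_buff between c_getf_next refills, reading a 32-bit key size at the current offset before handing each pair to the loader. A hedged decode sketch — the [u32 size][bytes] pairing for key then value is assumed from the key_size read shown, not taken from the buffer's real definition:

    #include <cstdint>
    #include <cstring>

    struct kv_pair {
        const uint8_t* key; uint32_t key_len;
        const uint8_t* val; uint32_t val_len;
    };

    // advance over one length-prefixed key/value entry; returns the next offset
    static const uint8_t* next_pair(const uint8_t* pos, kv_pair* out) {
        uint32_t sz;
        memcpy(&sz, pos, sizeof(sz));      // memcpy avoids an unaligned deref
        pos += sizeof(sz);
        out->key = pos; out->key_len = sz;
        pos += sz;
        memcpy(&sz, pos, sizeof(sz));
        pos += sizeof(sz);
        out->val = pos; out->val_len = sz;
        return pos + sz;
    }
    // the loop's "buffer drained" test is then simply pos == buff + bytes_used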
@@ -7799,7 +8449,12 @@ To add indexes, make sure no transactions touch the table.", share->table_name);
// Internal function called by ha_tokudb::add_index and ha_tokudb::alter_table_phase2
// Closes added indexes in case of error in error path of add_index and alter_table_phase2
//
-void ha_tokudb::restore_add_index(TABLE* table_arg, uint num_of_keys, bool incremented_numDBs, bool modified_DBs) {
+void ha_tokudb::restore_add_index(
+ TABLE* table_arg,
+ uint num_of_keys,
+ bool incremented_numDBs,
+ bool modified_DBs) {
+
uint curr_num_DBs = table_arg->s->keys + tokudb_test(hidden_primary_key);
uint curr_index = 0;
@@ -7808,7 +8463,7 @@ void ha_tokudb::restore_add_index(TABLE* table_arg, uint num_of_keys, bool incre
// so that there is not a window
//
if (incremented_numDBs) {
- rw_wrlock(&share->num_DBs_lock);
+ share->_num_DBs_lock.lock_write();
share->num_DBs--;
}
if (modified_DBs) {
@@ -7821,15 +8476,14 @@ void ha_tokudb::restore_add_index(TABLE* table_arg, uint num_of_keys, bool incre
if (share->key_file[curr_index]) {
int r = share->key_file[curr_index]->close(
share->key_file[curr_index],
- 0
- );
- assert(r==0);
+ 0);
+ assert_always(r==0);
share->key_file[curr_index] = NULL;
}
}
}
if (incremented_numDBs) {
- rw_unlock(&share->num_DBs_lock);
+ share->_num_DBs_lock.unlock();
}
}
@@ -7837,14 +8491,22 @@ void ha_tokudb::restore_add_index(TABLE* table_arg, uint num_of_keys, bool incre
// Internal function called by ha_tokudb::prepare_drop_index and ha_tokudb::alter_table_phase2
// With a transaction, drops dictionaries associated with indexes in key_num
//
-int ha_tokudb::drop_indexes(TABLE *table_arg, uint *key_num, uint num_of_keys, KEY *key_info, DB_TXN* txn) {
+int ha_tokudb::drop_indexes(
+ TABLE* table_arg,
+ uint* key_num,
+ uint num_of_keys,
+ KEY* key_info,
+ DB_TXN* txn) {
+
TOKUDB_HANDLER_DBUG_ENTER("");
- assert(txn);
+ assert_always(txn);
int error = 0;
for (uint i = 0; i < num_of_keys; i++) {
uint curr_index = key_num[i];
- error = share->key_file[curr_index]->pre_acquire_fileops_lock(share->key_file[curr_index],txn);
+ error = share->key_file[curr_index]->pre_acquire_fileops_lock(
+ share->key_file[curr_index],
+ txn);
if (error != 0) {
goto cleanup;
}
@@ -7852,30 +8514,51 @@ int ha_tokudb::drop_indexes(TABLE *table_arg, uint *key_num, uint num_of_keys, K
for (uint i = 0; i < num_of_keys; i++) {
uint curr_index = key_num[i];
int r = share->key_file[curr_index]->close(share->key_file[curr_index],0);
- assert(r==0);
+ assert_always(r==0);
share->key_file[curr_index] = NULL;
- error = remove_key_name_from_status(share->status_block, key_info[curr_index].name, txn);
- if (error) { goto cleanup; }
+ error = remove_key_name_from_status(
+ share->status_block,
+ key_info[curr_index].name,
+ txn);
+ if (error) {
+ goto cleanup;
+ }
- error = delete_or_rename_dictionary(share->table_name, NULL, key_info[curr_index].name, true, txn, true);
- if (error) { goto cleanup; }
+ error = delete_or_rename_dictionary(
+ share->full_table_name(),
+ NULL,
+ key_info[curr_index].name,
+ true,
+ txn,
+ true);
+ if (error) {
+ goto cleanup;
+ }
}
cleanup:
- if (error == DB_LOCK_NOTGRANTED && ((tokudb_debug & TOKUDB_DEBUG_HIDE_DDL_LOCK_ERRORS) == 0)) {
- sql_print_error("Could not drop indexes from table %s because \
-another transaction has accessed the table. \
-To drop indexes, make sure no transactions touch the table.", share->table_name);
+ if (TOKUDB_LIKELY(TOKUDB_DEBUG_FLAGS(TOKUDB_DEBUG_HIDE_DDL_LOCK_ERRORS) == 0) &&
+ error == DB_LOCK_NOTGRANTED) {
+ sql_print_error(
+ "Could not drop indexes from table %s because another transaction "
+ "has accessed the table. To drop indexes, make sure no "
+ "transactions touch the table.",
+ share->full_table_name());
}
TOKUDB_HANDLER_DBUG_RETURN(error);
}
//
-// Internal function called by ha_tokudb::prepare_drop_index and ha_tokudb::alter_table_phase2
-// Restores dropped indexes in case of error in error path of prepare_drop_index and alter_table_phase2
+// Internal function called by ha_tokudb::prepare_drop_index and
+// ha_tokudb::alter_table_phase2
+// Restores dropped indexes in case of error in error path of
+// prepare_drop_index and alter_table_phase2
//
-void ha_tokudb::restore_drop_indexes(TABLE *table_arg, uint *key_num, uint num_of_keys) {
+void ha_tokudb::restore_drop_indexes(
+ TABLE* table_arg,
+ uint* key_num,
+ uint num_of_keys) {
//
// reopen closed dictionaries
@@ -7885,13 +8568,12 @@ void ha_tokudb::restore_drop_indexes(TABLE *table_arg, uint *key_num, uint num_o
uint curr_index = key_num[i];
if (share->key_file[curr_index] == NULL) {
r = open_secondary_dictionary(
- &share->key_file[curr_index],
+ &share->key_file[curr_index],
&table_share->key_info[curr_index],
- share->table_name,
- false, //
- NULL
- );
- assert(!r);
+ share->full_table_name(),
+ false,
+ NULL);
+ assert_always(!r);
}
}
}
@@ -7937,56 +8619,65 @@ void ha_tokudb::print_error(int error, myf errflag) {
// does so by deleting and then recreating the dictionary in the context
// of a transaction
//
-int ha_tokudb::truncate_dictionary( uint keynr, DB_TXN* txn ) {
+int ha_tokudb::truncate_dictionary(uint keynr, DB_TXN* txn) {
int error;
bool is_pk = (keynr == primary_key);
- toku_compression_method compression_method = get_compression_method(share->key_file[keynr]);
+ toku_compression_method compression_method =
+ get_compression_method(share->key_file[keynr]);
error = share->key_file[keynr]->close(share->key_file[keynr], 0);
- assert(error == 0);
+ assert_always(error == 0);
share->key_file[keynr] = NULL;
- if (is_pk) { share->file = NULL; }
+ if (is_pk) {
+ share->file = NULL;
+ }
if (is_pk) {
error = delete_or_rename_dictionary(
- share->table_name,
+ share->full_table_name(),
NULL,
- "main",
+ "main",
false, //is_key
txn,
- true // is a delete
- );
- if (error) { goto cleanup; }
- }
- else {
+ true); // is a delete
+ if (error) {
+ goto cleanup;
+ }
+ } else {
error = delete_or_rename_dictionary(
- share->table_name,
+ share->full_table_name(),
NULL,
- table_share->key_info[keynr].name,
+ table_share->key_info[keynr].name,
true, //is_key
txn,
- true // is a delete
- );
- if (error) { goto cleanup; }
+ true); // is a delete
+ if (error) {
+ goto cleanup;
+ }
}
if (is_pk) {
- error = create_main_dictionary(share->table_name, table, txn, &share->kc_info, compression_method);
- }
- else {
+ error = create_main_dictionary(
+ share->full_table_name(),
+ table,
+ txn,
+ &share->kc_info,
+ compression_method);
+ } else {
error = create_secondary_dictionary(
- share->table_name,
- table,
- &table_share->key_info[keynr],
+ share->full_table_name(),
+ table,
+ &table_share->key_info[keynr],
txn,
&share->kc_info,
keynr,
false,
- compression_method
- );
+ compression_method);
+ }
+ if (error) {
+ goto cleanup;
}
- if (error) { goto cleanup; }
cleanup:
return error;
@@ -8025,40 +8716,51 @@ int ha_tokudb::delete_all_rows_internal() {
uint curr_num_DBs = 0;
DB_TXN* txn = NULL;
+ // this should be enough to handle locking as the higher level MDL
+ // on this table should prevent any new analyze tasks.
+ share->cancel_background_jobs();
+
error = txn_begin(db_env, 0, &txn, 0, ha_thd());
- if (error) { goto cleanup; }
+ if (error) {
+ goto cleanup;
+ }
curr_num_DBs = table->s->keys + tokudb_test(hidden_primary_key);
for (uint i = 0; i < curr_num_DBs; i++) {
error = share->key_file[i]->pre_acquire_fileops_lock(
- share->key_file[i],
- txn
- );
- if (error) { goto cleanup; }
+ share->key_file[i],
+ txn);
+ if (error) {
+ goto cleanup;
+ }
error = share->key_file[i]->pre_acquire_table_lock(
- share->key_file[i],
- txn
- );
- if (error) { goto cleanup; }
+ share->key_file[i],
+ txn);
+ if (error) {
+ goto cleanup;
+ }
}
for (uint i = 0; i < curr_num_DBs; i++) {
error = truncate_dictionary(i, txn);
- if (error) { goto cleanup; }
+ if (error) {
+ goto cleanup;
+ }
}
+ DEBUG_SYNC(ha_thd(), "tokudb_after_truncate_all_dictionarys");
+
// zap the row count
if (error == 0) {
- share->rows = 0;
- // update auto increment
- share->last_auto_increment = 0;
- // calling write_to_status directly because we need to use txn
- write_to_status(
- share->status_block,
+ share->set_row_count(0, false);
+ // update auto increment
+ share->last_auto_increment = 0;
+ // calling write_to_status directly because we need to use txn
+ write_to_status(
+ share->status_block,
hatoku_max_ai,
- &share->last_auto_increment,
- sizeof(share->last_auto_increment),
- txn
- );
+ &share->last_auto_increment,
+ sizeof(share->last_auto_increment),
+ txn);
}
share->try_table_lock = true;
@@ -8066,16 +8768,19 @@ cleanup:
if (txn) {
if (error) {
abort_txn(txn);
- }
- else {
+ } else {
commit_txn(txn,0);
}
}
- if (error == DB_LOCK_NOTGRANTED && ((tokudb_debug & TOKUDB_DEBUG_HIDE_DDL_LOCK_ERRORS) == 0)) {
- sql_print_error("Could not truncate table %s because another transaction has accessed the \
- table. To truncate the table, make sure no transactions touch the table.",
- share->table_name);
+ if (TOKUDB_LIKELY(TOKUDB_DEBUG_FLAGS(
+ TOKUDB_DEBUG_HIDE_DDL_LOCK_ERRORS) == 0) &&
+ error == DB_LOCK_NOTGRANTED) {
+ sql_print_error(
+ "Could not truncate table %s because another transaction has "
+ "accessed the table. To truncate the table, make sure no "
+ "transactions touch the table.",
+ share->full_table_name());
}
//
// regardless of errors, need to reopen the DB's
@@ -8085,21 +8790,18 @@ cleanup:
if (share->key_file[i] == NULL) {
if (i != primary_key) {
r = open_secondary_dictionary(
- &share->key_file[i],
- &table_share->key_info[i],
- share->table_name,
- false, //
- NULL
- );
- assert(!r);
- }
- else {
+ &share->key_file[i],
+ &table_share->key_info[i],
+ share->full_table_name(),
+ false,
+ NULL);
+ assert_always(!r);
+ } else {
r = open_main_dictionary(
- share->table_name,
- false,
- NULL
- );
- assert(!r);
+ share->full_table_name(),
+ false,
+ NULL);
+ assert_always(!r);
}
}
}
@@ -8111,7 +8813,7 @@ void ha_tokudb::set_loader_error(int err) {
}
void ha_tokudb::set_dup_value_for_pk(DBT* key) {
- assert(!hidden_primary_key);
+ assert_always(!hidden_primary_key);
unpack_key(table->record[0],key,primary_key);
last_dup_key = primary_key;
}
@@ -8144,21 +8846,28 @@ Item* ha_tokudb::idx_cond_push(uint keyno_arg, Item* idx_cond_arg) {
return idx_cond_arg;
}
+void ha_tokudb::cancel_pushed_idx_cond() {
+ invalidate_icp();
+ handler::cancel_pushed_idx_cond();
+}
+
void ha_tokudb::cleanup_txn(DB_TXN *txn) {
if (transaction == txn && cursor) {
int r = cursor->c_close(cursor);
- assert(r == 0);
+ assert_always(r == 0);
cursor = NULL;
}
}
void ha_tokudb::add_to_trx_handler_list() {
- tokudb_trx_data *trx = (tokudb_trx_data *) thd_get_ha_data(ha_thd(), tokudb_hton);
+ tokudb_trx_data* trx =
+ (tokudb_trx_data*)thd_get_ha_data(ha_thd(), tokudb_hton);
trx->handlers = list_add(trx->handlers, &trx_handler_list);
}
void ha_tokudb::remove_from_trx_handler_list() {
- tokudb_trx_data *trx = (tokudb_trx_data *) thd_get_ha_data(ha_thd(), tokudb_hton);
+ tokudb_trx_data* trx =
+ (tokudb_trx_data*)thd_get_ha_data(ha_thd(), tokudb_hton);
trx->handlers = list_delete(trx->handlers, &trx_handler_list);
}
@@ -8190,7 +8899,7 @@ bool ha_tokudb::rpl_lookup_rows() {
if (!in_rpl_delete_rows && !in_rpl_update_rows)
return true;
else
- return THDVAR(ha_thd(), rpl_lookup_rows);
+ return tokudb::sysvars::rpl_lookup_rows(ha_thd());
}
// table admin
diff --git a/storage/tokudb/ha_tokudb.h b/storage/tokudb/ha_tokudb.h
index 5b387924b1e..3d7a3a7fa05 100644
--- a/storage/tokudb/ha_tokudb.h
+++ b/storage/tokudb/ha_tokudb.h
@@ -23,11 +23,12 @@ Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
-#if !defined(HA_TOKUDB_H)
-#define HA_TOKUDB_H
+#ifndef _HA_TOKUDB_H
+#define _HA_TOKUDB_H
-#include <db.h>
+#include "hatoku_hton.h"
#include "hatoku_cmp.h"
+#include "tokudb_background.h"
#define HA_TOKU_ORIG_VERSION 4
#define HA_TOKU_VERSION 4
@@ -45,84 +46,384 @@ typedef struct loader_context {
} *LOADER_CONTEXT;
//
-// This object stores table information that is to be shared
+// This class stores table information that is to be shared
// among all ha_tokudb objects.
-// There is one instance per table, shared among threads.
+// There is one instance per table, shared among handlers.
// Some of the variables here are the DB* pointers to indexes,
// and auto increment information.
//
+// When the last user releases its reference on the share,
+// it closes all of its database handles and releases all info.
+// The share instance stays around, though, so some data can be transiently
+// kept across open-close-open-close cycles. These data will be explicitly
+// noted below.
+//
class TOKUDB_SHARE {
public:
- void init(void);
- void destroy(void);
+ enum share_state_t {
+ CLOSED = 0,
+ OPENED = 1,
+ ERROR = 2
+ };
+
+ // one time, start up init
+ static void static_init();
+
+ // one time, shutdown destroy
+ static void static_destroy();
+
+    // returns a locked, properly reference-counted share.
+    // callers must check to ensure the share is in the correct state for
+    // their use, and unlock the share.
+    // if create_new is set, a new "CLOSED" share will be created if one
+    // doesn't exist; otherwise NULL is returned if no existing share is found.
+ static TOKUDB_SHARE* get_share(
+ const char* table_name,
+ TABLE_SHARE* table_share,
+ THR_LOCK_DATA* data,
+ bool create_new);
+
+    // removes a share entirely from the pool; called to rename/delete a table.
+ // caller must hold ddl_mutex on this share and the share MUST have
+ // exactly 0 _use_count
+ static void drop_share(TOKUDB_SHARE* share);
+
+ // returns state string for logging/reporting
+ static const char* get_state_string(share_state_t state);
+
+ void* operator new(size_t sz);
+ void operator delete(void* p);
+
+ TOKUDB_SHARE();
+
+ // increases the ref count and waits for any currently executing state
+ // transition to complete
+ // returns current state and leaves share locked
+    // callers must check to ensure the share is in the correct state for
+    // their use, and unlock the share.
+ share_state_t addref();
+
+ // decreases the ref count and potentially closes the share
+ // caller must not have ownership of mutex, will lock and release
+ int release();
+
+ // returns the current use count
+ // no locking requirements
+ inline int use_count() const;
+
+ // locks the share
+ inline void lock() const;
+
+ // unlocks the share
+ inline void unlock() const;
+
+ // returns the current state of the share
+ // no locking requirements
+ inline share_state_t state() const;
+
+ // sets the state of the share
+ // caller must hold mutex on this share
+ inline void set_state(share_state_t state);
+
+ // returns the full MySQL table name of the table ex:
+ // ./database/table
+ // no locking requirements
+ inline const char* full_table_name() const;
+
+ // returns the strlen of the full table name
+ // no locking requirements
+ inline uint full_table_name_length() const;
+
+ // returns the parsed database name this table resides in
+ // no locking requirements
+ inline const char* database_name() const;
+
+ // returns the strlen of the database name
+ // no locking requirements
+ inline uint database_name_length() const;
+
+ // returns the parsed table name of this table
+ // no locking requirements
+ inline const char* table_name() const;
+
+    // returns the strlen of the table name
+ // no locking requirements
+ inline uint table_name_length() const;
+
+ // sets the estimated number of rows in the table
+ // should be called only during share initialization and info call
+ // caller must hold mutex on this share unless specified by 'locked'
+ inline void set_row_count(uint64_t rows, bool locked);
+
+ // updates tracked row count and ongoing table change delta tracking
+ // called from any ha_tokudb operation that inserts/modifies/deletes rows
+ // may spawn background analysis if enabled, allowed and threshold hit
+ // caller must not have ownership of mutex, will lock and release
+ void update_row_count(
+ THD* thd,
+ uint64_t added,
+ uint64_t deleted,
+ uint64_t updated);
+
+ // returns the current row count estimate
+ // no locking requirements
+ inline ha_rows row_count() const;
+
+ // initializes cardinality statistics, takes ownership of incoming buffer
+ // caller must hold mutex on this share
+ inline void init_cardinality_counts(
+ uint32_t rec_per_keys,
+ uint64_t* rec_per_key);
+
+ // update the cardinality statistics. number of records must match
+ // caller must hold mutex on this share
+ inline void update_cardinality_counts(
+ uint32_t rec_per_keys,
+ const uint64_t* rec_per_key);
+
+ // disallow any auto analysis from taking place
+ // caller must hold mutex on this share
+ inline void disallow_auto_analysis();
+
+ // allow any auto analysis to take place
+ // pass in true for 'reset_deltas' to reset delta counting to 0
+ // caller must hold mutex on this share
+ inline void allow_auto_analysis(bool reset_deltas);
+
+ // cancels all background jobs for this share
+ // no locking requirements
+ inline void cancel_background_jobs() const;
+
+ // copies cardinality statistics into TABLE counter set
+ // caller must not have ownership of mutex, will lock and release
+ void set_cardinality_counts_in_table(TABLE* table);
+
+ // performs table analysis on underlying indices and produces estimated
+ // cardinality statistics.
+ // on success updates cardinality counts in status database and this share
+ // MUST pass a valid THD to access session variables.
+ // MAY pass txn. If txn is passed, assumes an explicit user scheduled
+ // ANALYZE and not an auto ANALYZE resulting from delta threshold
+ // uses session variables:
+ // tokudb_analyze_in_background, tokudb_analyze_throttle,
+ // tokudb_analyze_time, and tokudb_analyze_delete_fraction
+ // caller must hold mutex on this share
+ int analyze_standard(THD* thd, DB_TXN* txn);
+
+ // performs table scan and updates the internal FT logical row count value
+ // on success also updates share row count estimate.
+ // MUST pass a valid THD to access session variables.
+    // MAY pass txn. If txn is passed, assumes an explicit user scheduled
+    // ANALYZE.
+ // uses session variables:
+ // tokudb_analyze_in_background, and tokudb_analyze_throttle
+ // caller must not have ownership of mutex, will lock and release
+ int analyze_recount_rows(THD* thd, DB_TXN* txn);
public:
- char *table_name;
- uint table_name_length, use_count;
- pthread_mutex_t mutex;
- THR_LOCK lock;
-
+ //*********************************
+ // Destroyed and recreated on open-close-open
ulonglong auto_ident;
ulonglong last_auto_increment, auto_inc_create_value;
- //
- // estimate on number of rows in table
- //
- ha_rows rows;
- //
+
// estimate on number of rows added in the process of a locked tables
// this is so we can better estimate row count during a lock table
- //
ha_rows rows_from_locked_table;
- DB *status_block;
- //
+ DB* status_block;
+
// DB that is indexed on the primary key
- //
- DB *file;
- //
+ DB* file;
+
// array of all DB's that make up table, includes DB that
// is indexed on the primary key, add 1 in case primary
// key is hidden
- //
- DB *key_file[MAX_KEY +1];
- rw_lock_t key_file_lock;
+ DB* key_file[MAX_KEY + 1];
uint status, version, capabilities;
uint ref_length;
- //
+
// whether table has an auto increment column
- //
bool has_auto_inc;
- //
+
// index of auto increment column in table->field, if auto_inc exists
- //
uint ai_field_index;
- //
+
// whether the primary key has a string
- //
bool pk_has_string;
KEY_AND_COL_INFO kc_info;
-
- //
+
+ // key info copied from TABLE_SHARE, used by background jobs that have no
+ // access to a handler instance
+ uint _keys;
+ uint _max_key_parts;
+ struct key_descriptor_t {
+ uint _parts;
+ bool _is_unique;
+ char* _name;
+ };
+ key_descriptor_t* _key_descriptors;
+
// we want the following optimization for bulk loads, if the table is empty,
// attempt to grab a table lock. emptiness check can be expensive,
// so we try it once for a table. After that, we keep this variable around
- // to tell us to not try it again.
- //
- bool try_table_lock;
+ // to tell us to not try it again.
+ bool try_table_lock;
bool has_unique_keys;
bool replace_into_fast;
- rw_lock_t num_DBs_lock;
+ tokudb::thread::rwlock_t _num_DBs_lock;
uint32_t num_DBs;
- pthread_cond_t m_openclose_cond;
- enum { CLOSED, OPENING, OPENED, CLOSING, ERROR } m_state;
- int m_error;
- int m_initialize_count;
+private:
+ static HASH _open_tables;
+ static tokudb::thread::mutex_t _open_tables_mutex;
+
+ static uchar* hash_get_key(
+ TOKUDB_SHARE* share,
+ size_t* length,
+ TOKUDB_UNUSED(my_bool not_used));
+
+ static void hash_free_element(TOKUDB_SHARE* share);
- uint n_rec_per_key;
- uint64_t *rec_per_key;
+ //*********************************
+ // Spans open-close-open
+ mutable tokudb::thread::mutex_t _mutex;
+ mutable tokudb::thread::mutex_t _ddl_mutex;
+ uint _use_count;
+
+ share_state_t _state;
+
+ ulonglong _row_delta_activity;
+ bool _allow_auto_analysis;
+
+ String _full_table_name;
+ String _database_name;
+ String _table_name;
+
+ //*********************************
+ // Destroyed and recreated on open-close-open
+ THR_LOCK _thr_lock;
+
+ // estimate on number of rows in table
+ ha_rows _rows;
+
+ // cardinality counts
+ uint32_t _rec_per_keys;
+ uint64_t* _rec_per_key;
+
+ void init(const char* table_name);
+ void destroy();
};
+inline int TOKUDB_SHARE::use_count() const {
+ return _use_count;
+}
+inline void TOKUDB_SHARE::lock() const {
+ TOKUDB_SHARE_DBUG_ENTER("file[%s]:state[%s]:use_count[%d]",
+ _full_table_name.ptr(),
+ get_state_string(_state),
+ _use_count);
+ _mutex.lock();
+ TOKUDB_SHARE_DBUG_VOID_RETURN();
+}
+inline void TOKUDB_SHARE::unlock() const {
+ TOKUDB_SHARE_DBUG_ENTER("file[%s]:state[%s]:use_count[%d]",
+ _full_table_name.ptr(),
+ get_state_string(_state),
+ _use_count);
+ _mutex.unlock();
+ TOKUDB_SHARE_DBUG_VOID_RETURN();
+}
+inline TOKUDB_SHARE::share_state_t TOKUDB_SHARE::state() const {
+ return _state;
+}
+inline void TOKUDB_SHARE::set_state(TOKUDB_SHARE::share_state_t state) {
+ TOKUDB_SHARE_DBUG_ENTER("file[%s]:state[%s]:use_count[%d]:new_state[%s]",
+ _full_table_name.ptr(),
+ get_state_string(_state),
+ _use_count,
+ get_state_string(state));
+
+ assert_debug(_mutex.is_owned_by_me());
+ _state = state;
+ TOKUDB_SHARE_DBUG_VOID_RETURN();
+}
+inline const char* TOKUDB_SHARE::full_table_name() const {
+ return _full_table_name.ptr();
+}
+inline uint TOKUDB_SHARE::full_table_name_length() const {
+ return _full_table_name.length();
+}
+inline const char* TOKUDB_SHARE::database_name() const {
+ return _database_name.ptr();
+}
+inline uint TOKUDB_SHARE::database_name_length() const {
+ return _database_name.length();
+}
+inline const char* TOKUDB_SHARE::table_name() const {
+ return _table_name.ptr();
+}
+inline uint TOKUDB_SHARE::table_name_length() const {
+ return _table_name.length();
+}
+inline void TOKUDB_SHARE::set_row_count(uint64_t rows, bool locked) {
+ TOKUDB_SHARE_DBUG_ENTER("file[%s]:state[%s]:use_count[%d]:rows[%" PRIu64 "]:locked[%d]",
+ _full_table_name.ptr(),
+ get_state_string(_state),
+ _use_count,
+ rows,
+ locked);
+
+ if (!locked) {
+ lock();
+ } else {
+ assert_debug(_mutex.is_owned_by_me());
+ }
+ if (_rows && rows == 0)
+ _row_delta_activity = 0;
+
+ _rows = rows;
+ if (!locked) {
+ unlock();
+ }
+ TOKUDB_SHARE_DBUG_VOID_RETURN();
+}
+inline ha_rows TOKUDB_SHARE::row_count() const {
+ return _rows;
+}
+inline void TOKUDB_SHARE::init_cardinality_counts(
+ uint32_t rec_per_keys,
+ uint64_t* rec_per_key) {
+
+ assert_debug(_mutex.is_owned_by_me());
+ // can not change number of keys live
+ assert_always(_rec_per_key == NULL && _rec_per_keys == 0);
+ _rec_per_keys = rec_per_keys;
+ _rec_per_key = rec_per_key;
+}
+inline void TOKUDB_SHARE::update_cardinality_counts(
+ uint32_t rec_per_keys,
+ const uint64_t* rec_per_key) {
+
+ assert_debug(_mutex.is_owned_by_me());
+ // can not change number of keys live
+ assert_always(rec_per_keys == _rec_per_keys);
+ assert_always(rec_per_key != NULL);
+ memcpy(_rec_per_key, rec_per_key, _rec_per_keys * sizeof(uint64_t));
+}
+inline void TOKUDB_SHARE::disallow_auto_analysis() {
+ assert_debug(_mutex.is_owned_by_me());
+ _allow_auto_analysis = false;
+}
+inline void TOKUDB_SHARE::allow_auto_analysis(bool reset_deltas) {
+ assert_debug(_mutex.is_owned_by_me());
+ _allow_auto_analysis = true;
+ if (reset_deltas)
+ _row_delta_activity = 0;
+}
+inline void TOKUDB_SHARE::cancel_background_jobs() const {
+ tokudb::background::_job_manager->cancel_job(full_table_name());
+}
+
+
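
Taken together, the comments above define the discipline: get_share() hands back a locked, referenced share whose state the caller must inspect before use; release() drops the reference and may tear the share down. A usage sketch of that call pattern against the declarations in this header (error handling elided; the open work is a placeholder comment):

    // sketch of the intended call pattern, per the comments above
    void open_share_example(const char* name, TABLE_SHARE* table_share,
                            THR_LOCK_DATA* lock_data) {
        TOKUDB_SHARE* share =
            TOKUDB_SHARE::get_share(name, table_share, lock_data,
                                    true /* create_new */);
        // returned locked and referenced: check state while holding the lock
        if (share->state() == TOKUDB_SHARE::CLOSED) {
            // ... open database handles here (placeholder) ...
            share->set_state(TOKUDB_SHARE::OPENED);
        }
        share->unlock();

        // ... use the share ...

        share->release();  // may close all handles when the last user leaves
    }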
typedef struct st_filter_key_part_info {
uint offset;
@@ -278,6 +579,7 @@ private:
//
ulonglong added_rows;
ulonglong deleted_rows;
+ ulonglong updated_rows;
uint last_dup_key;
@@ -438,7 +740,7 @@ public:
// Returns a bit mask of capabilities of storage engine. Capabilities
// defined in sql/handler.h
//
- ulonglong table_flags(void) const;
+ ulonglong table_flags() const;
ulong index_flags(uint inx, uint part, bool all_parts) const;
@@ -482,7 +784,7 @@ public:
double index_only_read_time(uint keynr, double records);
int open(const char *name, int mode, uint test_if_locked);
- int close(void);
+ int close();
void update_create_info(HA_CREATE_INFO* create_info);
int create(const char *name, TABLE * form, HA_CREATE_INFO * create_info);
int delete_table(const char *name);
@@ -528,7 +830,7 @@ public:
void position(const uchar * record);
int info(uint);
int extra(enum ha_extra_function operation);
- int reset(void);
+ int reset();
int external_lock(THD * thd, int lock_type);
int start_stmt(THD * thd, thr_lock_type lock_type);
@@ -540,12 +842,17 @@ public:
int get_status(DB_TXN* trans);
void init_hidden_prim_key_info(DB_TXN *txn);
inline void get_auto_primary_key(uchar * to) {
- tokudb_pthread_mutex_lock(&share->mutex);
+ share->lock();
share->auto_ident++;
hpk_num_to_char(to, share->auto_ident);
- tokudb_pthread_mutex_unlock(&share->mutex);
+ share->unlock();
}
- virtual void get_auto_increment(ulonglong offset, ulonglong increment, ulonglong nb_desired_values, ulonglong * first_value, ulonglong * nb_reserved_values);
+ virtual void get_auto_increment(
+ ulonglong offset,
+ ulonglong increment,
+ ulonglong nb_desired_values,
+ ulonglong* first_value,
+ ulonglong* nb_reserved_values);
bool is_optimize_blocking();
bool is_auto_inc_singleton();
void print_error(int error, myf errflag);
@@ -555,9 +862,6 @@ public:
bool primary_key_is_clustered() {
return true;
}
- bool supports_clustered_keys() {
- return true;
- }
int cmp_ref(const uchar * ref1, const uchar * ref2);
bool check_if_incompatible_data(HA_CREATE_INFO * info, uint table_changes);
@@ -599,15 +903,8 @@ public:
#endif
- // ICP introduced in MariaDB 5.5
Item* idx_cond_push(uint keyno, class Item* idx_cond);
-
-#ifdef MARIADB_BASE_VERSION
- void cancel_pushed_idx_cond()
- {
- invalidate_icp();
- }
-#endif
+ void cancel_pushed_idx_cond();
#if TOKU_INCLUDE_ALTER_56
public:
@@ -693,13 +990,13 @@ public:
int fill_range_query_buf(
bool need_val,
- DBT const *key,
- DBT const *row,
+ DBT const* key,
+ DBT const* row,
int direction,
THD* thd,
uchar* buf,
- DBT* key_to_compare
- );
+ DBT* key_to_compare);
+
#if TOKU_INCLUDE_ROW_TYPE_COMPRESSION
enum row_type get_row_type() const;
#endif
@@ -709,9 +1006,7 @@ private:
int get_next(uchar* buf, int direction, DBT* key_to_compare, bool do_key_read);
int read_data_from_range_query_buff(uchar* buf, bool need_val, bool do_key_read);
// for ICP, only in MariaDB and MySQL 5.6
-#if defined(MARIADB_BASE_VERSION) || (50600 <= MYSQL_VERSION_ID && MYSQL_VERSION_ID <= 50699)
enum icp_result toku_handler_index_cond_check(Item* pushed_idx_cond);
-#endif
void invalidate_bulk_fetch();
void invalidate_icp();
int delete_all_rows_internal();
@@ -758,25 +1053,5 @@ private:
bool in_rpl_update_rows;
};
-#if TOKU_INCLUDE_OPTION_STRUCTS
-struct ha_table_option_struct {
- uint row_format;
-};
-
-struct ha_index_option_struct {
- bool clustering;
-};
-
-static inline bool key_is_clustering(const KEY *key) {
- return (key->flags & HA_CLUSTERING) || (key->option_struct && key->option_struct->clustering);
-}
-
-#else
-
-static inline bool key_is_clustering(const KEY *key) {
- return key->flags & HA_CLUSTERING;
-}
-#endif
-
-#endif
+#endif // _HA_TOKUDB_H
diff --git a/storage/tokudb/ha_tokudb_admin.cc b/storage/tokudb/ha_tokudb_admin.cc
index 996ce49d85f..db3d6c112d4 100644
--- a/storage/tokudb/ha_tokudb_admin.cc
+++ b/storage/tokudb/ha_tokudb_admin.cc
@@ -23,132 +23,801 @@ Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+#include "tokudb_sysvars.h"
#include "toku_time.h"
-struct analyze_progress_extra {
- THD *thd;
- TOKUDB_SHARE *share;
- TABLE_SHARE *table_share;
- uint key_i;
- const char *key_name;
- time_t t_start;
- char *write_status_msg;
+namespace tokudb {
+namespace analyze {
+
+class recount_rows_t : public tokudb::background::job_manager_t::job_t {
+public:
+ void* operator new(size_t sz);
+ void operator delete(void* p);
+
+ recount_rows_t(
+        bool user_scheduled,
+ THD* thd,
+ TOKUDB_SHARE* share,
+ DB_TXN* txn);
+
+ virtual ~recount_rows_t();
+
+ virtual const char* key();
+
+ virtual void status(
+ char* database,
+ char* table,
+ char* type,
+ char* params,
+ char* status);
+
+protected:
+ virtual void on_run();
+
+ virtual void on_destroy();
+
+private:
+ // to be provided by the initiator of recount rows
+ THD* _thd;
+ TOKUDB_SHARE* _share;
+ DB_TXN* _txn;
+ ulonglong _throttle;
+
+ // for recount rows status reporting
+ int _result;
+ ulonglong _recount_start; // in microseconds
+ ulonglong _total_elapsed_time; // in microseconds
+
+ bool _local_txn;
+ ulonglong _rows;
+ ulonglong _deleted_rows;
+ ulonglong _ticks;
+
+ static int analyze_recount_rows_progress(
+ uint64_t count,
+ uint64_t deleted,
+ void* extra);
+ int analyze_recount_rows_progress(uint64_t count, uint64_t deleted);
+ void get_analyze_status(char*);
};
-static int analyze_progress(void *v_extra, uint64_t rows) {
- struct analyze_progress_extra *extra = (struct analyze_progress_extra *) v_extra;
- THD *thd = extra->thd;
- if (thd_killed(thd))
- return ER_ABORTING_CONNECTION;
+void* recount_rows_t::operator new(size_t sz) {
+ return tokudb::memory::malloc(sz, MYF(MY_WME|MY_ZEROFILL|MY_FAE));
+}
+void recount_rows_t::operator delete(void* p) {
+ tokudb::memory::free(p);
+}
+recount_rows_t::recount_rows_t(
+ bool user_scheduled,
+ THD* thd,
+ TOKUDB_SHARE* share,
+ DB_TXN* txn) :
+ tokudb::background::job_manager_t::job_t(user_scheduled),
+ _share(share),
+ _result(HA_ADMIN_OK),
+ _recount_start(0),
+ _total_elapsed_time(0),
+ _local_txn(false),
+ _rows(0),
+ _deleted_rows(0),
+ _ticks(0) {
+
+ assert_debug(thd != NULL);
+ assert_debug(share != NULL);
+
+ if (tokudb::sysvars::analyze_in_background(thd)) {
+ _thd = NULL;
+ _txn = NULL;
+ } else {
+ _thd = thd;
+ _txn = txn;
+ }
+
+ _throttle = tokudb::sysvars::analyze_throttle(thd);
+}
+recount_rows_t::~recount_rows_t() {
+}
+void recount_rows_t::on_run() {
+ _recount_start = tokudb::time::microsec();
+ _total_elapsed_time = 0;
+
+ if (_txn == NULL) {
+ _result = db_env->txn_begin(db_env, NULL, &_txn, DB_READ_UNCOMMITTED);
+
+ if (_result != 0) {
+ _txn = NULL;
+ _result = HA_ADMIN_FAILED;
+ goto error;
+ }
+ _local_txn = true;
+ } else {
+ _local_txn = false;
+ }
+
+ _result =
+ _share->file->recount_rows(
+ _share->file,
+ analyze_recount_rows_progress,
+ this);
+
+ if (_result != 0) {
+ if (_local_txn) {
+ _txn->abort(_txn);
+ _txn = NULL;
+ }
+ _result = HA_ADMIN_FAILED;
+ goto error;
+ }
+
+ DB_BTREE_STAT64 dict_stats;
+ _result = _share->file->stat64(_share->file, _txn, &dict_stats);
+    if (_result == 0) {
+        _share->set_row_count(dict_stats.bt_ndata, false);
+    } else {
+        _result = HA_ADMIN_FAILED;
+    }
+
+ if (_local_txn) {
+ if (_result == HA_ADMIN_OK) {
+ _txn->commit(_txn, 0);
+ } else {
+ _txn->abort(_txn);
+ }
+ _txn = NULL;
+ }
+
+ sql_print_information(
+ "tokudb analyze recount rows %d counted %lld",
+ _result,
+ _share->row_count());
+error:
+ return;
+}
+void recount_rows_t::on_destroy() {
+ _share->release();
+}
+const char* recount_rows_t::key() {
+ return _share->full_table_name();
+}
+void recount_rows_t::status(
+ char* database,
+ char* table,
+ char* type,
+ char* params,
+ char* status) {
+
+ strcpy(database, _share->database_name());
+ strcpy(table, _share->table_name());
+ strcpy(type, "TOKUDB_ANALYZE_MODE_RECOUNT_ROWS");
+ sprintf(params, "TOKUDB_ANALYZE_THROTTLE=%llu;", _throttle);
+ get_analyze_status(status);
+}
+int recount_rows_t::analyze_recount_rows_progress(
+ uint64_t count,
+ uint64_t deleted,
+ void* extra) {
+
+ recount_rows_t* context = (recount_rows_t*)extra;
+ return context->analyze_recount_rows_progress(count, deleted);
+}
+int recount_rows_t::analyze_recount_rows_progress(
+ uint64_t count,
+ uint64_t deleted) {
+
+ _rows = count;
+ _deleted_rows += deleted;
+    if (deleted > 0)
+        _ticks += deleted;
+    else
+        _ticks++;
+
+ if (_ticks > 1000) {
+ _ticks = 0;
+ uint64_t now = tokudb::time::microsec();
+ _total_elapsed_time = now - _recount_start;
+ if ((_thd && thd_killed(_thd)) || cancelled()) {
+            // client killed or job cancelled
+ return ER_ABORTING_CONNECTION;
+ }
+
+ // report
+ if (_thd) {
+ char status[256];
+ get_analyze_status(status);
+ thd_proc_info(_thd, status);
+ }
+
+ // throttle
+        // given the throttle value, let's calculate the maximum number of
+        // rows we should have seen so far, at a 0.1 second resolution
+ if (_throttle > 0) {
+ uint64_t estimated_rows = _total_elapsed_time / 100000;
+ estimated_rows = estimated_rows * (_throttle / 10);
+ if (_rows + _deleted_rows > estimated_rows) {
+ // sleep for 1/10 of a second
+ tokudb::time::sleep_microsec(100000);
+ }
+ }
+ }
+ return 0;
+}
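
The throttle check above boils down to a row budget: elapsed tenth-seconds times (throttle / 10). A minimal standalone sketch of the same arithmetic, with all values assumed rather than taken from the patch:

    #include <cstdio>

    int main() {
        unsigned long long throttle   = 10000;    // assumed: rows per second
        unsigned long long elapsed_us = 2500000;  // 2.5 s into the job
        unsigned long long rows_seen  = 30000;    // _rows + _deleted_rows so far

        unsigned long long tenths = elapsed_us / 100000;      // 25 tenth-seconds
        unsigned long long budget = tenths * (throttle / 10); // 25000 rows allowed
        if (rows_seen > budget)  // ahead of schedule: the patch sleeps 100000 us
            std::printf("over budget by %llu rows, sleep 0.1 s\n",
                        rows_seen - budget);
        return 0;
    }

With these numbers the job has processed 5000 rows more than the budget allows, so it sleeps for a tenth of a second and re-checks on the next 1000-tick boundary.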
+void recount_rows_t::get_analyze_status(char* msg) {
+ sprintf(
+ msg,
+ "recount_rows %s.%s counted %llu rows and %llu deleted in %llu "
+ "seconds.",
+ _share->database_name(),
+ _share->table_name(),
+ _rows,
+ _deleted_rows,
+ _total_elapsed_time / tokudb::time::MICROSECONDS);
+}
+
+
+class standard_t : public tokudb::background::job_manager_t::job_t {
+public:
+ void* operator new(size_t sz);
+ void operator delete(void* p);
+
+ standard_t(bool user_scheduled, THD* thd, TOKUDB_SHARE* share, DB_TXN* txn);
+
+ virtual ~standard_t();
+
+    virtual const char* key();
+
+ virtual void status(
+ char* database,
+ char* table,
+ char* type,
+ char* params,
+ char* status);
+
+protected:
+ virtual void on_run();
+
+ virtual void on_destroy();
+
+private:
+ // to be provided by initiator of analyze
+ THD* _thd;
+ TOKUDB_SHARE* _share;
+ DB_TXN* _txn;
+ ulonglong _throttle; // in microseconds
+ ulonglong _time_limit; // in microseconds
+ double _delete_fraction;
+
+ // for analyze status reporting, may also use other state
+ int _result;
+ ulonglong _analyze_start; // in microseconds
+ ulonglong _total_elapsed_time; // in microseconds
+
+ // for analyze internal use, pretty much these are per-key/index
+ ulonglong _current_key;
+ bool _local_txn;
+ ulonglong _half_time;
+ ulonglong _half_rows;
+ ulonglong _rows;
+ ulonglong _deleted_rows;
+ ulonglong _ticks;
+ ulonglong _analyze_key_start; // in microseconds
+ ulonglong _key_elapsed_time; // in microseconds
+ uint _scan_direction;
+
+ static bool analyze_standard_cursor_callback(
+ void* extra,
+ uint64_t deleted_rows);
+ bool analyze_standard_cursor_callback(uint64_t deleted_rows);
+
+ void get_analyze_status(char*);
+ int analyze_key_progress();
+ int analyze_key(uint64_t* rec_per_key_part);
+};
+
+void* standard_t::operator new(size_t sz) {
+ return tokudb::memory::malloc(sz, MYF(MY_WME|MY_ZEROFILL|MY_FAE));
+}
+void standard_t::operator delete(void* p) {
+ tokudb::memory::free(p);
+}
+standard_t::standard_t(
+ bool user_scheduled,
+ THD* thd,
+ TOKUDB_SHARE* share,
+ DB_TXN* txn) :
+ tokudb::background::job_manager_t::job_t(user_scheduled),
+ _share(share),
+ _result(HA_ADMIN_OK),
+ _analyze_start(0),
+ _total_elapsed_time(0),
+ _current_key(0),
+ _local_txn(false),
+ _half_time(0),
+ _half_rows(0),
+ _rows(0),
+ _deleted_rows(0),
+ _ticks(0),
+ _analyze_key_start(0),
+ _key_elapsed_time(0),
+ _scan_direction(0) {
+
+ assert_debug(thd != NULL);
+ assert_debug(share != NULL);
+
+ if (tokudb::sysvars::analyze_in_background(thd)) {
+ _thd = NULL;
+ _txn = NULL;
+ } else {
+ _thd = thd;
+ _txn = txn;
+ }
+ _throttle = tokudb::sysvars::analyze_throttle(thd);
+ _time_limit =
+ tokudb::sysvars::analyze_time(thd) * tokudb::time::MICROSECONDS;
+ _delete_fraction = tokudb::sysvars::analyze_delete_fraction(thd);
+}
+standard_t::~standard_t() {
+}
+void standard_t::on_run() {
+ DB_BTREE_STAT64 stat64;
+ uint64_t rec_per_key_part[_share->_max_key_parts];
+ uint64_t total_key_parts = 0;
+ _analyze_start = tokudb::time::microsec();
+ _half_time = _time_limit > 0 ? _time_limit/2 : 0;
+
+ if (_txn == NULL) {
+ _result = db_env->txn_begin(db_env, NULL, &_txn, DB_READ_UNCOMMITTED);
+
+ if (_result != 0) {
+ _txn = NULL;
+ _result = HA_ADMIN_FAILED;
+ goto error;
+ }
+ _local_txn = true;
+ } else {
+ _local_txn = false;
+ }
+
+ assert_always(_share->key_file[0] != NULL);
+ _result = _share->key_file[0]->stat64(_share->key_file[0], _txn, &stat64);
+ if (_result != 0) {
+ _result = HA_ADMIN_FAILED;
+ goto cleanup;
+ }
+ _half_rows = stat64.bt_ndata / 2;
+
+ for (ulonglong current_key = 0;
+ _result == HA_ADMIN_OK && current_key < _share->_keys;
+ current_key++) {
+
+ _current_key = current_key;
+ _rows = _deleted_rows = _ticks = 0;
+ _result = analyze_key(&rec_per_key_part[total_key_parts]);
+
+ if ((_result != 0 && _result != ETIME) ||
+ (_result != 0 && _rows == 0 && _deleted_rows > 0)) {
+ _result = HA_ADMIN_FAILED;
+ }
+ if (_thd && (_result == HA_ADMIN_FAILED ||
+ (double)_deleted_rows >
+ _delete_fraction * (_rows + _deleted_rows))) {
+
+            char name[256];
+            int namelen;
+ namelen =
+ snprintf(
+ name,
+ sizeof(name),
+ "%s.%s.%s",
+ _share->database_name(),
+ _share->table_name(),
+ _share->_key_descriptors[_current_key]._name);
+ _thd->protocol->prepare_for_resend();
+ _thd->protocol->store(name, namelen, system_charset_info);
+ _thd->protocol->store("analyze", 7, system_charset_info);
+ _thd->protocol->store("info", 4, system_charset_info);
+ char rowmsg[256];
+ int rowmsglen;
+ rowmsglen =
+ snprintf(
+ rowmsg,
+ sizeof(rowmsg),
+ "rows processed %llu rows deleted %llu",
+ _rows,
+ _deleted_rows);
+ _thd->protocol->store(rowmsg, rowmsglen, system_charset_info);
+ _thd->protocol->write();
+
+ sql_print_information(
+ "tokudb analyze on %.*s %.*s",
+ namelen,
+ name,
+ rowmsglen,
+ rowmsg);
+ }
+
+ total_key_parts += _share->_key_descriptors[_current_key]._parts;
+ }
+ if (_result == HA_ADMIN_OK) {
+ int error =
+ tokudb::set_card_in_status(
+ _share->status_block,
+ _txn,
+ total_key_parts,
+ rec_per_key_part);
+ if (error)
+ _result = HA_ADMIN_FAILED;
+
+ _share->lock();
+ _share->update_cardinality_counts(total_key_parts, rec_per_key_part);
+ _share->allow_auto_analysis(true);
+ _share->unlock();
+ }
+
+cleanup:
+ if (_local_txn) {
+ if (_result == HA_ADMIN_OK) {
+ _txn->commit(_txn, 0);
+ } else {
+ _txn->abort(_txn);
+ }
+ _txn = NULL;
+ }
+
+error:
+ return;
+
+}
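
on_run() also sizes two heuristics consumed later by analyze_key(): _half_time, half of the configured time budget, and _half_rows, half of the row count reported by stat64. If half of the budget elapses before half of the rows have been scanned forward, analyze_key() reverses direction so that both ends of the index contribute to the estimate. A reduced model of that decision, with assumed numbers:

    #include <cstdio>

    int main() {
        unsigned long long time_limit_us = 10000000ULL;  // assumed 10 s budget
        unsigned long long half_time     = time_limit_us / 2;
        unsigned long long index_rows    = 1000000ULL;   // stat64 bt_ndata
        unsigned long long half_rows     = index_rows / 2;

        unsigned long long key_elapsed_us = 6000000ULL;  // 6 s into this key
        unsigned long long rows_scanned   = 120000ULL;   // far short of half

        bool reverse = half_time > 0 &&
                       key_elapsed_us >= half_time &&
                       rows_scanned < half_rows;
        std::printf("reverse scan: %s\n", reverse ? "yes -> DB_PREV" : "no");
        return 0;
    }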
+void standard_t::on_destroy() {
+ _share->lock();
+ _share->allow_auto_analysis(false);
+ _share->unlock();
+ _share->release();
+}
+const char* standard_t::key() {
+ return _share->full_table_name();
+}
+void standard_t::status(
+ char* database,
+ char* table,
+ char* type,
+ char* params,
+ char* status) {
+
+ strcpy(database, _share->database_name());
+ strcpy(table, _share->table_name());
+ strcpy(type, "TOKUDB_ANALYZE_MODE_STANDARD");
+ sprintf(
+ params,
+ "TOKUDB_ANALYZE_DELETE_FRACTION=%f; "
+ "TOKUDB_ANALYZE_TIME=%llu; TOKUDB_ANALYZE_THROTTLE=%llu;",
+ _delete_fraction,
+ _time_limit / tokudb::time::MICROSECONDS,
+ _throttle);
+ get_analyze_status(status);
+}
+bool standard_t::analyze_standard_cursor_callback(
+ void* extra,
+ uint64_t deleted_rows) {
+ standard_t* context = (standard_t*)extra;
+ return context->analyze_standard_cursor_callback(deleted_rows);
+}
+bool standard_t::analyze_standard_cursor_callback(uint64_t deleted_rows) {
+ _deleted_rows += deleted_rows;
+ _ticks += deleted_rows;
+ return analyze_key_progress() != 0;
+}
+void standard_t::get_analyze_status(char* msg) {
+ static const char* scan_direction_str[] = {
+ "not scanning",
+ "scanning forward",
+ "scanning backward",
+ "scan unknown"
+ };
+
+ const char* scan_direction = NULL;
+ switch (_scan_direction) {
+ case 0: scan_direction = scan_direction_str[0]; break;
+ case DB_NEXT: scan_direction = scan_direction_str[1]; break;
+ case DB_PREV: scan_direction = scan_direction_str[2]; break;
+ default: scan_direction = scan_direction_str[3]; break;
+ }
- time_t t_now = time(0);
- time_t t_limit = THDVAR(thd, analyze_time);
- time_t t_start = extra->t_start;
- if (t_limit > 0 && t_now - t_start > t_limit)
- return ETIME;
float progress_rows = 0.0;
- TOKUDB_SHARE *share = extra->share;
- if (share->rows > 0)
- progress_rows = (float) rows / (float) share->rows;
+ if (_share->row_count() > 0)
+ progress_rows = (float) _rows / (float) _share->row_count();
float progress_time = 0.0;
- if (t_limit > 0)
- progress_time = (float) (t_now - t_start) / (float) t_limit;
- char *write_status_msg = extra->write_status_msg;
- TABLE_SHARE *table_share = extra->table_share;
- sprintf(write_status_msg, "%.*s.%.*s.%s %u of %u %.lf%% rows %.lf%% time",
- (int) table_share->db.length, table_share->db.str,
- (int) table_share->table_name.length, table_share->table_name.str,
- extra->key_name, extra->key_i, table_share->keys, progress_rows * 100.0, progress_time * 100.0);
- thd_proc_info(thd, write_status_msg);
+ if (_time_limit > 0)
+ progress_time = (float) _key_elapsed_time / (float) _time_limit;
+ sprintf(
+ msg,
+ "analyze table standard %s.%s.%s %llu of %u %.lf%% rows %.lf%% time, "
+ "%s",
+ _share->database_name(),
+ _share->table_name(),
+ _share->_key_descriptors[_current_key]._name,
+ _current_key,
+ _share->_keys,
+ progress_rows * 100.0,
+ progress_time * 100.0,
+ scan_direction);
+}
+int standard_t::analyze_key_progress() {
+ if (_ticks > 1000) {
+ _ticks = 0;
+ uint64_t now = tokudb::time::microsec();
+ _total_elapsed_time = now - _analyze_start;
+ _key_elapsed_time = now - _analyze_key_start;
+ if ((_thd && thd_killed(_thd)) || cancelled()) {
+            // client killed or job cancelled
+            return ER_ABORTING_CONNECTION;
+        } else if (_time_limit > 0 &&
+ (uint64_t)_key_elapsed_time > _time_limit) {
+ // time limit reached
+ return ETIME;
+ }
+
+ // report
+ if (_thd) {
+ char status[256];
+ get_analyze_status(status);
+ thd_proc_info(_thd, status);
+ }
+
+ // throttle
+        // given the throttle value, let's calculate the maximum number of
+        // rows we should have seen so far, at a 0.1 second resolution
+ if (_throttle > 0) {
+ uint64_t estimated_rows = _key_elapsed_time / 100000;
+ estimated_rows = estimated_rows * (_throttle / 10);
+ if (_rows + _deleted_rows > estimated_rows) {
+ // sleep for 1/10 of a second
+ tokudb::time::sleep_microsec(100000);
+ }
+ }
+ }
return 0;
}
+int standard_t::analyze_key(uint64_t* rec_per_key_part) {
+ int error = 0;
+ DB* db = _share->key_file[_current_key];
+ assert_always(db != NULL);
+ uint64_t num_key_parts = _share->_key_descriptors[_current_key]._parts;
+ uint64_t unique_rows[num_key_parts];
+ bool is_unique = _share->_key_descriptors[_current_key]._is_unique;
+ DBC* cursor = NULL;
+ int close_error = 0;
+ DBT key, prev_key;
+ bool copy_key = false;
+
+ _analyze_key_start = tokudb::time::microsec();
+ _key_elapsed_time = 0;
+ _scan_direction = DB_NEXT;
+
+ if (is_unique && num_key_parts == 1) {
+ // don't compute for unique keys with a single part. we already know
+ // the answer.
+ _rows = unique_rows[0] = 1;
+ goto done;
+ }
+
+ for (uint64_t i = 0; i < num_key_parts; i++)
+ unique_rows[i] = 1;
+
+ // stop looking when the entire dictionary was analyzed, or a
+ // cap on execution time was reached, or the analyze was killed.
+ while (1) {
+ if (cursor == NULL) {
+ error = db->cursor(db, _txn, &cursor, 0);
+ if (error != 0)
+ goto done;
+
+ cursor->c_set_check_interrupt_callback(
+ cursor,
+ analyze_standard_cursor_callback,
+ this);
+
+ memset(&key, 0, sizeof(DBT));
+ memset(&prev_key, 0, sizeof(DBT));
+ copy_key = true;
+ }
+
+ error = cursor->c_get(cursor, &key, 0, _scan_direction);
+ if (error != 0) {
+ if (error == DB_NOTFOUND || error == TOKUDB_INTERRUPTED)
+ error = 0; // not an error
+ break;
+ } else if (cancelled()) {
+ error = ER_ABORTING_CONNECTION;
+ break;
+ }
+
+ _rows++;
+ _ticks++;
+
+        // if copy_key is false at this point, we have some value sitting in
+        // prev_key that we can compare to.
+        // if the comparison reveals a unique key, we must set copy_key to true
+        // so the code following can copy the current key into prev_key for the
+        // next iteration
+ if (copy_key == false) {
+ // compare this key with the previous key. ignore
+ // appended PK for SK's.
+ // TODO if a prefix is different, then all larger keys
+ // that include the prefix are also different.
+ // TODO if we are comparing the entire primary key or
+ // the entire unique secondary key, then the cardinality
+ // must be 1, so we can avoid computing it.
+ for (uint64_t i = 0; i < num_key_parts; i++) {
+ int cmp = tokudb_cmp_dbt_key_parts(db, &prev_key, &key, i+1);
+ if (cmp != 0) {
+ unique_rows[i]++;
+ copy_key = true;
+ }
+ }
+ }
+
+ // prev_key = key or prev_key is NULL
+ if (copy_key) {
+ prev_key.data =
+ tokudb::memory::realloc(
+ prev_key.data,
+ key.size,
+ MYF(MY_WME|MY_ZEROFILL|MY_FAE));
+ assert_always(prev_key.data);
+ prev_key.size = key.size;
+ memcpy(prev_key.data, key.data, prev_key.size);
+ copy_key = false;
+ }
+
+ error = analyze_key_progress();
+ if (error == ETIME) {
+ error = 0;
+ break;
+ } else if (error) {
+ break;
+ }
+
+        // if we have a time limit, are scanning forward, have exceeded
+        // _half_time, and have not yet passed _half_rows rows in the
+        // index: clean up the keys, close the cursor and reverse direction.
+ if (TOKUDB_UNLIKELY(_half_time > 0 &&
+ _scan_direction == DB_NEXT &&
+ _key_elapsed_time >= _half_time &&
+ _rows < _half_rows)) {
+
+            tokudb::memory::free(prev_key.data);
+            prev_key.data = NULL;
+ close_error = cursor->c_close(cursor);
+ assert_always(close_error == 0);
+ cursor = NULL;
+ _scan_direction = DB_PREV;
+ }
+ }
+ // cleanup
+ if (prev_key.data) tokudb::memory::free(prev_key.data);
+ if (cursor) close_error = cursor->c_close(cursor);
+ assert_always(close_error == 0);
+
+done:
+ // return cardinality
+ for (uint64_t i = 0; i < num_key_parts; i++) {
+ rec_per_key_part[i] = _rows / unique_rows[i];
+ }
+ return error;
+}
+
+} // namespace analyze
+} // namespace tokudb
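
The cardinality bookkeeping in analyze_key() reduces to counting, for each key-part prefix length, how many adjacent keys in sorted order differ, then estimating records-per-key as rows / unique_rows. A self-contained toy version over an in-memory key list — the comparison below stands in for tokudb_cmp_dbt_key_parts(), and the data is made up:

    #include <array>
    #include <cstddef>
    #include <cstdio>
    #include <vector>

    int main() {
        // two-part keys, in the order a forward cursor scan would return them
        std::vector<std::array<int, 2> > keys =
            {{1, 1}, {1, 2}, {1, 3}, {2, 1}, {2, 1}, {3, 7}};
        const std::size_t parts = 2;
        unsigned long long unique_rows[parts] = {1, 1};

        for (std::size_t r = 1; r < keys.size(); r++) {
            for (std::size_t p = 0; p < parts; p++) {
                // compare only the first p+1 key parts, as
                // tokudb_cmp_dbt_key_parts(db, prev, cur, p + 1) does
                bool differs = false;
                for (std::size_t q = 0; q <= p; q++) {
                    if (keys[r - 1][q] != keys[r][q]) { differs = true; break; }
                }
                if (differs)
                    unique_rows[p]++;
            }
        }
        for (std::size_t p = 0; p < parts; p++)
            std::printf("rec_per_key_part[%llu] = %llu\n",
                        (unsigned long long)p,
                        (unsigned long long)keys.size() / unique_rows[p]);
        return 0;
    }

For the six keys above this prints rec_per_key_part[0] = 2 and rec_per_key_part[1] = 1: three distinct first parts and five distinct full keys over six rows.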
+
int ha_tokudb::analyze(THD *thd, HA_CHECK_OPT *check_opt) {
- TOKUDB_HANDLER_DBUG_ENTER("%s", share->table_name);
+ TOKUDB_HANDLER_DBUG_ENTER("%s", share->table_name());
+ int result = HA_ADMIN_OK;
+ tokudb::sysvars::analyze_mode_t mode = tokudb::sysvars::analyze_mode(thd);
+
+ switch (mode) {
+ case tokudb::sysvars::TOKUDB_ANALYZE_RECOUNT_ROWS:
+ result = share->analyze_recount_rows(thd, transaction);
+ break;
+ case tokudb::sysvars::TOKUDB_ANALYZE_STANDARD:
+ share->lock();
+ result = share->analyze_standard(thd, transaction);
+ share->unlock();
+ break;
+ case tokudb::sysvars::TOKUDB_ANALYZE_CANCEL:
+ share->cancel_background_jobs();
+ break;
+ default:
+ break; // no-op
+ }
+ TOKUDB_HANDLER_DBUG_RETURN(result);
+}
+
+int TOKUDB_SHARE::analyze_recount_rows(THD* thd, DB_TXN* txn) {
+ TOKUDB_HANDLER_DBUG_ENTER("%s", table_name());
+
+ assert_always(thd != NULL);
+
const char *orig_proc_info = tokudb_thd_get_proc_info(thd);
- uint64_t rec_per_key[table_share->key_parts];
+ int result = HA_ADMIN_OK;
+
+ tokudb::analyze::recount_rows_t* job
+ = new tokudb::analyze::recount_rows_t(true, thd, this, txn);
+ assert_always(job != NULL);
+
+ // job->destroy will drop the ref
+ addref();
+ unlock();
+
+ bool ret = tokudb::background::_job_manager->
+ run_job(job, tokudb::sysvars::analyze_in_background(thd));
+
+ if (!ret) {
+ job->destroy();
+ delete job;
+ result = HA_ADMIN_FAILED;
+ }
+
+ thd_proc_info(thd, orig_proc_info);
+
+ TOKUDB_HANDLER_DBUG_RETURN(result);
+}
+
+// on entry, if txn is !NULL, it is a user session invoking ANALYZE directly
+// and no lock will be held on 'this'; else if txn is NULL it is an auto
+// analyze and 'this' will be locked.
+int TOKUDB_SHARE::analyze_standard(THD* thd, DB_TXN* txn) {
+ TOKUDB_HANDLER_DBUG_ENTER("%s", table_name());
+
+ assert_always(thd != NULL);
+ assert_debug(_mutex.is_owned_by_me() == true);
+
int result = HA_ADMIN_OK;
// stub out analyze if optimize is remapped to alter recreate + analyze
- if (thd_sql_command(thd) != SQLCOM_ANALYZE && thd_sql_command(thd) != SQLCOM_ALTER_TABLE) {
+    // when this is not an auto analyze, or when this is an alter table
+ if ((txn &&
+ thd_sql_command(thd) != SQLCOM_ANALYZE &&
+ thd_sql_command(thd) != SQLCOM_ALTER_TABLE) ||
+ thd_sql_command(thd) == SQLCOM_ALTER_TABLE) {
TOKUDB_HANDLER_DBUG_RETURN(result);
}
- DB_TXN *txn = transaction;
- if (!txn) {
+ const char *orig_proc_info = tokudb_thd_get_proc_info(thd);
+
+    tokudb::analyze::standard_t* job
+        = new tokudb::analyze::standard_t(txn != NULL, thd, this, txn);
+ assert_always(job != NULL);
+
+ // akin to calling addref, but we know, right here, right now, everything
+ // in the share is set up, files open, etc...
+ // job->destroy will drop the ref
+ _use_count++;
+
+ // don't want any autos kicking off while we are analyzing
+ disallow_auto_analysis();
+
+ unlock();
+
+ bool ret =
+ tokudb::background::_job_manager->run_job(
+ job,
+ tokudb::sysvars::analyze_in_background(thd));
+
+ if (!ret) {
+ job->destroy();
+ delete job;
result = HA_ADMIN_FAILED;
}
- uint total_key_parts = 0;
- if (result == HA_ADMIN_OK) {
- // compute cardinality for each key
- for (uint i = 0; result == HA_ADMIN_OK && i < table_share->keys; i++) {
- KEY *key_info = &table_share->key_info[i];
- uint64_t num_key_parts = get_key_parts(key_info);
- const char *key_name = i == primary_key ? "primary" : key_info->name;
- struct analyze_progress_extra analyze_progress_extra = {
- thd, share, table_share, i, key_name, time(0), write_status_msg
- };
- bool is_unique = false;
- if (i == primary_key || (key_info->flags & HA_NOSAME))
- is_unique = true;
- uint64_t rows = 0;
- uint64_t deleted_rows = 0;
- int error = tokudb::analyze_card(share->key_file[i], txn, is_unique, num_key_parts, &rec_per_key[total_key_parts],
- tokudb_cmp_dbt_key_parts, analyze_progress, &analyze_progress_extra,
- &rows, &deleted_rows);
- sql_print_information("tokudb analyze %d %" PRIu64 " %" PRIu64, error, rows, deleted_rows);
- if (error != 0 && error != ETIME) {
- result = HA_ADMIN_FAILED;
- }
- if (error != 0 && rows == 0 && deleted_rows > 0) {
- result = HA_ADMIN_FAILED;
- }
- double f = THDVAR(thd, analyze_delete_fraction);
- if (result == HA_ADMIN_FAILED || (double) deleted_rows > f * (rows + deleted_rows)) {
- char name[256]; int namelen;
- namelen = snprintf(name, sizeof name, "%.*s.%.*s.%s",
- (int) table_share->db.length, table_share->db.str,
- (int) table_share->table_name.length, table_share->table_name.str,
- key_name);
- thd->protocol->prepare_for_resend();
- thd->protocol->store(name, namelen, system_charset_info);
- thd->protocol->store("analyze", 7, system_charset_info);
- thd->protocol->store("info", 4, system_charset_info);
- char rowmsg[256]; int rowmsglen;
- rowmsglen = snprintf(rowmsg, sizeof rowmsg, "rows processed %" PRIu64 " rows deleted %" PRIu64, rows, deleted_rows);
- thd->protocol->store(rowmsg, rowmsglen, system_charset_info);
- thd->protocol->write();
-
- sql_print_information("tokudb analyze on %.*s %.*s",
- namelen, name, rowmsglen, rowmsg);
- }
- if (tokudb_debug & TOKUDB_DEBUG_ANALYZE) {
- char name[256]; int namelen;
- namelen = snprintf(name, sizeof name, "%.*s.%.*s.%s",
- (int) table_share->db.length, table_share->db.str,
- (int) table_share->table_name.length, table_share->table_name.str,
- key_name);
- TOKUDB_HANDLER_TRACE("%.*s rows %" PRIu64 " deleted %" PRIu64,
- namelen, name, rows, deleted_rows);
- for (uint j = 0; j < num_key_parts; j++)
- TOKUDB_HANDLER_TRACE("%lu", rec_per_key[total_key_parts+j]);
- }
- total_key_parts += num_key_parts;
- }
- }
- if (result == HA_ADMIN_OK) {
- int error = tokudb::set_card_in_status(share->status_block, txn, total_key_parts, rec_per_key);
- if (error)
- result = HA_ADMIN_FAILED;
- }
+
+ lock();
+
thd_proc_info(thd, orig_proc_info);
+
TOKUDB_HANDLER_DBUG_RETURN(result);
}
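
The locking contract here is easy to misread: the caller enters with the share mutex held, the method takes a reference for the job, drops the lock across the potentially blocking run_job() call, and re-acquires it before returning. A reduced sketch of that shape — the types and names below are hypothetical, not the TokuDB API:

    #include <cstdio>
    #include <mutex>

    struct share_t {
        std::mutex m;
        int use_count = 0;
    };

    // same shape as analyze_standard(): enter locked, take a reference for
    // the job, unlock across the blocking submit, relock before returning
    static bool submit_job_locked(share_t& s, bool (*run_job)()) {
        s.use_count++;        // the job's destroy() is expected to drop this
        s.m.unlock();
        bool ok = run_job();  // may run the job inline in the foreground case
        s.m.lock();           // invariant restored: locked again on return
        return ok;
    }

    int main() {
        share_t s;
        s.m.lock();           // caller's precondition
        bool ok = submit_job_locked(s, [] { return true; });
        s.m.unlock();
        std::printf("submitted: %d, refs: %d\n", ok, s.use_count);
        return 0;
    }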
+
typedef struct hot_optimize_context {
- THD *thd;
+ THD* thd;
char* write_status_msg;
- ha_tokudb *ha;
+ ha_tokudb* ha;
uint progress_stage;
uint current_table;
uint num_tables;
@@ -160,11 +829,18 @@ typedef struct hot_optimize_context {
static int hot_optimize_progress_fun(void *extra, float progress) {
HOT_OPTIMIZE_CONTEXT context = (HOT_OPTIMIZE_CONTEXT)extra;
if (thd_killed(context->thd)) {
- sprintf(context->write_status_msg, "The process has been killed, aborting hot optimize.");
+ sprintf(
+ context->write_status_msg,
+ "The process has been killed, aborting hot optimize.");
return ER_ABORTING_CONNECTION;
}
float percentage = progress * 100;
- sprintf(context->write_status_msg, "Optimization of index %u of %u about %.lf%% done", context->current_table + 1, context->num_tables, percentage);
+ sprintf(
+ context->write_status_msg,
+ "Optimization of index %u of %u about %.lf%% done",
+ context->current_table + 1,
+ context->num_tables,
+ percentage);
thd_proc_info(context->thd, context->write_status_msg);
#ifdef HA_TOKUDB_HAS_THD_PROGRESS
if (context->progress_stage < context->current_table) {
@@ -193,10 +869,10 @@ static int hot_optimize_progress_fun(void *extra, float progress) {
}
// flatten all DBs in this table; to do so, perform hot optimize on each db
-int ha_tokudb::do_optimize(THD *thd) {
- TOKUDB_HANDLER_DBUG_ENTER("%s", share->table_name);
+int ha_tokudb::do_optimize(THD* thd) {
+ TOKUDB_HANDLER_DBUG_ENTER("%s", share->table_name());
int error = 0;
- const char *orig_proc_info = tokudb_thd_get_proc_info(thd);
+ const char* orig_proc_info = tokudb_thd_get_proc_info(thd);
uint curr_num_DBs = table->s->keys + tokudb_test(hidden_primary_key);
#ifdef HA_TOKUDB_HAS_THD_PROGRESS
@@ -207,16 +883,22 @@ int ha_tokudb::do_optimize(THD *thd) {
// for each DB, run optimize and hot_optimize
for (uint i = 0; i < curr_num_DBs; i++) {
- // only optimize the index if it matches the optimize_index_name session variable
- const char *optimize_index_name = THDVAR(thd, optimize_index_name);
+ // only optimize the index if it matches the optimize_index_name
+ // session variable
+ const char* optimize_index_name =
+ tokudb::sysvars::optimize_index_name(thd);
if (optimize_index_name) {
- const char *this_index_name = i >= table_share->keys ? "primary" : table_share->key_info[i].name;
+ const char* this_index_name =
+ i >= table_share->keys ?
+ "primary" :
+ table_share->key_info[i].name;
if (strcasecmp(optimize_index_name, this_index_name) != 0) {
continue;
}
}
DB* db = share->key_file[i];
+ assert_always(db != NULL);
error = db->optimize(db);
if (error) {
goto cleanup;
@@ -229,11 +911,18 @@ int ha_tokudb::do_optimize(THD *thd) {
hc.ha = this;
hc.current_table = i;
hc.num_tables = curr_num_DBs;
- hc.progress_limit = THDVAR(thd, optimize_index_fraction);
+ hc.progress_limit = tokudb::sysvars::optimize_index_fraction(thd);
hc.progress_last_time = toku_current_time_microsec();
- hc.throttle = THDVAR(thd, optimize_throttle);
+ hc.throttle = tokudb::sysvars::optimize_throttle(thd);
uint64_t loops_run;
- error = db->hot_optimize(db, NULL, NULL, hot_optimize_progress_fun, &hc, &loops_run);
+ error =
+ db->hot_optimize(
+ db,
+ NULL,
+ NULL,
+ hot_optimize_progress_fun,
+ &hc,
+ &loops_run);
if (error) {
goto cleanup;
}
@@ -248,8 +937,8 @@ cleanup:
TOKUDB_HANDLER_DBUG_RETURN(error);
}
-int ha_tokudb::optimize(THD *thd, HA_CHECK_OPT *check_opt) {
- TOKUDB_HANDLER_DBUG_ENTER("%s", share->table_name);
+int ha_tokudb::optimize(THD* thd, HA_CHECK_OPT* check_opt) {
+ TOKUDB_HANDLER_DBUG_ENTER("%s", share->table_name());
int error;
#if TOKU_OPTIMIZE_WITH_RECREATE
error = HA_ADMIN_TRY_ALTER;
@@ -260,23 +949,30 @@ int ha_tokudb::optimize(THD *thd, HA_CHECK_OPT *check_opt) {
}
struct check_context {
- THD *thd;
+ THD* thd;
};
-static int ha_tokudb_check_progress(void *extra, float progress) {
- struct check_context *context = (struct check_context *) extra;
+static int ha_tokudb_check_progress(void* extra, float progress) {
+ struct check_context* context = (struct check_context*)extra;
int result = 0;
if (thd_killed(context->thd))
result = ER_ABORTING_CONNECTION;
return result;
}
-static void ha_tokudb_check_info(THD *thd, TABLE *table, const char *msg) {
+static void ha_tokudb_check_info(THD* thd, TABLE* table, const char* msg) {
if (thd->vio_ok()) {
- char tablename[table->s->db.length + 1 + table->s->table_name.length + 1];
- snprintf(tablename, sizeof tablename, "%.*s.%.*s",
- (int) table->s->db.length, table->s->db.str,
- (int) table->s->table_name.length, table->s->table_name.str);
+ char tablename[
+ table->s->db.length + 1 +
+ table->s->table_name.length + 1];
+ snprintf(
+ tablename,
+ sizeof(tablename),
+ "%.*s.%.*s",
+ (int)table->s->db.length,
+ table->s->db.str,
+ (int)table->s->table_name.length,
+ table->s->table_name.str);
thd->protocol->prepare_for_resend();
thd->protocol->store(tablename, strlen(tablename), system_charset_info);
thd->protocol->store("check", 5, system_charset_info);
@@ -286,9 +982,9 @@ static void ha_tokudb_check_info(THD *thd, TABLE *table, const char *msg) {
}
}
-int ha_tokudb::check(THD *thd, HA_CHECK_OPT *check_opt) {
- TOKUDB_HANDLER_DBUG_ENTER("%s", share->table_name);
- const char *orig_proc_info = tokudb_thd_get_proc_info(thd);
+int ha_tokudb::check(THD* thd, HA_CHECK_OPT* check_opt) {
+ TOKUDB_HANDLER_DBUG_ENTER("%s", share->table_name());
+ const char* orig_proc_info = tokudb_thd_get_proc_info(thd);
int result = HA_ADMIN_OK;
int r;
@@ -305,38 +1001,73 @@ int ha_tokudb::check(THD *thd, HA_CHECK_OPT *check_opt) {
result = HA_ADMIN_INTERNAL_ERROR;
if (result == HA_ADMIN_OK) {
uint32_t num_DBs = table_share->keys + tokudb_test(hidden_primary_key);
- snprintf(write_status_msg, sizeof write_status_msg, "%s primary=%d num=%d", share->table_name, primary_key, num_DBs);
- if (tokudb_debug & TOKUDB_DEBUG_CHECK) {
+ snprintf(
+ write_status_msg,
+ sizeof(write_status_msg),
+ "%s primary=%d num=%d",
+ share->table_name(),
+ primary_key,
+ num_DBs);
+ if (TOKUDB_UNLIKELY(TOKUDB_DEBUG_FLAGS(TOKUDB_DEBUG_CHECK))) {
ha_tokudb_check_info(thd, table, write_status_msg);
time_t now = time(0);
char timebuf[32];
- TOKUDB_HANDLER_TRACE("%.24s %s", ctime_r(&now, timebuf), write_status_msg);
+ TOKUDB_HANDLER_TRACE(
+ "%.24s %s",
+ ctime_r(&now, timebuf),
+ write_status_msg);
}
for (uint i = 0; i < num_DBs; i++) {
- DB *db = share->key_file[i];
- const char *kname = i == primary_key ? "primary" : table_share->key_info[i].name;
- snprintf(write_status_msg, sizeof write_status_msg, "%s key=%s %u", share->table_name, kname, i);
+ DB* db = share->key_file[i];
+ assert_always(db != NULL);
+ const char* kname =
+ i == primary_key ? "primary" : table_share->key_info[i].name;
+ snprintf(
+ write_status_msg,
+ sizeof(write_status_msg),
+ "%s key=%s %u",
+ share->table_name(),
+ kname,
+ i);
thd_proc_info(thd, write_status_msg);
- if (tokudb_debug & TOKUDB_DEBUG_CHECK) {
+ if (TOKUDB_UNLIKELY(TOKUDB_DEBUG_FLAGS(TOKUDB_DEBUG_CHECK))) {
ha_tokudb_check_info(thd, table, write_status_msg);
time_t now = time(0);
char timebuf[32];
- TOKUDB_HANDLER_TRACE("%.24s %s", ctime_r(&now, timebuf), write_status_msg);
+ TOKUDB_HANDLER_TRACE(
+ "%.24s %s",
+ ctime_r(&now, timebuf),
+ write_status_msg);
}
struct check_context check_context = { thd };
- r = db->verify_with_progress(db, ha_tokudb_check_progress, &check_context, (tokudb_debug & TOKUDB_DEBUG_CHECK) != 0, keep_going);
+ r = db->verify_with_progress(
+ db,
+ ha_tokudb_check_progress,
+ &check_context,
+ (tokudb::sysvars::debug & TOKUDB_DEBUG_CHECK) != 0,
+ keep_going);
if (r != 0) {
char msg[32 + strlen(kname)];
sprintf(msg, "Corrupt %s", kname);
ha_tokudb_check_info(thd, table, msg);
}
- snprintf(write_status_msg, sizeof write_status_msg, "%s key=%s %u result=%d", share->table_name, kname, i, r);
+ snprintf(
+ write_status_msg,
+ sizeof(write_status_msg),
+ "%s key=%s %u result=%d",
+ share->full_table_name(),
+ kname,
+ i,
+ r);
thd_proc_info(thd, write_status_msg);
- if (tokudb_debug & TOKUDB_DEBUG_CHECK) {
+ if (TOKUDB_UNLIKELY(TOKUDB_DEBUG_FLAGS(TOKUDB_DEBUG_CHECK))) {
ha_tokudb_check_info(thd, table, write_status_msg);
time_t now = time(0);
char timebuf[32];
- TOKUDB_HANDLER_TRACE("%.24s %s", ctime_r(&now, timebuf), write_status_msg);
+ TOKUDB_HANDLER_TRACE(
+ "%.24s %s",
+ ctime_r(&now, timebuf),
+ write_status_msg);
}
if (result == HA_ADMIN_OK && r != 0) {
result = HA_ADMIN_CORRUPT;
diff --git a/storage/tokudb/ha_tokudb_alter_56.cc b/storage/tokudb/ha_tokudb_alter_56.cc
index a54bebc5420..473c4984eb6 100644
--- a/storage/tokudb/ha_tokudb_alter_56.cc
+++ b/storage/tokudb/ha_tokudb_alter_56.cc
@@ -67,7 +67,7 @@ public:
}
public:
ulong handler_flags;
- DB_TXN *alter_txn;
+ DB_TXN* alter_txn;
bool add_index_changed;
bool incremented_num_DBs, modified_DBs;
bool drop_index_changed;
@@ -79,81 +79,110 @@ public:
bool expand_blob_update_needed;
bool optimize_needed;
Dynamic_array<uint> changed_fields;
- KEY_AND_COL_INFO *table_kc_info;
- KEY_AND_COL_INFO *altered_table_kc_info;
+ KEY_AND_COL_INFO* table_kc_info;
+ KEY_AND_COL_INFO* altered_table_kc_info;
KEY_AND_COL_INFO altered_table_kc_info_base;
};
// Debug function to print out an alter table operation
-void ha_tokudb::print_alter_info(TABLE *altered_table, Alter_inplace_info *ha_alter_info) {
- printf("***are keys of two tables same? %d\n", tables_have_same_keys(table, altered_table, false, false));
+void ha_tokudb::print_alter_info(
+ TABLE* altered_table,
+ Alter_inplace_info* ha_alter_info) {
+
+ TOKUDB_TRACE(
+ "***are keys of two tables same? %d",
+ tables_have_same_keys(table, altered_table, false, false));
if (ha_alter_info->handler_flags) {
- printf("***alter flags set ***\n");
+ TOKUDB_TRACE("***alter flags set ***");
for (int i = 0; i < 32; i++) {
if (ha_alter_info->handler_flags & (1 << i))
- printf("%d\n", i);
+ TOKUDB_TRACE("%d", i);
}
}
- // everyone calculates data by doing some default_values - record[0], but I do not see why
- // that is necessary
- printf("******\n");
- printf("***orig table***\n");
+ // everyone calculates data by doing some default_values - record[0], but
+ // I do not see why that is necessary
+ TOKUDB_TRACE("******");
+ TOKUDB_TRACE("***orig table***");
for (uint i = 0; i < table->s->fields; i++) {
//
// make sure to use table->field, and NOT table->s->field
//
Field* curr_field = table->field[i];
uint null_offset = get_null_offset(table, curr_field);
- printf(
- "name: %s, types: %u %u, nullable: %d, null_offset: %d, is_null_field: %d, is_null %d, pack_length %u\n",
- curr_field->field_name,
- curr_field->real_type(), mysql_to_toku_type(curr_field),
- curr_field->null_bit,
- null_offset,
- curr_field->real_maybe_null(),
- curr_field->real_maybe_null() ? table->s->default_values[null_offset] & curr_field->null_bit : 0xffffffff,
- curr_field->pack_length()
- );
+ TOKUDB_TRACE(
+ "name: %s, types: %u %u, nullable: %d, null_offset: %d, is_null_field: "
+ "%d, is_null %d, pack_length %u",
+ curr_field->field_name,
+ curr_field->real_type(),
+ mysql_to_toku_type(curr_field),
+ curr_field->null_bit,
+ null_offset,
+ curr_field->real_maybe_null(),
+ curr_field->real_maybe_null() ?
+ table->s->default_values[null_offset] & curr_field->null_bit :
+ 0xffffffff,
+ curr_field->pack_length());
}
- printf("******\n");
- printf("***altered table***\n");
+ TOKUDB_TRACE("******");
+ TOKUDB_TRACE("***altered table***");
for (uint i = 0; i < altered_table->s->fields; i++) {
Field* curr_field = altered_table->field[i];
uint null_offset = get_null_offset(altered_table, curr_field);
- printf(
- "name: %s, types: %u %u, nullable: %d, null_offset: %d, is_null_field: %d, is_null %d, pack_length %u\n",
- curr_field->field_name,
- curr_field->real_type(), mysql_to_toku_type(curr_field),
- curr_field->null_bit,
- null_offset,
- curr_field->real_maybe_null(),
- curr_field->real_maybe_null() ? altered_table->s->default_values[null_offset] & curr_field->null_bit : 0xffffffff,
- curr_field->pack_length()
- );
+ TOKUDB_TRACE(
+ "name: %s, types: %u %u, nullable: %d, null_offset: %d, "
+ "is_null_field: %d, is_null %d, pack_length %u",
+ curr_field->field_name,
+ curr_field->real_type(),
+ mysql_to_toku_type(curr_field),
+ curr_field->null_bit,
+ null_offset,
+ curr_field->real_maybe_null(),
+ curr_field->real_maybe_null() ?
+ altered_table->s->default_values[null_offset] &
+ curr_field->null_bit : 0xffffffff,
+ curr_field->pack_length());
}
- printf("******\n");
+ TOKUDB_TRACE("******");
}
-// Given two tables with equal number of fields, find all of the fields with different types
-// and return the indexes of the different fields in the changed_fields array. This function ignores field
-// name differences.
-static int find_changed_fields(TABLE *table_a, TABLE *table_b, Dynamic_array<uint> &changed_fields) {
+// Given two tables with equal number of fields, find all of the fields with
+// different types and return the indexes of the different fields in the
+// changed_fields array. This function ignores field name differences.
+static int find_changed_fields(
+ TABLE* table_a,
+ TABLE* table_b,
+ Dynamic_array<uint>& changed_fields) {
+
for (uint i = 0; i < table_a->s->fields; i++) {
- Field *field_a = table_a->field[i];
- Field *field_b = table_b->field[i];
+ Field* field_a = table_a->field[i];
+ Field* field_b = table_b->field[i];
if (!fields_are_same_type(field_a, field_b))
changed_fields.append(i);
}
return changed_fields.elements();
}
-static bool change_length_is_supported(TABLE *table, TABLE *altered_table, Alter_inplace_info *ha_alter_info, tokudb_alter_ctx *ctx);
-
-static bool change_type_is_supported(TABLE *table, TABLE *altered_table, Alter_inplace_info *ha_alter_info, tokudb_alter_ctx *ctx);
+static bool change_length_is_supported(
+ TABLE* table,
+ TABLE* altered_table,
+ Alter_inplace_info* ha_alter_info,
+ tokudb_alter_ctx* ctx);
+
+static bool change_type_is_supported(
+ TABLE* table,
+ TABLE* altered_table,
+ Alter_inplace_info* ha_alter_info,
+ tokudb_alter_ctx* ctx);
+
+// The ha_alter_info->handler_flags can not be trusted.
+// This function maps the bogus handler flags to something we like.
+static ulong fix_handler_flags(
+ THD* thd,
+ TABLE* table,
+ TABLE* altered_table,
+ Alter_inplace_info* ha_alter_info) {
-// The ha_alter_info->handler_flags can not be trusted. This function maps the bogus handler flags to something we like.
-static ulong fix_handler_flags(THD *thd, TABLE *table, TABLE *altered_table, Alter_inplace_info *ha_alter_info) {
ulong handler_flags = ha_alter_info->handler_flags;
#if 100000 <= MYSQL_VERSION_ID && MYSQL_VERSION_ID <= 100199
@@ -162,23 +191,33 @@ static ulong fix_handler_flags(THD *thd, TABLE *table, TABLE *altered_table, Alt
#endif
// workaround for fill_alter_inplace_info bug (#5193)
- // the function erroneously sets the ADD_INDEX and DROP_INDEX flags for a column addition that does not
- // change the keys. the following code turns the ADD_INDEX and DROP_INDEX flags so that we can do hot
- // column addition later.
- if (handler_flags & (Alter_inplace_info::ADD_COLUMN + Alter_inplace_info::DROP_COLUMN)) {
- if (handler_flags & (Alter_inplace_info::ADD_INDEX + Alter_inplace_info::DROP_INDEX)) {
- if (tables_have_same_keys(table, altered_table, THDVAR(thd, alter_print_error) != 0, false)) {
- handler_flags &= ~(Alter_inplace_info::ADD_INDEX + Alter_inplace_info::DROP_INDEX);
+ // the function erroneously sets the ADD_INDEX and DROP_INDEX flags for a
+ // column addition that does not change the keys.
+    // the following code turns off the ADD_INDEX and DROP_INDEX flags so
+    // that we can do hot column addition later.
+ if (handler_flags &
+ (Alter_inplace_info::ADD_COLUMN + Alter_inplace_info::DROP_COLUMN)) {
+ if (handler_flags &
+ (Alter_inplace_info::ADD_INDEX + Alter_inplace_info::DROP_INDEX)) {
+ if (tables_have_same_keys(
+ table,
+ altered_table,
+ tokudb::sysvars::alter_print_error(thd) != 0, false)) {
+ handler_flags &=
+ ~(Alter_inplace_info::ADD_INDEX +
+ Alter_inplace_info::DROP_INDEX);
}
}
}
- // always allow rename table + any other operation, so turn off the rename flag
+ // always allow rename table + any other operation, so turn off the
+ // rename flag
if (handler_flags & Alter_inplace_info::TOKU_ALTER_RENAME) {
handler_flags &= ~Alter_inplace_info::TOKU_ALTER_RENAME;
}
- // ALTER_COLUMN_TYPE may be set when no columns have been changed, so turn off the flag
+ // ALTER_COLUMN_TYPE may be set when no columns have been changed,
+ // so turn off the flag
if (handler_flags & Alter_inplace_info::ALTER_COLUMN_TYPE) {
if (all_fields_are_same_type(table, altered_table)) {
handler_flags &= ~Alter_inplace_info::ALTER_COLUMN_TYPE;
@@ -191,9 +230,10 @@ static ulong fix_handler_flags(THD *thd, TABLE *table, TABLE *altered_table, Alt
// Require that there is no intersection of add and drop names.
static bool is_disjoint_add_drop(Alter_inplace_info *ha_alter_info) {
for (uint d = 0; d < ha_alter_info->index_drop_count; d++) {
- KEY *drop_key = ha_alter_info->index_drop_buffer[d];
+ KEY* drop_key = ha_alter_info->index_drop_buffer[d];
for (uint a = 0; a < ha_alter_info->index_add_count; a++) {
- KEY *add_key = &ha_alter_info->key_info_buffer[ha_alter_info->index_add_buffer[a]];
+ KEY* add_key =
+ &ha_alter_info->key_info_buffer[ha_alter_info->index_add_buffer[a]];
if (strcmp(drop_key->name, add_key->name) == 0) {
return false;
}
@@ -202,196 +242,294 @@ static bool is_disjoint_add_drop(Alter_inplace_info *ha_alter_info) {
return true;
}
-// Return true if some bit in mask is set and no bit in ~mask is set, otherwise return false.
+// Return true if some bit in mask is set and no bit in ~mask is set,
+// otherwise return false.
static bool only_flags(ulong bits, ulong mask) {
return (bits & mask) != 0 && (bits & ~mask) == 0;
}
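
only_flags(bits, mask) is the subset test that drives every branch of check_if_supported_inplace_alter() below: it is true exactly when bits is non-empty and contains no bit outside mask. A few spot checks, with stand-in flag values:

    #include <cassert>

    static bool only_flags(unsigned long bits, unsigned long mask) {
        return (bits & mask) != 0 && (bits & ~mask) == 0;
    }

    int main() {
        const unsigned long ADD = 1, DROP = 2, OTHER = 4;  // stand-in bits
        assert( only_flags(ADD,         ADD + DROP));  // non-empty subset
        assert( only_flags(ADD | DROP,  ADD + DROP));  // exact match
        assert(!only_flags(ADD | OTHER, ADD + DROP));  // stray bit outside mask
        assert(!only_flags(0,           ADD + DROP));  // nothing set
        return 0;
    }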
-// Check if an alter table operation on this table and described by the alter table parameters is supported inplace
-// and if so, what type of locking is needed to execute it.
-// return values:
+// Check if an alter table operation on this table and described by the alter
+// table parameters is supported inplace and if so, what type of locking is
+// needed to execute it. return values:
+
+// HA_ALTER_INPLACE_NOT_SUPPORTED: alter operation is not supported as an
+// inplace operation, a table copy is required
-// HA_ALTER_INPLACE_NOT_SUPPORTED: alter operation is not supported as an inplace operation, a table copy is required
// HA_ALTER_ERROR: the alter table operation should fail
// HA_ALTER_INPLACE_EXCLUSIVE_LOCK: prepare and alter runs with MDL X
-// HA_ALTER_INPLACE_SHARED_LOCK_AFTER_PREPARE: prepare runs with MDL X, alter runs with MDL SNW
-// HA_ALTER_INPLACE_SHARED_LOCK: prepare and alter methods called with MDL SNW, concurrent reads, no writes
+// HA_ALTER_INPLACE_SHARED_LOCK_AFTER_PREPARE: prepare runs with MDL X,
+// alter runs with MDL SNW
+
+// HA_ALTER_INPLACE_SHARED_LOCK: prepare and alter methods called with MDL SNW,
+// concurrent reads, no writes
+
+// HA_ALTER_INPLACE_NO_LOCK_AFTER_PREPARE: prepare runs with MDL X,
+// alter runs with MDL SW
+
+// HA_ALTER_INPLACE_NO_LOCK: prepare and alter methods called with MDL SW,
+// concurrent reads, writes.
+// must set WRITE_ALLOW_WRITE lock type in the external lock method to avoid
+// deadlocks with the MDL lock and the table lock
+enum_alter_inplace_result ha_tokudb::check_if_supported_inplace_alter(
+ TABLE* altered_table,
+ Alter_inplace_info* ha_alter_info) {
-// HA_ALTER_INPLACE_NO_LOCK_AFTER_PREPARE: prepare runs with MDL X, alter runs with MDL SW
-// HA_ALTER_INPLACE_NO_LOCK: prepare and alter methods called with MDL SW, concurrent reads, writes.
-// must set WRITE_ALLOW_WRITE lock type in the external lock method to avoid deadlocks
-// with the MDL lock and the table lock
-enum_alter_inplace_result ha_tokudb::check_if_supported_inplace_alter(TABLE *altered_table, Alter_inplace_info *ha_alter_info) {
TOKUDB_HANDLER_DBUG_ENTER("");
- if (tokudb_debug & TOKUDB_DEBUG_ALTER_TABLE) {
+ if (TOKUDB_UNLIKELY(TOKUDB_DEBUG_FLAGS(TOKUDB_DEBUG_ALTER_TABLE))) {
print_alter_info(altered_table, ha_alter_info);
}
- enum_alter_inplace_result result = HA_ALTER_INPLACE_NOT_SUPPORTED; // default is NOT inplace
- THD *thd = ha_thd();
+ // default is NOT inplace
+ enum_alter_inplace_result result = HA_ALTER_INPLACE_NOT_SUPPORTED;
+ THD* thd = ha_thd();
// setup context
- tokudb_alter_ctx *ctx = new tokudb_alter_ctx;
+ tokudb_alter_ctx* ctx = new tokudb_alter_ctx;
ha_alter_info->handler_ctx = ctx;
- ctx->handler_flags = fix_handler_flags(thd, table, altered_table, ha_alter_info);
+ ctx->handler_flags =
+ fix_handler_flags(thd, table, altered_table, ha_alter_info);
ctx->table_kc_info = &share->kc_info;
ctx->altered_table_kc_info = &ctx->altered_table_kc_info_base;
memset(ctx->altered_table_kc_info, 0, sizeof (KEY_AND_COL_INFO));
- if (get_disable_hot_alter(thd)) {
+ if (tokudb::sysvars::disable_hot_alter(thd)) {
; // do nothing
- } else
- // add or drop index
- if (only_flags(ctx->handler_flags, Alter_inplace_info::DROP_INDEX + Alter_inplace_info::DROP_UNIQUE_INDEX +
- Alter_inplace_info::ADD_INDEX + Alter_inplace_info::ADD_UNIQUE_INDEX)) {
+ } else if (only_flags(
+ ctx->handler_flags,
+ Alter_inplace_info::DROP_INDEX +
+ Alter_inplace_info::DROP_UNIQUE_INDEX +
+ Alter_inplace_info::ADD_INDEX +
+ Alter_inplace_info::ADD_UNIQUE_INDEX)) {
+ // add or drop index
if (table->s->null_bytes == altered_table->s->null_bytes &&
- (ha_alter_info->index_add_count > 0 || ha_alter_info->index_drop_count > 0) &&
- !tables_have_same_keys(table, altered_table, THDVAR(thd, alter_print_error) != 0, false) &&
+ (ha_alter_info->index_add_count > 0 ||
+ ha_alter_info->index_drop_count > 0) &&
+ !tables_have_same_keys(
+ table,
+ altered_table,
+ tokudb::sysvars::alter_print_error(thd) != 0, false) &&
is_disjoint_add_drop(ha_alter_info)) {
- if (ctx->handler_flags & (Alter_inplace_info::DROP_INDEX + Alter_inplace_info::DROP_UNIQUE_INDEX)) {
- // the fractal tree can not handle dropping an index concurrent with querying with the index.
+ if (ctx->handler_flags &
+ (Alter_inplace_info::DROP_INDEX +
+ Alter_inplace_info::DROP_UNIQUE_INDEX)) {
+ // the fractal tree can not handle dropping an index concurrent
+ // with querying with the index.
// we grab an exclusive MDL for the drop index.
result = HA_ALTER_INPLACE_EXCLUSIVE_LOCK;
} else {
result = HA_ALTER_INPLACE_SHARED_LOCK_AFTER_PREPARE;
- // someday, allow multiple hot indexes via alter table add key. don't forget to change the store_lock function.
- // for now, hot indexing is only supported via session variable with the create index sql command
- if (ha_alter_info->index_add_count == 1 && ha_alter_info->index_drop_count == 0 && // only one add or drop
- ctx->handler_flags == Alter_inplace_info::ADD_INDEX && // must be add index not add unique index
- thd_sql_command(thd) == SQLCOM_CREATE_INDEX && // must be a create index command
- get_create_index_online(thd)) { // must be enabled
- // external_lock set WRITE_ALLOW_WRITE which allows writes concurrent with the index creation
+ // someday, allow multiple hot indexes via alter table add key.
+ // don't forget to change the store_lock function.
+ // for now, hot indexing is only supported via session variable
+ // with the create index sql command
+ if (ha_alter_info->index_add_count == 1 &&
+ // only one add or drop
+ ha_alter_info->index_drop_count == 0 &&
+ // must be add index not add unique index
+ ctx->handler_flags == Alter_inplace_info::ADD_INDEX &&
+ // must be a create index command
+ thd_sql_command(thd) == SQLCOM_CREATE_INDEX &&
+ // must be enabled
+ tokudb::sysvars::create_index_online(thd)) {
+ // external_lock set WRITE_ALLOW_WRITE which allows writes
+ // concurrent with the index creation
result = HA_ALTER_INPLACE_NO_LOCK_AFTER_PREPARE;
}
}
}
- } else
- // column default
- if (only_flags(ctx->handler_flags, Alter_inplace_info::ALTER_COLUMN_DEFAULT)) {
+ } else if (only_flags(
+ ctx->handler_flags,
+ Alter_inplace_info::ALTER_COLUMN_DEFAULT)) {
+ // column default
if (table->s->null_bytes == altered_table->s->null_bytes)
result = HA_ALTER_INPLACE_EXCLUSIVE_LOCK;
- } else
- // column rename
- if (ctx->handler_flags & Alter_inplace_info::ALTER_COLUMN_NAME &&
- only_flags(ctx->handler_flags, Alter_inplace_info::ALTER_COLUMN_NAME + Alter_inplace_info::ALTER_COLUMN_DEFAULT)) {
- // we have identified a possible column rename,
+ } else if (ctx->handler_flags & Alter_inplace_info::ALTER_COLUMN_NAME &&
+ only_flags(
+ ctx->handler_flags,
+ Alter_inplace_info::ALTER_COLUMN_NAME +
+ Alter_inplace_info::ALTER_COLUMN_DEFAULT)) {
+ // column rename
+ // we have identified a possible column rename,
// but let's do some more checks
-
+
// we will only allow an hcr if there are no changes
// in column positions (ALTER_COLUMN_ORDER is not set)
-
+
// now need to verify that one and only one column
// has changed only its name. If we find anything to
// the contrary, we don't allow it, also check indexes
if (table->s->null_bytes == altered_table->s->null_bytes) {
- bool cr_supported = column_rename_supported(table, altered_table, (ctx->handler_flags & Alter_inplace_info::ALTER_COLUMN_ORDER) != 0);
+ bool cr_supported =
+ column_rename_supported(
+ table,
+ altered_table,
+ (ctx->handler_flags &
+ Alter_inplace_info::ALTER_COLUMN_ORDER) != 0);
if (cr_supported)
result = HA_ALTER_INPLACE_EXCLUSIVE_LOCK;
}
- } else
- // add column
- if (ctx->handler_flags & Alter_inplace_info::ADD_COLUMN &&
- only_flags(ctx->handler_flags, Alter_inplace_info::ADD_COLUMN + Alter_inplace_info::ALTER_COLUMN_ORDER) &&
- setup_kc_info(altered_table, ctx->altered_table_kc_info) == 0) {
-
+ } else if (ctx->handler_flags & Alter_inplace_info::ADD_COLUMN &&
+ only_flags(
+ ctx->handler_flags,
+ Alter_inplace_info::ADD_COLUMN +
+ Alter_inplace_info::ALTER_COLUMN_ORDER) &&
+ setup_kc_info(altered_table, ctx->altered_table_kc_info) == 0) {
+
+ // add column
uint32_t added_columns[altered_table->s->fields];
uint32_t num_added_columns = 0;
- int r = find_changed_columns(added_columns, &num_added_columns, table, altered_table);
+ int r =
+ find_changed_columns(
+ added_columns,
+ &num_added_columns,
+ table,
+ altered_table);
if (r == 0) {
- if (tokudb_debug & TOKUDB_DEBUG_ALTER_TABLE) {
+ if (TOKUDB_UNLIKELY(TOKUDB_DEBUG_FLAGS(TOKUDB_DEBUG_ALTER_TABLE))) {
for (uint32_t i = 0; i < num_added_columns; i++) {
uint32_t curr_added_index = added_columns[i];
- Field* curr_added_field = altered_table->field[curr_added_index];
- printf("Added column: index %d, name %s\n", curr_added_index, curr_added_field->field_name);
+ Field* curr_added_field =
+ altered_table->field[curr_added_index];
+ TOKUDB_TRACE(
+ "Added column: index %d, name %s",
+ curr_added_index,
+ curr_added_field->field_name);
}
}
result = HA_ALTER_INPLACE_EXCLUSIVE_LOCK;
}
- } else
- // drop column
- if (ctx->handler_flags & Alter_inplace_info::DROP_COLUMN &&
- only_flags(ctx->handler_flags, Alter_inplace_info::DROP_COLUMN + Alter_inplace_info::ALTER_COLUMN_ORDER) &&
- setup_kc_info(altered_table, ctx->altered_table_kc_info) == 0) {
-
+ } else if (ctx->handler_flags & Alter_inplace_info::DROP_COLUMN &&
+ only_flags(
+ ctx->handler_flags,
+ Alter_inplace_info::DROP_COLUMN +
+ Alter_inplace_info::ALTER_COLUMN_ORDER) &&
+ setup_kc_info(altered_table, ctx->altered_table_kc_info) == 0) {
+
+ // drop column
uint32_t dropped_columns[table->s->fields];
uint32_t num_dropped_columns = 0;
- int r = find_changed_columns(dropped_columns, &num_dropped_columns, altered_table, table);
+ int r =
+ find_changed_columns(
+ dropped_columns,
+ &num_dropped_columns,
+ altered_table,
+ table);
if (r == 0) {
- if (tokudb_debug & TOKUDB_DEBUG_ALTER_TABLE) {
+ if (TOKUDB_UNLIKELY(TOKUDB_DEBUG_FLAGS(TOKUDB_DEBUG_ALTER_TABLE))) {
for (uint32_t i = 0; i < num_dropped_columns; i++) {
uint32_t curr_dropped_index = dropped_columns[i];
Field* curr_dropped_field = table->field[curr_dropped_index];
- printf("Dropped column: index %d, name %s\n", curr_dropped_index, curr_dropped_field->field_name);
+ TOKUDB_TRACE(
+ "Dropped column: index %d, name %s",
+ curr_dropped_index,
+ curr_dropped_field->field_name);
}
}
result = HA_ALTER_INPLACE_EXCLUSIVE_LOCK;
}
- } else
- // change column length
- if ((ctx->handler_flags & Alter_inplace_info::ALTER_COLUMN_EQUAL_PACK_LENGTH) &&
- only_flags(ctx->handler_flags, Alter_inplace_info::ALTER_COLUMN_EQUAL_PACK_LENGTH + Alter_inplace_info::ALTER_COLUMN_DEFAULT) &&
- table->s->fields == altered_table->s->fields &&
- find_changed_fields(table, altered_table, ctx->changed_fields) > 0 &&
- setup_kc_info(altered_table, ctx->altered_table_kc_info) == 0) {
-
- if (change_length_is_supported(table, altered_table, ha_alter_info, ctx)) {
+ } else if ((ctx->handler_flags &
+ Alter_inplace_info::ALTER_COLUMN_EQUAL_PACK_LENGTH) &&
+ only_flags(
+ ctx->handler_flags,
+ Alter_inplace_info::ALTER_COLUMN_EQUAL_PACK_LENGTH +
+ Alter_inplace_info::ALTER_COLUMN_DEFAULT) &&
+ table->s->fields == altered_table->s->fields &&
+ find_changed_fields(
+ table,
+ altered_table,
+ ctx->changed_fields) > 0 &&
+ setup_kc_info(altered_table, ctx->altered_table_kc_info) == 0) {
+
+ // change column length
+ if (change_length_is_supported(
+ table,
+ altered_table,
+ ha_alter_info, ctx)) {
result = HA_ALTER_INPLACE_EXCLUSIVE_LOCK;
}
- } else
- // change column type
- if ((ctx->handler_flags & Alter_inplace_info::ALTER_COLUMN_TYPE) &&
- only_flags(ctx->handler_flags, Alter_inplace_info::ALTER_COLUMN_TYPE + Alter_inplace_info::ALTER_COLUMN_DEFAULT) &&
- table->s->fields == altered_table->s->fields &&
- find_changed_fields(table, altered_table, ctx->changed_fields) > 0 &&
- setup_kc_info(altered_table, ctx->altered_table_kc_info) == 0) {
-
- if (change_type_is_supported(table, altered_table, ha_alter_info, ctx)) {
+ } else if ((ctx->handler_flags & Alter_inplace_info::ALTER_COLUMN_TYPE) &&
+ only_flags(
+ ctx->handler_flags,
+ Alter_inplace_info::ALTER_COLUMN_TYPE +
+ Alter_inplace_info::ALTER_COLUMN_DEFAULT) &&
+ table->s->fields == altered_table->s->fields &&
+ find_changed_fields(
+ table,
+ altered_table,
+ ctx->changed_fields) > 0 &&
+ setup_kc_info(altered_table, ctx->altered_table_kc_info) == 0) {
+
+ // change column type
+ if (change_type_is_supported(
+ table,
+ altered_table,
+ ha_alter_info, ctx)) {
result = HA_ALTER_INPLACE_EXCLUSIVE_LOCK;
}
- } else
- if (only_flags(ctx->handler_flags, Alter_inplace_info::CHANGE_CREATE_OPTION)) {
- HA_CREATE_INFO *create_info = ha_alter_info->create_info;
+ } else if (only_flags(
+ ctx->handler_flags,
+ Alter_inplace_info::CHANGE_CREATE_OPTION)) {
+
+ HA_CREATE_INFO* create_info = ha_alter_info->create_info;
#if TOKU_INCLUDE_OPTION_STRUCTS
// set the USED_ROW_FORMAT flag for use later in this file for changes in the table's
// compression
- if (create_info->option_struct->row_format != table_share->option_struct->row_format)
+ if (create_info->option_struct->row_format !=
+ table_share->option_struct->row_format)
create_info->used_fields |= HA_CREATE_USED_ROW_FORMAT;
#endif
// alter auto_increment
if (only_flags(create_info->used_fields, HA_CREATE_USED_AUTO)) {
// do a sanity check that the table is what we think it is
- if (tables_have_same_keys_and_columns(table, altered_table, THDVAR(thd, alter_print_error) != 0)) {
+ if (tables_have_same_keys_and_columns(
+ table,
+ altered_table,
+ tokudb::sysvars::alter_print_error(thd) != 0)) {
result = HA_ALTER_INPLACE_EXCLUSIVE_LOCK;
}
- }
- // alter row_format
- else if (only_flags(create_info->used_fields, HA_CREATE_USED_ROW_FORMAT)) {
+ } else if (only_flags(
+ create_info->used_fields,
+ HA_CREATE_USED_ROW_FORMAT)) {
+ // alter row_format
// do a sanity check that the table is what we think it is
- if (tables_have_same_keys_and_columns(table, altered_table, THDVAR(thd, alter_print_error) != 0)) {
+ if (tables_have_same_keys_and_columns(
+ table,
+ altered_table,
+ tokudb::sysvars::alter_print_error(thd) != 0)) {
result = HA_ALTER_INPLACE_EXCLUSIVE_LOCK;
}
}
- }
+ }
#if TOKU_OPTIMIZE_WITH_RECREATE
- else if (only_flags(ctx->handler_flags, Alter_inplace_info::RECREATE_TABLE + Alter_inplace_info::ALTER_COLUMN_DEFAULT)) {
+ else if (only_flags(
+ ctx->handler_flags,
+ Alter_inplace_info::RECREATE_TABLE +
+ Alter_inplace_info::ALTER_COLUMN_DEFAULT)) {
ctx->optimize_needed = true;
result = HA_ALTER_INPLACE_NO_LOCK_AFTER_PREPARE;
}
#endif
- if (result != HA_ALTER_INPLACE_NOT_SUPPORTED && table->s->null_bytes != altered_table->s->null_bytes &&
- (tokudb_debug & TOKUDB_DEBUG_ALTER_TABLE)) {
+ if (TOKUDB_UNLIKELY(TOKUDB_DEBUG_FLAGS(TOKUDB_DEBUG_ALTER_TABLE)) &&
+ result != HA_ALTER_INPLACE_NOT_SUPPORTED &&
+ table->s->null_bytes != altered_table->s->null_bytes) {
+
TOKUDB_HANDLER_TRACE("q %s", thd->query());
- TOKUDB_HANDLER_TRACE("null bytes %u -> %u", table->s->null_bytes, altered_table->s->null_bytes);
+ TOKUDB_HANDLER_TRACE(
+ "null bytes %u -> %u",
+ table->s->null_bytes,
+ altered_table->s->null_bytes);
}
- // turn a not supported result into an error if the slow alter table (copy) is disabled
- if (result == HA_ALTER_INPLACE_NOT_SUPPORTED && get_disable_slow_alter(thd)) {
+ // turn a not supported result into an error if the slow alter table
+ // (copy) is disabled
+ if (result == HA_ALTER_INPLACE_NOT_SUPPORTED &&
+ tokudb::sysvars::disable_slow_alter(thd)) {
print_error(HA_ERR_UNSUPPORTED, MYF(0));
result = HA_ALTER_ERROR;
}
@@ -400,46 +538,77 @@ enum_alter_inplace_result ha_tokudb::check_if_supported_inplace_alter(TABLE *alt
}
// Prepare for the alter operations
-bool ha_tokudb::prepare_inplace_alter_table(TABLE *altered_table, Alter_inplace_info *ha_alter_info) {
+bool ha_tokudb::prepare_inplace_alter_table(
+ TABLE* altered_table,
+ Alter_inplace_info* ha_alter_info) {
+
TOKUDB_HANDLER_DBUG_ENTER("");
- tokudb_alter_ctx *ctx = static_cast<tokudb_alter_ctx *>(ha_alter_info->handler_ctx);
- assert(transaction); // transaction must exist after table is locked
+ tokudb_alter_ctx* ctx =
+ static_cast<tokudb_alter_ctx*>(ha_alter_info->handler_ctx);
+ assert_always(transaction); // transaction must exist after table is locked
ctx->alter_txn = transaction;
bool result = false; // success
DBUG_RETURN(result);
}
// Execute the alter operations.
-bool ha_tokudb::inplace_alter_table(TABLE *altered_table, Alter_inplace_info *ha_alter_info) {
+bool ha_tokudb::inplace_alter_table(
+ TABLE* altered_table,
+ Alter_inplace_info* ha_alter_info) {
+
TOKUDB_HANDLER_DBUG_ENTER("");
int error = 0;
- tokudb_alter_ctx *ctx = static_cast<tokudb_alter_ctx *>(ha_alter_info->handler_ctx);
- HA_CREATE_INFO *create_info = ha_alter_info->create_info;
-
- if (error == 0 && (ctx->handler_flags & (Alter_inplace_info::DROP_INDEX + Alter_inplace_info::DROP_UNIQUE_INDEX))) {
+ tokudb_alter_ctx* ctx =
+ static_cast<tokudb_alter_ctx*>(ha_alter_info->handler_ctx);
+ HA_CREATE_INFO* create_info = ha_alter_info->create_info;
+
+ // this should be enough to handle locking as the higher level MDL
+ // on this table should prevent any new analyze tasks.
+ share->cancel_background_jobs();
+
+ if (error == 0 &&
+ (ctx->handler_flags &
+ (Alter_inplace_info::DROP_INDEX +
+ Alter_inplace_info::DROP_UNIQUE_INDEX))) {
error = alter_table_drop_index(altered_table, ha_alter_info);
}
- if (error == 0 && (ctx->handler_flags & (Alter_inplace_info::ADD_INDEX + Alter_inplace_info::ADD_UNIQUE_INDEX))) {
+ if (error == 0 &&
+ (ctx->handler_flags &
+ (Alter_inplace_info::ADD_INDEX +
+ Alter_inplace_info::ADD_UNIQUE_INDEX))) {
error = alter_table_add_index(altered_table, ha_alter_info);
}
- if (error == 0 && (ctx->handler_flags & (Alter_inplace_info::ADD_COLUMN + Alter_inplace_info::DROP_COLUMN))) {
+ if (error == 0 &&
+ (ctx->handler_flags &
+ (Alter_inplace_info::ADD_COLUMN +
+ Alter_inplace_info::DROP_COLUMN))) {
error = alter_table_add_or_drop_column(altered_table, ha_alter_info);
}
- if (error == 0 && (ctx->handler_flags & Alter_inplace_info::CHANGE_CREATE_OPTION) && (create_info->used_fields & HA_CREATE_USED_AUTO)) {
- error = write_auto_inc_create(share->status_block, create_info->auto_increment_value, ctx->alter_txn);
+ if (error == 0 &&
+ (ctx->handler_flags & Alter_inplace_info::CHANGE_CREATE_OPTION) &&
+ (create_info->used_fields & HA_CREATE_USED_AUTO)) {
+ error = write_auto_inc_create(
+ share->status_block,
+ create_info->auto_increment_value,
+ ctx->alter_txn);
}
- if (error == 0 && (ctx->handler_flags & Alter_inplace_info::CHANGE_CREATE_OPTION) && (create_info->used_fields & HA_CREATE_USED_ROW_FORMAT)) {
+ if (error == 0 &&
+ (ctx->handler_flags & Alter_inplace_info::CHANGE_CREATE_OPTION) &&
+ (create_info->used_fields & HA_CREATE_USED_ROW_FORMAT)) {
// Get the current compression
DB *db = share->key_file[0];
error = db->get_compression_method(db, &ctx->orig_compression_method);
- assert(error == 0);
+ assert_always(error == 0);
// Set the new compression
#if TOKU_INCLUDE_OPTION_STRUCTS
- toku_compression_method method = row_format_to_toku_compression_method((srv_row_format_t) create_info->option_struct->row_format);
+ toku_compression_method method =
+ row_format_to_toku_compression_method(
+ (tokudb::sysvars::row_format_t)create_info->option_struct->row_format);
#else
- toku_compression_method method = row_type_to_toku_compression_method(create_info->row_type);
+ toku_compression_method method =
+ row_type_to_toku_compression_method(create_info->row_type);
#endif
uint32_t curr_num_DBs = table->s->keys + tokudb_test(hidden_primary_key);
for (uint32_t i = 0; i < curr_num_DBs; i++) {
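
The hunk ends at the loop header; judging from the rollback path later in this diff, which calls `change_compression_method` on each dictionary to restore `ctx->orig_compression_method`, the elided body applies the new method to every `key_file` entry. A compilable sketch of that shape over a stand-in `DB` type:

    #include <cstdint>

    // Stand-in for the TokuFT DB handle (assumption for illustration only).
    struct DB {
        int (*change_compression_method)(DB* db, int method);
    };

    // One dictionary per key, plus one when the primary key is hidden.
    static int set_compression_on_all_dbs(DB** key_file,
                                          uint32_t num_keys,
                                          bool hidden_primary_key,
                                          int method) {
        uint32_t curr_num_DBs = num_keys + (hidden_primary_key ? 1 : 0);
        for (uint32_t i = 0; i < curr_num_DBs; i++) {
            DB* db = key_file[i];
            int error = db->change_compression_method(db, method);
            if (error)
                return error;  // stop at the first failing dictionary
        }
        return 0;
    }

    static int fake_change(DB*, int) { return 0; }

    int main() {
        DB d = {fake_change};
        DB* files[2] = {&d, &d};
        return set_compression_on_all_dbs(files, 1, true, 0);
    }
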
@@ -457,13 +626,19 @@ bool ha_tokudb::inplace_alter_table(TABLE *altered_table, Alter_inplace_info *ha
error = alter_table_expand_columns(altered_table, ha_alter_info);
if (error == 0 && ctx->expand_varchar_update_needed)
- error = alter_table_expand_varchar_offsets(altered_table, ha_alter_info);
+ error = alter_table_expand_varchar_offsets(
+ altered_table,
+ ha_alter_info);
if (error == 0 && ctx->expand_blob_update_needed)
error = alter_table_expand_blobs(altered_table, ha_alter_info);
if (error == 0 && ctx->reset_card) {
- error = tokudb::alter_card(share->status_block, ctx->alter_txn, table->s, altered_table->s);
+ error = tokudb::alter_card(
+ share->status_block,
+ ctx->alter_txn,
+ table->s,
+ altered_table->s);
}
if (error == 0 && ctx->optimize_needed) {
error = do_optimize(ha_thd());
@@ -472,11 +647,15 @@ bool ha_tokudb::inplace_alter_table(TABLE *altered_table, Alter_inplace_info *ha
#if (50600 <= MYSQL_VERSION_ID && MYSQL_VERSION_ID <= 50699) || \
(50700 <= MYSQL_VERSION_ID && MYSQL_VERSION_ID <= 50799)
#if WITH_PARTITION_STORAGE_ENGINE
- if (error == 0 && (TOKU_PARTITION_WRITE_FRM_DATA || altered_table->part_info == NULL)) {
+ if (error == 0 &&
+ (TOKU_PARTITION_WRITE_FRM_DATA || altered_table->part_info == NULL)) {
#else
if (error == 0) {
#endif
- error = write_frm_data(share->status_block, ctx->alter_txn, altered_table->s->path.str);
+ error = write_frm_data(
+ share->status_block,
+ ctx->alter_txn,
+ altered_table->s->path.str);
}
#endif
@@ -489,20 +668,34 @@ bool ha_tokudb::inplace_alter_table(TABLE *altered_table, Alter_inplace_info *ha
DBUG_RETURN(result);
}
-int ha_tokudb::alter_table_add_index(TABLE *altered_table, Alter_inplace_info *ha_alter_info) {
+int ha_tokudb::alter_table_add_index(
+ TABLE* altered_table,
+ Alter_inplace_info* ha_alter_info) {
// sort keys in add index order
- KEY *key_info = (KEY*) tokudb_my_malloc(sizeof (KEY) * ha_alter_info->index_add_count, MYF(MY_WME));
+ KEY* key_info = (KEY*)tokudb::memory::malloc(
+ sizeof(KEY) * ha_alter_info->index_add_count,
+ MYF(MY_WME));
for (uint i = 0; i < ha_alter_info->index_add_count; i++) {
KEY *key = &key_info[i];
*key = ha_alter_info->key_info_buffer[ha_alter_info->index_add_buffer[i]];
- for (KEY_PART_INFO *key_part= key->key_part; key_part < key->key_part + get_key_parts(key); key_part++)
+ for (KEY_PART_INFO* key_part = key->key_part;
+ key_part < key->key_part + key->user_defined_key_parts;
+ key_part++) {
key_part->field = table->field[key_part->fieldnr];
+ }
}
- tokudb_alter_ctx *ctx = static_cast<tokudb_alter_ctx *>(ha_alter_info->handler_ctx);
+ tokudb_alter_ctx* ctx =
+ static_cast<tokudb_alter_ctx*>(ha_alter_info->handler_ctx);
ctx->add_index_changed = true;
- int error = tokudb_add_index(table, key_info, ha_alter_info->index_add_count, ctx->alter_txn, &ctx->incremented_num_DBs, &ctx->modified_DBs);
+ int error = tokudb_add_index(
+ table,
+ key_info,
+ ha_alter_info->index_add_count,
+ ctx->alter_txn,
+ &ctx->incremented_num_DBs,
+ &ctx->modified_DBs);
if (error == HA_ERR_FOUND_DUPP_KEY) {
// hack for now, in case of duplicate key error,
// because at the moment we cannot display the right key
@@ -511,7 +704,7 @@ int ha_tokudb::alter_table_add_index(TABLE *altered_table, Alter_inplace_info *h
last_dup_key = MAX_KEY;
}
- tokudb_my_free(key_info);
+ tokudb::memory::free(key_info);
if (error == 0)
ctx->reset_card = true;
@@ -519,7 +712,11 @@ int ha_tokudb::alter_table_add_index(TABLE *altered_table, Alter_inplace_info *h
return error;
}
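
The gathering loop above reads keys out of `ha_alter_info->key_info_buffer` through the `index_add_buffer` indirection so that `tokudb_add_index` receives them in add order. A toy model of that indirection with a hypothetical key type:

    #include <cassert>
    #include <string>
    #include <vector>

    // Toy stand-in for KEY: just a name.
    struct Key { std::string name; };

    // key_info_buffer holds the altered table's full key list;
    // index_add_buffer holds, in add order, the positions of the added keys.
    static std::vector<Key> gather_added_keys(
            const std::vector<Key>& key_info_buffer,
            const std::vector<unsigned>& index_add_buffer) {
        std::vector<Key> added;
        added.reserve(index_add_buffer.size());
        for (unsigned pos : index_add_buffer)
            added.push_back(key_info_buffer[pos]);
        return added;
    }

    int main() {
        std::vector<Key> all = {{"PRIMARY"}, {"k_a"}, {"k_b"}, {"k_c"}};
        std::vector<unsigned> add = {3, 1};  // k_c was requested before k_a
        std::vector<Key> added = gather_added_keys(all, add);
        assert(added[0].name == "k_c" && added[1].name == "k_a");
        return 0;
    }
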
-static bool find_index_of_key(const char *key_name, TABLE *table, uint *index_offset_ptr) {
+static bool find_index_of_key(
+ const char* key_name,
+ TABLE* table,
+ uint* index_offset_ptr) {
+
for (uint i = 0; i < table->s->keys; i++) {
if (strcmp(key_name, table->key_info[i].name) == 0) {
*index_offset_ptr = i;
@@ -529,7 +726,12 @@ static bool find_index_of_key(const char *key_name, TABLE *table, uint *index_of
return false;
}
-static bool find_index_of_key(const char *key_name, KEY *key_info, uint key_count, uint *index_offset_ptr) {
+static bool find_index_of_key(
+ const char* key_name,
+ KEY* key_info,
+ uint key_count,
+ uint* index_offset_ptr) {
+
for (uint i = 0; i < key_count; i++) {
if (strcmp(key_name, key_info[i].name) == 0) {
*index_offset_ptr = i;
@@ -539,26 +741,42 @@ static bool find_index_of_key(const char *key_name, KEY *key_info, uint key_coun
return false;
}
-int ha_tokudb::alter_table_drop_index(TABLE *altered_table, Alter_inplace_info *ha_alter_info) {
+int ha_tokudb::alter_table_drop_index(
+ TABLE* altered_table,
+ Alter_inplace_info* ha_alter_info) {
+
KEY *key_info = table->key_info;
// translate key names to indexes into the key_info array
uint index_drop_offsets[ha_alter_info->index_drop_count];
for (uint i = 0; i < ha_alter_info->index_drop_count; i++) {
bool found;
- found = find_index_of_key(ha_alter_info->index_drop_buffer[i]->name, table, &index_drop_offsets[i]);
+ found = find_index_of_key(
+ ha_alter_info->index_drop_buffer[i]->name,
+ table,
+ &index_drop_offsets[i]);
if (!found) {
// undo of add key in partition engine
- found = find_index_of_key(ha_alter_info->index_drop_buffer[i]->name, ha_alter_info->key_info_buffer, ha_alter_info->key_count, &index_drop_offsets[i]);
- assert(found);
+ found = find_index_of_key(
+ ha_alter_info->index_drop_buffer[i]->name,
+ ha_alter_info->key_info_buffer,
+ ha_alter_info->key_count,
+ &index_drop_offsets[i]);
+ assert_always(found);
key_info = ha_alter_info->key_info_buffer;
}
}
// drop indexes
- tokudb_alter_ctx *ctx = static_cast<tokudb_alter_ctx *>(ha_alter_info->handler_ctx);
+ tokudb_alter_ctx* ctx =
+ static_cast<tokudb_alter_ctx*>(ha_alter_info->handler_ctx);
ctx->drop_index_changed = true;
- int error = drop_indexes(table, index_drop_offsets, ha_alter_info->index_drop_count, key_info, ctx->alter_txn);
+ int error = drop_indexes(
+ table,
+ index_drop_offsets,
+ ha_alter_info->index_drop_count,
+ key_info,
+ ctx->alter_txn);
if (error == 0)
ctx->reset_card = true;
@@ -566,93 +784,122 @@ int ha_tokudb::alter_table_drop_index(TABLE *altered_table, Alter_inplace_info *
return error;
}
-int ha_tokudb::alter_table_add_or_drop_column(TABLE *altered_table, Alter_inplace_info *ha_alter_info) {
- tokudb_alter_ctx *ctx = static_cast<tokudb_alter_ctx *>(ha_alter_info->handler_ctx);
+int ha_tokudb::alter_table_add_or_drop_column(
+ TABLE* altered_table,
+ Alter_inplace_info* ha_alter_info) {
+
+ tokudb_alter_ctx* ctx =
+ static_cast<tokudb_alter_ctx*>(ha_alter_info->handler_ctx);
int error;
uchar *column_extra = NULL;
uint32_t max_column_extra_size;
uint32_t num_column_extra;
uint32_t num_columns = 0;
uint32_t curr_num_DBs = table->s->keys + tokudb_test(hidden_primary_key);
-
- uint32_t columns[table->s->fields + altered_table->s->fields]; // set size such that we know it is big enough for both cases
+ // set size such that we know it is big enough for both cases
+ uint32_t columns[table->s->fields + altered_table->s->fields];
memset(columns, 0, sizeof(columns));
// generate the array of columns
if (ha_alter_info->handler_flags & Alter_inplace_info::DROP_COLUMN) {
find_changed_columns(
- columns,
- &num_columns,
- altered_table,
- table
- );
- } else
- if (ha_alter_info->handler_flags & Alter_inplace_info::ADD_COLUMN) {
+ columns,
+ &num_columns,
+ altered_table,
+ table);
+ } else if (ha_alter_info->handler_flags & Alter_inplace_info::ADD_COLUMN) {
find_changed_columns(
- columns,
- &num_columns,
- table,
- altered_table
- );
- } else
- assert(0);
+ columns,
+ &num_columns,
+ table,
+ altered_table);
+ } else {
+ assert_unreachable();
+ }
max_column_extra_size =
- STATIC_ROW_MUTATOR_SIZE + //max static row_mutator
- 4 + num_columns*(1+1+4+1+1+4) + altered_table->s->reclength + // max dynamic row_mutator
- (4 + share->kc_info.num_blobs) + // max static blob size
- (num_columns*(1+4+1+4)); // max dynamic blob size
- column_extra = (uchar *)tokudb_my_malloc(max_column_extra_size, MYF(MY_WME));
- if (column_extra == NULL) { error = ENOMEM; goto cleanup; }
+ // max static row_mutator
+ STATIC_ROW_MUTATOR_SIZE +
+ // max dynamic row_mutator
+ 4 + num_columns*(1+1+4+1+1+4) + altered_table->s->reclength +
+ // max static blob size
+ (4 + share->kc_info.num_blobs) +
+ // max dynamic blob size
+ (num_columns*(1+4+1+4));
+ column_extra = (uchar*)tokudb::memory::malloc(
+ max_column_extra_size,
+ MYF(MY_WME));
+ if (column_extra == NULL) {
+ error = ENOMEM;
+ goto cleanup;
+ }
for (uint32_t i = 0; i < curr_num_DBs; i++) {
// change to a new descriptor
DBT row_descriptor; memset(&row_descriptor, 0, sizeof row_descriptor);
- error = new_row_descriptor(table, altered_table, ha_alter_info, i, &row_descriptor);
+ error = new_row_descriptor(
+ table,
+ altered_table,
+ ha_alter_info,
+ i,
+ &row_descriptor);
if (error)
goto cleanup;
- error = share->key_file[i]->change_descriptor(share->key_file[i], ctx->alter_txn, &row_descriptor, 0);
- tokudb_my_free(row_descriptor.data);
+ error = share->key_file[i]->change_descriptor(
+ share->key_file[i],
+ ctx->alter_txn,
+ &row_descriptor,
+ 0);
+ tokudb::memory::free(row_descriptor.data);
if (error)
goto cleanup;
if (i == primary_key || key_is_clustering(&table_share->key_info[i])) {
num_column_extra = fill_row_mutator(
- column_extra,
- columns,
- num_columns,
- altered_table,
- ctx->altered_table_kc_info,
- i,
- (ha_alter_info->handler_flags & Alter_inplace_info::ADD_COLUMN) != 0 // true if adding columns, otherwise is a drop
- );
+ column_extra,
+ columns,
+ num_columns,
+ altered_table,
+ ctx->altered_table_kc_info,
+ i,
+                // true if adding columns, otherwise this is a drop
+ (ha_alter_info->handler_flags &
+ Alter_inplace_info::ADD_COLUMN) != 0);
DBT column_dbt; memset(&column_dbt, 0, sizeof column_dbt);
column_dbt.data = column_extra;
column_dbt.size = num_column_extra;
DBUG_ASSERT(num_column_extra <= max_column_extra_size);
error = share->key_file[i]->update_broadcast(
- share->key_file[i],
- ctx->alter_txn,
- &column_dbt,
- DB_IS_RESETTING_OP
- );
- if (error) { goto cleanup; }
+ share->key_file[i],
+ ctx->alter_txn,
+ &column_dbt,
+ DB_IS_RESETTING_OP);
+ if (error) {
+ goto cleanup;
+ }
}
}
error = 0;
cleanup:
- tokudb_my_free(column_extra);
+ tokudb::memory::free(column_extra);
return error;
}
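
The `max_column_extra_size` bound computed above mirrors the message that `fill_row_mutator` assembles later in this diff: a fixed static header, a per-column dynamic section plus room for the altered row's default values, and static and dynamic blob sections. The same arithmetic as a standalone sketch (`STATIC_ROW_MUTATOR_SIZE` is a placeholder value here):

    #include <cstdint>

    // Placeholder; the real constant is defined by the handler sources.
    static const uint32_t STATIC_ROW_MUTATOR_SIZE = 64;

    // Worst-case size of a column add/drop broadcast message, mirroring the
    // expression in alter_table_add_or_drop_column above.
    static uint32_t max_row_mutator_size(uint32_t num_columns,
                                         uint32_t altered_reclength,
                                         uint32_t num_blobs) {
        return STATIC_ROW_MUTATOR_SIZE                      // static header
            + 4 + num_columns * (1 + 1 + 4 + 1 + 1 + 4)     // dynamic per-column part
            + altered_reclength                             // room for default values
            + (4 + num_blobs)                               // static blob lengths
            + num_columns * (1 + 4 + 1 + 4);                // dynamic blob part
    }

    int main() {
        // e.g. one dropped column, 100-byte altered row, two blob columns
        return max_row_mutator_size(1, 100, 2) >= STATIC_ROW_MUTATOR_SIZE ? 0 : 1;
    }
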
// Commit or abort the alter operations.
-// If commit then write the new frm data to the status using the alter transaction.
-// If abort then abort the alter transaction and try to rollback the non-transactional changes.
-bool ha_tokudb::commit_inplace_alter_table(TABLE *altered_table, Alter_inplace_info *ha_alter_info, bool commit) {
+// If commit then write the new frm data to the status using the alter
+// transaction.
+// If abort then abort the alter transaction and try to rollback the
+// non-transactional changes.
+bool ha_tokudb::commit_inplace_alter_table(
+ TABLE* altered_table,
+ Alter_inplace_info* ha_alter_info,
+ bool commit) {
+
TOKUDB_HANDLER_DBUG_ENTER("");
- tokudb_alter_ctx *ctx = static_cast<tokudb_alter_ctx *>(ha_alter_info->handler_ctx);
+ tokudb_alter_ctx* ctx =
+ static_cast<tokudb_alter_ctx*>(ha_alter_info->handler_ctx);
bool result = false; // success
THD *thd = ha_thd();
@@ -671,7 +918,10 @@ bool ha_tokudb::commit_inplace_alter_table(TABLE *altered_table, Alter_inplace_i
#else
if (true) {
#endif
- int error = write_frm_data(share->status_block, ctx->alter_txn, altered_table->s->path.str);
+ int error = write_frm_data(
+ share->status_block,
+ ctx->alter_txn,
+ altered_table->s->path.str);
if (error) {
commit = false;
result = true;
@@ -683,42 +933,55 @@ bool ha_tokudb::commit_inplace_alter_table(TABLE *altered_table, Alter_inplace_i
if (!commit) {
if (table->mdl_ticket->get_type() != MDL_EXCLUSIVE &&
- (ctx->add_index_changed || ctx->drop_index_changed || ctx->compression_changed)) {
+ (ctx->add_index_changed || ctx->drop_index_changed ||
+ ctx->compression_changed)) {
// get exclusive lock no matter what
#if defined(MARIADB_BASE_VERSION)
killed_state saved_killed_state = thd->killed;
thd->killed = NOT_KILLED;
- for (volatile uint i = 0; wait_while_table_is_used(thd, table, HA_EXTRA_NOT_USED); i++) {
+ for (volatile uint i = 0;
+ wait_while_table_is_used(thd, table, HA_EXTRA_NOT_USED);
+ i++) {
if (thd->killed != NOT_KILLED)
thd->killed = NOT_KILLED;
sleep(1);
}
- assert(table->mdl_ticket->get_type() == MDL_EXCLUSIVE);
+ assert_always(table->mdl_ticket->get_type() == MDL_EXCLUSIVE);
if (thd->killed == NOT_KILLED)
thd->killed = saved_killed_state;
#else
THD::killed_state saved_killed_state = thd->killed;
thd->killed = THD::NOT_KILLED;
- // MySQL does not handle HA_EXTRA_NOT_USED so we use HA_EXTRA_PREPARE_FOR_RENAME since it is passed through
+ // MySQL does not handle HA_EXTRA_NOT_USED so we use
+ // HA_EXTRA_PREPARE_FOR_RENAME since it is passed through
// the partition storage engine and is treated as a NOP by tokudb
- for (volatile uint i = 0; wait_while_table_is_used(thd, table, HA_EXTRA_PREPARE_FOR_RENAME); i++) {
+ for (volatile uint i = 0;
+ wait_while_table_is_used(
+ thd,
+ table,
+ HA_EXTRA_PREPARE_FOR_RENAME);
+ i++) {
if (thd->killed != THD::NOT_KILLED)
thd->killed = THD::NOT_KILLED;
sleep(1);
}
- assert(table->mdl_ticket->get_type() == MDL_EXCLUSIVE);
+ assert_always(table->mdl_ticket->get_type() == MDL_EXCLUSIVE);
if (thd->killed == THD::NOT_KILLED)
thd->killed = saved_killed_state;
#endif
}
- // abort the alter transaction NOW so that any alters are rolled back. this allows the following restores to work.
- tokudb_trx_data *trx = (tokudb_trx_data *) thd_get_ha_data(thd, tokudb_hton);
- assert(ctx->alter_txn == trx->stmt);
- assert(trx->tokudb_lock_count > 0);
- // for partitioned tables, we use a single transaction to do all of the partition changes. the tokudb_lock_count
- // is a reference count for each of the handlers to the same transaction. obviously, we want to only abort once.
+ // abort the alter transaction NOW so that any alters are rolled back.
+ // this allows the following restores to work.
+ tokudb_trx_data* trx =
+ (tokudb_trx_data*)thd_get_ha_data(thd, tokudb_hton);
+ assert_always(ctx->alter_txn == trx->stmt);
+ assert_always(trx->tokudb_lock_count > 0);
+ // for partitioned tables, we use a single transaction to do all of the
+ // partition changes. the tokudb_lock_count is a reference count for
+ // each of the handlers to the same transaction. obviously, we want
+ // to only abort once.
if (trx->tokudb_lock_count > 0) {
if (--trx->tokudb_lock_count <= trx->create_lock_count) {
trx->create_lock_count = 0;
@@ -731,82 +994,125 @@ bool ha_tokudb::commit_inplace_alter_table(TABLE *altered_table, Alter_inplace_i
}
if (ctx->add_index_changed) {
- restore_add_index(table, ha_alter_info->index_add_count, ctx->incremented_num_DBs, ctx->modified_DBs);
+ restore_add_index(
+ table,
+ ha_alter_info->index_add_count,
+ ctx->incremented_num_DBs,
+ ctx->modified_DBs);
}
if (ctx->drop_index_changed) {
// translate key names to indexes into the key_info array
uint index_drop_offsets[ha_alter_info->index_drop_count];
for (uint i = 0; i < ha_alter_info->index_drop_count; i++) {
- bool found = find_index_of_key(ha_alter_info->index_drop_buffer[i]->name, table, &index_drop_offsets[i]);
- assert(found);
+ bool found = find_index_of_key(
+ ha_alter_info->index_drop_buffer[i]->name,
+ table,
+ &index_drop_offsets[i]);
+ assert_always(found);
}
- restore_drop_indexes(table, index_drop_offsets, ha_alter_info->index_drop_count);
+ restore_drop_indexes(
+ table,
+ index_drop_offsets,
+ ha_alter_info->index_drop_count);
}
if (ctx->compression_changed) {
- uint32_t curr_num_DBs = table->s->keys + tokudb_test(hidden_primary_key);
+ uint32_t curr_num_DBs =
+ table->s->keys + tokudb_test(hidden_primary_key);
for (uint32_t i = 0; i < curr_num_DBs; i++) {
DB *db = share->key_file[i];
- int error = db->change_compression_method(db, ctx->orig_compression_method);
- assert(error == 0);
+ int error = db->change_compression_method(
+ db,
+ ctx->orig_compression_method);
+ assert_always(error == 0);
}
}
}
-
DBUG_RETURN(result);
}
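
The abort path above leans on `trx->tokudb_lock_count` as a reference count: every handler touching the shared statement transaction holds one reference, and only the release that brings the count down to `create_lock_count` may abort, so a partitioned ALTER rolls back exactly once. A minimal model of that decide-once logic:

    #include <cassert>
    #include <cstdint>

    // Minimal stand-in for the per-connection TokuDB transaction bookkeeping.
    struct Trx {
        uint32_t tokudb_lock_count;  // one reference per open handler
        uint32_t create_lock_count;
        bool stmt_aborted;
    };

    // Returns true when this handler's release is the one that must abort
    // the shared statement transaction.
    static bool release_and_maybe_abort(Trx* trx) {
        if (trx->tokudb_lock_count > 0 &&
            --trx->tokudb_lock_count <= trx->create_lock_count) {
            trx->create_lock_count = 0;
            trx->stmt_aborted = true;  // abort exactly once
            return true;
        }
        return false;
    }

    int main() {
        Trx trx = {3, 0, false};  // e.g. three partitions sharing one txn
        assert(!release_and_maybe_abort(&trx));
        assert(!release_and_maybe_abort(&trx));
        assert(release_and_maybe_abort(&trx));  // last handler aborts
        return 0;
    }
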
// Setup the altered table's key and col info.
-int ha_tokudb::setup_kc_info(TABLE *altered_table, KEY_AND_COL_INFO *altered_kc_info) {
+int ha_tokudb::setup_kc_info(
+ TABLE* altered_table,
+ KEY_AND_COL_INFO* altered_kc_info) {
+
int error = allocate_key_and_col_info(altered_table->s, altered_kc_info);
if (error == 0)
- error = initialize_key_and_col_info(altered_table->s, altered_table, altered_kc_info, hidden_primary_key, primary_key);
+ error = initialize_key_and_col_info(
+ altered_table->s,
+ altered_table,
+ altered_kc_info,
+ hidden_primary_key,
+ primary_key);
return error;
}
// Expand the variable length fields offsets from 1 to 2 bytes.
-int ha_tokudb::alter_table_expand_varchar_offsets(TABLE *altered_table, Alter_inplace_info *ha_alter_info) {
+int ha_tokudb::alter_table_expand_varchar_offsets(
+ TABLE* altered_table,
+ Alter_inplace_info* ha_alter_info) {
+
int error = 0;
- tokudb_alter_ctx *ctx = static_cast<tokudb_alter_ctx *>(ha_alter_info->handler_ctx);
+ tokudb_alter_ctx* ctx =
+ static_cast<tokudb_alter_ctx*>(ha_alter_info->handler_ctx);
uint32_t curr_num_DBs = table->s->keys + tokudb_test(hidden_primary_key);
for (uint32_t i = 0; i < curr_num_DBs; i++) {
// change to a new descriptor
DBT row_descriptor; memset(&row_descriptor, 0, sizeof row_descriptor);
- error = new_row_descriptor(table, altered_table, ha_alter_info, i, &row_descriptor);
+ error = new_row_descriptor(
+ table,
+ altered_table,
+ ha_alter_info,
+ i,
+ &row_descriptor);
if (error)
break;
- error = share->key_file[i]->change_descriptor(share->key_file[i], ctx->alter_txn, &row_descriptor, 0);
- tokudb_my_free(row_descriptor.data);
+ error = share->key_file[i]->change_descriptor(
+ share->key_file[i],
+ ctx->alter_txn,
+ &row_descriptor,
+ 0);
+ tokudb::memory::free(row_descriptor.data);
if (error)
break;
- // for all trees that have values, make an update variable offsets message and broadcast it into the tree
+ // for all trees that have values, make an update variable offsets
+ // message and broadcast it into the tree
if (i == primary_key || key_is_clustering(&table_share->key_info[i])) {
- uint32_t offset_start = table_share->null_bytes + share->kc_info.mcp_info[i].fixed_field_size;
- uint32_t offset_end = offset_start + share->kc_info.mcp_info[i].len_of_offsets;
+ uint32_t offset_start =
+ table_share->null_bytes +
+ share->kc_info.mcp_info[i].fixed_field_size;
+ uint32_t offset_end =
+ offset_start +
+ share->kc_info.mcp_info[i].len_of_offsets;
uint32_t number_of_offsets = offset_end - offset_start;
// make the expand variable offsets message
DBT expand; memset(&expand, 0, sizeof expand);
- expand.size = sizeof (uchar) + sizeof offset_start + sizeof offset_end;
- expand.data = tokudb_my_malloc(expand.size, MYF(MY_WME));
+ expand.size =
+ sizeof(uchar) + sizeof(offset_start) + sizeof(offset_end);
+ expand.data = tokudb::memory::malloc(expand.size, MYF(MY_WME));
if (!expand.data) {
error = ENOMEM;
break;
}
- uchar *expand_ptr = (uchar *)expand.data;
+ uchar* expand_ptr = (uchar*)expand.data;
expand_ptr[0] = UPDATE_OP_EXPAND_VARIABLE_OFFSETS;
- expand_ptr += sizeof (uchar);
+ expand_ptr += sizeof(uchar);
- memcpy(expand_ptr, &number_of_offsets, sizeof number_of_offsets);
- expand_ptr += sizeof number_of_offsets;
+ memcpy(expand_ptr, &number_of_offsets, sizeof(number_of_offsets));
+ expand_ptr += sizeof(number_of_offsets);
- memcpy(expand_ptr, &offset_start, sizeof offset_start);
- expand_ptr += sizeof offset_start;
+ memcpy(expand_ptr, &offset_start, sizeof(offset_start));
+ expand_ptr += sizeof(offset_start);
// and broadcast it into the tree
- error = share->key_file[i]->update_broadcast(share->key_file[i], ctx->alter_txn, &expand, DB_IS_RESETTING_OP);
- tokudb_my_free(expand.data);
+ error = share->key_file[i]->update_broadcast(
+ share->key_file[i],
+ ctx->alter_txn,
+ &expand,
+ DB_IS_RESETTING_OP);
+ tokudb::memory::free(expand.data);
if (error)
break;
}
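
The broadcast payload built in this function is a flat byte string: a one-byte opcode, the count of variable-length offsets, then the byte position where those offsets start. Note the size expression names `offset_start`/`offset_end` while the payload carries `number_of_offsets`/`offset_start`; all are `uint32_t`, so the widths agree. A sketch of the same packing (the opcode value is a placeholder):

    #include <cstdint>
    #include <cstring>
    #include <vector>

    static const uint8_t UPDATE_OP_EXPAND_VARIABLE_OFFSETS = 1;  // placeholder

    // opcode (1 byte) + number_of_offsets (4) + offset_start (4) = 9 bytes.
    static std::vector<uint8_t> pack_expand_varchar_offsets(
            uint32_t number_of_offsets,
            uint32_t offset_start) {
        std::vector<uint8_t> msg(sizeof(uint8_t) + sizeof(number_of_offsets) +
                                 sizeof(offset_start));
        uint8_t* p = msg.data();
        *p = UPDATE_OP_EXPAND_VARIABLE_OFFSETS;
        p += sizeof(uint8_t);
        std::memcpy(p, &number_of_offsets, sizeof(number_of_offsets));
        p += sizeof(number_of_offsets);
        std::memcpy(p, &offset_start, sizeof(offset_start));
        return msg;
    }

    int main() {
        return pack_expand_varchar_offsets(12, 6).size() == 9 ? 0 : 1;
    }
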
@@ -817,7 +1123,7 @@ int ha_tokudb::alter_table_expand_varchar_offsets(TABLE *altered_table, Alter_in
// Return true if a field is part of a key
static bool field_in_key(KEY *key, Field *field) {
- for (uint i = 0; i < get_key_parts(key); i++) {
+ for (uint i = 0; i < key->user_defined_key_parts; i++) {
KEY_PART_INFO *key_part = &key->key_part[i];
if (strcmp(key_part->field->field_name, field->field_name) == 0)
return true;
@@ -834,30 +1140,49 @@ static bool field_in_key_of_table(TABLE *table, Field *field) {
return false;
}
-// Return true if all changed varchar/varbinary field lengths can be changed inplace, otherwise return false
-static bool change_varchar_length_is_supported(Field *old_field, Field *new_field, TABLE *table, TABLE *altered_table, Alter_inplace_info *ha_alter_info, tokudb_alter_ctx *ctx) {
+// Return true if all changed varchar/varbinary field lengths can be changed
+// inplace, otherwise return false
+static bool change_varchar_length_is_supported(
+ Field* old_field,
+ Field* new_field,
+ TABLE* table,
+ TABLE* altered_table,
+ Alter_inplace_info* ha_alter_info,
+ tokudb_alter_ctx* ctx) {
+
if (old_field->real_type() != MYSQL_TYPE_VARCHAR ||
new_field->real_type() != MYSQL_TYPE_VARCHAR ||
old_field->binary() != new_field->binary() ||
old_field->charset()->number != new_field->charset()->number ||
old_field->field_length > new_field->field_length)
return false;
- if (ctx->table_kc_info->num_offset_bytes > ctx->altered_table_kc_info->num_offset_bytes)
+ if (ctx->table_kc_info->num_offset_bytes >
+ ctx->altered_table_kc_info->num_offset_bytes)
return false; // shrink is not supported
- if (ctx->table_kc_info->num_offset_bytes < ctx->altered_table_kc_info->num_offset_bytes)
- ctx->expand_varchar_update_needed = true; // sum of varchar lengths changed from 1 to 2
+ if (ctx->table_kc_info->num_offset_bytes <
+ ctx->altered_table_kc_info->num_offset_bytes)
+        // offset bytes for the sum of varchar lengths grew from 1 to 2
+ ctx->expand_varchar_update_needed = true;
return true;
}
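
The offset-byte rule behind `num_offset_bytes` is not spelled out here; consistent with the varchar(X) -> varchar(Y), X < 256 <= Y case handled later in this file, the assumption below is that per-row offsets take 1 byte while the variable region is addressable in 8 bits, and 2 bytes beyond that. A sketch of the grow-only check under that assumption:

    #include <cstdint>

    // Assumed rule: offsets into the variable-length region take 1 byte
    // while the region fits in 8 bits, 2 bytes otherwise.
    static uint32_t num_offset_bytes(uint32_t max_var_region_length) {
        return max_var_region_length < 256 ? 1 : 2;
    }

    // Mirrors change_varchar_length_is_supported: shrinking the offset
    // width is rejected; growing it flags the expand-varchar broadcast.
    static bool offset_width_change_ok(uint32_t old_len,
                                       uint32_t new_len,
                                       bool* expand_needed) {
        uint32_t old_w = num_offset_bytes(old_len);
        uint32_t new_w = num_offset_bytes(new_len);
        if (old_w > new_w)
            return false;  // shrink is not supported
        *expand_needed = (old_w < new_w);
        return true;
    }

    int main() {
        bool expand = false;
        // varchar(100) -> varchar(300): offsets grow from 1 to 2 bytes
        return offset_width_change_ok(100, 300, &expand) && expand ? 0 : 1;
    }
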
-// Return true if all changed field lengths can be changed inplace, otherwise return false
-static bool change_length_is_supported(TABLE *table, TABLE *altered_table, Alter_inplace_info *ha_alter_info, tokudb_alter_ctx *ctx) {
+// Return true if all changed field lengths can be changed inplace, otherwise
+// return false
+static bool change_length_is_supported(
+ TABLE* table,
+ TABLE* altered_table,
+ Alter_inplace_info* ha_alter_info,
+ tokudb_alter_ctx* ctx) {
+
if (table->s->fields != altered_table->s->fields)
return false;
if (table->s->null_bytes != altered_table->s->null_bytes)
return false;
if (ctx->changed_fields.elements() > 1)
return false; // only support one field change
- for (DYNAMIC_ARRAY_ELEMENTS_TYPE ai = 0; ai < ctx->changed_fields.elements(); ai++) {
+ for (DYNAMIC_ARRAY_ELEMENTS_TYPE ai = 0;
+ ai < ctx->changed_fields.elements();
+ ai++) {
uint i = ctx->changed_fields.at(ai);
Field *old_field = table->field[i];
Field *new_field = altered_table->field[i];
@@ -865,9 +1190,16 @@ static bool change_length_is_supported(TABLE *table, TABLE *altered_table, Alter
return false; // no type conversions
if (old_field->real_type() != MYSQL_TYPE_VARCHAR)
return false; // only varchar
- if (field_in_key_of_table(table, old_field) || field_in_key_of_table(altered_table, new_field))
+ if (field_in_key_of_table(table, old_field) ||
+ field_in_key_of_table(altered_table, new_field))
return false; // not in any key
- if (!change_varchar_length_is_supported(old_field, new_field, table, altered_table, ha_alter_info, ctx))
+ if (!change_varchar_length_is_supported(
+ old_field,
+ new_field,
+ table,
+ altered_table,
+ ha_alter_info,
+ ctx))
return false;
}
@@ -886,13 +1218,23 @@ static bool is_sorted(Dynamic_array<uint> &a) {
return r;
}
-int ha_tokudb::alter_table_expand_columns(TABLE *altered_table, Alter_inplace_info *ha_alter_info) {
+int ha_tokudb::alter_table_expand_columns(
+ TABLE* altered_table,
+ Alter_inplace_info* ha_alter_info) {
+
int error = 0;
- tokudb_alter_ctx *ctx = static_cast<tokudb_alter_ctx *>(ha_alter_info->handler_ctx);
- assert(is_sorted(ctx->changed_fields)); // since we build the changed_fields array in field order, it must be sorted
- for (DYNAMIC_ARRAY_ELEMENTS_TYPE ai = 0; error == 0 && ai < ctx->changed_fields.elements(); ai++) {
+ tokudb_alter_ctx* ctx =
+ static_cast<tokudb_alter_ctx*>(ha_alter_info->handler_ctx);
+ // since we build the changed_fields array in field order, it must be sorted
+ assert_always(is_sorted(ctx->changed_fields));
+ for (DYNAMIC_ARRAY_ELEMENTS_TYPE ai = 0;
+ error == 0 && ai < ctx->changed_fields.elements();
+ ai++) {
uint expand_field_num = ctx->changed_fields.at(ai);
- error = alter_table_expand_one_column(altered_table, ha_alter_info, expand_field_num);
+ error = alter_table_expand_one_column(
+ altered_table,
+ ha_alter_info,
+ expand_field_num);
}
return error;
@@ -903,10 +1245,15 @@ static bool is_unsigned(Field *f) {
return (f->flags & UNSIGNED_FLAG) != 0;
}
-// Return the starting offset in the value for a particular index (selected by idx) of a
-// particular field (selected by expand_field_num)
+// Return the starting offset in the value for a particular index (selected by
+// idx) of a particular field (selected by expand_field_num)
// TODO: replace this?
-static uint32_t alter_table_field_offset(uint32_t null_bytes, KEY_AND_COL_INFO *kc_info, int idx, int expand_field_num) {
+static uint32_t alter_table_field_offset(
+ uint32_t null_bytes,
+ KEY_AND_COL_INFO* kc_info,
+ int idx,
+ int expand_field_num) {
+
uint32_t offset = null_bytes;
for (int i = 0; i < expand_field_num; i++) {
if (bitmap_is_set(&kc_info->key_filters[idx], i)) // skip key fields
@@ -917,21 +1264,26 @@ static uint32_t alter_table_field_offset(uint32_t null_bytes, KEY_AND_COL_INFO *
}
// Send an expand message into all clustered indexes including the primary
-int ha_tokudb::alter_table_expand_one_column(TABLE *altered_table, Alter_inplace_info *ha_alter_info, int expand_field_num) {
+int ha_tokudb::alter_table_expand_one_column(
+ TABLE* altered_table,
+ Alter_inplace_info* ha_alter_info,
+ int expand_field_num) {
+
int error = 0;
- tokudb_alter_ctx *ctx = static_cast<tokudb_alter_ctx *>(ha_alter_info->handler_ctx);
+ tokudb_alter_ctx* ctx =
+ static_cast<tokudb_alter_ctx*>(ha_alter_info->handler_ctx);
Field *old_field = table->field[expand_field_num];
TOKU_TYPE old_field_type = mysql_to_toku_type(old_field);
Field *new_field = altered_table->field[expand_field_num];
TOKU_TYPE new_field_type = mysql_to_toku_type(new_field);
- assert(old_field_type == new_field_type);
+ assert_always(old_field_type == new_field_type);
uchar operation;
uchar pad_char;
switch (old_field_type) {
case toku_type_int:
- assert(is_unsigned(old_field) == is_unsigned(new_field));
+ assert_always(is_unsigned(old_field) == is_unsigned(new_field));
if (is_unsigned(old_field))
operation = UPDATE_OP_EXPAND_UINT;
else
@@ -947,38 +1299,61 @@ int ha_tokudb::alter_table_expand_one_column(TABLE *altered_table, Alter_inplace
pad_char = 0;
break;
default:
- assert(0);
+ assert_unreachable();
}
uint32_t curr_num_DBs = table->s->keys + tokudb_test(hidden_primary_key);
for (uint32_t i = 0; i < curr_num_DBs; i++) {
// change to a new descriptor
DBT row_descriptor; memset(&row_descriptor, 0, sizeof row_descriptor);
- error = new_row_descriptor(table, altered_table, ha_alter_info, i, &row_descriptor);
+ error = new_row_descriptor(
+ table,
+ altered_table,
+ ha_alter_info,
+ i,
+ &row_descriptor);
if (error)
break;
- error = share->key_file[i]->change_descriptor(share->key_file[i], ctx->alter_txn, &row_descriptor, 0);
- tokudb_my_free(row_descriptor.data);
+ error = share->key_file[i]->change_descriptor(
+ share->key_file[i],
+ ctx->alter_txn,
+ &row_descriptor,
+ 0);
+ tokudb::memory::free(row_descriptor.data);
if (error)
break;
- // for all trees that have values, make an expand update message and broadcast it into the tree
+ // for all trees that have values, make an expand update message and
+ // broadcast it into the tree
if (i == primary_key || key_is_clustering(&table_share->key_info[i])) {
- uint32_t old_offset = alter_table_field_offset(table_share->null_bytes, ctx->table_kc_info, i, expand_field_num);
- uint32_t new_offset = alter_table_field_offset(table_share->null_bytes, ctx->altered_table_kc_info, i, expand_field_num);
- assert(old_offset <= new_offset);
-
- uint32_t old_length = ctx->table_kc_info->field_lengths[expand_field_num];
- assert(old_length == old_field->pack_length());
-
- uint32_t new_length = ctx->altered_table_kc_info->field_lengths[expand_field_num];
- assert(new_length == new_field->pack_length());
-
- DBT expand; memset(&expand, 0, sizeof expand);
- expand.size = sizeof operation + sizeof new_offset + sizeof old_length + sizeof new_length;
- if (operation == UPDATE_OP_EXPAND_CHAR || operation == UPDATE_OP_EXPAND_BINARY)
- expand.size += sizeof pad_char;
- expand.data = tokudb_my_malloc(expand.size, MYF(MY_WME));
+ uint32_t old_offset = alter_table_field_offset(
+ table_share->null_bytes,
+ ctx->table_kc_info,
+ i,
+ expand_field_num);
+ uint32_t new_offset = alter_table_field_offset(
+ table_share->null_bytes,
+ ctx->altered_table_kc_info,
+ i,
+ expand_field_num);
+ assert_always(old_offset <= new_offset);
+
+ uint32_t old_length =
+ ctx->table_kc_info->field_lengths[expand_field_num];
+ assert_always(old_length == old_field->pack_length());
+
+ uint32_t new_length =
+ ctx->altered_table_kc_info->field_lengths[expand_field_num];
+ assert_always(new_length == new_field->pack_length());
+
+ DBT expand; memset(&expand, 0, sizeof(expand));
+ expand.size =
+ sizeof(operation) + sizeof(new_offset) +
+ sizeof(old_length) + sizeof(new_length);
+ if (operation == UPDATE_OP_EXPAND_CHAR ||
+ operation == UPDATE_OP_EXPAND_BINARY)
+ expand.size += sizeof(pad_char);
+ expand.data = tokudb::memory::malloc(expand.size, MYF(MY_WME));
if (!expand.data) {
error = ENOMEM;
break;
@@ -987,27 +1362,34 @@ int ha_tokudb::alter_table_expand_one_column(TABLE *altered_table, Alter_inplace
expand_ptr[0] = operation;
expand_ptr += sizeof operation;
- // for the first altered field, old_offset == new_offset. for the subsequent altered fields, the new_offset
- // should be used as it includes the length changes from the previous altered fields.
- memcpy(expand_ptr, &new_offset, sizeof new_offset);
- expand_ptr += sizeof new_offset;
+ // for the first altered field, old_offset == new_offset.
+ // for the subsequent altered fields, the new_offset
+ // should be used as it includes the length changes from the
+ // previous altered fields.
+ memcpy(expand_ptr, &new_offset, sizeof(new_offset));
+ expand_ptr += sizeof(new_offset);
- memcpy(expand_ptr, &old_length, sizeof old_length);
- expand_ptr += sizeof old_length;
+ memcpy(expand_ptr, &old_length, sizeof(old_length));
+ expand_ptr += sizeof(old_length);
- memcpy(expand_ptr, &new_length, sizeof new_length);
- expand_ptr += sizeof new_length;
+ memcpy(expand_ptr, &new_length, sizeof(new_length));
+ expand_ptr += sizeof(new_length);
- if (operation == UPDATE_OP_EXPAND_CHAR || operation == UPDATE_OP_EXPAND_BINARY) {
- memcpy(expand_ptr, &pad_char, sizeof pad_char);
- expand_ptr += sizeof pad_char;
+ if (operation == UPDATE_OP_EXPAND_CHAR ||
+ operation == UPDATE_OP_EXPAND_BINARY) {
+ memcpy(expand_ptr, &pad_char, sizeof(pad_char));
+ expand_ptr += sizeof(pad_char);
}
- assert(expand_ptr == (uchar *)expand.data + expand.size);
+ assert_always(expand_ptr == (uchar*)expand.data + expand.size);
// and broadcast it into the tree
- error = share->key_file[i]->update_broadcast(share->key_file[i], ctx->alter_txn, &expand, DB_IS_RESETTING_OP);
- tokudb_my_free(expand.data);
+ error = share->key_file[i]->update_broadcast(
+ share->key_file[i],
+ ctx->alter_txn,
+ &expand,
+ DB_IS_RESETTING_OP);
+ tokudb::memory::free(expand.data);
if (error)
break;
}
@@ -1016,52 +1398,85 @@ int ha_tokudb::alter_table_expand_one_column(TABLE *altered_table, Alter_inplace
return error;
}
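
The message assembled by this function is opcode, field offset in the new row, old packed length, new packed length, and a pad byte only for the CHAR/BINARY expansions. A sketch of the same packing with placeholder opcode values:

    #include <cstdint>
    #include <cstring>
    #include <vector>

    // Placeholder opcodes; the real values belong to the update callback.
    static const uint8_t UPDATE_OP_EXPAND_CHAR = 3;
    static const uint8_t UPDATE_OP_EXPAND_BINARY = 4;

    static std::vector<uint8_t> pack_expand_column(uint8_t operation,
                                                   uint32_t new_offset,
                                                   uint32_t old_length,
                                                   uint32_t new_length,
                                                   uint8_t pad_char) {
        bool padded = (operation == UPDATE_OP_EXPAND_CHAR ||
                       operation == UPDATE_OP_EXPAND_BINARY);
        std::vector<uint8_t> msg;
        msg.reserve(1 + 3 * sizeof(uint32_t) + (padded ? 1 : 0));
        msg.push_back(operation);
        auto put_u32 = [&msg](uint32_t v) {
            uint8_t b[sizeof(v)];
            std::memcpy(b, &v, sizeof(v));
            msg.insert(msg.end(), b, b + sizeof(v));
        };
        put_u32(new_offset);  // already reflects earlier fields' expansions
        put_u32(old_length);
        put_u32(new_length);
        if (padded)
            msg.push_back(pad_char);  // CHAR/BINARY expansions carry a pad byte
        return msg;
    }

    int main() {
        return pack_expand_column(UPDATE_OP_EXPAND_CHAR, 24, 10, 20, ' ')
            .size() == 14 ? 0 : 1;  // 1 + 3*4 + pad byte
    }
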
-static void marshall_blob_lengths(tokudb::buffer &b, uint32_t n, TABLE *table, KEY_AND_COL_INFO *kc_info) {
+static void marshall_blob_lengths(
+ tokudb::buffer& b,
+ uint32_t n,
+ TABLE* table,
+ KEY_AND_COL_INFO* kc_info) {
+
for (uint i = 0; i < n; i++) {
uint blob_field_index = kc_info->blob_fields[i];
- assert(blob_field_index < table->s->fields);
- uint8_t blob_field_length = table->s->field[blob_field_index]->row_pack_length();
+ assert_always(blob_field_index < table->s->fields);
+ uint8_t blob_field_length =
+ table->s->field[blob_field_index]->row_pack_length();
b.append(&blob_field_length, sizeof blob_field_length);
}
}
-int ha_tokudb::alter_table_expand_blobs(TABLE *altered_table, Alter_inplace_info *ha_alter_info) {
+int ha_tokudb::alter_table_expand_blobs(
+ TABLE* altered_table,
+ Alter_inplace_info* ha_alter_info) {
+
int error = 0;
- tokudb_alter_ctx *ctx = static_cast<tokudb_alter_ctx *>(ha_alter_info->handler_ctx);
+ tokudb_alter_ctx* ctx =
+ static_cast<tokudb_alter_ctx*>(ha_alter_info->handler_ctx);
uint32_t curr_num_DBs = table->s->keys + tokudb_test(hidden_primary_key);
for (uint32_t i = 0; i < curr_num_DBs; i++) {
// change to a new descriptor
DBT row_descriptor; memset(&row_descriptor, 0, sizeof row_descriptor);
- error = new_row_descriptor(table, altered_table, ha_alter_info, i, &row_descriptor);
+ error = new_row_descriptor(
+ table,
+ altered_table,
+ ha_alter_info,
+ i,
+ &row_descriptor);
if (error)
break;
- error = share->key_file[i]->change_descriptor(share->key_file[i], ctx->alter_txn, &row_descriptor, 0);
- tokudb_my_free(row_descriptor.data);
+ error = share->key_file[i]->change_descriptor(
+ share->key_file[i],
+ ctx->alter_txn,
+ &row_descriptor,
+ 0);
+ tokudb::memory::free(row_descriptor.data);
if (error)
break;
- // for all trees that have values, make an update blobs message and broadcast it into the tree
+ // for all trees that have values, make an update blobs message and
+ // broadcast it into the tree
if (i == primary_key || key_is_clustering(&table_share->key_info[i])) {
tokudb::buffer b;
uint8_t op = UPDATE_OP_EXPAND_BLOB;
b.append(&op, sizeof op);
- b.append_ui<uint32_t>(table->s->null_bytes + ctx->table_kc_info->mcp_info[i].fixed_field_size);
- uint32_t var_offset_bytes = ctx->table_kc_info->mcp_info[i].len_of_offsets;
+ b.append_ui<uint32_t>(
+ table->s->null_bytes +
+ ctx->table_kc_info->mcp_info[i].fixed_field_size);
+ uint32_t var_offset_bytes =
+ ctx->table_kc_info->mcp_info[i].len_of_offsets;
b.append_ui<uint32_t>(var_offset_bytes);
- b.append_ui<uint32_t>(var_offset_bytes == 0 ? 0 : ctx->table_kc_info->num_offset_bytes);
+ b.append_ui<uint32_t>(
+ var_offset_bytes == 0 ? 0 :
+ ctx->table_kc_info->num_offset_bytes);
// add blobs info
uint32_t num_blobs = ctx->table_kc_info->num_blobs;
b.append_ui<uint32_t>(num_blobs);
marshall_blob_lengths(b, num_blobs, table, ctx->table_kc_info);
- marshall_blob_lengths(b, num_blobs, altered_table, ctx->altered_table_kc_info);
+ marshall_blob_lengths(
+ b,
+ num_blobs,
+ altered_table,
+ ctx->altered_table_kc_info);
// and broadcast it into the tree
DBT expand; memset(&expand, 0, sizeof expand);
expand.data = b.data();
expand.size = b.size();
- error = share->key_file[i]->update_broadcast(share->key_file[i], ctx->alter_txn, &expand, DB_IS_RESETTING_OP);
+ error = share->key_file[i]->update_broadcast(
+ share->key_file[i],
+ ctx->alter_txn,
+ &expand,
+ DB_IS_RESETTING_OP);
if (error)
break;
}
@@ -1071,7 +1486,13 @@ int ha_tokudb::alter_table_expand_blobs(TABLE *altered_table, Alter_inplace_info
}
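
The expand-blob payload appended through `tokudb::buffer` above is: opcode, the offset where the variable part begins (null bytes plus fixed fields), the width of the offset area, the per-offset byte count (zero when there is no offset area), the blob count, then one length byte per blob for the old and the new table. A sketch of that layout; the explicit little-endian packing stands in for whatever `append_ui` actually emits:

    #include <cstdint>
    #include <vector>

    static const uint8_t UPDATE_OP_EXPAND_BLOB = 5;  // placeholder opcode

    static std::vector<uint8_t> pack_expand_blob(
            uint32_t null_bytes,
            uint32_t fixed_field_size,
            uint32_t var_offset_bytes,
            uint32_t num_offset_bytes,
            const std::vector<uint8_t>& old_blob_lengths,  // one byte per blob
            const std::vector<uint8_t>& new_blob_lengths) {
        std::vector<uint8_t> msg;
        auto put_u32 = [&msg](uint32_t v) {
            for (int i = 0; i < 4; i++)
                msg.push_back(static_cast<uint8_t>(v >> (8 * i)));
        };
        msg.push_back(UPDATE_OP_EXPAND_BLOB);
        put_u32(null_bytes + fixed_field_size);  // start of the variable part
        put_u32(var_offset_bytes);
        put_u32(var_offset_bytes == 0 ? 0 : num_offset_bytes);
        put_u32(static_cast<uint32_t>(old_blob_lengths.size()));
        msg.insert(msg.end(), old_blob_lengths.begin(), old_blob_lengths.end());
        msg.insert(msg.end(), new_blob_lengths.begin(), new_blob_lengths.end());
        return msg;
    }

    int main() {
        std::vector<uint8_t> old_lens = {3, 4}, new_lens = {4, 4};
        // 1 opcode + 4*4 header + 2 + 2 length bytes = 21
        return pack_expand_blob(1, 16, 8, 2, old_lens, new_lens).size() == 21
            ? 0 : 1;
    }
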
// Return true if two fixed length fields can be changed inplace
-static bool change_fixed_length_is_supported(TABLE *table, TABLE *altered_table, Field *old_field, Field *new_field, tokudb_alter_ctx *ctx) {
+static bool change_fixed_length_is_supported(
+ TABLE* table,
+ TABLE* altered_table,
+ Field* old_field,
+ Field* new_field,
+ tokudb_alter_ctx* ctx) {
+
// no change in size is supported
if (old_field->pack_length() == new_field->pack_length())
return true;
@@ -1082,9 +1503,16 @@ static bool change_fixed_length_is_supported(TABLE *table, TABLE *altered_table,
return true;
}
-static bool change_blob_length_is_supported(TABLE *table, TABLE *altered_table, Field *old_field, Field *new_field, tokudb_alter_ctx *ctx) {
+static bool change_blob_length_is_supported(
+ TABLE* table,
+ TABLE* altered_table,
+ Field* old_field,
+ Field* new_field,
+ tokudb_alter_ctx* ctx) {
+
// blob -> longer or equal length blob
- if (old_field->binary() && new_field->binary() && old_field->pack_length() <= new_field->pack_length()) {
+ if (old_field->binary() && new_field->binary() &&
+ old_field->pack_length() <= new_field->pack_length()) {
ctx->expand_blob_update_needed = true;
return true;
}
@@ -1113,13 +1541,26 @@ static bool is_int_type(enum_field_types t) {
}
// Return true if two field types can be changed inplace
-static bool change_field_type_is_supported(Field *old_field, Field *new_field, TABLE *table, TABLE *altered_table, Alter_inplace_info *ha_alter_info, tokudb_alter_ctx *ctx) {
+static bool change_field_type_is_supported(
+ Field* old_field,
+ Field* new_field,
+ TABLE* table,
+ TABLE* altered_table,
+ Alter_inplace_info* ha_alter_info,
+ tokudb_alter_ctx* ctx) {
+
enum_field_types old_type = old_field->real_type();
enum_field_types new_type = new_field->real_type();
if (is_int_type(old_type)) {
// int and unsigned int expansion
- if (is_int_type(new_type) && is_unsigned(old_field) == is_unsigned(new_field))
- return change_fixed_length_is_supported(table, altered_table, old_field, new_field, ctx);
+ if (is_int_type(new_type) &&
+ is_unsigned(old_field) == is_unsigned(new_field))
+ return change_fixed_length_is_supported(
+ table,
+ altered_table,
+ old_field,
+ new_field,
+ ctx);
else
return false;
} else if (old_type == MYSQL_TYPE_STRING) {
@@ -1127,67 +1568,112 @@ static bool change_field_type_is_supported(Field *old_field, Field *new_field, T
if (new_type == MYSQL_TYPE_STRING &&
old_field->binary() == new_field->binary() &&
old_field->charset()->number == new_field->charset()->number)
- return change_fixed_length_is_supported(table, altered_table, old_field, new_field, ctx);
+ return change_fixed_length_is_supported(
+ table,
+ altered_table,
+ old_field,
+ new_field,
+ ctx);
else
return false;
} else if (old_type == MYSQL_TYPE_VARCHAR) {
- // varchar(X) -> varchar(Y) and varbinary(X) -> varbinary(Y) expansion where X < 256 <= Y
- // the ALTER_COLUMN_TYPE handler flag is set for these cases
- return change_varchar_length_is_supported(old_field, new_field, table, altered_table, ha_alter_info, ctx);
+ // varchar(X) -> varchar(Y) and varbinary(X) -> varbinary(Y) expansion
+    // where X < 256 <= Y; the ALTER_COLUMN_TYPE handler flag is set for
+ // these cases
+ return change_varchar_length_is_supported(
+ old_field,
+ new_field,
+ table,
+ altered_table,
+ ha_alter_info,
+ ctx);
} else if (old_type == MYSQL_TYPE_BLOB && new_type == MYSQL_TYPE_BLOB) {
- return change_blob_length_is_supported(table, altered_table, old_field, new_field, ctx);
+ return change_blob_length_is_supported(
+ table,
+ altered_table,
+ old_field,
+ new_field,
+ ctx);
} else
return false;
}
// Return true if all changed field types can be changed inplace
-static bool change_type_is_supported(TABLE *table, TABLE *altered_table, Alter_inplace_info *ha_alter_info, tokudb_alter_ctx *ctx) {
+static bool change_type_is_supported(
+ TABLE* table,
+ TABLE* altered_table,
+ Alter_inplace_info* ha_alter_info,
+ tokudb_alter_ctx* ctx) {
+
if (table->s->null_bytes != altered_table->s->null_bytes)
return false;
if (table->s->fields != altered_table->s->fields)
return false;
if (ctx->changed_fields.elements() > 1)
return false; // only support one field change
- for (DYNAMIC_ARRAY_ELEMENTS_TYPE ai = 0; ai < ctx->changed_fields.elements(); ai++) {
+ for (DYNAMIC_ARRAY_ELEMENTS_TYPE ai = 0;
+ ai < ctx->changed_fields.elements();
+ ai++) {
uint i = ctx->changed_fields.at(ai);
Field *old_field = table->field[i];
Field *new_field = altered_table->field[i];
- if (field_in_key_of_table(table, old_field) || field_in_key_of_table(altered_table, new_field))
+ if (field_in_key_of_table(table, old_field) ||
+ field_in_key_of_table(altered_table, new_field))
return false;
- if (!change_field_type_is_supported(old_field, new_field, table, altered_table, ha_alter_info, ctx))
+ if (!change_field_type_is_supported(
+ old_field,
+ new_field,
+ table,
+ altered_table,
+ ha_alter_info,
+ ctx))
return false;
}
return true;
}
-// Allocate and initialize a new descriptor for a dictionary in the altered table identified with idx.
+// Allocate and initialize a new descriptor for a dictionary in the altered
+// table identified with idx.
// Return the new descriptor in the row_descriptor DBT.
// Return non-zero on error.
-int ha_tokudb::new_row_descriptor(TABLE *table, TABLE *altered_table, Alter_inplace_info *ha_alter_info, uint32_t idx, DBT *row_descriptor) {
+int ha_tokudb::new_row_descriptor(
+ TABLE* table,
+ TABLE* altered_table,
+ Alter_inplace_info* ha_alter_info,
+ uint32_t idx,
+ DBT* row_descriptor) {
+
int error = 0;
- tokudb_alter_ctx *ctx = static_cast<tokudb_alter_ctx *>(ha_alter_info->handler_ctx);
- row_descriptor->size = get_max_desc_size(ctx->altered_table_kc_info, altered_table);
- row_descriptor->data = (uchar *) tokudb_my_malloc(row_descriptor->size, MYF(MY_WME));
+ tokudb_alter_ctx* ctx =
+ static_cast<tokudb_alter_ctx*>(ha_alter_info->handler_ctx);
+ row_descriptor->size =
+ get_max_desc_size(ctx->altered_table_kc_info, altered_table);
+ row_descriptor->data =
+ (uchar*)tokudb::memory::malloc(row_descriptor->size, MYF(MY_WME));
if (row_descriptor->data == NULL) {
error = ENOMEM;
} else {
- KEY* prim_key = hidden_primary_key ? NULL : &altered_table->s->key_info[primary_key];
+ KEY* prim_key =
+ hidden_primary_key ? NULL :
+ &altered_table->s->key_info[primary_key];
if (idx == primary_key) {
- row_descriptor->size = create_main_key_descriptor((uchar *)row_descriptor->data,
- prim_key,
- hidden_primary_key,
- primary_key,
- altered_table,
- ctx->altered_table_kc_info);
+ row_descriptor->size = create_main_key_descriptor(
+ (uchar*)row_descriptor->data,
+ prim_key,
+ hidden_primary_key,
+ primary_key,
+ altered_table,
+ ctx->altered_table_kc_info);
} else {
- row_descriptor->size = create_secondary_key_descriptor((uchar *)row_descriptor->data,
- &altered_table->key_info[idx],
- prim_key,
- hidden_primary_key,
- altered_table,
- primary_key,
- idx,
- ctx->altered_table_kc_info);
+ row_descriptor->size = create_secondary_key_descriptor(
+ (uchar*)row_descriptor->data,
+ &altered_table->key_info[idx],
+ prim_key,
+ hidden_primary_key,
+ altered_table,
+ primary_key,
+ idx,
+ ctx->altered_table_kc_info);
}
error = 0;
}
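
`new_row_descriptor` follows an allocate-max-then-shrink pattern: it mallocs `get_max_desc_size` bytes, then overwrites `row_descriptor->size` with the length the `create_*_key_descriptor` call actually wrote, so the DBT advertises only the valid bytes. A compressed model of the pattern over a stand-in DBT:

    #include <cerrno>
    #include <cstdint>
    #include <cstdlib>

    // Stand-in for the TokuFT DBT (assumption for illustration only).
    struct DBT { void* data; uint32_t size; };

    // Allocate the worst case, write the real payload, then shrink the
    // advertised size to what was actually written.
    static int build_descriptor(DBT* out,
                                uint32_t max_size,
                                uint32_t (*fill)(uint8_t* buf)) {
        out->size = max_size;
        out->data = malloc(out->size);
        if (out->data == NULL)
            return ENOMEM;
        out->size = fill(static_cast<uint8_t*>(out->data));
        return 0;
    }

    static uint32_t fill_demo(uint8_t* buf) { buf[0] = 0x2a; return 1; }

    int main() {
        DBT d;
        if (build_descriptor(&d, 64, fill_demo) != 0)
            return 1;
        bool ok = (d.size == 1);  // buffer still holds 64 bytes
        free(d.data);
        return ok ? 0 : 1;
    }
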
diff --git a/storage/tokudb/ha_tokudb_alter_common.cc b/storage/tokudb/ha_tokudb_alter_common.cc
index b2c2a2b0252..d41a676de1f 100644
--- a/storage/tokudb/ha_tokudb_alter_common.cc
+++ b/storage/tokudb/ha_tokudb_alter_common.cc
@@ -26,8 +26,18 @@ Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
#if !defined(TOKUDB_ALTER_COMMON)
#define TOKUDB_ALTER_COMMON
-static bool tables_have_same_keys(TABLE* table, TABLE* altered_table, bool print_error, bool check_field_index) __attribute__((unused));
-static bool tables_have_same_keys(TABLE* table, TABLE* altered_table, bool print_error, bool check_field_index) {
+TOKUDB_UNUSED(static bool tables_have_same_keys(
+ TABLE* table,
+ TABLE* altered_table,
+ bool print_error,
+ bool check_field_index));
+
+static bool tables_have_same_keys(
+ TABLE* table,
+ TABLE* altered_table,
+ bool print_error,
+ bool check_field_index) {
+
bool retval;
if (table->s->keys != altered_table->s->keys) {
if (print_error) {
@@ -39,10 +49,9 @@ static bool tables_have_same_keys(TABLE* table, TABLE* altered_table, bool print
if (table->s->primary_key != altered_table->s->primary_key) {
if (print_error) {
sql_print_error(
- "Tables have different primary keys, %d %d",
+ "Tables have different primary keys, %d %d",
table->s->primary_key,
- altered_table->s->primary_key
- );
+ altered_table->s->primary_key);
}
retval = false;
goto cleanup;
@@ -53,44 +62,43 @@ static bool tables_have_same_keys(TABLE* table, TABLE* altered_table, bool print
if (strcmp(curr_orig_key->name, curr_altered_key->name)) {
if (print_error) {
sql_print_error(
- "key %d has different name, %s %s",
- i,
+ "key %d has different name, %s %s",
+ i,
curr_orig_key->name,
- curr_altered_key->name
- );
+ curr_altered_key->name);
}
retval = false;
goto cleanup;
}
- if (key_is_clustering(curr_orig_key) != key_is_clustering(curr_altered_key)) {
+ if (key_is_clustering(curr_orig_key) !=
+ key_is_clustering(curr_altered_key)) {
if (print_error) {
sql_print_error(
"keys disagree on if they are clustering, %d, %d",
- get_key_parts(curr_orig_key),
- get_key_parts(curr_altered_key)
- );
+ curr_orig_key->user_defined_key_parts,
+ curr_altered_key->user_defined_key_parts);
}
retval = false;
goto cleanup;
}
- if (((curr_orig_key->flags & HA_NOSAME) == 0) != ((curr_altered_key->flags & HA_NOSAME) == 0)) {
+ if (((curr_orig_key->flags & HA_NOSAME) == 0) !=
+ ((curr_altered_key->flags & HA_NOSAME) == 0)) {
if (print_error) {
sql_print_error(
"keys disagree on if they are unique, %d, %d",
- get_key_parts(curr_orig_key),
- get_key_parts(curr_altered_key)
- );
+ curr_orig_key->user_defined_key_parts,
+ curr_altered_key->user_defined_key_parts);
}
retval = false;
goto cleanup;
}
- if (get_key_parts(curr_orig_key) != get_key_parts(curr_altered_key)) {
+ if (curr_orig_key->user_defined_key_parts !=
+ curr_altered_key->user_defined_key_parts) {
if (print_error) {
sql_print_error(
"keys have different number of parts, %d, %d",
- get_key_parts(curr_orig_key),
- get_key_parts(curr_altered_key)
- );
+ curr_orig_key->user_defined_key_parts,
+ curr_altered_key->user_defined_key_parts);
}
retval = false;
goto cleanup;
@@ -98,7 +106,7 @@ static bool tables_have_same_keys(TABLE* table, TABLE* altered_table, bool print
//
// now verify that each field in the key is the same
//
- for (uint32_t j = 0; j < get_key_parts(curr_orig_key); j++) {
+ for (uint32_t j = 0; j < curr_orig_key->user_defined_key_parts; j++) {
KEY_PART_INFO* curr_orig_part = &curr_orig_key->key_part[j];
KEY_PART_INFO* curr_altered_part = &curr_altered_key->key_part[j];
Field* curr_orig_field = curr_orig_part->field;
@@ -106,10 +114,9 @@ static bool tables_have_same_keys(TABLE* table, TABLE* altered_table, bool print
if (curr_orig_part->length != curr_altered_part->length) {
if (print_error) {
sql_print_error(
- "Key %s has different length at index %d",
- curr_orig_key->name,
- j
- );
+ "Key %s has different length at index %d",
+ curr_orig_key->name,
+ j);
}
retval = false;
goto cleanup;
@@ -123,10 +130,9 @@ static bool tables_have_same_keys(TABLE* table, TABLE* altered_table, bool print
if (!are_fields_same) {
if (print_error) {
sql_print_error(
- "Key %s has different field at index %d",
- curr_orig_key->name,
- j
- );
+ "Key %s has different field at index %d",
+ curr_orig_key->name,
+ j);
}
retval = false;
goto cleanup;
@@ -143,7 +149,8 @@ cleanup:
// to evaluate whether a field is NULL or not. This value is a power of 2, from
// 2^0 to 2^7. We return the position of the bit within the byte, which is
// lg null_bit
-static inline uint32_t get_null_bit_position(uint32_t null_bit) __attribute__((unused));
+TOKUDB_UNUSED(static inline uint32_t get_null_bit_position(
+ uint32_t null_bit));
static inline uint32_t get_null_bit_position(uint32_t null_bit) {
uint32_t retval = 0;
switch(null_bit) {
@@ -170,23 +177,28 @@ static inline uint32_t get_null_bit_position(uint32_t null_bit) {
break;
case (128):
retval = 7;
- break;
+ break;
default:
- assert(false);
+ assert_unreachable();
}
return retval;
}
// returns the index of the null bit of field.
-static inline uint32_t get_overall_null_bit_position(TABLE* table, Field* field) __attribute__((unused));
-static inline uint32_t get_overall_null_bit_position(TABLE* table, Field* field) {
+TOKUDB_UNUSED(static inline uint32_t get_overall_null_bit_position(
+ TABLE* table,
+ Field* field));
+static inline uint32_t get_overall_null_bit_position(
+ TABLE* table,
+ Field* field) {
+
uint32_t offset = get_null_offset(table, field);
uint32_t null_bit = field->null_bit;
return offset*8 + get_null_bit_position(null_bit);
}
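
These two helpers reduce to bit arithmetic: `null_bit` is a power of two, its base-2 log is the bit's position within the field's null byte, and the overall position is the byte offset times eight plus that. A sketch that replaces the switch with a shift loop and checks a worked example:

    #include <cassert>
    #include <cstdint>

    // lg of a power-of-two null_bit == its position within the null byte.
    static uint32_t null_bit_position(uint32_t null_bit) {
        uint32_t pos = 0;
        while ((null_bit >>= 1) != 0)
            pos++;
        return pos;
    }

    // Overall bit index of a nullable field in the row's null-byte prefix.
    static uint32_t overall_null_bit_position(uint32_t null_byte_offset,
                                              uint32_t null_bit) {
        return null_byte_offset * 8 + null_bit_position(null_bit);
    }

    int main() {
        assert(null_bit_position(1) == 0);
        assert(null_bit_position(128) == 7);
        // field in the third null byte, bit mask 16 -> overall 2*8 + 4 = 20
        assert(overall_null_bit_position(2, 16) == 20);
        return 0;
    }
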
// not static since 51 uses this and 56 does not
-static bool are_null_bits_in_order(TABLE* table) __attribute__((unused));
+TOKUDB_UNUSED(static bool are_null_bits_in_order(TABLE* table));
static bool are_null_bits_in_order(TABLE* table) {
uint32_t curr_null_pos = 0;
bool first = true;
@@ -195,10 +207,8 @@ static bool are_null_bits_in_order(TABLE* table) {
Field* curr_field = table->field[i];
bool nullable = (curr_field->null_bit != 0);
if (nullable) {
- uint32_t pos = get_overall_null_bit_position(
- table,
- curr_field
- );
+ uint32_t pos =
+ get_overall_null_bit_position(table, curr_field);
if (!first && pos != curr_null_pos+1){
retval = false;
break;
@@ -210,34 +220,38 @@ static bool are_null_bits_in_order(TABLE* table) {
return retval;
}
-static uint32_t get_first_null_bit_pos(TABLE* table) __attribute__((unused));
+TOKUDB_UNUSED(static uint32_t get_first_null_bit_pos(TABLE* table));
static uint32_t get_first_null_bit_pos(TABLE* table) {
uint32_t table_pos = 0;
for (uint i = 0; i < table->s->fields; i++) {
Field* curr_field = table->field[i];
bool nullable = (curr_field->null_bit != 0);
if (nullable) {
- table_pos = get_overall_null_bit_position(
- table,
- curr_field
- );
+ table_pos =
+ get_overall_null_bit_position(table, curr_field);
break;
}
}
return table_pos;
}
-static bool is_column_default_null(TABLE* src_table, uint32_t field_index) __attribute__((unused));
-static bool is_column_default_null(TABLE* src_table, uint32_t field_index) {
+TOKUDB_UNUSED(static bool is_column_default_null(
+ TABLE* src_table,
+ uint32_t field_index));
+static bool is_column_default_null(
+ TABLE* src_table,
+ uint32_t field_index) {
+
Field* curr_field = src_table->field[field_index];
bool is_null_default = false;
bool nullable = curr_field->null_bit != 0;
if (nullable) {
- uint32_t null_bit_position = get_overall_null_bit_position(src_table, curr_field);
- is_null_default = is_overall_null_position_set(
- src_table->s->default_values,
- null_bit_position
- );
+ uint32_t null_bit_position =
+ get_overall_null_bit_position(src_table, curr_field);
+ is_null_default =
+ is_overall_null_position_set(
+ src_table->s->default_values,
+ null_bit_position);
}
return is_null_default;
}
@@ -248,9 +262,8 @@ static uint32_t fill_static_row_mutator(
TABLE* altered_table,
KEY_AND_COL_INFO* orig_kc_info,
KEY_AND_COL_INFO* altered_kc_info,
- uint32_t keynr
- )
-{
+ uint32_t keynr) {
+
//
// start packing extra
//
@@ -258,25 +271,28 @@ static uint32_t fill_static_row_mutator(
// says what the operation is
pos[0] = UP_COL_ADD_OR_DROP;
pos++;
-
+
//
// null byte information
//
memcpy(pos, &orig_table->s->null_bytes, sizeof(orig_table->s->null_bytes));
pos += sizeof(orig_table->s->null_bytes);
- memcpy(pos, &altered_table->s->null_bytes, sizeof(orig_table->s->null_bytes));
+ memcpy(
+ pos,
+ &altered_table->s->null_bytes,
+ sizeof(orig_table->s->null_bytes));
pos += sizeof(altered_table->s->null_bytes);
-
+
//
// num_offset_bytes
//
- assert(orig_kc_info->num_offset_bytes <= 2);
+ assert_always(orig_kc_info->num_offset_bytes <= 2);
pos[0] = orig_kc_info->num_offset_bytes;
pos++;
- assert(altered_kc_info->num_offset_bytes <= 2);
+ assert_always(altered_kc_info->num_offset_bytes <= 2);
pos[0] = altered_kc_info->num_offset_bytes;
pos++;
-
+
//
// size of fixed fields
//
@@ -286,7 +302,7 @@ static uint32_t fill_static_row_mutator(
fixed_field_size = altered_kc_info->mcp_info[keynr].fixed_field_size;
memcpy(pos, &fixed_field_size, sizeof(fixed_field_size));
pos += sizeof(fixed_field_size);
-
+
//
// length of offsets
//
@@ -304,7 +320,7 @@ static uint32_t fill_static_row_mutator(
memcpy(pos, &altered_start_null_pos, sizeof(altered_start_null_pos));
pos += sizeof(altered_start_null_pos);
- assert((pos-buf) == STATIC_ROW_MUTATOR_SIZE);
+ assert_always((pos-buf) == STATIC_ROW_MUTATOR_SIZE);
return pos - buf;
}
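
Pieces of `fill_static_row_mutator` fall outside the hunks above, so the following is a reconstruction of the header it writes: the `UP_COL_ADD_OR_DROP` opcode, then old/new pairs for null-byte count, offset-byte count (each asserted <= 2 and stored in one byte), fixed-field size, offset-area length, and first null-bit position. Field widths below are illustrative; the real code memcpy's the native types and asserts the total equals `STATIC_ROW_MUTATOR_SIZE`:

    #include <cstdint>
    #include <vector>

    static const uint8_t UP_COL_ADD_OR_DROP = 1;  // placeholder opcode

    struct StaticMutatorHeader {
        uint32_t old_null_bytes, new_null_bytes;
        uint8_t old_offset_bytes, new_offset_bytes;  // each asserted <= 2
        uint32_t old_fixed_field_size, new_fixed_field_size;
        uint32_t old_len_of_offsets, new_len_of_offsets;
        uint32_t old_start_null_pos, new_start_null_pos;
    };

    static std::vector<uint8_t> pack_static_mutator(
            const StaticMutatorHeader& h) {
        std::vector<uint8_t> buf;
        auto put_u32 = [&buf](uint32_t v) {
            for (int i = 0; i < 4; i++)
                buf.push_back(static_cast<uint8_t>(v >> (8 * i)));
        };
        buf.push_back(UP_COL_ADD_OR_DROP);
        put_u32(h.old_null_bytes);
        put_u32(h.new_null_bytes);
        buf.push_back(h.old_offset_bytes);
        buf.push_back(h.new_offset_bytes);
        put_u32(h.old_fixed_field_size);
        put_u32(h.new_fixed_field_size);
        put_u32(h.old_len_of_offsets);
        put_u32(h.new_len_of_offsets);
        put_u32(h.old_start_null_pos);
        put_u32(h.new_start_null_pos);
        return buf;
    }

    int main() {
        StaticMutatorHeader h = {};  // zeroed example row
        // 1 opcode + 8 + 2 + 8 + 8 + 8 = 35 bytes in this sketch's layout
        return pack_static_mutator(h).size() == 35 ? 0 : 1;
    }
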
@@ -316,9 +332,8 @@ static uint32_t fill_dynamic_row_mutator(
KEY_AND_COL_INFO* src_kc_info,
uint32_t keynr,
bool is_add,
- bool* out_has_blobs
- )
-{
+ bool* out_has_blobs) {
+
uchar* pos = buf;
bool has_blobs = false;
uint32_t cols = num_columns;
@@ -327,7 +342,7 @@ static uint32_t fill_dynamic_row_mutator(
for (uint32_t i = 0; i < num_columns; i++) {
uint32_t curr_index = columns[i];
Field* curr_field = src_table->field[curr_index];
-
+
pos[0] = is_add ? COL_ADD : COL_DROP;
pos++;
//
@@ -338,22 +353,22 @@ static uint32_t fill_dynamic_row_mutator(
if (!nullable) {
pos[0] = 0;
pos++;
- }
- else {
+ } else {
pos[0] = 1;
pos++;
// write position of null byte that is to be removed
- uint32_t null_bit_position = get_overall_null_bit_position(src_table, curr_field);
+ uint32_t null_bit_position =
+ get_overall_null_bit_position(src_table, curr_field);
memcpy(pos, &null_bit_position, sizeof(null_bit_position));
pos += sizeof(null_bit_position);
//
// if adding a column, write the value of the default null_bit
//
if (is_add) {
- is_null_default = is_overall_null_position_set(
- src_table->s->default_values,
- null_bit_position
- );
+ is_null_default =
+ is_overall_null_position_set(
+ src_table->s->default_values,
+ null_bit_position);
pos[0] = is_null_default ? 1 : 0;
pos++;
}
@@ -364,7 +379,8 @@ static uint32_t fill_dynamic_row_mutator(
pos[0] = COL_FIXED;
pos++;
//store the offset
- uint32_t fixed_field_offset = src_kc_info->cp_info[keynr][curr_index].col_pack_val;
+ uint32_t fixed_field_offset =
+ src_kc_info->cp_info[keynr][curr_index].col_pack_val;
memcpy(pos, &fixed_field_offset, sizeof(fixed_field_offset));
pos += sizeof(fixed_field_offset);
//store the number of bytes
@@ -374,38 +390,35 @@ static uint32_t fill_dynamic_row_mutator(
if (is_add && !is_null_default) {
uint curr_field_offset = field_offset(curr_field, src_table);
memcpy(
- pos,
- src_table->s->default_values + curr_field_offset,
- num_bytes
- );
+ pos,
+ src_table->s->default_values + curr_field_offset,
+ num_bytes);
pos += num_bytes;
}
- }
- else if (is_variable_field(src_kc_info, curr_index)) {
+ } else if (is_variable_field(src_kc_info, curr_index)) {
pos[0] = COL_VAR;
pos++;
//store the index of the variable column
- uint32_t var_field_index = src_kc_info->cp_info[keynr][curr_index].col_pack_val;
+ uint32_t var_field_index =
+ src_kc_info->cp_info[keynr][curr_index].col_pack_val;
memcpy(pos, &var_field_index, sizeof(var_field_index));
pos += sizeof(var_field_index);
if (is_add && !is_null_default) {
uint curr_field_offset = field_offset(curr_field, src_table);
uint32_t len_bytes = src_kc_info->length_bytes[curr_index];
- uint32_t data_length = get_var_data_length(
- src_table->s->default_values + curr_field_offset,
- len_bytes
- );
+ uint32_t data_length =
+ get_var_data_length(
+ src_table->s->default_values + curr_field_offset,
+ len_bytes);
memcpy(pos, &data_length, sizeof(data_length));
pos += sizeof(data_length);
memcpy(
pos,
src_table->s->default_values + curr_field_offset + len_bytes,
- data_length
- );
+ data_length);
pos += data_length;
}
- }
- else {
+ } else {
pos[0] = COL_BLOB;
pos++;
has_blobs = true;
@@ -418,9 +431,8 @@ static uint32_t fill_dynamic_row_mutator(
static uint32_t fill_static_blob_row_mutator(
uchar* buf,
TABLE* src_table,
- KEY_AND_COL_INFO* src_kc_info
- )
-{
+ KEY_AND_COL_INFO* src_kc_info) {
+
uchar* pos = buf;
// copy number of blobs
memcpy(pos, &src_kc_info->num_blobs, sizeof(src_kc_info->num_blobs));
@@ -430,11 +442,11 @@ static uint32_t fill_static_blob_row_mutator(
uint32_t curr_field_index = src_kc_info->blob_fields[i];
Field* field = src_table->field[curr_field_index];
uint32_t len_bytes = field->row_pack_length();
- assert(len_bytes <= 4);
+ assert_always(len_bytes <= 4);
pos[0] = len_bytes;
pos++;
}
-
+
return pos-buf;
}
@@ -444,9 +456,8 @@ static uint32_t fill_dynamic_blob_row_mutator(
uint32_t num_columns,
TABLE* src_table,
KEY_AND_COL_INFO* src_kc_info,
- bool is_add
- )
-{
+ bool is_add) {
+
uchar* pos = buf;
for (uint32_t i = 0; i < num_columns; i++) {
uint32_t curr_field_index = columns[i];
@@ -461,19 +472,19 @@ static uint32_t fill_dynamic_blob_row_mutator(
}
}
// assert we found blob in list
- assert(blob_index < src_kc_info->num_blobs);
+ assert_always(blob_index < src_kc_info->num_blobs);
pos[0] = is_add ? COL_ADD : COL_DROP;
pos++;
memcpy(pos, &blob_index, sizeof(blob_index));
pos += sizeof(blob_index);
if (is_add) {
uint32_t len_bytes = curr_field->row_pack_length();
- assert(len_bytes <= 4);
+ assert_always(len_bytes <= 4);
pos[0] = len_bytes;
pos++;
- // create a zero length blob field that can be directly copied in
- // for now, in MySQL, we can only have blob fields
+            // create a zero length blob field that can be directly copied in;
+            // for now, in MySQL, we can only have blob fields
// that have no default value
memset(pos, 0, len_bytes);
pos += len_bytes;
@@ -487,93 +498,86 @@ static uint32_t fill_dynamic_blob_row_mutator(
// TODO: namely, when do we get stuff from share->kc_info and when we get
// TODO: it from altered_kc_info, and when is keynr associated with the right thing
uint32_t ha_tokudb::fill_row_mutator(
- uchar* buf,
- uint32_t* columns,
+ uchar* buf,
+ uint32_t* columns,
uint32_t num_columns,
TABLE* altered_table,
KEY_AND_COL_INFO* altered_kc_info,
uint32_t keynr,
- bool is_add
- )
-{
- if (tokudb_debug & TOKUDB_DEBUG_ALTER_TABLE) {
- printf("*****some info:*************\n");
- printf(
- "old things: num_null_bytes %d, num_offset_bytes %d, fixed_field_size %d, fixed_field_size %d\n",
+ bool is_add) {
+
+ if (TOKUDB_UNLIKELY(TOKUDB_DEBUG_FLAGS(TOKUDB_DEBUG_ALTER_TABLE))) {
+ TOKUDB_HANDLER_TRACE("*****some info:*************");
+ TOKUDB_HANDLER_TRACE(
+ "old things: num_null_bytes %d, num_offset_bytes %d, "
+            "fixed_field_size %d, len_of_offsets %d",
table->s->null_bytes,
share->kc_info.num_offset_bytes,
share->kc_info.mcp_info[keynr].fixed_field_size,
- share->kc_info.mcp_info[keynr].len_of_offsets
- );
- printf(
- "new things: num_null_bytes %d, num_offset_bytes %d, fixed_field_size %d, fixed_field_size %d\n",
+ share->kc_info.mcp_info[keynr].len_of_offsets);
+ TOKUDB_HANDLER_TRACE(
+ "new things: num_null_bytes %d, num_offset_bytes %d, "
+            "fixed_field_size %d, len_of_offsets %d",
altered_table->s->null_bytes,
altered_kc_info->num_offset_bytes,
altered_kc_info->mcp_info[keynr].fixed_field_size,
- altered_kc_info->mcp_info[keynr].len_of_offsets
- );
- printf("****************************\n");
+ altered_kc_info->mcp_info[keynr].len_of_offsets);
+ TOKUDB_HANDLER_TRACE("****************************");
}
uchar* pos = buf;
bool has_blobs = false;
- pos += fill_static_row_mutator(
- pos,
- table,
- altered_table,
- &share->kc_info,
- altered_kc_info,
- keynr
- );
-
- if (is_add) {
- pos += fill_dynamic_row_mutator(
- pos,
- columns,
- num_columns,
- altered_table,
- altered_kc_info,
- keynr,
- is_add,
- &has_blobs
- );
- }
- else {
- pos += fill_dynamic_row_mutator(
+ pos +=
+ fill_static_row_mutator(
pos,
- columns,
- num_columns,
table,
+ altered_table,
&share->kc_info,
- keynr,
- is_add,
- &has_blobs
- );
- }
- if (has_blobs) {
- pos += fill_static_blob_row_mutator(
- pos,
- table,
- &share->kc_info
- );
- if (is_add) {
- pos += fill_dynamic_blob_row_mutator(
+ altered_kc_info,
+ keynr);
+
+ if (is_add) {
+ pos +=
+ fill_dynamic_row_mutator(
pos,
columns,
num_columns,
altered_table,
altered_kc_info,
- is_add
- );
- }
- else {
- pos += fill_dynamic_blob_row_mutator(
+ keynr,
+ is_add,
+ &has_blobs);
+ } else {
+ pos +=
+ fill_dynamic_row_mutator(
pos,
columns,
num_columns,
table,
&share->kc_info,
- is_add
- );
+ keynr,
+ is_add,
+ &has_blobs);
+ }
+ if (has_blobs) {
+ pos += fill_static_blob_row_mutator(pos, table, &share->kc_info);
+ if (is_add) {
+ pos +=
+ fill_dynamic_blob_row_mutator(
+ pos,
+ columns,
+ num_columns,
+ altered_table,
+ altered_kc_info,
+ is_add);
+ } else {
+ pos +=
+ fill_dynamic_blob_row_mutator(
+ pos,
+ columns,
+ num_columns,
+ table,
+ &share->kc_info,
+ is_add);
}
}
return pos-buf;
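
Taken together, fill_row_mutator() concatenates three sections: a static part describing the old and new row shape, one dynamic entry per added or dropped column, and, only when blob columns are involved, static and dynamic blob parts. A hedged summary of that append order as a standalone comment block; the COL_* tag values here are illustrative stand-ins, not the engine's real constants:

#include <cstdint>

// Illustrative stand-ins for the engine's COL_* tags; actual values differ.
enum col_op : uint8_t { COL_ADD_ = 0, COL_DROP_ = 1 };
enum col_kind : uint8_t { COL_FIXED_ = 0, COL_VAR_ = 1, COL_BLOB_ = 2 };

// Row mutator message, in append order:
//   [static part]   old/new null-byte counts, old/new num_offset_bytes
//                   (one byte each, <= 2), old/new fixed_field_size,
//                   old/new len_of_offsets, old/new start-of-null offsets
//   [dynamic part]  per changed column: op byte (add/drop), nullable flag,
//                   null-bit position (plus default null bit when adding),
//                   then a kind byte carrying a fixed offset, a var index,
//                   or a blob marker, plus the default value when adding
//   [blob parts]    only when a blob column is involved: blob count and
//                   per-blob length-byte widths, then per-column blob entries
int main() { return 0; }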
@@ -583,16 +587,23 @@ static bool all_fields_are_same_type(TABLE *table_a, TABLE *table_b) {
if (table_a->s->fields != table_b->s->fields)
return false;
for (uint i = 0; i < table_a->s->fields; i++) {
- Field *field_a = table_a->field[i];
- Field *field_b = table_b->field[i];
+ Field* field_a = table_a->field[i];
+ Field* field_b = table_b->field[i];
if (!fields_are_same_type(field_a, field_b))
return false;
}
return true;
}
-static bool column_rename_supported(TABLE* orig_table, TABLE* new_table, bool alter_column_order) __attribute__((unused));
-static bool column_rename_supported(TABLE* orig_table, TABLE* new_table, bool alter_column_order) {
+TOKUDB_UNUSED(static bool column_rename_supported(
+ TABLE* orig_table,
+ TABLE* new_table,
+ bool alter_column_order));
+static bool column_rename_supported(
+ TABLE* orig_table,
+ TABLE* new_table,
+ bool alter_column_order) {
+
bool retval = false;
bool keys_same_for_cr;
uint num_fields_with_different_names = 0;
@@ -622,20 +633,20 @@ static bool column_rename_supported(TABLE* orig_table, TABLE* new_table, bool al
retval = false;
goto cleanup;
}
- assert(field_with_different_name < orig_table->s->fields);
+ assert_always(field_with_different_name < orig_table->s->fields);
//
// at this point, we have verified that the two tables have
- // the same field types and with ONLY one field with a different name.
+    // the same field types, with ONLY one field having a different name.
// We have also identified the field with the different name
//
// Now we need to check the indexes
//
- keys_same_for_cr = tables_have_same_keys(
- orig_table,
- new_table,
- false,
- true
- );
+ keys_same_for_cr =
+ tables_have_same_keys(
+ orig_table,
+ new_table,
+ false,
+ true);
if (!keys_same_for_cr) {
retval = false;
goto cleanup;
@@ -645,12 +656,21 @@ cleanup:
return retval;
}
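
column_rename_supported() only accepts the ALTER when the two tables have identical field types and exactly one field name differs (and, checked just after, the keys still match). A small sketch of that one-rename rule, with string vectors as hypothetical stand-ins for the field lists:

#include <cstdint>
#include <string>
#include <vector>

// Return true when exactly one position differs by name, and report it;
// mirrors num_fields_with_different_names / field_with_different_name.
static bool exactly_one_renamed(const std::vector<std::string>& old_names,
                                const std::vector<std::string>& new_names,
                                uint32_t* renamed_index) {
    if (old_names.size() != new_names.size())
        return false;
    uint32_t diffs = 0;
    for (uint32_t i = 0; i < old_names.size(); i++) {
        if (old_names[i] != new_names[i]) {
            diffs++;
            *renamed_index = i;
        }
    }
    return diffs == 1;
}

int main() {
    uint32_t idx = 0;
    std::vector<std::string> a = {"id", "name", "city"};
    std::vector<std::string> b = {"id", "full_name", "city"};
    return exactly_one_renamed(a, b, &idx) && idx == 1 ? 0 : 1;
}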
-static int find_changed_columns(uint32_t* changed_columns, uint32_t* num_changed_columns, TABLE* smaller_table, TABLE* bigger_table) __attribute__((unused));
-static int find_changed_columns(uint32_t* changed_columns, uint32_t* num_changed_columns, TABLE* smaller_table, TABLE* bigger_table) {
+TOKUDB_UNUSED(static int find_changed_columns(
+ uint32_t* changed_columns,
+ uint32_t* num_changed_columns,
+ TABLE* smaller_table,
+ TABLE* bigger_table));
+static int find_changed_columns(
+ uint32_t* changed_columns,
+ uint32_t* num_changed_columns,
+ TABLE* smaller_table,
+ TABLE* bigger_table) {
+
int retval;
uint curr_new_col_index = 0;
uint32_t curr_num_changed_columns=0;
- assert(bigger_table->s->fields > smaller_table->s->fields);
+ assert_always(bigger_table->s->fields > smaller_table->s->fields);
for (uint i = 0; i < smaller_table->s->fields; i++, curr_new_col_index++) {
if (curr_new_col_index >= bigger_table->s->fields) {
sql_print_error("error in determining changed columns");
@@ -670,15 +690,15 @@ static int find_changed_columns(uint32_t* changed_columns, uint32_t* num_changed
goto cleanup;
}
}
- // at this point, curr_field_in_orig and curr_field_in_new should be the same, let's verify
- // make sure the two fields that have the same name are ok
+        // at this point, curr_field_in_orig and curr_field_in_new should be
+        // the same; let's verify by making sure the two fields that have the
+        // same name are ok
if (!are_two_fields_same(curr_field_in_orig, curr_field_in_new)) {
sql_print_error(
- "Two fields that were supposedly the same are not: \
- %s in original, %s in new",
+ "Two fields that were supposedly the same are not: %s in "
+ "original, %s in new",
curr_field_in_orig->field_name,
- curr_field_in_new->field_name
- );
+ curr_field_in_new->field_name);
retval = 1;
goto cleanup;
}
@@ -693,17 +713,23 @@ cleanup:
return retval;
}
-static bool tables_have_same_keys_and_columns(TABLE* first_table, TABLE* second_table, bool print_error) __attribute__((unused));
-static bool tables_have_same_keys_and_columns(TABLE* first_table, TABLE* second_table, bool print_error) {
+TOKUDB_UNUSED(static bool tables_have_same_keys_and_columns(
+ TABLE* first_table,
+ TABLE* second_table,
+ bool print_error));
+static bool tables_have_same_keys_and_columns(
+ TABLE* first_table,
+ TABLE* second_table,
+ bool print_error) {
+
bool retval;
if (first_table->s->null_bytes != second_table->s->null_bytes) {
retval = false;
if (print_error) {
sql_print_error(
- "tables have different number of null bytes, %d, %d",
- first_table->s->null_bytes,
- second_table->s->null_bytes
- );
+ "tables have different number of null bytes, %d, %d",
+ first_table->s->null_bytes,
+ second_table->s->null_bytes);
}
goto exit;
}
@@ -711,10 +737,9 @@ static bool tables_have_same_keys_and_columns(TABLE* first_table, TABLE* second_
retval = false;
if (print_error) {
sql_print_error(
- "tables have different number of fields, %d, %d",
- first_table->s->fields,
- second_table->s->fields
- );
+ "tables have different number of fields, %d, %d",
+ first_table->s->fields,
+ second_table->s->fields);
}
goto exit;
}
@@ -724,9 +749,8 @@ static bool tables_have_same_keys_and_columns(TABLE* first_table, TABLE* second_
if (!are_two_fields_same(a,b)) {
retval = false;
sql_print_error(
- "tables have different fields at position %d",
- i
- );
+ "tables have different fields at position %d",
+ i);
goto exit;
}
}
@@ -741,21 +765,29 @@ exit:
}
#if TOKU_INCLUDE_WRITE_FRM_DATA
-// write the new frm data to the status dictionary using the alter table transaction
-int ha_tokudb::write_frm_data(const uchar *frm_data, size_t frm_len) {
+// write the new frm data to the status dictionary using the alter table
+// transaction
+int ha_tokudb::write_frm_data(const uchar* frm_data, size_t frm_len) {
TOKUDB_DBUG_ENTER("write_frm_data");
int error = 0;
if (TOKU_PARTITION_WRITE_FRM_DATA || table->part_info == NULL) {
// write frmdata to status
- THD *thd = ha_thd();
- tokudb_trx_data *trx = (tokudb_trx_data *) thd_get_ha_data(thd, tokudb_hton);
- assert(trx);
- DB_TXN *txn = trx->stmt; // use alter table transaction
- assert(txn);
- error = write_to_status(share->status_block, hatoku_frm_data, (void *)frm_data, (uint)frm_len, txn);
+ THD* thd = ha_thd();
+ tokudb_trx_data* trx =
+ (tokudb_trx_data*)thd_get_ha_data(thd, tokudb_hton);
+ assert_always(trx);
+ DB_TXN* txn = trx->stmt; // use alter table transaction
+ assert_always(txn);
+ error =
+ write_to_status(
+ share->status_block,
+ hatoku_frm_data,
+ (void*)frm_data,
+ (uint)frm_len,
+ txn);
}
-
+
TOKUDB_DBUG_RETURN(error);
}
#endif
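
write_frm_data() persists the new .frm image in the status dictionary under the alter-table transaction, so the metadata write commits or rolls back together with the ALTER itself. A hedged sketch of the store-under-a-key idea, with an in-memory map standing in for the status dictionary; the real write_to_status()/DB_TXN machinery is not reproduced:

#include <cstdint>
#include <map>
#include <vector>

typedef unsigned char uchar;

// Hypothetical stand-in for the status dictionary: key -> raw bytes.
static std::map<int, std::vector<uchar> > status_dict;
static const int HYPOTHETICAL_FRM_KEY = 1;  // plays the role of hatoku_frm_data

// Store the frm image under the frm key, as write_frm_data() does via
// write_to_status() inside the alter-table transaction.
static int store_frm(const uchar* frm_data, size_t frm_len) {
    status_dict[HYPOTHETICAL_FRM_KEY].assign(frm_data, frm_data + frm_len);
    return 0;
}

int main() {
    uchar frm[4] = {0xfe, 0x01, 0x02, 0x03};
    return store_frm(frm, sizeof(frm));
}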
diff --git a/storage/tokudb/ha_tokudb_update.cc b/storage/tokudb/ha_tokudb_update.cc
index 1e2d6c0cdbf..23de81f3d8a 100644
--- a/storage/tokudb/ha_tokudb_update.cc
+++ b/storage/tokudb/ha_tokudb_update.cc
@@ -53,14 +53,19 @@ Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
// Replace field_offset
// Debug function to dump an Item
-static void dump_item(Item *item) {
+static void dump_item(Item* item) {
fprintf(stderr, "%u", item->type());
switch (item->type()) {
case Item::FUNC_ITEM: {
- Item_func *func = static_cast<Item_func*>(item);
+ Item_func* func = static_cast<Item_func*>(item);
uint n = func->argument_count();
- Item **arguments = func->arguments();
- fprintf(stderr, ":func=%u,%s,%u(", func->functype(), func->func_name(), n);
+ Item** arguments = func->arguments();
+ fprintf(
+ stderr,
+ ":func=%u,%s,%u(",
+ func->functype(),
+ func->func_name(),
+ n);
for (uint i = 0; i < n ; i++) {
dump_item(arguments[i]);
if (i < n-1)
@@ -70,25 +75,30 @@ static void dump_item(Item *item) {
break;
}
case Item::INT_ITEM: {
- Item_int *int_item = static_cast<Item_int*>(item);
+ Item_int* int_item = static_cast<Item_int*>(item);
fprintf(stderr, ":int=%lld", int_item->val_int());
break;
}
case Item::STRING_ITEM: {
- Item_string *str_item = static_cast<Item_string*>(item);
+ Item_string* str_item = static_cast<Item_string*>(item);
fprintf(stderr, ":str=%s", str_item->val_str(NULL)->c_ptr());
break;
}
case Item::FIELD_ITEM: {
- Item_field *field_item = static_cast<Item_field*>(item);
- fprintf(stderr, ":field=%s.%s.%s", field_item->db_name, field_item->table_name, field_item->field_name);
+ Item_field* field_item = static_cast<Item_field*>(item);
+ fprintf(
+ stderr,
+ ":field=%s.%s.%s",
+ field_item->db_name,
+ field_item->table_name,
+ field_item->field_name);
break;
}
case Item::COND_ITEM: {
- Item_cond *cond_item = static_cast<Item_cond*>(item);
+ Item_cond* cond_item = static_cast<Item_cond*>(item);
fprintf(stderr, ":cond=%s(\n", cond_item->func_name());
List_iterator<Item> li(*cond_item->argument_list());
- Item *list_item;
+ Item* list_item;
while ((list_item = li++)) {
dump_item(list_item);
fprintf(stderr, "\n");
@@ -97,7 +107,7 @@ static void dump_item(Item *item) {
break;
}
case Item::INSERT_VALUE_ITEM: {
- Item_insert_value *value_item = static_cast<Item_insert_value*>(item);
+ Item_insert_value* value_item = static_cast<Item_insert_value*>(item);
fprintf(stderr, ":insert_value");
dump_item(value_item->arg);
break;
@@ -109,10 +119,10 @@ static void dump_item(Item *item) {
}
// Debug function to dump an Item list
-static void dump_item_list(const char *h, List<Item> &l) {
+static void dump_item_list(const char* h, List<Item>& l) {
fprintf(stderr, "%s elements=%u\n", h, l.elements);
List_iterator<Item> li(l);
- Item *item;
+ Item* item;
while ((item = li++) != NULL) {
dump_item(item);
fprintf(stderr, "\n");
@@ -120,10 +130,10 @@ static void dump_item_list(const char *h, List<Item> &l) {
}
// Find a Field by its Item name
-static Field *find_field_by_name(TABLE *table, Item *item) {
+static Field* find_field_by_name(TABLE* table, Item* item) {
if (item->type() != Item::FIELD_ITEM)
return NULL;
- Item_field *field_item = static_cast<Item_field*>(item);
+ Item_field* field_item = static_cast<Item_field*>(item);
#if 0
if (strcmp(table->s->db.str, field_item->db_name) != 0 ||
strcmp(table->s->table_name.str, field_item->table_name) != 0)
@@ -146,7 +156,12 @@ static Field *find_field_by_name(TABLE *table, Item *item) {
// Return the starting offset in the value for a particular index (selected by idx) of a
// particular field (selected by expand_field_num).
// This only works for fixed length fields
-static uint32_t fixed_field_offset(uint32_t null_bytes, KEY_AND_COL_INFO *kc_info, uint idx, uint expand_field_num) {
+static uint32_t fixed_field_offset(
+ uint32_t null_bytes,
+ KEY_AND_COL_INFO* kc_info,
+ uint idx,
+ uint expand_field_num) {
+
uint32_t offset = null_bytes;
for (uint i = 0; i < expand_field_num; i++) {
if (bitmap_is_set(&kc_info->key_filters[idx], i))
@@ -156,8 +171,13 @@ static uint32_t fixed_field_offset(uint32_t null_bytes, KEY_AND_COL_INFO *kc_inf
return offset;
}
-static uint32_t var_field_index(TABLE *table, KEY_AND_COL_INFO *kc_info, uint idx, uint field_num) {
- assert(field_num < table->s->fields);
+static uint32_t var_field_index(
+ TABLE* table,
+ KEY_AND_COL_INFO* kc_info,
+ uint idx,
+ uint field_num) {
+
+ assert_always(field_num < table->s->fields);
uint v_index = 0;
for (uint i = 0; i < table->s->fields; i++) {
if (bitmap_is_set(&kc_info->key_filters[idx], i))
@@ -171,26 +191,37 @@ static uint32_t var_field_index(TABLE *table, KEY_AND_COL_INFO *kc_info, uint id
return v_index;
}
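
var_field_index() returns the position of a field among the variable-length fields that survive the key filter, skipping filtered fields and counting variable ones until the target is reached. The same counting logic in standalone form, with plain bool vectors as hypothetical stand-ins for MY_BITMAP and the field metadata:

#include <cassert>
#include <cstdint>
#include <vector>

// Return the index of `field_num` among variable-length fields that are
// not filtered out; mirrors the skip/count loop in var_field_index().
static uint32_t var_field_position(const std::vector<bool>& filtered,
                                   const std::vector<bool>& is_variable,
                                   uint32_t field_num) {
    assert(field_num < filtered.size());
    uint32_t v_index = 0;
    for (uint32_t i = 0; i < field_num; i++) {
        if (filtered[i])
            continue;          // field is excluded by the key filter
        if (is_variable[i])
            v_index++;         // one more variable field before ours
    }
    return v_index;
}

int main() {
    //                        f0     f1    f2     f3
    std::vector<bool> filt = {false, true, false, false};
    std::vector<bool> var  = {true,  true, true,  false};
    // f1 is filtered, so only f0 counts as a variable field before f2
    assert(var_field_position(filt, var, 2) == 1);
    return 0;
}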
-static uint32_t blob_field_index(TABLE *table, KEY_AND_COL_INFO *kc_info, uint idx, uint field_num) {
- assert(field_num < table->s->fields);
+static uint32_t blob_field_index(
+ TABLE* table,
+ KEY_AND_COL_INFO* kc_info,
+ uint idx,
+ uint field_num) {
+
+ assert_always(field_num < table->s->fields);
uint b_index;
for (b_index = 0; b_index < kc_info->num_blobs; b_index++) {
if (kc_info->blob_fields[b_index] == field_num)
break;
}
- assert(b_index < kc_info->num_blobs);
+ assert_always(b_index < kc_info->num_blobs);
return b_index;
}
// Determine if an update operation can be offloaded to the storage engine.
-// The update operation consists of a list of update expressions (fields[i] = values[i]), and a list
-// of where conditions (conds). The function returns 0 if the update is handled in the storage engine.
+// The update operation consists of a list of update expressions
+// (fields[i] = values[i]), and a list of where conditions (conds).
+// The function returns 0 if the update is handled in the storage engine.
// Otherwise, an error is returned.
-int ha_tokudb::fast_update(THD *thd, List<Item> &update_fields, List<Item> &update_values, Item *conds) {
+int ha_tokudb::fast_update(
+ THD* thd,
+ List<Item>& update_fields,
+ List<Item>& update_values,
+ Item* conds) {
+
TOKUDB_HANDLER_DBUG_ENTER("");
int error = 0;
- if (tokudb_debug & TOKUDB_DEBUG_UPSERT) {
+ if (TOKUDB_UNLIKELY(TOKUDB_DEBUG_FLAGS(TOKUDB_DEBUG_UPSERT))) {
dump_item_list("fields", update_fields);
dump_item_list("values", update_values);
if (conds) {
@@ -198,7 +229,8 @@ int ha_tokudb::fast_update(THD *thd, List<Item> &update_fields, List<Item> &upda
}
}
- if (update_fields.elements < 1 || update_fields.elements != update_values.elements) {
+ if (update_fields.elements < 1 ||
+ update_fields.elements != update_values.elements) {
error = ENOTSUP; // something is fishy with the parameters
goto return_error;
}
@@ -208,14 +240,18 @@ int ha_tokudb::fast_update(THD *thd, List<Item> &update_fields, List<Item> &upda
goto check_error;
}
- error = send_update_message(update_fields, update_values, conds, transaction);
+ error = send_update_message(
+ update_fields,
+ update_values,
+ conds,
+ transaction);
if (error != 0) {
goto check_error;
}
check_error:
if (error != 0) {
- if (THDVAR(thd, disable_slow_update) != 0)
+ if (tokudb::sysvars::disable_slow_update(thd) != 0)
error = HA_ERR_UNSUPPORTED;
if (error != ENOTSUP)
print_error(error, MYF(0));
@@ -225,18 +261,20 @@ return_error:
TOKUDB_HANDLER_DBUG_RETURN(error);
}
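
fast_update() funnels every failure through check_error: ENOTSUP normally means MySQL retries on the slow row-by-row path, but when slow updates are disabled the error hardens into HA_ERR_UNSUPPORTED. A sketch of only that funnel, with an illustrative stand-in for the handler error code:

#include <cerrno>

// Illustrative stand-in for HA_ERR_UNSUPPORTED; the real value lives in
// the handler error numbering.
static const int FAKE_HA_ERR_UNSUPPORTED = 1112;

// Mirror the check_error funnel in fast_update(): a failed offload either
// falls back (ENOTSUP) or becomes a hard error when slow updates are off.
static int funnel_error(int error, bool slow_update_disabled) {
    if (error != 0 && slow_update_disabled)
        error = FAKE_HA_ERR_UNSUPPORTED;
    return error;
}

int main() {
    // with the slow path enabled, ENOTSUP survives so MySQL retries slowly
    bool ok = funnel_error(ENOTSUP, false) == ENOTSUP &&
              funnel_error(ENOTSUP, true) == FAKE_HA_ERR_UNSUPPORTED &&
              funnel_error(0, true) == 0;
    return ok ? 0 : 1;
}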
-// Return true if an expression is a simple int expression or a simple function of +- int expression.
-static bool check_int_result(Item *item) {
+// Return true if an expression is a simple int expression or a simple function
+// of +- int expression.
+static bool check_int_result(Item* item) {
Item::Type t = item->type();
if (t == Item::INT_ITEM) {
return true;
} else if (t == Item::FUNC_ITEM) {
- Item_func *item_func = static_cast<Item_func*>(item);
- if (strcmp(item_func->func_name(), "+") != 0 && strcmp(item_func->func_name(), "-") != 0)
+ Item_func* item_func = static_cast<Item_func*>(item);
+ if (strcmp(item_func->func_name(), "+") != 0 &&
+ strcmp(item_func->func_name(), "-") != 0)
return false;
if (item_func->argument_count() != 1)
return false;
- Item **arguments = item_func->arguments();
+ Item** arguments = item_func->arguments();
if (arguments[0]->type() != Item::INT_ITEM)
return false;
return true;
@@ -245,36 +283,43 @@ static bool check_int_result(Item *item) {
}
// check that an item is an insert value item with the same field name
-static bool check_insert_value(Item *item, const char *field_name) {
+static bool check_insert_value(Item* item, const char* field_name) {
if (item->type() != Item::INSERT_VALUE_ITEM)
return false;
- Item_insert_value *value_item = static_cast<Item_insert_value*>(item);
+ Item_insert_value* value_item = static_cast<Item_insert_value*>(item);
if (value_item->arg->type() != Item::FIELD_ITEM)
return false;
- Item_field *arg = static_cast<Item_field*>(value_item->arg);
+ Item_field* arg = static_cast<Item_field*>(value_item->arg);
if (strcmp(field_name, arg->field_name) != 0)
return false;
return true;
}
// Return true if an expression looks like field_name op constant.
-static bool check_x_op_constant(const char *field_name, Item *item, const char *op, Item **item_constant, bool allow_insert_value) {
+static bool check_x_op_constant(
+ const char* field_name,
+ Item* item,
+ const char* op,
+ Item** item_constant,
+ bool allow_insert_value) {
+
if (item->type() != Item::FUNC_ITEM)
return false;
- Item_func *item_func = static_cast<Item_func*>(item);
+ Item_func* item_func = static_cast<Item_func*>(item);
if (strcmp(item_func->func_name(), op) != 0)
return false;
- Item **arguments = item_func->arguments();
+ Item** arguments = item_func->arguments();
uint n = item_func->argument_count();
if (n != 2)
return false;
if (arguments[0]->type() != Item::FIELD_ITEM)
return false;
- Item_field *arg0 = static_cast<Item_field*>(arguments[0]);
+ Item_field* arg0 = static_cast<Item_field*>(arguments[0]);
if (strcmp(field_name, arg0->field_name) != 0)
return false;
if (!check_int_result(arguments[1]))
- if (!(allow_insert_value && check_insert_value(arguments[1], field_name)))
+ if (!(allow_insert_value &&
+ check_insert_value(arguments[1], field_name)))
return false;
*item_constant = arguments[1];
return true;
@@ -282,33 +327,35 @@ static bool check_x_op_constant(const char *field_name, Item *item, const char *
// Return true if an expression looks like field_name = constant
static bool check_x_equal_0(const char *field_name, Item *item) {
- Item *item_constant;
+ Item* item_constant;
if (!check_x_op_constant(field_name, item, "=", &item_constant, false))
return false;
- if (item_constant->type() != Item::INT_ITEM || item_constant->val_int() != 0)
+ if (item_constant->type() != Item::INT_ITEM ||
+ item_constant->val_int() != 0)
return false;
return true;
}
// Return true if an expression looks like fieldname - 1
-static bool check_x_minus_1(const char *field_name, Item *item) {
- Item *item_constant;
+static bool check_x_minus_1(const char* field_name, Item* item) {
+ Item* item_constant;
if (!check_x_op_constant(field_name, item, "-", &item_constant, false))
return false;
- if (item_constant->type() != Item::INT_ITEM || item_constant->val_int() != 1)
+ if (item_constant->type() != Item::INT_ITEM ||
+ item_constant->val_int() != 1)
return false;
return true;
}
// Return true if an expression looks like if(fieldname=0, 0, fieldname-1) and
// the field named by fieldname is an unsigned int.
-static bool check_decr_floor_expression(Field *lhs_field, Item *item) {
+static bool check_decr_floor_expression(Field* lhs_field, Item* item) {
if (item->type() != Item::FUNC_ITEM)
return false;
- Item_func *item_func = static_cast<Item_func*>(item);
+ Item_func* item_func = static_cast<Item_func*>(item);
if (strcmp(item_func->func_name(), "if") != 0)
return false;
- Item **arguments = item_func->arguments();
+ Item** arguments = item_func->arguments();
uint n = item_func->argument_count();
if (n != 3)
return false;
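
check_decr_floor_expression() recognizes IF(x = 0, 0, x - 1) applied to an unsigned column, i.e. a decrement that saturates at zero instead of wrapping around. The equivalent arithmetic in standalone form:

#include <cassert>
#include <cstdint>

// Saturating decrement: what IF(x = 0, 0, x - 1) computes for an
// unsigned column, avoiding wrap-around at zero.
static uint32_t decr_floor(uint32_t x) {
    return x == 0 ? 0 : x - 1;
}

int main() {
    assert(decr_floor(5) == 4);
    assert(decr_floor(0) == 0);  // floors at zero instead of wrapping
    return 0;
}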
@@ -324,8 +371,13 @@ static bool check_decr_floor_expression(Field *lhs_field, Item *item) {
}
// Check if lhs = rhs expression is simple. Return true if it is.
-static bool check_update_expression(Item *lhs_item, Item *rhs_item, TABLE *table, bool allow_insert_value) {
- Field *lhs_field = find_field_by_name(table, lhs_item);
+static bool check_update_expression(
+ Item* lhs_item,
+ Item* rhs_item,
+ TABLE* table,
+ bool allow_insert_value) {
+
+ Field* lhs_field = find_field_by_name(table, lhs_item);
if (lhs_field == NULL)
return false;
if (!lhs_field->part_of_key.is_clear_all())
@@ -340,16 +392,26 @@ static bool check_update_expression(Item *lhs_item, Item *rhs_item, TABLE *table
case MYSQL_TYPE_LONGLONG:
if (check_int_result(rhs_item))
return true;
- Item *item_constant;
- if (check_x_op_constant(lhs_field->field_name, rhs_item, "+", &item_constant, allow_insert_value))
+ Item* item_constant;
+ if (check_x_op_constant(
+ lhs_field->field_name,
+ rhs_item,
+ "+",
+ &item_constant,
+ allow_insert_value))
return true;
- if (check_x_op_constant(lhs_field->field_name, rhs_item, "-", &item_constant, allow_insert_value))
+ if (check_x_op_constant(
+ lhs_field->field_name,
+ rhs_item,
+ "-",
+ &item_constant,
+ allow_insert_value))
return true;
if (check_decr_floor_expression(lhs_field, rhs_item))
return true;
break;
case MYSQL_TYPE_STRING:
- if (rhs_type == Item::INT_ITEM || rhs_type == Item::STRING_ITEM)
+ if (rhs_type == Item::INT_ITEM || rhs_type == Item::STRING_ITEM)
return true;
break;
case MYSQL_TYPE_VARCHAR:
@@ -364,26 +426,35 @@ static bool check_update_expression(Item *lhs_item, Item *rhs_item, TABLE *table
}
// Check that all update expressions are simple. Return true if they are.
-static bool check_all_update_expressions(List<Item> &fields, List<Item> &values, TABLE *table, bool allow_insert_value) {
+static bool check_all_update_expressions(
+ List<Item>& fields,
+ List<Item>& values,
+ TABLE* table,
+ bool allow_insert_value) {
+
List_iterator<Item> lhs_i(fields);
List_iterator<Item> rhs_i(values);
while (1) {
- Item *lhs_item = lhs_i++;
+ Item* lhs_item = lhs_i++;
if (lhs_item == NULL)
break;
- Item *rhs_item = rhs_i++;
- assert(rhs_item != NULL);
- if (!check_update_expression(lhs_item, rhs_item, table, allow_insert_value))
+ Item* rhs_item = rhs_i++;
+ assert_always(rhs_item != NULL);
+ if (!check_update_expression(
+ lhs_item,
+ rhs_item,
+ table,
+ allow_insert_value))
return false;
}
return true;
}
-static bool full_field_in_key(TABLE *table, Field *field) {
- assert(table->s->primary_key < table->s->keys);
- KEY *key = &table->s->key_info[table->s->primary_key];
- for (uint i = 0; i < get_key_parts(key); i++) {
- KEY_PART_INFO *key_part = &key->key_part[i];
+static bool full_field_in_key(TABLE* table, Field* field) {
+ assert_always(table->s->primary_key < table->s->keys);
+ KEY* key = &table->s->key_info[table->s->primary_key];
+ for (uint i = 0; i < key->user_defined_key_parts; i++) {
+ KEY_PART_INFO* key_part = &key->key_part[i];
if (strcmp(field->field_name, key_part->field->field_name) == 0) {
return key_part->length == field->field_length;
}
@@ -391,19 +462,24 @@ static bool full_field_in_key(TABLE *table, Field *field) {
return false;
}
-// Check that an expression looks like fieldname = constant, fieldname is part of the
-// primary key, and the named field is an int, char or varchar type. Return true if it does.
-static bool check_pk_field_equal_constant(Item *item, TABLE *table, MY_BITMAP *pk_fields) {
+// Check that an expression looks like fieldname = constant, fieldname is part
+// of the primary key, and the named field is an int, char or varchar type.
+// Return true if it does.
+static bool check_pk_field_equal_constant(
+ Item* item,
+ TABLE* table,
+ MY_BITMAP* pk_fields) {
+
if (item->type() != Item::FUNC_ITEM)
return false;
- Item_func *func = static_cast<Item_func*>(item);
+ Item_func* func = static_cast<Item_func*>(item);
if (strcmp(func->func_name(), "=") != 0)
return false;
uint n = func->argument_count();
if (n != 2)
return false;
- Item **arguments = func->arguments();
- Field *field = find_field_by_name(table, arguments[0]);
+ Item** arguments = func->arguments();
+ Field* field = find_field_by_name(table, arguments[0]);
if (field == NULL)
return false;
if (!bitmap_test_and_clear(pk_fields, field->field_index))
@@ -414,19 +490,21 @@ static bool check_pk_field_equal_constant(Item *item, TABLE *table, MY_BITMAP *p
case MYSQL_TYPE_INT24:
case MYSQL_TYPE_LONG:
case MYSQL_TYPE_LONGLONG:
- return arguments[1]->type() == Item::INT_ITEM || arguments[1]->type() == Item::STRING_ITEM;
+ return arguments[1]->type() == Item::INT_ITEM ||
+ arguments[1]->type() == Item::STRING_ITEM;
case MYSQL_TYPE_STRING:
case MYSQL_TYPE_VARCHAR:
return full_field_in_key(table, field) &&
- (arguments[1]->type() == Item::INT_ITEM || arguments[1]->type() == Item::STRING_ITEM);
+ (arguments[1]->type() == Item::INT_ITEM ||
+ arguments[1]->type() == Item::STRING_ITEM);
default:
return false;
}
}
-// Check that the where condition covers all of the primary key components with fieldname = constant
-// expressions. Return true if it does.
-static bool check_point_update(Item *conds, TABLE *table) {
+// Check that the where condition covers all of the primary key components
+// with fieldname = constant expressions. Return true if it does.
+static bool check_point_update(Item* conds, TABLE* table) {
bool result = false;
if (conds == NULL)
@@ -435,13 +513,13 @@ static bool check_point_update(Item *conds, TABLE *table) {
if (table->s->primary_key >= table->s->keys)
return false; // no primary key defined
- // use a bitmap of the primary key fields to keep track of those fields that are covered
- // by the where conditions
+ // use a bitmap of the primary key fields to keep track of those fields
+ // that are covered by the where conditions
MY_BITMAP pk_fields;
if (bitmap_init(&pk_fields, NULL, table->s->fields, FALSE)) // 1 -> failure
return false;
KEY *key = &table->s->key_info[table->s->primary_key];
- for (uint i = 0; i < get_key_parts(key); i++)
+ for (uint i = 0; i < key->user_defined_key_parts; i++)
bitmap_set_bit(&pk_fields, key->key_part[i].field->field_index);
switch (conds->type()) {
@@ -449,14 +527,17 @@ static bool check_point_update(Item *conds, TABLE *table) {
result = check_pk_field_equal_constant(conds, table, &pk_fields);
break;
case Item::COND_ITEM: {
- Item_cond *cond_item = static_cast<Item_cond*>(conds);
+ Item_cond* cond_item = static_cast<Item_cond*>(conds);
if (strcmp(cond_item->func_name(), "and") != 0)
break;
List_iterator<Item> li(*cond_item->argument_list());
- Item *list_item;
+ Item* list_item;
result = true;
while (result == true && (list_item = li++)) {
- result = check_pk_field_equal_constant(list_item, table, &pk_fields);
+ result = check_pk_field_equal_constant(
+ list_item,
+ table,
+ &pk_fields);
}
break;
}
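
check_point_update() sets one bit per primary-key column, then clears a bit each time the WHERE tree supplies a field = constant predicate for that column; only if every bit is cleared exactly once does the statement qualify as a point update. A sketch of the test-and-clear step, with std::vector<bool> as a hypothetical stand-in for MY_BITMAP:

#include <cstdint>
#include <vector>

// Clear the bit for one `field = constant` predicate; fails if the field
// is not part of the key or was already matched (bitmap_test_and_clear).
static bool claim_pk_field(std::vector<bool>& pk_bits, uint32_t field_index) {
    if (field_index >= pk_bits.size() || !pk_bits[field_index])
        return false;
    pk_bits[field_index] = false;
    return true;
}

int main() {
    // two-column primary key on fields 0 and 2 (illustrative layout)
    std::vector<bool> pk = {true, false, true};
    bool covered = claim_pk_field(pk, 0) &&
                   claim_pk_field(pk, 2) &&
                   !claim_pk_field(pk, 2);  // duplicate predicate rejected
    return covered ? 0 : 1;
}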
@@ -474,13 +555,14 @@ static bool check_point_update(Item *conds, TABLE *table) {
// Precompute this when the table is opened.
static bool clustering_keys_exist(TABLE *table) {
for (uint keynr = 0; keynr < table->s->keys; keynr++) {
- if (keynr != table->s->primary_key && key_is_clustering(&table->s->key_info[keynr]))
+ if (keynr != table->s->primary_key &&
+ key_is_clustering(&table->s->key_info[keynr]))
return true;
}
return false;
}
-static bool is_strict_mode(THD *thd) {
+static bool is_strict_mode(THD* thd) {
#if 50600 <= MYSQL_VERSION_ID && MYSQL_VERSION_ID <= 50699
return thd->is_strict_mode();
#else
@@ -488,8 +570,14 @@ static bool is_strict_mode(THD *thd) {
#endif
}
-// Check if an update operation can be handled by this storage engine. Return true if it can.
-bool ha_tokudb::check_fast_update(THD *thd, List<Item> &fields, List<Item> &values, Item *conds) {
+// Check if an update operation can be handled by this storage engine.
+// Return true if it can.
+bool ha_tokudb::check_fast_update(
+ THD* thd,
+ List<Item>& fields,
+ List<Item>& values,
+ Item* conds) {
+
if (!transaction)
return false;
@@ -503,10 +591,12 @@ bool ha_tokudb::check_fast_update(THD *thd, List<Item> &fields, List<Item> &valu
// no binlog
if (mysql_bin_log.is_open() &&
- !(thd->variables.binlog_format == BINLOG_FORMAT_STMT || thd->variables.binlog_format == BINLOG_FORMAT_MIXED))
+ !(thd->variables.binlog_format == BINLOG_FORMAT_STMT ||
+ thd->variables.binlog_format == BINLOG_FORMAT_MIXED))
return false;
- // no clustering keys (need to broadcast an increment into the clustering keys since we are selecting with the primary key)
+ // no clustering keys (need to broadcast an increment into the clustering
+ // keys since we are selecting with the primary key)
if (clustering_keys_exist(table))
return false;
@@ -519,22 +609,34 @@ bool ha_tokudb::check_fast_update(THD *thd, List<Item> &fields, List<Item> &valu
return true;
}
-static void marshall_varchar_descriptor(tokudb::buffer &b, TABLE *table, KEY_AND_COL_INFO *kc_info, uint key_num) {
+static void marshall_varchar_descriptor(
+ tokudb::buffer& b,
+ TABLE* table,
+ KEY_AND_COL_INFO* kc_info,
+ uint key_num) {
+
b.append_ui<uint32_t>('v');
- b.append_ui<uint32_t>(table->s->null_bytes + kc_info->mcp_info[key_num].fixed_field_size);
+ b.append_ui<uint32_t>(
+ table->s->null_bytes + kc_info->mcp_info[key_num].fixed_field_size);
uint32_t var_offset_bytes = kc_info->mcp_info[key_num].len_of_offsets;
b.append_ui<uint32_t>(var_offset_bytes);
- b.append_ui<uint32_t>(var_offset_bytes == 0 ? 0 : kc_info->num_offset_bytes);
+ b.append_ui<uint32_t>(
+ var_offset_bytes == 0 ? 0 : kc_info->num_offset_bytes);
}
-static void marshall_blobs_descriptor(tokudb::buffer &b, TABLE *table, KEY_AND_COL_INFO *kc_info) {
+static void marshall_blobs_descriptor(
+ tokudb::buffer& b,
+ TABLE* table,
+ KEY_AND_COL_INFO* kc_info) {
+
b.append_ui<uint32_t>('b');
uint32_t n = kc_info->num_blobs;
b.append_ui<uint32_t>(n);
for (uint i = 0; i < n; i++) {
uint blob_field_index = kc_info->blob_fields[i];
- assert(blob_field_index < table->s->fields);
- uint8_t blob_field_length = table->s->field[blob_field_index]->row_pack_length();
+ assert_always(blob_field_index < table->s->fields);
+ uint8_t blob_field_length =
+ table->s->field[blob_field_index]->row_pack_length();
b.append(&blob_field_length, sizeof blob_field_length);
}
}
@@ -542,30 +644,37 @@ static void marshall_blobs_descriptor(tokudb::buffer &b, TABLE *table, KEY_AND_C
static inline uint32_t get_null_bit_position(uint32_t null_bit);
// evaluate the int value of an item
-static longlong item_val_int(Item *item) {
+static longlong item_val_int(Item* item) {
Item::Type t = item->type();
if (t == Item::INSERT_VALUE_ITEM) {
- Item_insert_value *value_item = static_cast<Item_insert_value*>(item);
+ Item_insert_value* value_item = static_cast<Item_insert_value*>(item);
return value_item->arg->val_int();
} else
return item->val_int();
}
// Marshall update operations to a buffer.
-static void marshall_update(tokudb::buffer &b, Item *lhs_item, Item *rhs_item, TABLE *table, TOKUDB_SHARE *share) {
+static void marshall_update(
+ tokudb::buffer& b,
+ Item* lhs_item,
+ Item* rhs_item,
+ TABLE* table,
+ TOKUDB_SHARE* share) {
+
// figure out the update operation type (again)
- Field *lhs_field = find_field_by_name(table, lhs_item);
- assert(lhs_field); // we found it before, so this should work
+ Field* lhs_field = find_field_by_name(table, lhs_item);
+ assert_always(lhs_field); // we found it before, so this should work
// compute the update info
uint32_t field_type;
uint32_t field_null_num = 0;
if (lhs_field->real_maybe_null()) {
uint32_t field_num = lhs_field->field_index;
- field_null_num = ((field_num/8)*8 + get_null_bit_position(lhs_field->null_bit)) + 1;
+ field_null_num =
+ ((field_num/8)*8 + get_null_bit_position(lhs_field->null_bit)) + 1;
}
uint32_t offset;
- void *v_ptr = NULL;
+ void* v_ptr = NULL;
uint32_t v_length;
uint32_t update_operation;
longlong v_ll;
@@ -577,9 +686,14 @@ static void marshall_update(tokudb::buffer &b, Item *lhs_item, Item *rhs_item, T
case MYSQL_TYPE_INT24:
case MYSQL_TYPE_LONG:
case MYSQL_TYPE_LONGLONG: {
- Field_num *lhs_num = static_cast<Field_num*>(lhs_field);
+ Field_num* lhs_num = static_cast<Field_num*>(lhs_field);
field_type = lhs_num->unsigned_flag ? UPDATE_TYPE_UINT : UPDATE_TYPE_INT;
- offset = fixed_field_offset(table->s->null_bytes, &share->kc_info, table->s->primary_key, lhs_field->field_index);
+ offset =
+ fixed_field_offset(
+ table->s->null_bytes,
+ &share->kc_info,
+ table->s->primary_key,
+ lhs_field->field_index);
switch (rhs_item->type()) {
case Item::INT_ITEM: {
update_operation = '=';
@@ -589,10 +703,12 @@ static void marshall_update(tokudb::buffer &b, Item *lhs_item, Item *rhs_item, T
break;
}
case Item::FUNC_ITEM: {
- Item_func *rhs_func = static_cast<Item_func*>(rhs_item);
- Item **arguments = rhs_func->arguments();
+ Item_func* rhs_func = static_cast<Item_func*>(rhs_item);
+ Item** arguments = rhs_func->arguments();
+ // we only support one if function for now, and it is a
+ // decrement with floor.
if (strcmp(rhs_func->func_name(), "if") == 0) {
- update_operation = '-'; // we only support one if function for now, and it is a decrement with floor.
+ update_operation = '-';
v_ll = 1;
} else if (rhs_func->argument_count() == 1) {
update_operation = '=';
@@ -606,14 +722,20 @@ static void marshall_update(tokudb::buffer &b, Item *lhs_item, Item *rhs_item, T
break;
}
default:
- assert(0);
+ assert_unreachable();
}
break;
}
case MYSQL_TYPE_STRING: {
update_operation = '=';
- field_type = lhs_field->binary() ? UPDATE_TYPE_BINARY : UPDATE_TYPE_CHAR;
- offset = fixed_field_offset(table->s->null_bytes, &share->kc_info, table->s->primary_key, lhs_field->field_index);
+ field_type =
+ lhs_field->binary() ? UPDATE_TYPE_BINARY : UPDATE_TYPE_CHAR;
+ offset =
+ fixed_field_offset(
+ table->s->null_bytes,
+ &share->kc_info,
+ table->s->primary_key,
+ lhs_field->field_index);
v_str = *rhs_item->val_str(&v_str);
v_length = v_str.length();
if (v_length >= lhs_field->pack_length()) {
@@ -621,7 +743,8 @@ static void marshall_update(tokudb::buffer &b, Item *lhs_item, Item *rhs_item, T
v_str.length(v_length); // truncate
} else {
v_length = lhs_field->pack_length();
- uchar pad_char = lhs_field->binary() ? 0 : lhs_field->charset()->pad_char;
+ uchar pad_char =
+ lhs_field->binary() ? 0 : lhs_field->charset()->pad_char;
v_str.fill(lhs_field->pack_length(), pad_char); // pad
}
v_ptr = v_str.c_ptr();
@@ -629,8 +752,14 @@ static void marshall_update(tokudb::buffer &b, Item *lhs_item, Item *rhs_item, T
}
case MYSQL_TYPE_VARCHAR: {
update_operation = '=';
- field_type = lhs_field->binary() ? UPDATE_TYPE_VARBINARY : UPDATE_TYPE_VARCHAR;
- offset = var_field_index(table, &share->kc_info, table->s->primary_key, lhs_field->field_index);
+ field_type =
+ lhs_field->binary() ? UPDATE_TYPE_VARBINARY : UPDATE_TYPE_VARCHAR;
+ offset =
+ var_field_index(
+ table,
+ &share->kc_info,
+ table->s->primary_key,
+ lhs_field->field_index);
v_str = *rhs_item->val_str(&v_str);
v_length = v_str.length();
if (v_length >= lhs_field->row_pack_length()) {
@@ -643,7 +772,12 @@ static void marshall_update(tokudb::buffer &b, Item *lhs_item, Item *rhs_item, T
case MYSQL_TYPE_BLOB: {
update_operation = '=';
field_type = lhs_field->binary() ? UPDATE_TYPE_BLOB : UPDATE_TYPE_TEXT;
- offset = blob_field_index(table, &share->kc_info, table->s->primary_key, lhs_field->field_index);
+ offset =
+ blob_field_index(
+ table,
+ &share->kc_info,
+ table->s->primary_key,
+ lhs_field->field_index);
v_str = *rhs_item->val_str(&v_str);
v_length = v_str.length();
if (v_length >= lhs_field->max_data_length()) {
@@ -654,7 +788,7 @@ static void marshall_update(tokudb::buffer &b, Item *lhs_item, Item *rhs_item, T
break;
}
default:
- assert(0);
+ assert_unreachable();
}
// marshall the update fields into the buffer
@@ -667,14 +801,14 @@ static void marshall_update(tokudb::buffer &b, Item *lhs_item, Item *rhs_item, T
}
// Save an item's value into the appropriate field. Return 0 if successful.
-static int save_in_field(Item *item, TABLE *table) {
- assert(item->type() == Item::FUNC_ITEM);
+static int save_in_field(Item* item, TABLE* table) {
+ assert_always(item->type() == Item::FUNC_ITEM);
Item_func *func = static_cast<Item_func*>(item);
- assert(strcmp(func->func_name(), "=") == 0);
+ assert_always(strcmp(func->func_name(), "=") == 0);
uint n = func->argument_count();
- assert(n == 2);
+ assert_always(n == 2);
Item **arguments = func->arguments();
- assert(arguments[0]->type() == Item::FIELD_ITEM);
+ assert_always(arguments[0]->type() == Item::FIELD_ITEM);
Item_field *field_item = static_cast<Item_field*>(arguments[0]);
my_bitmap_map *old_map = dbug_tmp_use_all_columns(table, table->write_set);
int error = arguments[1]->save_in_field(field_item->field, 0);
@@ -682,7 +816,11 @@ static int save_in_field(Item *item, TABLE *table) {
return error;
}
-static void count_update_types(Field *lhs_field, uint *num_varchars, uint *num_blobs) {
+static void count_update_types(
+ Field* lhs_field,
+ uint* num_varchars,
+ uint* num_blobs) {
+
switch (lhs_field->type()) {
case MYSQL_TYPE_VARCHAR:
*num_varchars += 1;
@@ -695,8 +833,14 @@ static void count_update_types(Field *lhs_field, uint *num_varchars, uint *num_b
}
}
-// Generate an update message for an update operation and send it into the primary tree. Return 0 if successful.
-int ha_tokudb::send_update_message(List<Item> &update_fields, List<Item> &update_values, Item *conds, DB_TXN *txn) {
+// Generate an update message for an update operation and send it into the
+// primary tree. Return 0 if successful.
+int ha_tokudb::send_update_message(
+ List<Item>& update_fields,
+ List<Item>& update_values,
+ Item* conds,
+ DB_TXN* txn) {
+
int error;
// Save the primary key from the where conditions
@@ -704,26 +848,32 @@ int ha_tokudb::send_update_message(List<Item> &update_fields, List<Item> &update
if (t == Item::FUNC_ITEM) {
error = save_in_field(conds, table);
} else if (t == Item::COND_ITEM) {
- Item_cond *cond_item = static_cast<Item_cond*>(conds);
+ Item_cond* cond_item = static_cast<Item_cond*>(conds);
List_iterator<Item> li(*cond_item->argument_list());
- Item *list_item;
+ Item* list_item;
error = 0;
while (error == 0 && (list_item = li++)) {
error = save_in_field(list_item, table);
}
- } else
- assert(0);
+ } else {
+ assert_unreachable();
+ }
if (error)
return error;
// put the primary key into key_buff and wrap it with key_dbt
DBT key_dbt;
bool has_null;
- create_dbt_key_from_table(&key_dbt, primary_key, key_buff, table->record[0], &has_null);
-
+ create_dbt_key_from_table(
+ &key_dbt,
+ primary_key,
+ key_buff,
+ table->record[0],
+ &has_null);
+
// construct the update message
tokudb::buffer update_message;
-
+
uint8_t op = UPDATE_OP_UPDATE_2;
update_message.append(&op, sizeof op);
@@ -731,12 +881,12 @@ int ha_tokudb::send_update_message(List<Item> &update_fields, List<Item> &update
uint num_varchars = 0, num_blobs = 0;
if (1) {
List_iterator<Item> lhs_i(update_fields);
- Item *lhs_item;
+ Item* lhs_item;
while ((lhs_item = lhs_i++)) {
if (lhs_item == NULL)
break;
- Field *lhs_field = find_field_by_name(table, lhs_item);
- assert(lhs_field); // we found it before, so this should work
+ Field* lhs_field = find_field_by_name(table, lhs_item);
+ assert_always(lhs_field); // we found it before, so this should work
count_update_types(lhs_field, &num_varchars, &num_blobs);
}
if (num_varchars > 0 || num_blobs > 0)
@@ -747,56 +897,75 @@ int ha_tokudb::send_update_message(List<Item> &update_fields, List<Item> &update
// append the updates
update_message.append_ui<uint32_t>(num_updates);
-
+
if (num_varchars > 0 || num_blobs > 0)
- marshall_varchar_descriptor(update_message, table, &share->kc_info, table->s->primary_key);
+ marshall_varchar_descriptor(
+ update_message,
+ table,
+ &share->kc_info,
+ table->s->primary_key);
if (num_blobs > 0)
marshall_blobs_descriptor(update_message, table, &share->kc_info);
List_iterator<Item> lhs_i(update_fields);
List_iterator<Item> rhs_i(update_values);
while (error == 0) {
- Item *lhs_item = lhs_i++;
+ Item* lhs_item = lhs_i++;
if (lhs_item == NULL)
break;
- Item *rhs_item = rhs_i++;
- assert(rhs_item != NULL);
+ Item* rhs_item = rhs_i++;
+ assert_always(rhs_item != NULL);
marshall_update(update_message, lhs_item, rhs_item, table, share);
}
- rw_rdlock(&share->num_DBs_lock);
+ share->_num_DBs_lock.lock_read();
- if (share->num_DBs > table->s->keys + tokudb_test(hidden_primary_key)) { // hot index in progress
+ // hot index in progress
+ if (share->num_DBs > table->s->keys + tokudb_test(hidden_primary_key)) {
error = ENOTSUP; // run on the slow path
} else {
- // send the update message
+ // send the update message
DBT update_dbt; memset(&update_dbt, 0, sizeof update_dbt);
update_dbt.data = update_message.data();
update_dbt.size = update_message.size();
- error = share->key_file[primary_key]->update(share->key_file[primary_key], txn, &key_dbt, &update_dbt, 0);
+ error =
+ share->key_file[primary_key]->update(
+ share->key_file[primary_key],
+ txn,
+ &key_dbt,
+ &update_dbt,
+ 0);
}
- rw_unlock(&share->num_DBs_lock);
-
+ share->_num_DBs_lock.unlock();
+
return error;
}
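
send_update_message() frames one self-describing message: an op byte, a 32-bit count of update entries, optional 'v' and 'b' descriptors when varchar or blob columns are touched, and then one marshalled entry per assignment. A hedged byte-builder sketch of that framing; the op value is an illustrative stand-in for UPDATE_OP_UPDATE_2, and the real append_ui uses the engine's own integer encoding:

#include <cstdint>
#include <cstring>
#include <vector>

typedef std::vector<uint8_t> msg;

// Append a u32; stands in for tokudb::buffer::append_ui.
static void append_u32(msg& b, uint32_t v) {
    uint8_t raw[4];
    memcpy(raw, &v, sizeof(v));  // assumes a little-endian host
    b.insert(b.end(), raw, raw + 4);
}

int main() {
    const uint8_t FAKE_UPDATE_OP = 0x11;  // stands in for UPDATE_OP_UPDATE_2
    msg m;
    m.push_back(FAKE_UPDATE_OP);          // 1. operation byte
    append_u32(m, 2);                     // 2. number of update entries
    m.push_back('v');                     // 3. varchar descriptor tag...
    append_u32(m, 6);                     //    ...start of variable region
    append_u32(m, 4);                     //    ...bytes of offset table
    append_u32(m, 2);                     //    ...width of each offset
    // 4. per-assignment entries (operation, type, null info, offset, value)
    //    would follow here, as produced by marshall_update().
    return m.size() == 1 + 4 + 1 + 12 ? 0 : 1;
}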
// Determine if an upsert operation can be offloaded to the storage engine.
-// An upsert consists of a row and a list of update expressions (update_fields[i] = update_values[i]).
-// The function returns 0 if the upsert is handled in the storage engine. Otherwise, an error code is returned.
-int ha_tokudb::upsert(THD *thd, List<Item> &update_fields, List<Item> &update_values) {
+// An upsert consists of a row and a list of update expressions
+// (update_fields[i] = update_values[i]).
+// The function returns 0 if the upsert is handled in the storage engine.
+// Otherwise, an error code is returned.
+int ha_tokudb::upsert(
+ THD* thd,
+ List<Item>& update_fields,
+ List<Item>& update_values) {
+
TOKUDB_HANDLER_DBUG_ENTER("");
int error = 0;
- if (tokudb_debug & TOKUDB_DEBUG_UPSERT) {
+ if (TOKUDB_UNLIKELY(TOKUDB_DEBUG_FLAGS(TOKUDB_DEBUG_UPSERT))) {
fprintf(stderr, "upsert\n");
dump_item_list("update_fields", update_fields);
dump_item_list("update_values", update_values);
}
- if (update_fields.elements < 1 || update_fields.elements != update_values.elements) {
- error = ENOTSUP; // not an upsert or something is fishy with the parameters
+ // not an upsert or something is fishy with the parameters
+ if (update_fields.elements < 1 ||
+ update_fields.elements != update_values.elements) {
+ error = ENOTSUP;
goto return_error;
}
@@ -812,7 +981,7 @@ int ha_tokudb::upsert(THD *thd, List<Item> &update_fields, List<Item> &update_va
check_error:
if (error != 0) {
- if (THDVAR(thd, disable_slow_upsert) != 0)
+ if (tokudb::sysvars::disable_slow_upsert(thd) != 0)
error = HA_ERR_UNSUPPORTED;
if (error != ENOTSUP)
print_error(error, MYF(0));
@@ -822,8 +991,13 @@ return_error:
TOKUDB_HANDLER_DBUG_RETURN(error);
}
-// Check if an upsert can be handled by this storage engine. Return trus if it can.
-bool ha_tokudb::check_upsert(THD *thd, List<Item> &update_fields, List<Item> &update_values) {
+// Check if an upsert can be handled by this storage engine.
+// Return true if it can.
+bool ha_tokudb::check_upsert(
+ THD* thd,
+ List<Item>& update_fields,
+ List<Item>& update_values) {
+
if (!transaction)
return false;
@@ -845,23 +1019,38 @@ bool ha_tokudb::check_upsert(THD *thd, List<Item> &update_fields, List<Item> &up
// no binlog
if (mysql_bin_log.is_open() &&
- !(thd->variables.binlog_format == BINLOG_FORMAT_STMT || thd->variables.binlog_format == BINLOG_FORMAT_MIXED))
+ !(thd->variables.binlog_format == BINLOG_FORMAT_STMT ||
+ thd->variables.binlog_format == BINLOG_FORMAT_MIXED))
return false;
- if (!check_all_update_expressions(update_fields, update_values, table, true))
+ if (!check_all_update_expressions(
+ update_fields,
+ update_values,
+ table,
+ true))
return false;
return true;
}
-// Generate an upsert message and send it into the primary tree. Return 0 if successful.
-int ha_tokudb::send_upsert_message(THD *thd, List<Item> &update_fields, List<Item> &update_values, DB_TXN *txn) {
+// Generate an upsert message and send it into the primary tree.
+// Return 0 if successful.
+int ha_tokudb::send_upsert_message(
+ THD* thd,
+ List<Item>& update_fields,
+ List<Item>& update_values,
+ DB_TXN* txn) {
int error = 0;
// generate primary key
DBT key_dbt;
bool has_null;
- create_dbt_key_from_table(&key_dbt, primary_key, primary_key_buff, table->record[0], &has_null);
+ create_dbt_key_from_table(
+ &key_dbt,
+ primary_key,
+ primary_key_buff,
+ table->record[0],
+ &has_null);
// generate packed row
DBT row;
@@ -883,12 +1072,12 @@ int ha_tokudb::send_upsert_message(THD *thd, List<Item> &update_fields, List<Ite
uint num_varchars = 0, num_blobs = 0;
if (1) {
List_iterator<Item> lhs_i(update_fields);
- Item *lhs_item;
+ Item* lhs_item;
while ((lhs_item = lhs_i++)) {
if (lhs_item == NULL)
break;
- Field *lhs_field = find_field_by_name(table, lhs_item);
- assert(lhs_field); // we found it before, so this should work
+ Field* lhs_field = find_field_by_name(table, lhs_item);
+ assert_always(lhs_field); // we found it before, so this should work
count_update_types(lhs_field, &num_varchars, &num_blobs);
}
if (num_varchars > 0 || num_blobs > 0)
@@ -901,35 +1090,44 @@ int ha_tokudb::send_upsert_message(THD *thd, List<Item> &update_fields, List<Ite
update_message.append_ui<uint32_t>(num_updates);
if (num_varchars > 0 || num_blobs > 0)
- marshall_varchar_descriptor(update_message, table, &share->kc_info, table->s->primary_key);
+ marshall_varchar_descriptor(
+ update_message,
+            table,
+            &share->kc_info,
+ table->s->primary_key);
if (num_blobs > 0)
marshall_blobs_descriptor(update_message, table, &share->kc_info);
List_iterator<Item> lhs_i(update_fields);
List_iterator<Item> rhs_i(update_values);
while (1) {
- Item *lhs_item = lhs_i++;
+ Item* lhs_item = lhs_i++;
if (lhs_item == NULL)
break;
- Item *rhs_item = rhs_i++;
- if (rhs_item == NULL)
- assert(0); // can not happen
+ Item* rhs_item = rhs_i++;
+ assert_always(rhs_item != NULL);
marshall_update(update_message, lhs_item, rhs_item, table, share);
}
- rw_rdlock(&share->num_DBs_lock);
+ share->_num_DBs_lock.lock_read();
- if (share->num_DBs > table->s->keys + tokudb_test(hidden_primary_key)) { // hot index in progress
+ // hot index in progress
+ if (share->num_DBs > table->s->keys + tokudb_test(hidden_primary_key)) {
error = ENOTSUP; // run on the slow path
} else {
// send the upsert message
DBT update_dbt; memset(&update_dbt, 0, sizeof update_dbt);
update_dbt.data = update_message.data();
update_dbt.size = update_message.size();
- error = share->key_file[primary_key]->update(share->key_file[primary_key], txn, &key_dbt, &update_dbt, 0);
+ error =
+ share->key_file[primary_key]->update(
+ share->key_file[primary_key],
+ txn,
+ &key_dbt,
+ &update_dbt,
+ 0);
}
- rw_unlock(&share->num_DBs_lock);
+ share->_num_DBs_lock.unlock();
return error;
}
diff --git a/storage/tokudb/hatoku_cmp.cc b/storage/tokudb/hatoku_cmp.cc
index 001af657c1d..13bc60e9d98 100644
--- a/storage/tokudb/hatoku_cmp.cc
+++ b/storage/tokudb/hatoku_cmp.cc
@@ -104,8 +104,7 @@ static void get_var_field_info(
data_end_offset = uint2korr(var_field_offset_ptr + 2*var_field_index);
break;
default:
- assert(false);
- break;
+ assert_unreachable();
}
if (var_field_index) {
@@ -117,8 +116,7 @@ static void get_var_field_info(
data_start_offset = uint2korr(var_field_offset_ptr + 2*(var_field_index-1));
break;
default:
- assert(false);
- break;
+ assert_unreachable();
}
}
else {
@@ -126,7 +124,7 @@ static void get_var_field_info(
}
*start_offset = data_start_offset;
- assert(data_end_offset >= data_start_offset);
+ assert_always(data_end_offset >= data_start_offset);
*field_len = data_end_offset - data_start_offset;
}
@@ -153,8 +151,7 @@ static void get_blob_field_info(
data_end_offset = uint2korr(var_field_data_ptr - 2);
break;
default:
- assert(false);
- break;
+ assert_unreachable();
}
}
else {
@@ -245,7 +242,7 @@ static TOKU_TYPE mysql_to_toku_type (Field* field) {
case MYSQL_TYPE_DECIMAL:
case MYSQL_TYPE_VAR_STRING:
case MYSQL_TYPE_NULL:
- assert(false);
+ assert_unreachable();
}
exit:
return ret_val;
@@ -312,7 +309,7 @@ static inline uchar* pack_toku_int (uchar* to_tokudb, uchar* from_mysql, uint32_
memcpy(to_tokudb, from_mysql, 8);
break;
default:
- assert(false);
+ assert_unreachable();
}
return to_tokudb+num_bytes;
}
@@ -338,7 +335,7 @@ static inline uchar* unpack_toku_int(uchar* to_mysql, uchar* from_tokudb, uint32
memcpy(to_mysql, from_tokudb, 8);
break;
default:
- assert(false);
+ assert_unreachable();
}
return from_tokudb+num_bytes;
}
@@ -390,7 +387,7 @@ static inline int cmp_toku_int (uchar* a_buf, uchar* b_buf, bool is_unsigned, ui
ret_val = 0;
goto exit;
default:
- assert(false);
+ assert_unreachable();
}
}
//
@@ -438,13 +435,13 @@ static inline int cmp_toku_int (uchar* a_buf, uchar* b_buf, bool is_unsigned, ui
ret_val = 0;
goto exit;
default:
- assert(false);
+ assert_unreachable();
}
}
//
// if this is hit, indicates bug in writing of this function
//
- assert(false);
+ assert_unreachable();
exit:
return ret_val;
}
@@ -653,7 +650,7 @@ static inline uchar* unpack_toku_varbinary(
int4store(to_mysql, length);
break;
default:
- assert(false);
+ assert_unreachable();
}
//
// copy the binary data
@@ -779,7 +776,7 @@ static inline uchar* unpack_toku_blob(
int4store(to_mysql, length);
break;
default:
- assert(false);
+ assert_unreachable();
}
//
// copy the binary data
@@ -957,7 +954,9 @@ static inline int tokudb_compare_two_hidden_keys(
const void* saved_key_data,
const uint32_t saved_key_size
) {
- assert( (new_key_size >= TOKUDB_HIDDEN_PRIMARY_KEY_LENGTH) && (saved_key_size >= TOKUDB_HIDDEN_PRIMARY_KEY_LENGTH) );
+ assert_always(
+ (new_key_size >= TOKUDB_HIDDEN_PRIMARY_KEY_LENGTH) &&
+ (saved_key_size >= TOKUDB_HIDDEN_PRIMARY_KEY_LENGTH));
ulonglong a = hpk_char_to_num((uchar *) new_key_data);
ulonglong b = hpk_char_to_num((uchar *) saved_key_data);
return a < b ? -1 : (a > b ? 1 : 0);
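
tokudb_compare_two_hidden_keys() converts each hidden key's fixed-width prefix to an integer and three-way compares. A standalone sketch assuming a big-endian byte order for the prefix (the real hpk_char_to_num() may encode differently):

#include <cassert>
#include <cstdint>

typedef unsigned char uchar;

// Decode a fixed-width big-endian key prefix into an integer; a
// hypothetical stand-in for hpk_char_to_num().
static uint64_t key_to_num(const uchar* p, int width) {
    uint64_t v = 0;
    for (int i = 0; i < width; i++)
        v = (v << 8) | p[i];
    return v;
}

// Three-way compare, matching `a < b ? -1 : (a > b ? 1 : 0)` above.
static int cmp_hidden_keys(const uchar* a, const uchar* b, int width) {
    uint64_t x = key_to_num(a, width);
    uint64_t y = key_to_num(b, width);
    return x < y ? -1 : (x > y ? 1 : 0);
}

int main() {
    uchar a[4] = {0, 0, 0, 1};
    uchar b[4] = {0, 0, 1, 0};
    assert(cmp_hidden_keys(a, b, 4) == -1);
    assert(cmp_hidden_keys(b, b, 4) == 0);
    return 0;
}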
@@ -997,8 +996,7 @@ static uint32_t skip_field_in_descriptor(uchar* row_desc) {
row_desc_pos += sizeof(uint32_t);
break;
default:
- assert(false);
- break;
+ assert_unreachable();
}
return (uint32_t)(row_desc_pos - row_desc);
}
@@ -1012,7 +1010,7 @@ static int create_toku_key_descriptor_for_key(KEY* key, uchar* buf) {
uchar* pos = buf;
uint32_t num_bytes_in_field = 0;
uint32_t charset_num = 0;
- for (uint i = 0; i < get_key_parts(key); i++){
+ for (uint i = 0; i < key->user_defined_key_parts; i++) {
Field* field = key->key_part[i].field;
//
// The first byte states if there is a null byte
@@ -1026,7 +1024,7 @@ static int create_toku_key_descriptor_for_key(KEY* key, uchar* buf) {
// The second byte for each field is the type
//
TOKU_TYPE type = mysql_to_toku_type(field);
- assert (type < 256);
+ assert_always((int)type < 256);
*pos = (uchar)(type & 255);
pos++;
@@ -1041,7 +1039,7 @@ static int create_toku_key_descriptor_for_key(KEY* key, uchar* buf) {
//
case (toku_type_int):
num_bytes_in_field = field->pack_length();
- assert (num_bytes_in_field < 256);
+            assert_always(num_bytes_in_field < 256);
*pos = (uchar)(num_bytes_in_field & 255);
pos++;
*pos = (field->flags & UNSIGNED_FLAG) ? 1 : 0;
@@ -1059,7 +1057,7 @@ static int create_toku_key_descriptor_for_key(KEY* key, uchar* buf) {
case (toku_type_fixbinary):
num_bytes_in_field = field->pack_length();
set_if_smaller(num_bytes_in_field, key->key_part[i].length);
- assert(num_bytes_in_field < 256);
+ assert_always(num_bytes_in_field < 256);
pos[0] = (uchar)(num_bytes_in_field & 255);
pos++;
break;
@@ -1087,8 +1085,7 @@ static int create_toku_key_descriptor_for_key(KEY* key, uchar* buf) {
pos += 4;
break;
default:
- assert(false);
-
+ assert_unreachable();
}
}
return pos - buf;
@@ -1277,8 +1274,7 @@ static inline int compare_toku_field(
*read_string = true;
break;
default:
- assert(false);
- break;
+ assert_unreachable();
}
*row_desc_bytes_read = row_desc_pos - row_desc;
@@ -1301,7 +1297,7 @@ static uchar* pack_toku_key_field(
TOKU_TYPE toku_type = mysql_to_toku_type(field);
switch(toku_type) {
case (toku_type_int):
- assert(key_part_length == field->pack_length());
+ assert_always(key_part_length == field->pack_length());
new_pos = pack_toku_int(
to_tokudb,
from_mysql,
@@ -1309,13 +1305,13 @@ static uchar* pack_toku_key_field(
);
goto exit;
case (toku_type_double):
- assert(field->pack_length() == sizeof(double));
- assert(key_part_length == sizeof(double));
+ assert_always(field->pack_length() == sizeof(double));
+ assert_always(key_part_length == sizeof(double));
new_pos = pack_toku_double(to_tokudb, from_mysql);
goto exit;
case (toku_type_float):
- assert(field->pack_length() == sizeof(float));
- assert(key_part_length == sizeof(float));
+ assert_always(field->pack_length() == sizeof(float));
+ assert_always(key_part_length == sizeof(float));
new_pos = pack_toku_float(to_tokudb, from_mysql);
goto exit;
case (toku_type_fixbinary):
@@ -1368,9 +1364,9 @@ static uchar* pack_toku_key_field(
);
goto exit;
default:
- assert(false);
+ assert_unreachable();
}
- assert(false);
+ assert_unreachable();
exit:
return new_pos;
}
@@ -1419,10 +1415,10 @@ static uchar* pack_key_toku_key_field(
);
goto exit;
default:
- assert(false);
+ assert_unreachable();
}
- assert(false);
+ assert_unreachable();
exit:
return new_pos;
}
@@ -1432,16 +1428,15 @@ uchar* unpack_toku_key_field(
uchar* to_mysql,
uchar* from_tokudb,
Field* field,
- uint32_t key_part_length
- )
-{
+ uint32_t key_part_length) {
+
uchar* new_pos = NULL;
uint32_t num_bytes = 0;
uint32_t num_bytes_copied;
TOKU_TYPE toku_type = mysql_to_toku_type(field);
switch(toku_type) {
case (toku_type_int):
- assert(key_part_length == field->pack_length());
+ assert_always(key_part_length == field->pack_length());
new_pos = unpack_toku_int(
to_mysql,
from_tokudb,
@@ -1449,13 +1444,13 @@ uchar* unpack_toku_key_field(
);
goto exit;
case (toku_type_double):
- assert(field->pack_length() == sizeof(double));
- assert(key_part_length == sizeof(double));
+ assert_always(field->pack_length() == sizeof(double));
+ assert_always(key_part_length == sizeof(double));
new_pos = unpack_toku_double(to_mysql, from_tokudb);
goto exit;
case (toku_type_float):
- assert(field->pack_length() == sizeof(float));
- assert(key_part_length == sizeof(float));
+ assert_always(field->pack_length() == sizeof(float));
+ assert_always(key_part_length == sizeof(float));
new_pos = unpack_toku_float(to_mysql, from_tokudb);
goto exit;
case (toku_type_fixbinary):
@@ -1464,8 +1459,7 @@ uchar* unpack_toku_key_field(
new_pos = unpack_toku_binary(
to_mysql,
from_tokudb,
- num_bytes
- );
+ num_bytes);
goto exit;
case (toku_type_fixstring):
num_bytes = field->pack_length();
@@ -1473,11 +1467,15 @@ uchar* unpack_toku_key_field(
to_mysql,
from_tokudb,
get_length_bytes_from_max(key_part_length),
- 0
- );
- num_bytes_copied = new_pos - (from_tokudb + get_length_bytes_from_max(key_part_length));
- assert(num_bytes_copied <= num_bytes);
- memset(to_mysql+num_bytes_copied, field->charset()->pad_char, num_bytes - num_bytes_copied);
+ 0);
+ num_bytes_copied =
+ new_pos -
+ (from_tokudb + get_length_bytes_from_max(key_part_length));
+ assert_always(num_bytes_copied <= num_bytes);
+ memset(
+ to_mysql + num_bytes_copied,
+ field->charset()->pad_char,
+ num_bytes - num_bytes_copied);
goto exit;
case (toku_type_varbinary):
case (toku_type_varstring):
@@ -1485,21 +1483,20 @@ uchar* unpack_toku_key_field(
to_mysql,
from_tokudb,
get_length_bytes_from_max(key_part_length),
- ((Field_varstring *)field)->length_bytes
- );
+ ((Field_varstring*)field)->length_bytes);
goto exit;
case (toku_type_blob):
new_pos = unpack_toku_blob(
to_mysql,
from_tokudb,
get_length_bytes_from_max(key_part_length),
- ((Field_blob *)field)->row_pack_length() //only calling this because packlength is returned
- );
+ //row_pack_length() is called only because it returns the packlength
+ ((Field_blob*)field)->row_pack_length());
goto exit;
default:
- assert(false);
+ assert_unreachable();
}
- assert(false);
+ assert_unreachable();
exit:
return new_pos;
}
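The toku_type_fixstring branch above unpacks a variable-length payload and
then pads the MySQL buffer out to the column's full width with the charset's
pad character. That pad step in isolation, as a sketch:

    #include <cstdint>
    #include <cstring>

    static void pad_fixstring(
        unsigned char* to_mysql,
        uint32_t bytes_copied,     // what the varstring decode produced
        uint32_t full_length,      // field->pack_length()
        unsigned char pad_char) {  // field->charset()->pad_char
        if (bytes_copied < full_length)
            memset(to_mysql + bytes_copied, pad_char,
                   full_length - bytes_copied);
    }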
@@ -1513,9 +1510,8 @@ static int tokudb_compare_two_keys(
const void* row_desc,
const uint32_t row_desc_size,
bool cmp_prefix,
- bool* read_string
- )
-{
+ bool* read_string) {
+
int ret_val = 0;
int8_t new_key_inf_val = COL_NEG_INF;
int8_t saved_key_inf_val = COL_NEG_INF;
@@ -1538,11 +1534,9 @@ static int tokudb_compare_two_keys(
}
row_desc_ptr++;
- while ( (uint32_t)(new_key_ptr - (uchar *)new_key_data) < new_key_size &&
- (uint32_t)(saved_key_ptr - (uchar *)saved_key_data) < saved_key_size &&
- (uint32_t)(row_desc_ptr - (uchar *)row_desc) < row_desc_size
- )
- {
+ while ((uint32_t)(new_key_ptr - (uchar*)new_key_data) < new_key_size &&
+ (uint32_t)(saved_key_ptr - (uchar*)saved_key_data) < saved_key_size &&
+ (uint32_t)(row_desc_ptr - (uchar*)row_desc) < row_desc_size) {
uint32_t new_key_field_length;
uint32_t saved_key_field_length;
uint32_t row_desc_field_length;
@@ -1583,8 +1577,7 @@ static int tokudb_compare_two_keys(
&new_key_field_length,
&saved_key_field_length,
&row_desc_field_length,
- read_string
- );
+ read_string);
new_key_ptr += new_key_field_length;
saved_key_ptr += saved_key_field_length;
row_desc_ptr += row_desc_field_length;
@@ -1592,35 +1585,30 @@ static int tokudb_compare_two_keys(
goto exit;
}
- assert((uint32_t)(new_key_ptr - (uchar *)new_key_data) <= new_key_size);
- assert((uint32_t)(saved_key_ptr - (uchar *)saved_key_data) <= saved_key_size);
- assert((uint32_t)(row_desc_ptr - (uchar *)row_desc) <= row_desc_size);
- }
- new_key_bytes_left = new_key_size - ((uint32_t)(new_key_ptr - (uchar *)new_key_data));
- saved_key_bytes_left = saved_key_size - ((uint32_t)(saved_key_ptr - (uchar *)saved_key_data));
+ assert_always(
+ (uint32_t)(new_key_ptr - (uchar*)new_key_data) <= new_key_size);
+ assert_always(
+ (uint32_t)(saved_key_ptr - (uchar*)saved_key_data) <= saved_key_size);
+ assert_always(
+ (uint32_t)(row_desc_ptr - (uchar*)row_desc) <= row_desc_size);
+ }
+ new_key_bytes_left =
+ new_key_size - ((uint32_t)(new_key_ptr - (uchar*)new_key_data));
+ saved_key_bytes_left =
+ saved_key_size - ((uint32_t)(saved_key_ptr - (uchar*)saved_key_data));
if (cmp_prefix) {
ret_val = 0;
- }
- //
- // in this case, read both keys to completion, now read infinity byte
- //
- else if (new_key_bytes_left== 0 && saved_key_bytes_left== 0) {
+ } else if (new_key_bytes_left == 0 && saved_key_bytes_left == 0) {
+ // in this case, read both keys to completion, now read infinity byte
ret_val = new_key_inf_val - saved_key_inf_val;
- }
- //
- // at this point, one SHOULD be 0
- //
- else if (new_key_bytes_left == 0 && saved_key_bytes_left > 0) {
+ } else if (new_key_bytes_left == 0 && saved_key_bytes_left > 0) {
+ // at this point, one SHOULD be 0
ret_val = (new_key_inf_val == COL_POS_INF ) ? 1 : -1;
- }
- else if (new_key_bytes_left > 0 && saved_key_bytes_left == 0) {
+ } else if (new_key_bytes_left > 0 && saved_key_bytes_left == 0) {
ret_val = (saved_key_inf_val == COL_POS_INF ) ? -1 : 1;
- }
- //
- // this should never happen, perhaps we should assert(false)
- //
- else {
- assert(false);
+ } else {
+ // this should never happen
+ assert_unreachable();
ret_val = new_key_bytes_left - saved_key_bytes_left;
}
exit:
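When one key is a strict prefix of the other, the exhausted key's stored
infinity byte decides the order: a positive-infinity marker sorts after every
key sharing its prefix, negative infinity before. A sketch of that tie-break;
the enum values are assumed, the patch only names the constants:

    enum col_inf { NEG_INF = -1, POS_INF = 1 };

    // new_exhausted is true when new_key ran out of bytes first.
    static int inf_tie_break(col_inf inf_of_shorter, bool new_exhausted) {
        int r = (inf_of_shorter == POS_INF) ? 1 : -1;  // shorter key's rank
        return new_exhausted ? r : -r;
    }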
@@ -1765,9 +1753,9 @@ static int tokudb_compare_two_key_parts(
goto exit;
}
- assert((uint32_t)(new_key_ptr - (uchar *)new_key_data) <= new_key_size);
- assert((uint32_t)(saved_key_ptr - (uchar *)saved_key_data) <= saved_key_size);
- assert((uint32_t)(row_desc_ptr - (uchar *)row_desc) <= row_desc_size);
+ assert_always(
+ (uint32_t)(new_key_ptr - (uchar*)new_key_data) <= new_key_size);
+ assert_always(
+ (uint32_t)(saved_key_ptr - (uchar*)saved_key_data) <= saved_key_size);
+ assert_always(
+ (uint32_t)(row_desc_ptr - (uchar*)row_desc) <= row_desc_size);
}
ret_val = 0;
@@ -1776,7 +1764,7 @@ exit:
}
static int tokudb_cmp_dbt_key_parts(DB *file, const DBT *keya, const DBT *keyb, uint max_parts) {
- assert(file->cmp_descriptor->dbt.size);
+ assert_always(file->cmp_descriptor->dbt.size);
return tokudb_compare_two_key_parts(
keya->data,
keya->size,
@@ -1847,7 +1835,7 @@ static uint32_t pack_desc_pk_info(uchar* buf, KEY_AND_COL_INFO* kc_info, TABLE_S
case (toku_type_float):
pos[0] = COL_FIX_FIELD;
pos++;
- assert(kc_info->field_lengths[field_index] < 256);
+ assert_always(kc_info->field_lengths[field_index] < 256);
pos[0] = kc_info->field_lengths[field_index];
pos++;
break;
@@ -1856,7 +1844,7 @@ static uint32_t pack_desc_pk_info(uchar* buf, KEY_AND_COL_INFO* kc_info, TABLE_S
pos++;
field_length = field->pack_length();
set_if_smaller(key_part_length, field_length);
- assert(key_part_length < 256);
+ assert_always(key_part_length < 256);
pos[0] = (uchar)key_part_length;
pos++;
break;
@@ -1871,7 +1859,7 @@ static uint32_t pack_desc_pk_info(uchar* buf, KEY_AND_COL_INFO* kc_info, TABLE_S
pos++;
break;
default:
- assert(false);
+ assert_unreachable();
}
return pos - buf;
@@ -1893,7 +1881,7 @@ static uint32_t pack_desc_pk_offset_info(
bool is_constant_offset = true;
uint32_t offset = 0;
- for (uint i = 0; i < get_key_parts(prim_key); i++) {
+ for (uint i = 0; i < prim_key->user_defined_key_parts; i++) {
KEY_PART_INFO curr = prim_key->key_part[i];
uint16 curr_field_index = curr.field->field_index;
@@ -1908,7 +1896,7 @@ static uint32_t pack_desc_pk_offset_info(
}
offset += pk_info[2*i + 1];
}
- assert(found_col_in_pk);
+ assert_always(found_col_in_pk);
if (is_constant_offset) {
pos[0] = COL_FIX_PK_OFFSET;
pos++;
@@ -1966,10 +1954,10 @@ static uint32_t pack_desc_offset_info(uchar* buf, KEY_AND_COL_INFO* kc_info, uin
break;
}
}
- assert(found_index);
+ assert_always(found_index);
break;
default:
- assert(false);
+ assert_unreachable();
}
return pos - buf;
@@ -2004,7 +1992,7 @@ static uint32_t pack_desc_key_length_info(uchar* buf, KEY_AND_COL_INFO* kc_info,
pos += sizeof(key_part_length);
break;
default:
- assert(false);
+ assert_unreachable();
}
return pos - buf;
@@ -2041,7 +2029,7 @@ static uint32_t pack_desc_char_info(uchar* buf, KEY_AND_COL_INFO* kc_info, TABLE
pos += 4;
break;
default:
- assert(false);
+ assert_unreachable();
}
return pos - buf;
@@ -2151,7 +2139,7 @@ static uint32_t create_toku_clustering_val_pack_descriptor (
bool col_filtered = bitmap_is_set(&kc_info->key_filters[keynr],i);
bool col_filtered_in_pk = bitmap_is_set(&kc_info->key_filters[pk_index],i);
if (col_filtered_in_pk) {
- assert(col_filtered);
+ assert_always(col_filtered);
}
}
@@ -2321,7 +2309,7 @@ static uint32_t pack_clustering_val_from_desc(
memcpy(&end, desc_pos, sizeof(end));
desc_pos += sizeof(end);
- assert (start <= end);
+ assert_always(start <= end);
if (curr == CK_FIX_RANGE) {
length = end - start;
@@ -2367,24 +2355,21 @@ static uint32_t pack_clustering_val_from_desc(
offset_diffs = (end_data_offset + end_data_size) - (uint32_t)(var_dest_data_ptr - orig_var_dest_data_ptr);
for (uint32_t i = start; i <= end; i++) {
if ( num_offset_bytes == 1 ) {
- assert(offset_diffs < 256);
+ assert_always(offset_diffs < 256);
var_dest_offset_ptr[0] = var_src_offset_ptr[i] - (uchar)offset_diffs;
var_dest_offset_ptr++;
- }
- else if ( num_offset_bytes == 2 ) {
+ } else if (num_offset_bytes == 2) {
uint32_t tmp = uint2korr(var_src_offset_ptr + 2*i);
uint32_t new_offset = tmp - offset_diffs;
- assert(new_offset < 1<<16);
+ assert_always(new_offset < 1<<16);
int2store(var_dest_offset_ptr,new_offset);
var_dest_offset_ptr += 2;
- }
- else {
- assert(false);
+ } else {
+ assert_unreachable();
}
}
- }
- else {
- assert(false);
+ } else {
+ assert_unreachable();
}
}
//
@@ -2518,8 +2503,8 @@ static uint32_t create_toku_secondary_key_pack_descriptor (
//
// store number of parts
//
- assert(get_key_parts(prim_key) < 128);
- pos[0] = 2 * get_key_parts(prim_key);
+ assert_always(prim_key->user_defined_key_parts < 128);
+ pos[0] = 2 * prim_key->user_defined_key_parts;
pos++;
//
// for each part, store if it is a fixed field or var field
@@ -2529,7 +2514,7 @@ static uint32_t create_toku_secondary_key_pack_descriptor (
//
pk_info = pos;
uchar* tmp = pos;
- for (uint i = 0; i < get_key_parts(prim_key); i++) {
+ for (uint i = 0; i < prim_key->user_defined_key_parts; i++) {
tmp += pack_desc_pk_info(
tmp,
kc_info,
@@ -2540,18 +2525,18 @@ static uint32_t create_toku_secondary_key_pack_descriptor (
//
// asserting that we moved forward as much as we think we have
//
- assert(tmp - pos == (2 * get_key_parts(prim_key)));
+ assert_always(tmp - pos == (2 * prim_key->user_defined_key_parts));
pos = tmp;
}
- for (uint i = 0; i < get_key_parts(key_info); i++) {
+ for (uint i = 0; i < key_info->user_defined_key_parts; i++) {
KEY_PART_INFO curr_kpi = key_info->key_part[i];
uint16 field_index = curr_kpi.field->field_index;
Field* field = table_share->field[field_index];
bool is_col_in_pk = false;
if (bitmap_is_set(&kc_info->key_filters[pk_index],field_index)) {
- assert(!has_hpk && prim_key != NULL);
+ assert_always(!has_hpk && prim_key != NULL);
is_col_in_pk = true;
}
else {
@@ -2566,7 +2551,7 @@ static uint32_t create_toku_secondary_key_pack_descriptor (
// assert that columns in pk do not have a null bit
// because in MySQL, pk columns cannot be null
//
- assert(!field->null_bit);
+ assert_always(!field->null_bit);
}
if (field->null_bit) {
@@ -2668,7 +2653,7 @@ static uint32_t max_key_size_from_desc(
// skip byte that states if main dictionary
bool is_main_dictionary = desc_pos[0];
desc_pos++;
- assert(!is_main_dictionary);
+ assert_always(!is_main_dictionary);
// skip hpk byte
desc_pos++;
@@ -2731,7 +2716,7 @@ static uint32_t max_key_size_from_desc(
desc_pos += sizeof(charset_num);
}
else {
- assert(has_charset == COL_HAS_NO_CHARSET);
+ assert_always(has_charset == COL_HAS_NO_CHARSET);
}
}
return max_size;
@@ -2742,9 +2727,8 @@ static uint32_t pack_key_from_desc(
void* row_desc,
uint32_t row_desc_size,
const DBT* pk_key,
- const DBT* pk_val
- )
-{
+ const DBT* pk_val) {
+
MULTI_COL_PACK_INFO mcp_info;
uint32_t num_null_bytes;
uint32_t num_blobs;
@@ -2762,7 +2746,7 @@ static uint32_t pack_key_from_desc(
bool is_main_dictionary = desc_pos[0];
desc_pos++;
- assert(!is_main_dictionary);
+ assert_always(!is_main_dictionary);
//
// get the constant info out of descriptor
@@ -2810,7 +2794,7 @@ static uint32_t pack_key_from_desc(
fixed_field_ptr = null_bytes_ptr + num_null_bytes;
var_field_offset_ptr = fixed_field_ptr + mcp_info.fixed_field_size;
var_field_data_ptr = var_field_offset_ptr + mcp_info.len_of_offsets;
- while ( (uint32_t)(desc_pos - (uchar *)row_desc) < row_desc_size) {
+ while ((uint32_t)(desc_pos - (uchar*)row_desc) < row_desc_size) {
uchar col_fix_val;
uchar has_charset;
uint32_t col_pack_val = 0;
@@ -2834,8 +2818,7 @@ static uint32_t pack_key_from_desc(
packed_key_pos++;
desc_pos += skip_key_in_desc(desc_pos);
continue;
- }
- else {
+ } else {
packed_key_pos[0] = NONNULL_COL_VAL;
packed_key_pos++;
}
@@ -2859,42 +2842,46 @@ static uint32_t pack_key_from_desc(
if (has_charset == COL_HAS_CHARSET) {
memcpy(&charset_num, desc_pos, sizeof(charset_num));
desc_pos += sizeof(charset_num);
- }
- else {
- assert(has_charset == COL_HAS_NO_CHARSET);
+ } else {
+ assert_always(has_charset == COL_HAS_NO_CHARSET);
}
//
// case where column is in pk val
//
- if (col_fix_val == COL_FIX_FIELD || col_fix_val == COL_VAR_FIELD || col_fix_val == COL_BLOB_FIELD) {
- if (col_fix_val == COL_FIX_FIELD && has_charset == COL_HAS_NO_CHARSET) {
- memcpy(packed_key_pos, &fixed_field_ptr[col_pack_val], key_length);
+ if (col_fix_val == COL_FIX_FIELD ||
+ col_fix_val == COL_VAR_FIELD ||
+ col_fix_val == COL_BLOB_FIELD) {
+ if (col_fix_val == COL_FIX_FIELD &&
+ has_charset == COL_HAS_NO_CHARSET) {
+ memcpy(
+ packed_key_pos,
+ &fixed_field_ptr[col_pack_val],
+ key_length);
packed_key_pos += key_length;
- }
- else if (col_fix_val == COL_VAR_FIELD && has_charset == COL_HAS_NO_CHARSET) {
+ } else if (col_fix_val == COL_VAR_FIELD &&
+ has_charset == COL_HAS_NO_CHARSET) {
uint32_t data_start_offset = 0;
uint32_t data_size = 0;
get_var_field_info(
- &data_size,
- &data_start_offset,
- col_pack_val,
- var_field_offset_ptr,
- num_offset_bytes
- );
+ &data_size,
+ &data_start_offset,
+ col_pack_val,
+ var_field_offset_ptr,
+ num_offset_bytes);
//
// length of this field in this row is data_size
// data is located beginning at var_field_data_ptr + data_start_offset
//
packed_key_pos = pack_toku_varbinary_from_desc(
- packed_key_pos,
- var_field_data_ptr + data_start_offset,
- key_length, //number of bytes to use to encode the length in to_tokudb
- data_size //length of field
- );
- }
- else {
+ packed_key_pos,
+ var_field_data_ptr + data_start_offset,
+ //number of bytes to use to encode the length in to_tokudb
+ key_length,
+ //length of field
+ data_size);
+ } else {
const uchar* data_start = NULL;
uint32_t data_start_offset = 0;
uint32_t data_size = 0;
@@ -2903,76 +2890,59 @@ static uint32_t pack_key_from_desc(
data_start_offset = col_pack_val;
data_size = key_length;
data_start = fixed_field_ptr + data_start_offset;
- }
- else if (col_fix_val == COL_VAR_FIELD){
+ } else if (col_fix_val == COL_VAR_FIELD){
get_var_field_info(
- &data_size,
- &data_start_offset,
- col_pack_val,
- var_field_offset_ptr,
- num_offset_bytes
- );
+ &data_size,
+ &data_start_offset,
+ col_pack_val,
+ var_field_offset_ptr,
+ num_offset_bytes);
data_start = var_field_data_ptr + data_start_offset;
- }
- else if (col_fix_val == COL_BLOB_FIELD) {
+ } else if (col_fix_val == COL_BLOB_FIELD) {
uint32_t blob_index = col_pack_val;
uint32_t blob_offset;
const uchar* blob_ptr = NULL;
uint32_t field_len;
uint32_t field_len_bytes = blob_lengths[blob_index];
get_blob_field_info(
- &blob_offset,
+ &blob_offset,
mcp_info.len_of_offsets,
- var_field_data_ptr,
- num_offset_bytes
- );
+ var_field_data_ptr,
+ num_offset_bytes);
blob_ptr = var_field_data_ptr + blob_offset;
- assert(num_blobs > 0);
- //
- // skip over other blobs to get to the one we want to make a key out of
- //
+ assert_always(num_blobs > 0);
+
+ // skip over other blobs to get to the one we want to
+ // make a key out of
for (uint32_t i = 0; i < blob_index; i++) {
blob_ptr = unpack_toku_field_blob(
NULL,
blob_ptr,
blob_lengths[i],
- true
- );
+ true);
}
- //
- // at this point, blob_ptr is pointing to the blob we want to make a key from
- //
+ // at this point, blob_ptr is pointing to the blob we
+ // want to make a key from
field_len = get_blob_field_len(blob_ptr, field_len_bytes);
- //
// now we set the variables to make the key
- //
data_start = blob_ptr + field_len_bytes;
data_size = field_len;
-
-
- }
- else {
- assert(false);
+ } else {
+ assert_unreachable();
}
- packed_key_pos = pack_toku_varstring_from_desc(
- packed_key_pos,
+ packed_key_pos = pack_toku_varstring_from_desc(
+ packed_key_pos,
data_start,
key_length,
data_size,
- charset_num
- );
+ charset_num);
}
- }
- //
- // case where column is in pk key
- //
- else {
+ } else {
+ // case where column is in pk key
if (col_fix_val == COL_FIX_PK_OFFSET) {
memcpy(packed_key_pos, &pk_data_ptr[col_pack_val], key_length);
packed_key_pos += key_length;
- }
- else if (col_fix_val == COL_VAR_PK_OFFSET) {
+ } else if (col_fix_val == COL_VAR_PK_OFFSET) {
uchar* tmp_pk_data_ptr = pk_data_ptr;
uint32_t index_in_pk = col_pack_val;
//
@@ -2981,25 +2951,21 @@ static uint32_t pack_key_from_desc(
for (uint32_t i = 0; i < index_in_pk; i++) {
if (pk_info[2*i] == COL_FIX_FIELD) {
tmp_pk_data_ptr += pk_info[2*i + 1];
- }
- else if (pk_info[2*i] == COL_VAR_FIELD) {
+ } else if (pk_info[2*i] == COL_VAR_FIELD) {
uint32_t len_bytes = pk_info[2*i + 1];
uint32_t len;
if (len_bytes == 1) {
len = tmp_pk_data_ptr[0];
tmp_pk_data_ptr++;
- }
- else if (len_bytes == 2) {
+ } else if (len_bytes == 2) {
len = uint2korr(tmp_pk_data_ptr);
tmp_pk_data_ptr += 2;
- }
- else {
- assert(false);
+ } else {
+ assert_unreachable();
}
tmp_pk_data_ptr += len;
- }
- else {
- assert(false);
+ } else {
+ assert_unreachable();
}
}
//
@@ -3009,21 +2975,18 @@ static uint32_t pack_key_from_desc(
if (is_fix_field == COL_FIX_FIELD) {
memcpy(packed_key_pos, tmp_pk_data_ptr, key_length);
packed_key_pos += key_length;
- }
- else if (is_fix_field == COL_VAR_FIELD) {
+ } else if (is_fix_field == COL_VAR_FIELD) {
const uchar* data_start = NULL;
uint32_t data_size = 0;
uint32_t len_bytes = pk_info[2*index_in_pk + 1];
if (len_bytes == 1) {
data_size = tmp_pk_data_ptr[0];
tmp_pk_data_ptr++;
- }
- else if (len_bytes == 2) {
+ } else if (len_bytes == 2) {
data_size = uint2korr(tmp_pk_data_ptr);
tmp_pk_data_ptr += 2;
- }
- else {
- assert(false);
+ } else {
+ assert_unreachable();
}
data_start = tmp_pk_data_ptr;
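Variable-length pk parts in the loop above carry their size in a 1- or 2-byte
little-endian prefix ahead of the data (uint2korr is MySQL's 2-byte
little-endian read). The decode step in isolation, written out by hand:

    #include <cstdint>

    // Reads the size prefix, advances *p past it; the caller then skips
    // that many data bytes.
    static uint32_t read_len_prefix(const unsigned char** p,
                                    uint32_t len_bytes) {
        uint32_t len;
        if (len_bytes == 1) {
            len = (*p)[0];
            *p += 1;
        } else {  // len_bytes == 2 is the only other legal case
            len = (uint32_t)(*p)[0] | ((uint32_t)(*p)[1] << 8);
            *p += 2;
        }
        return len;
    }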
@@ -3033,32 +2996,26 @@ static uint32_t pack_key_from_desc(
data_start,
key_length,
data_size,
- charset_num
- );
- }
- else if (has_charset == COL_HAS_NO_CHARSET) {
+ charset_num);
+ } else if (has_charset == COL_HAS_NO_CHARSET) {
packed_key_pos = pack_toku_varbinary_from_desc(
- packed_key_pos,
- data_start,
+ packed_key_pos,
+ data_start,
key_length,
- data_size //length of field
- );
- }
- else {
- assert(false);
+ data_size);
+ } else {
+ assert_unreachable();
}
+ } else {
+ assert_unreachable();
}
- else {
- assert(false);
- }
- }
- else {
- assert(false);
+ } else {
+ assert_unreachable();
}
}
}
- assert( (uint32_t)(desc_pos - (uchar *)row_desc) == row_desc_size);
+ assert_always((uint32_t)(desc_pos - (uchar*)row_desc) == row_desc_size);
//
// now append the primary key to the end of the key
@@ -3066,13 +3023,12 @@ static uint32_t pack_key_from_desc(
if (hpk) {
memcpy(packed_key_pos, pk_key->data, pk_key->size);
packed_key_pos += pk_key->size;
- }
- else {
+ } else {
memcpy(packed_key_pos, (uchar *)pk_key->data + 1, pk_key->size - 1);
packed_key_pos += (pk_key->size - 1);
}
- return (uint32_t)(packed_key_pos - buf); //
+ return (uint32_t)(packed_key_pos - buf);
}
static bool fields_have_same_name(Field* a, Field* b) {
@@ -3249,7 +3205,7 @@ static bool fields_are_same_type(Field* a, Field* b) {
case MYSQL_TYPE_DECIMAL:
case MYSQL_TYPE_VAR_STRING:
case MYSQL_TYPE_NULL:
- assert(false);
+ assert_unreachable();
}
cleanup:
diff --git a/storage/tokudb/hatoku_cmp.h b/storage/tokudb/hatoku_cmp.h
index 9a5358fc9af..34b3cfbe1f8 100644
--- a/storage/tokudb/hatoku_cmp.h
+++ b/storage/tokudb/hatoku_cmp.h
@@ -26,9 +26,9 @@ Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
#ifndef _HATOKU_CMP
#define _HATOKU_CMP
-#include "stdint.h"
+#include "hatoku_defines.h"
+#include "tokudb_debug.h"
-#include <db.h>
//
// A MySQL row is encoded in TokuDB, as follows:
@@ -180,7 +180,7 @@ static inline uint32_t get_blob_field_len(const uchar* from_tokudb, uint32_t len
length = uint4korr(from_tokudb);
break;
default:
- assert(false);
+ assert_unreachable();
}
return length;
}
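get_blob_field_len dispatches on the width of the blob's length prefix; only
the 4-byte arm is visible in this hunk. A hedged equivalent for a
little-endian host, assuming the elided 1/2/3-byte arms are the usual
uintNkorr reads:

    #include <cstdint>
    #include <cstring>

    static uint32_t blob_len_le(const unsigned char* p, uint32_t len_bytes) {
        uint32_t len = 0;
        memcpy(&len, p, len_bytes);  // little-endian host assumed, like *korr
        return len;
    }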
diff --git a/storage/tokudb/hatoku_defines.h b/storage/tokudb/hatoku_defines.h
index 3602e0b6b5a..231911b9cc9 100644
--- a/storage/tokudb/hatoku_defines.h
+++ b/storage/tokudb/hatoku_defines.h
@@ -23,8 +23,47 @@ Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
-#ifndef _TOKUDB_CONFIG_H
-#define _TOKUDB_CONFIG_H
+#ifndef _HATOKU_DEFINES_H
+#define _HATOKU_DEFINES_H
+
+#include <my_config.h>
+#define MYSQL_SERVER 1
+#include "mysql_version.h"
+#include "sql_table.h"
+#include "handler.h"
+#include "table.h"
+#include "log.h"
+#include "sql_class.h"
+#include "sql_show.h"
+#include "discover.h"
+//#include <binlog.h>
+#include "debug_sync.h"
+
+#undef PACKAGE
+#undef VERSION
+#undef HAVE_DTRACE
+#undef _DTRACE_VERSION
+
+/* We define DTRACE after mysql_priv.h in case it disabled dtrace in the main server */
+#ifdef HAVE_DTRACE
+#define _DTRACE_VERSION 1
+#else
+#endif
+
+#include <mysql/plugin.h>
+
+#include <ctype.h>
+#include <stdint.h>
+#define __STDC_FORMAT_MACROS
+#include <inttypes.h>
+#if defined(_WIN32)
+#include "misc.h"
+#endif
+
+#include "db.h"
+#include "toku_os.h"
+#include "toku_time.h"
+#include "partitioned_counter.h"
#ifdef USE_PRAGMA_INTERFACE
#pragma interface /* gcc class implementation */
@@ -163,326 +202,51 @@ Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
/* Bits for share->status */
#define STATUS_PRIMARY_KEY_INIT 0x1
-#endif // _TOKUDB_CONFIG_H
-
-#ifndef _TOKUDB_DEBUG_H
-#define _TOKUDB_DEBUG_H
-
-#define TOKU_INCLUDE_BACKTRACE 0
-#if TOKU_INCLUDE_BACKTRACE
-static void tokudb_backtrace(void);
-#endif
-
-extern ulong tokudb_debug;
-
-// tokudb debug tracing
-#define TOKUDB_DEBUG_INIT 1
-#define TOKUDB_DEBUG_OPEN 2
-#define TOKUDB_DEBUG_ENTER 4
-#define TOKUDB_DEBUG_RETURN 8
-#define TOKUDB_DEBUG_ERROR 16
-#define TOKUDB_DEBUG_TXN 32
-#define TOKUDB_DEBUG_AUTO_INCREMENT 64
-#define TOKUDB_DEBUG_INDEX_KEY 128
-#define TOKUDB_DEBUG_LOCK 256
-#define TOKUDB_DEBUG_CHECK_KEY 1024
-#define TOKUDB_DEBUG_HIDE_DDL_LOCK_ERRORS 2048
-#define TOKUDB_DEBUG_ALTER_TABLE 4096
-#define TOKUDB_DEBUG_UPSERT 8192
-#define TOKUDB_DEBUG_CHECK (1<<14)
-#define TOKUDB_DEBUG_ANALYZE (1<<15)
-
-#define TOKUDB_TRACE(f, ...) { \
- fprintf(stderr, "%u %s:%u %s " f "\n", my_tid(), __FILE__, __LINE__, __FUNCTION__, ##__VA_ARGS__); \
-}
-
-static inline unsigned int my_tid() {
- return (unsigned int)toku_os_gettid();
-}
-
-#define TOKUDB_DBUG_ENTER(f, ...) { \
- if (tokudb_debug & TOKUDB_DEBUG_ENTER) { \
- TOKUDB_TRACE(f, ##__VA_ARGS__); \
- } \
-} \
- DBUG_ENTER(__FUNCTION__);
-
-#define TOKUDB_DBUG_RETURN(r) { \
- int rr = (r); \
- if ((tokudb_debug & TOKUDB_DEBUG_RETURN) || (rr != 0 && (tokudb_debug & TOKUDB_DEBUG_ERROR))) { \
- TOKUDB_TRACE("return %d", rr); \
- } \
- DBUG_RETURN(rr); \
-}
-
-#define TOKUDB_HANDLER_TRACE(f, ...) \
- fprintf(stderr, "%u %p %s:%u ha_tokudb::%s " f "\n", my_tid(), this, __FILE__, __LINE__, __FUNCTION__, ##__VA_ARGS__);
-
-#define TOKUDB_HANDLER_DBUG_ENTER(f, ...) { \
- if (tokudb_debug & TOKUDB_DEBUG_ENTER) { \
- TOKUDB_HANDLER_TRACE(f, ##__VA_ARGS__); \
- } \
-} \
- DBUG_ENTER(__FUNCTION__);
-
-#define TOKUDB_HANDLER_DBUG_RETURN(r) { \
- int rr = (r); \
- if ((tokudb_debug & TOKUDB_DEBUG_RETURN) || (rr != 0 && (tokudb_debug & TOKUDB_DEBUG_ERROR))) { \
- TOKUDB_HANDLER_TRACE("return %d", rr); \
- } \
- DBUG_RETURN(rr); \
-}
-
-#define TOKUDB_HANDLER_DBUG_VOID_RETURN { \
- if (tokudb_debug & TOKUDB_DEBUG_RETURN) { \
- TOKUDB_HANDLER_TRACE("return"); \
- } \
- DBUG_VOID_RETURN; \
-}
-
-#define TOKUDB_DBUG_DUMP(s, p, len) \
-{ \
- TOKUDB_TRACE("%s", s); \
- uint i; \
- for (i=0; i<len; i++) { \
- fprintf(stderr, "%2.2x", ((uchar*)p)[i]); \
- } \
- fprintf(stderr, "\n"); \
-}
-
-/* The purpose of this file is to define assert() for use by the handlerton.
- * The intention is for a failed handlerton assert to invoke a failed assert
- * in the fractal tree layer, which dumps engine status to the error log.
- */
-
-void toku_hton_assert_fail(const char*/*expr_as_string*/,const char */*fun*/,const char*/*file*/,int/*line*/, int/*errno*/) __attribute__((__visibility__("default"))) __attribute__((__noreturn__));
-
-#undef assert
-#define assert(expr) ((expr) ? (void)0 : toku_hton_assert_fail(#expr, __FUNCTION__, __FILE__, __LINE__, errno))
-
-#endif // _TOKUDB_DEBUG_H
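With the blanket assert override gone, the rest of this patch leans on
assert_always and assert_unreachable from the new tokudb_debug.h, which is
not part of this hunk. A minimal sketch of what those macros plausibly look
like, still routed through toku_hton_assert_fail; the names match the patch,
the bodies are assumptions:

    #include <cerrno>

    // Unlike plain assert(), these are meant to fire in release builds too.
    #define assert_always(expr)                                        \
        ((expr) ? (void)0                                              \
                : toku_hton_assert_fail(#expr, __FUNCTION__, __FILE__, \
                                        __LINE__, errno))

    #define assert_unreachable() assert_always(!"unreachable")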
-
-#ifndef _TOKUDB_TXN_H
-#define _TOKUDB_TXN_H
-
-typedef enum {
- hatoku_iso_not_set = 0,
- hatoku_iso_read_uncommitted,
- hatoku_iso_read_committed,
- hatoku_iso_repeatable_read,
- hatoku_iso_serializable
-} HA_TOKU_ISO_LEVEL;
-
-
-
-typedef struct st_tokudb_stmt_progress {
- ulonglong inserted;
- ulonglong updated;
- ulonglong deleted;
- ulonglong queried;
- bool using_loader;
-} tokudb_stmt_progress;
-
-
-typedef struct st_tokudb_trx_data {
- DB_TXN *all;
- DB_TXN *stmt;
- DB_TXN *sp_level;
- DB_TXN *sub_sp_level;
- uint tokudb_lock_count;
- uint create_lock_count;
- tokudb_stmt_progress stmt_progress;
- bool checkpoint_lock_taken;
- LIST *handlers;
-} tokudb_trx_data;
-
-extern char *tokudb_data_dir;
-extern const char *ha_tokudb_ext;
-
-static inline void reset_stmt_progress (tokudb_stmt_progress* val) {
- val->deleted = 0;
- val->inserted = 0;
- val->updated = 0;
- val->queried = 0;
-}
-
-static inline int get_name_length(const char *name) {
- int n = 0;
- const char *newname = name;
- n += strlen(newname);
- n += strlen(ha_tokudb_ext);
- return n;
-}
-
-//
-// returns maximum length of path to a dictionary
-//
-static inline int get_max_dict_name_path_length(const char *tablename) {
- int n = 0;
- n += get_name_length(tablename);
- n += 1; //for the '-'
- n += MAX_DICT_NAME_LEN;
- return n;
-}
-
-static inline void make_name(char *newname, const char *tablename, const char *dictname) {
- const char *newtablename = tablename;
- char *nn = newname;
- assert(tablename);
- assert(dictname);
- nn += sprintf(nn, "%s", newtablename);
- nn += sprintf(nn, "-%s", dictname);
-}
-
-static inline int txn_begin(DB_ENV *env, DB_TXN *parent, DB_TXN **txn, uint32_t flags, THD *thd) {
- *txn = NULL;
- int r = env->txn_begin(env, parent, txn, flags);
- if (r == 0 && thd) {
- DB_TXN *this_txn = *txn;
- this_txn->set_client_id(this_txn, thd_get_thread_id(thd));
- }
- if ((tokudb_debug & TOKUDB_DEBUG_TXN)) {
- TOKUDB_TRACE("begin txn %p %p %u r=%d", parent, *txn, flags, r);
- }
- return r;
-}
-
-static inline void commit_txn(DB_TXN* txn, uint32_t flags) {
- if (tokudb_debug & TOKUDB_DEBUG_TXN)
- TOKUDB_TRACE("commit txn %p", txn);
- int r = txn->commit(txn, flags);
- if (r != 0) {
- sql_print_error("tried committing transaction %p and got error code %d", txn, r);
- }
- assert(r == 0);
-}
-
-static inline void abort_txn(DB_TXN* txn) {
- if (tokudb_debug & TOKUDB_DEBUG_TXN)
- TOKUDB_TRACE("abort txn %p", txn);
- int r = txn->abort(txn);
- if (r != 0) {
- sql_print_error("tried aborting transaction %p and got error code %d", txn, r);
- }
- assert(r == 0);
-}
-
-#endif // _TOKUDB_TXN_H
-
-#ifndef _TOKUDB_PORTABILITY_H
-#define _TOKUDB_PORTABILITY_H
-
-static inline void *tokudb_my_malloc(size_t s, myf flags) {
-#if 50700 <= MYSQL_VERSION_ID && MYSQL_VERSION_ID <= 50799
- return my_malloc(0, s, flags);
+#if defined(TOKUDB_VERSION_MAJOR) && defined(TOKUDB_VERSION_MINOR)
+#define TOKUDB_PLUGIN_VERSION ((TOKUDB_VERSION_MAJOR << 8) + TOKUDB_VERSION_MINOR)
#else
- return my_malloc(s, flags);
+#define TOKUDB_PLUGIN_VERSION 0
#endif
-}
-static inline void *tokudb_my_realloc(void *p, size_t s, myf flags) {
- if (s == 0)
- return p;
-#if 50700 <= MYSQL_VERSION_ID && MYSQL_VERSION_ID <= 50799
- return my_realloc(0, p, s, flags);
-#else
- return my_realloc(p, s, flags | MY_ALLOW_ZERO_PTR);
-#endif
-}
+// Branch prediction macros.
+// If the compiler supports them, these hint the expected branch direction so
+// the hot path stays ahead in the instruction stream. Use them only where
+// profiling gives a clear picture of the likely branch. Mostly copied from
+// InnoDB.
+// Use:
+// "if (TOKUDB_LIKELY(x))" when "x" is expected to evaluate true
+// "if (TOKUDB_UNLIKELY(x))" when "x" is expected to evaluate false
+#if defined(__GNUC__) && (__GNUC__ > 2) && ! defined(__INTEL_COMPILER)
-static inline void tokudb_my_free(void *ptr) {
- if (ptr)
- my_free(ptr);
-}
+// Tell the compiler that 'expr' probably evaluates to 'constant'.
+#define TOKUDB_EXPECT(expr,constant) __builtin_expect(expr, constant)
-static inline char *tokudb_my_strdup(const char *p, myf flags) {
-#if 50700 <= MYSQL_VERSION_ID && MYSQL_VERSION_ID <= 50799
- return my_strdup(0, p, flags);
#else
- return my_strdup(p, flags);
-#endif
-}
-
-static inline void* tokudb_my_multi_malloc(myf myFlags, ...) {
- va_list args;
- char **ptr,*start,*res;
- size_t tot_length,length;
-
- va_start(args,myFlags);
- tot_length=0;
- while ((ptr=va_arg(args, char **))) {
- length=va_arg(args,uint);
- tot_length+=ALIGN_SIZE(length);
- }
- va_end(args);
-
- if (!(start=(char *) tokudb_my_malloc(tot_length,myFlags))) {
- return 0;
- }
-
- va_start(args,myFlags);
- res=start;
- while ((ptr=va_arg(args, char **))) {
- *ptr=res;
- length=va_arg(args,uint);
- res+=ALIGN_SIZE(length);
- }
- va_end(args);
- return start;
-}
-
-static inline void tokudb_pthread_mutex_init(pthread_mutex_t *mutex, const pthread_mutexattr_t *attr) {
- int r = pthread_mutex_init(mutex, attr);
- assert(r == 0);
-}
-
-static inline void tokudb_pthread_mutex_destroy(pthread_mutex_t *mutex) {
- int r = pthread_mutex_destroy(mutex);
- assert(r == 0);
-}
-static inline void tokudb_pthread_mutex_lock(pthread_mutex_t *mutex) {
- int r = pthread_mutex_lock(mutex);
- assert(r == 0);
-}
+#error "No TokuDB branch prediction operations in use!"
+#define TOKUDB_EXPECT(expr,constant) (expr)
-static inline void tokudb_pthread_mutex_unlock(pthread_mutex_t *mutex) {
- int r = pthread_mutex_unlock(mutex);
- assert(r == 0);
-}
+#endif // defined(__GNUC__) && (__GNUC__ > 2) && ! defined(__INTEL_COMPILER)
-static inline void tokudb_pthread_cond_init(pthread_cond_t *cond, const pthread_condattr_t *attr) {
- int r = pthread_cond_init(cond, attr);
- assert(r == 0);
-}
+// Tell the compiler that cond is likely to hold
+#define TOKUDB_LIKELY(cond) TOKUDB_EXPECT(cond, 1)
-static inline void tokudb_pthread_cond_destroy(pthread_cond_t *cond) {
- int r = pthread_cond_destroy(cond);
- assert(r == 0);
-}
-
-static inline void tokudb_pthread_cond_wait(pthread_cond_t *cond, pthread_mutex_t *mutex) {
- int r = pthread_cond_wait(cond, mutex);
- assert(r == 0);
-}
-
-static inline void tokudb_pthread_cond_broadcast(pthread_cond_t *cond) {
- int r = pthread_cond_broadcast(cond);
- assert(r == 0);
-}
+// Tell the compiler that cond is unlikely to hold
+#define TOKUDB_UNLIKELY(cond) TOKUDB_EXPECT(cond, 0)
+// Tell the compiler that the function/argument is unused
+#define TOKUDB_UNUSED(_uu) _uu __attribute__((unused))
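A hypothetical caller of the hint macros above (the function is illustrative,
not from this patch): error returns are rare, so the failure branch is marked
unlikely and the compiler keeps the success path hot.

    static int checked_step(int rc) {
        if (TOKUDB_UNLIKELY(rc != 0)) {
            // cold path: propagate the rare failure
            return rc;
        }
        // hot path: continue normal processing
        return 0;
    }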
// mysql 5.6.15 removed the test macro, so we define our own
#define tokudb_test(e) ((e) ? 1 : 0)
-static const char *tokudb_thd_get_proc_info(THD *thd) {
+inline const char* tokudb_thd_get_proc_info(const THD *thd) {
return thd->proc_info;
}
// uint3korr reads 4 bytes, so valgrind flags the byte past a 3-byte buffer; we copy into a zero-padded buffer instead
-static uint tokudb_uint3korr(const uchar *a) {
+inline uint tokudb_uint3korr(const uchar *a) {
uchar b[4] = {};
memcpy(b, a, 3);
return uint3korr(b);
}
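The wrapper above copies the 3 valid bytes into a zero-filled 4-byte buffer
so that uint3korr's 4-byte load never touches uninitialized memory. The same
pattern written out without the macro, for a little-endian 3-byte read
(illustrative only):

    #include <cstdint>
    #include <cstring>

    static uint32_t read3_le(const unsigned char* a) {
        unsigned char b[4] = {};  // zero pad byte keeps the wide read defined
        memcpy(b, a, 3);
        return (uint32_t)b[0] | ((uint32_t)b[1] << 8) | ((uint32_t)b[2] << 16);
    }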
-#endif // _TOKUDB_PORTABILITY_H
+#endif // _HATOKU_DEFINES_H
diff --git a/storage/tokudb/hatoku_hton.cc b/storage/tokudb/hatoku_hton.cc
index 74e133ecffd..a288fbccbee 100644
--- a/storage/tokudb/hatoku_hton.cc
+++ b/storage/tokudb/hatoku_hton.cc
@@ -24,35 +24,7 @@ Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
-#define MYSQL_SERVER 1
-#include "hatoku_defines.h"
-#include <db.h>
-#include <ctype.h>
-
-#include "stdint.h"
-#if defined(_WIN32)
-#include "misc.h"
-#endif
-#define __STDC_FORMAT_MACROS
-#include <inttypes.h>
-#include "toku_os.h"
-#include "toku_time.h"
-#include "partitioned_counter.h"
-
-/* We define DTRACE after mysql_priv.h in case it disabled dtrace in the main server */
-#ifdef HAVE_DTRACE
-#define _DTRACE_VERSION 1
-#else
-#endif
-
-#include <mysql/plugin.h>
#include "hatoku_hton.h"
-#include "ha_tokudb.h"
-
-#undef PACKAGE
-#undef VERSION
-#undef HAVE_DTRACE
-#undef _DTRACE_VERSION
#define TOKU_METADB_NAME "tokudb_meta"
@@ -62,75 +34,96 @@ typedef struct savepoint_info {
bool in_sub_stmt;
} *SP_INFO, SP_INFO_T;
-#if TOKU_INCLUDE_OPTION_STRUCTS
-ha_create_table_option tokudb_table_options[] = {
- HA_TOPTION_SYSVAR("compression", row_format, row_format),
- HA_TOPTION_END
-};
-
-ha_create_table_option tokudb_index_options[] = {
- HA_IOPTION_BOOL("clustering", clustering, 0),
- HA_IOPTION_END
-};
-#endif
-
-static uchar *tokudb_get_key(TOKUDB_SHARE * share, size_t * length, my_bool not_used __attribute__ ((unused))) {
- *length = share->table_name_length;
- return (uchar *) share->table_name;
-}
+static handler* tokudb_create_handler(
+ handlerton* hton,
+ TABLE_SHARE* table,
+ MEM_ROOT* mem_root);
-static handler *tokudb_create_handler(handlerton * hton, TABLE_SHARE * table, MEM_ROOT * mem_root);
-
-
-static void tokudb_print_error(const DB_ENV * db_env, const char *db_errpfx, const char *buffer);
+static void tokudb_print_error(
+ const DB_ENV* db_env,
+ const char* db_errpfx,
+ const char* buffer);
static void tokudb_cleanup_log_files(void);
-static int tokudb_end(handlerton * hton, ha_panic_function type);
-static bool tokudb_flush_logs(handlerton * hton);
-static bool tokudb_show_status(handlerton * hton, THD * thd, stat_print_fn * print, enum ha_stat_type);
+static int tokudb_end(handlerton* hton, ha_panic_function type);
+static bool tokudb_flush_logs(handlerton* hton);
+static bool tokudb_show_status(
+ handlerton* hton,
+ THD* thd,
+ stat_print_fn* print,
+ enum ha_stat_type);
#if TOKU_INCLUDE_HANDLERTON_HANDLE_FATAL_SIGNAL
-static void tokudb_handle_fatal_signal(handlerton *hton, THD *thd, int sig);
+static void tokudb_handle_fatal_signal(handlerton* hton, THD* thd, int sig);
#endif
-static int tokudb_close_connection(handlerton * hton, THD * thd);
-static int tokudb_commit(handlerton * hton, THD * thd, bool all);
-static int tokudb_rollback(handlerton * hton, THD * thd, bool all);
+static int tokudb_close_connection(handlerton* hton, THD* thd);
+static int tokudb_commit(handlerton* hton, THD* thd, bool all);
+static int tokudb_rollback(handlerton* hton, THD* thd, bool all);
#if TOKU_INCLUDE_XA
static int tokudb_xa_prepare(handlerton* hton, THD* thd, bool all);
-static int tokudb_xa_recover(handlerton* hton, XID* xid_list, uint len);
+static int tokudb_xa_recover(handlerton* hton, XID* xid_list, uint len);
static int tokudb_commit_by_xid(handlerton* hton, XID* xid);
-static int tokudb_rollback_by_xid(handlerton* hton, XID* xid);
+static int tokudb_rollback_by_xid(handlerton* hton, XID* xid);
#endif
-static int tokudb_rollback_to_savepoint(handlerton * hton, THD * thd, void *savepoint);
-static int tokudb_savepoint(handlerton * hton, THD * thd, void *savepoint);
-static int tokudb_release_savepoint(handlerton * hton, THD * thd, void *savepoint);
+static int tokudb_rollback_to_savepoint(
+ handlerton* hton,
+ THD* thd,
+ void* savepoint);
+static int tokudb_savepoint(handlerton* hton, THD* thd, void* savepoint);
+static int tokudb_release_savepoint(
+ handlerton* hton,
+ THD* thd,
+ void* savepoint);
#if 100000 <= MYSQL_VERSION_ID && MYSQL_VERSION_ID <= 100199
static int tokudb_discover_table(handlerton *hton, THD* thd, TABLE_SHARE *ts);
-static int tokudb_discover_table_existence(handlerton *hton, const char *db, const char *name);
+static int tokudb_discover_table_existence(
+ handlerton* hton,
+ const char* db,
+ const char* name);
#endif
-static int tokudb_discover(handlerton *hton, THD* thd, const char *db, const char *name, uchar **frmblob, size_t *frmlen);
-static int tokudb_discover2(handlerton *hton, THD* thd, const char *db, const char *name, bool translate_name, uchar **frmblob, size_t *frmlen);
-static int tokudb_discover3(handlerton *hton, THD* thd, const char *db, const char *name, char *path, uchar **frmblob, size_t *frmlen);
-handlerton *tokudb_hton;
-
-const char *ha_tokudb_ext = ".tokudb";
-char *tokudb_data_dir;
-ulong tokudb_debug;
-DB_ENV *db_env;
-HASH tokudb_open_tables;
-pthread_mutex_t tokudb_mutex;
+static int tokudb_discover(
+ handlerton* hton,
+ THD* thd,
+ const char* db,
+ const char* name,
+ uchar** frmblob,
+ size_t* frmlen);
+static int tokudb_discover2(
+ handlerton* hton,
+ THD* thd,
+ const char* db,
+ const char* name,
+ bool translate_name,
+ uchar** frmblob,
+ size_t* frmlen);
+static int tokudb_discover3(
+ handlerton* hton,
+ THD* thd,
+ const char* db,
+ const char* name,
+ char* path,
+ uchar** frmblob,
+ size_t* frmlen);
+handlerton* tokudb_hton;
+
+const char* ha_tokudb_ext = ".tokudb";
+DB_ENV* db_env;
#if TOKU_THDVAR_MEMALLOC_BUG
-static pthread_mutex_t tokudb_map_mutex;
+static tokudb::thread::mutex_t tokudb_map_mutex;
static TREE tokudb_map;
struct tokudb_map_pair {
- THD *thd;
+ THD* thd;
char *last_lock_timeout;
};
#if 50500 <= MYSQL_VERSION_ID && MYSQL_VERSION_ID <= 50599
static int tokudb_map_pair_cmp(void *custom_arg, const void *a, const void *b) {
#else
-static int tokudb_map_pair_cmp(const void *custom_arg, const void *a, const void *b) {
+static int tokudb_map_pair_cmp(
+ const void* custom_arg,
+ const void* a,
+ const void* b) {
#endif
+
const struct tokudb_map_pair *a_key = (const struct tokudb_map_pair *) a;
const struct tokudb_map_pair *b_key = (const struct tokudb_map_pair *) b;
if (a_key->thd < b_key->thd)
@@ -142,30 +135,41 @@ static int tokudb_map_pair_cmp(const void *custom_arg, const void *a, const void
};
#endif
-#if TOKU_INCLUDE_HANDLERTON_HANDLE_FATAL_SIGNAL
-static my_bool tokudb_gdb_on_fatal;
-static char *tokudb_gdb_path;
-#endif
-
static PARTITIONED_COUNTER tokudb_primary_key_bytes_inserted;
void toku_hton_update_primary_key_bytes_inserted(uint64_t row_size) {
increment_partitioned_counter(tokudb_primary_key_bytes_inserted, row_size);
}
-static void tokudb_lock_timeout_callback(DB *db, uint64_t requesting_txnid, const DBT *left_key, const DBT *right_key, uint64_t blocking_txnid);
-static ulong tokudb_cleaner_period;
-static ulong tokudb_cleaner_iterations;
+static void tokudb_lock_timeout_callback(
+ DB* db,
+ uint64_t requesting_txnid,
+ const DBT* left_key,
+ const DBT* right_key,
+ uint64_t blocking_txnid);
#define ASSERT_MSGLEN 1024
-void toku_hton_assert_fail(const char* expr_as_string, const char * fun, const char * file, int line, int caller_errno) {
+void toku_hton_assert_fail(
+ const char* expr_as_string,
+ const char* fun,
+ const char* file,
+ int line,
+ int caller_errno) {
+
char msg[ASSERT_MSGLEN];
if (db_env) {
snprintf(msg, ASSERT_MSGLEN, "Handlerton: %s ", expr_as_string);
db_env->crash(db_env, msg, fun, file, line,caller_errno);
- }
- else {
- snprintf(msg, ASSERT_MSGLEN, "Handlerton assertion failed, no env, %s, %d, %s, %s (errno=%d)\n", file, line, fun, expr_as_string, caller_errno);
+ } else {
+ snprintf(
+ msg,
+ ASSERT_MSGLEN,
+ "Handlerton assertion failed, no env, %s, %d, %s, %s (errno=%d)\n",
+ file,
+ line,
+ fun,
+ expr_as_string,
+ caller_errno);
perror(msg);
fflush(stderr);
}
@@ -184,36 +188,11 @@ static uint32_t tokudb_env_flags = 0;
// static uint32_t tokudb_lock_type = DB_LOCK_DEFAULT;
// static ulong tokudb_log_buffer_size = 0;
// static ulong tokudb_log_file_size = 0;
-static my_bool tokudb_directio = FALSE;
-static my_bool tokudb_compress_buffers_before_eviction = TRUE;
-static my_bool tokudb_checkpoint_on_flush_logs = FALSE;
-static ulonglong tokudb_cache_size = 0;
-static uint32_t tokudb_client_pool_threads = 0;
-static uint32_t tokudb_cachetable_pool_threads = 0;
-static uint32_t tokudb_checkpoint_pool_threads = 0;
-static ulonglong tokudb_max_lock_memory = 0;
-static my_bool tokudb_enable_partial_eviction = TRUE;
-static char *tokudb_home;
-static char *tokudb_tmp_dir;
-static char *tokudb_log_dir;
+static char* tokudb_home;
// static long tokudb_lock_scan_time = 0;
// static ulong tokudb_region_size = 0;
// static ulong tokudb_cache_parts = 1;
-const char *tokudb_hton_name = "TokuDB";
-static uint32_t tokudb_checkpointing_period;
-static uint32_t tokudb_fsync_log_period;
-uint32_t tokudb_write_status_frequency;
-uint32_t tokudb_read_status_frequency;
-
-#ifdef TOKUDB_VERSION
-#define tokudb_stringify_2(x) #x
-#define tokudb_stringify(x) tokudb_stringify_2(x)
-#define TOKUDB_VERSION_STR tokudb_stringify(TOKUDB_VERSION)
-#else
-#define TOKUDB_VERSION_STR NULL
-#endif
-char *tokudb_version = (char *) TOKUDB_VERSION_STR;
-static int tokudb_fs_reserve_percent; // file system reserve as a percentage of total disk space
+const char* tokudb_hton_name = "TokuDB";
#if defined(_WIN32)
extern "C" {
@@ -226,18 +205,8 @@ extern "C" {
// Since we don't have static initializers for the opaque rwlock type,
// use constructor and destructor functions to create and destroy
// the lock before and after main(), respectively.
-static int tokudb_hton_initialized;
-static rw_lock_t tokudb_hton_initialized_lock;
-
-static void create_tokudb_hton_intialized_lock(void) __attribute__((constructor));
-static void create_tokudb_hton_intialized_lock(void) {
- my_rwlock_init(&tokudb_hton_initialized_lock, 0);
-}
-
-static void destroy_tokudb_hton_initialized_lock(void) __attribute__((destructor));
-static void destroy_tokudb_hton_initialized_lock(void) {
- rwlock_destroy(&tokudb_hton_initialized_lock);
-}
+int tokudb_hton_initialized;
+tokudb::thread::rwlock_t tokudb_hton_initialized_lock;
static SHOW_VAR *toku_global_status_variables = NULL;
static uint64_t toku_global_status_max_rows;
@@ -266,7 +235,10 @@ static void handle_ydb_error(int error) {
sql_print_error("************************************************************");
break;
case TOKUDB_UPGRADE_FAILURE:
- sql_print_error("%s upgrade failed. A clean shutdown of the previous version is required.", tokudb_hton_name);
+ sql_print_error(
+ "%s upgrade failed. A clean shutdown of the previous version is "
+ "required.",
+ tokudb_hton_name);
break;
default:
sql_print_error("%s unknown error %d", tokudb_hton_name, error);
@@ -289,41 +261,51 @@ static int tokudb_init_func(void *p) {
int r;
// 3938: lock the handlerton's initialized status flag for writing
- r = rw_wrlock(&tokudb_hton_initialized_lock);
- assert(r == 0);
+ tokudb_hton_initialized_lock.lock_write();
db_env = NULL;
tokudb_hton = (handlerton *) p;
#if TOKUDB_CHECK_JEMALLOC
- if (tokudb_check_jemalloc) {
- typedef int (*mallctl_type)(const char *, void *, size_t *, void *, size_t);
+ if (tokudb::sysvars::check_jemalloc) {
+ typedef int (*mallctl_type)(
+ const char*,
+ void*,
+ size_t*,
+ void*,
+ size_t);
mallctl_type mallctl_func;
mallctl_func= (mallctl_type)dlsym(RTLD_DEFAULT, "mallctl");
if (!mallctl_func) {
- sql_print_error("%s is not initialized because jemalloc is not loaded", tokudb_hton_name);
+ sql_print_error(
+ "%s is not initialized because jemalloc is not loaded",
+ tokudb_hton_name);
goto error;
}
char *ver;
- size_t len= sizeof(ver);
+ size_t len = sizeof(ver);
mallctl_func("version", &ver, &len, NULL, 0);
/* jemalloc 2.2.5 crashes mysql-test */
if (strcmp(ver, "2.3.") < 0) {
- sql_print_error("%s is not initialized because jemalloc is older than 2.3.0", tokudb_hton_name);
+ sql_print_error(
+ "%s is not initialized because jemalloc is older than 2.3.0",
+ tokudb_hton_name);
goto error;
}
-
}
#endif
r = tokudb_set_product_name();
if (r) {
- sql_print_error("%s can not set product name error %d", tokudb_hton_name, r);
+ sql_print_error(
+ "%s can not set product name error %d",
+ tokudb_hton_name,
+ r);
goto error;
}
- tokudb_pthread_mutex_init(&tokudb_mutex, MY_MUTEX_INIT_FAST);
- (void) my_hash_init(&tokudb_open_tables, table_alias_charset, 32, 0, 0, (my_hash_get_key) tokudb_get_key, 0, 0);
+ TOKUDB_SHARE::static_init();
+ tokudb::background::initialize();
tokudb_hton->state = SHOW_OPTION_YES;
// tokudb_hton->flags= HTON_CAN_RECREATE; // QQQ this came from skeleton
@@ -383,8 +365,8 @@ static int tokudb_init_func(void *p) {
#endif
#if TOKU_INCLUDE_OPTION_STRUCTS
- tokudb_hton->table_options = tokudb_table_options;
- tokudb_hton->index_options = tokudb_index_options;
+ tokudb_hton->table_options = tokudb::sysvars::tokudb_table_options;
+ tokudb_hton->index_options = tokudb::sysvars::tokudb_index_options;
#endif
if (!tokudb_home)
@@ -400,8 +382,11 @@ static int tokudb_init_func(void *p) {
DBUG_PRINT("info", ("tokudb_env_flags: 0x%x\n", tokudb_env_flags));
r = db_env->set_flags(db_env, tokudb_env_flags, 1);
if (r) { // QQQ
- if (tokudb_debug & TOKUDB_DEBUG_INIT)
- TOKUDB_TRACE("WARNING: flags=%x r=%d", tokudb_env_flags, r);
+ TOKUDB_TRACE_FOR_FLAGS(
+ TOKUDB_DEBUG_INIT,
+ "WARNING: flags=%x r=%d",
+ tokudb_env_flags,
+ r);
// goto error;
}
@@ -409,6 +394,16 @@ static int tokudb_init_func(void *p) {
db_env->set_errcall(db_env, tokudb_print_error);
db_env->set_errpfx(db_env, tokudb_hton_name);
+ // Handle deprecated options
+ if (tokudb::sysvars::pk_insert_mode(NULL) != 1) {
+ TOKUDB_TRACE("Using tokudb_pk_insert_mode is deprecated and the "
+ "parameter may be removed in future releases. "
+ "tokudb_pk_insert_mode=0 is now forbidden. "
+ "See documentation and release notes for details");
+ if (tokudb::sysvars::pk_insert_mode(NULL) < 1)
+ tokudb::sysvars::set_pk_insert_mode(NULL, 1);
+ }
+
//
// set default comparison functions
//
@@ -419,8 +414,8 @@ static int tokudb_init_func(void *p) {
}
{
- char *tmp_dir = tokudb_tmp_dir;
- char *data_dir = tokudb_data_dir;
+ char* tmp_dir = tokudb::sysvars::tmp_dir;
+ char* data_dir = tokudb::sysvars::data_dir;
if (data_dir == 0) {
data_dir = mysql_data_home;
}
@@ -433,36 +428,48 @@ static int tokudb_init_func(void *p) {
db_env->set_tmp_dir(db_env, tmp_dir);
}
- if (tokudb_log_dir) {
- DBUG_PRINT("info", ("tokudb_log_dir: %s\n", tokudb_log_dir));
- db_env->set_lg_dir(db_env, tokudb_log_dir);
+ if (tokudb::sysvars::log_dir) {
+ DBUG_PRINT("info", ("tokudb_log_dir: %s\n", tokudb::sysvars::log_dir));
+ db_env->set_lg_dir(db_env, tokudb::sysvars::log_dir);
}
- // config the cache table size to min(1/2 of physical memory, 1/8 of the process address space)
- if (tokudb_cache_size == 0) {
+ // config the cache table size to min(1/2 of physical memory, 1/8 of the
+ // process address space)
+ if (tokudb::sysvars::cache_size == 0) {
uint64_t physmem, maxdata;
physmem = toku_os_get_phys_memory_size();
- tokudb_cache_size = physmem / 2;
+ tokudb::sysvars::cache_size = physmem / 2;
r = toku_os_get_max_process_data_size(&maxdata);
if (r == 0) {
- if (tokudb_cache_size > maxdata / 8)
- tokudb_cache_size = maxdata / 8;
+ if (tokudb::sysvars::cache_size > maxdata / 8)
+ tokudb::sysvars::cache_size = maxdata / 8;
}
}
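The default above is min(half of physical memory, an eighth of the process
data size): with 16 GiB of RAM and a 128 GiB data-size limit, the cache lands
at min(8 GiB, 16 GiB) = 8 GiB. The rule as a standalone sketch, numbers
illustrative:

    #include <algorithm>
    #include <cstdint>

    static uint64_t default_cache_size(uint64_t physmem, uint64_t maxdata) {
        return std::min(physmem / 2, maxdata / 8);
    }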
- if (tokudb_cache_size) {
- DBUG_PRINT("info", ("tokudb_cache_size: %lld\n", tokudb_cache_size));
- r = db_env->set_cachesize(db_env, (uint32_t)(tokudb_cache_size >> 30), (uint32_t)(tokudb_cache_size % (1024L * 1024L * 1024L)), 1);
+ if (tokudb::sysvars::cache_size) {
+ DBUG_PRINT(
+ "info",
+ ("tokudb_cache_size: %lld\n", tokudb::sysvars::cache_size));
+ r = db_env->set_cachesize(
+ db_env,
+ (uint32_t)(tokudb::sysvars::cache_size >> 30),
+ (uint32_t)(tokudb::sysvars::cache_size %
+ (1024L * 1024L * 1024L)), 1);
if (r) {
DBUG_PRINT("info", ("set_cachesize %d\n", r));
goto error;
}
}
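set_cachesize takes the size split into whole gigabytes plus a byte
remainder, which is what the shift and modulo above compute. A worked example
for a 5.5 GiB cache:

    #include <cassert>
    #include <cstdint>

    static void split_example() {
        uint64_t cache_size = 5905580032ULL;              // 5.5 GiB
        uint32_t gbytes = (uint32_t)(cache_size >> 30);   // whole GiB -> 5
        uint32_t bytes =
            (uint32_t)(cache_size % (1024L * 1024L * 1024L));
        assert(gbytes == 5 && bytes == 536870912);        // 0.5 GiB remainder
    }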
- if (tokudb_max_lock_memory == 0) {
- tokudb_max_lock_memory = tokudb_cache_size/8;
- }
- if (tokudb_max_lock_memory) {
- DBUG_PRINT("info", ("tokudb_max_lock_memory: %lld\n", tokudb_max_lock_memory));
- r = db_env->set_lk_max_memory(db_env, tokudb_max_lock_memory);
+ if (tokudb::sysvars::max_lock_memory == 0) {
+ tokudb::sysvars::max_lock_memory = tokudb::sysvars::cache_size/8;
+ }
+ if (tokudb::sysvars::max_lock_memory) {
+ DBUG_PRINT(
+ "info",
+ ("tokudb_max_lock_memory: %lld\n",
+ tokudb::sysvars::max_lock_memory));
+ r = db_env->set_lk_max_memory(
+ db_env,
+ tokudb::sysvars::max_lock_memory);
if (r) {
DBUG_PRINT("info", ("set_lk_max_memory %d\n", r));
goto error;
@@ -471,51 +478,73 @@ static int tokudb_init_func(void *p) {
uint32_t gbytes, bytes; int parts;
r = db_env->get_cachesize(db_env, &gbytes, &bytes, &parts);
- if (tokudb_debug & TOKUDB_DEBUG_INIT)
- TOKUDB_TRACE("tokudb_cache_size=%lld r=%d", ((unsigned long long) gbytes << 30) + bytes, r);
+ TOKUDB_TRACE_FOR_FLAGS(
+ TOKUDB_DEBUG_INIT,
+ "tokudb_cache_size=%lld r=%d",
+ ((unsigned long long) gbytes << 30) + bytes,
+ r);
- r = db_env->set_client_pool_threads(db_env, tokudb_client_pool_threads);
+ r = db_env->set_client_pool_threads(
+ db_env,
+ tokudb::sysvars::client_pool_threads);
if (r) {
DBUG_PRINT("info", ("set_client_pool_threads %d\n", r));
goto error;
}
- r = db_env->set_cachetable_pool_threads(db_env, tokudb_cachetable_pool_threads);
+ r = db_env->set_cachetable_pool_threads(
+ db_env,
+ tokudb::sysvars::cachetable_pool_threads);
if (r) {
DBUG_PRINT("info", ("set_cachetable_pool_threads %d\n", r));
goto error;
}
- r = db_env->set_checkpoint_pool_threads(db_env, tokudb_checkpoint_pool_threads);
+ r = db_env->set_checkpoint_pool_threads(
+ db_env,
+ tokudb::sysvars::checkpoint_pool_threads);
if (r) {
DBUG_PRINT("info", ("set_checkpoint_pool_threads %d\n", r));
goto error;
}
if (db_env->set_redzone) {
- r = db_env->set_redzone(db_env, tokudb_fs_reserve_percent);
- if (tokudb_debug & TOKUDB_DEBUG_INIT)
- TOKUDB_TRACE("set_redzone r=%d", r);
+ r = db_env->set_redzone(db_env, tokudb::sysvars::fs_reserve_percent);
+ TOKUDB_TRACE_FOR_FLAGS(TOKUDB_DEBUG_INIT, "set_redzone r=%d", r);
}
+ TOKUDB_TRACE_FOR_FLAGS(
+ TOKUDB_DEBUG_INIT,
+ "env open:flags=%x",
+ tokudb_init_flags);
- if (tokudb_debug & TOKUDB_DEBUG_INIT)
- TOKUDB_TRACE("env open:flags=%x", tokudb_init_flags);
+ r = db_env->set_generate_row_callback_for_put(db_env, generate_row_for_put);
+ assert_always(r == 0);
+
+ r = db_env->set_generate_row_callback_for_del(db_env, generate_row_for_del);
+ assert_always(r == 0);
- r = db_env->set_generate_row_callback_for_put(db_env,generate_row_for_put);
- assert(r == 0);
- r = db_env->set_generate_row_callback_for_del(db_env,generate_row_for_del);
- assert(r == 0);
db_env->set_update(db_env, tokudb_update_fun);
- db_env_set_direct_io(tokudb_directio == TRUE);
- db_env_set_compress_buffers_before_eviction(tokudb_compress_buffers_before_eviction == TRUE);
- db_env->change_fsync_log_period(db_env, tokudb_fsync_log_period);
+
+ db_env_set_direct_io(tokudb::sysvars::directio == TRUE);
+
+ db_env_set_compress_buffers_before_eviction(
+ tokudb::sysvars::compress_buffers_before_eviction == TRUE);
+
+ db_env->change_fsync_log_period(db_env, tokudb::sysvars::fsync_log_period);
+
db_env->set_lock_timeout_callback(db_env, tokudb_lock_timeout_callback);
- db_env->set_loader_memory_size(db_env, tokudb_get_loader_memory_size_callback);
- r = db_env->open(db_env, tokudb_home, tokudb_init_flags, S_IRUSR|S_IWUSR|S_IRGRP|S_IWGRP|S_IROTH|S_IWOTH);
+ db_env->set_loader_memory_size(
+ db_env,
+ tokudb_get_loader_memory_size_callback);
- if (tokudb_debug & TOKUDB_DEBUG_INIT)
- TOKUDB_TRACE("env opened:return=%d", r);
+ r = db_env->open(
+ db_env,
+ tokudb_home,
+ tokudb_init_flags,
+ S_IRUSR|S_IWUSR|S_IRGRP|S_IWGRP|S_IROTH|S_IWOTH);
+
+ TOKUDB_TRACE_FOR_FLAGS(TOKUDB_DEBUG_INIT, "env opened:return=%d", r);
if (r) {
DBUG_PRINT("info", ("env->open %d", r));
@@ -523,72 +552,106 @@ static int tokudb_init_func(void *p) {
goto error;
}
- r = db_env->checkpointing_set_period(db_env, tokudb_checkpointing_period);
- assert(r == 0);
- r = db_env->cleaner_set_period(db_env, tokudb_cleaner_period);
- assert(r == 0);
- r = db_env->cleaner_set_iterations(db_env, tokudb_cleaner_iterations);
- assert(r == 0);
+ r = db_env->checkpointing_set_period(
+ db_env,
+ tokudb::sysvars::checkpointing_period);
+ assert_always(r == 0);
- r = db_env->set_lock_timeout(db_env, DEFAULT_TOKUDB_LOCK_TIMEOUT, tokudb_get_lock_wait_time_callback);
- assert(r == 0);
+ r = db_env->cleaner_set_period(db_env, tokudb::sysvars::cleaner_period);
+ assert_always(r == 0);
- r = db_env->evictor_set_enable_partial_eviction(db_env,
- tokudb_enable_partial_eviction);
- assert(r == 0);
+ r = db_env->cleaner_set_iterations(
+ db_env,
+ tokudb::sysvars::cleaner_iterations);
+ assert_always(r == 0);
- db_env->set_killed_callback(db_env, DEFAULT_TOKUDB_KILLED_TIME, tokudb_get_killed_time_callback, tokudb_killed_callback);
+ r = db_env->set_lock_timeout(
+ db_env,
+ DEFAULT_TOKUDB_LOCK_TIMEOUT,
+ tokudb_get_lock_wait_time_callback);
+ assert_always(r == 0);
+
+ r = db_env->evictor_set_enable_partial_eviction(
+ db_env,
+ tokudb::sysvars::enable_partial_eviction);
+ assert_always(r == 0);
- r = db_env->get_engine_status_num_rows (db_env, &toku_global_status_max_rows);
- assert(r == 0);
+ db_env->set_killed_callback(
+ db_env,
+ DEFAULT_TOKUDB_KILLED_TIME,
+ tokudb_get_killed_time_callback,
+ tokudb_killed_callback);
+
+ r = db_env->get_engine_status_num_rows(
+ db_env,
+ &toku_global_status_max_rows);
+ assert_always(r == 0);
{
- const myf mem_flags = MY_FAE|MY_WME|MY_ZEROFILL|MY_ALLOW_ZERO_PTR|MY_FREE_ON_ERROR;
- toku_global_status_variables = (SHOW_VAR*)tokudb_my_malloc(sizeof(*toku_global_status_variables)*toku_global_status_max_rows, mem_flags);
- toku_global_status_rows = (TOKU_ENGINE_STATUS_ROW_S*)tokudb_my_malloc(sizeof(*toku_global_status_rows)*toku_global_status_max_rows, mem_flags);
+ const myf mem_flags =
+ MY_FAE|MY_WME|MY_ZEROFILL|MY_ALLOW_ZERO_PTR|MY_FREE_ON_ERROR;
+ toku_global_status_variables =
+ (SHOW_VAR*)tokudb::memory::malloc(
+ sizeof(*toku_global_status_variables) *
+ toku_global_status_max_rows,
+ mem_flags);
+ toku_global_status_rows =
+ (TOKU_ENGINE_STATUS_ROW_S*)tokudb::memory::malloc(
+ sizeof(*toku_global_status_rows)*
+ toku_global_status_max_rows,
+ mem_flags);
}
tokudb_primary_key_bytes_inserted = create_partitioned_counter();
#if TOKU_THDVAR_MEMALLOC_BUG
- tokudb_pthread_mutex_init(&tokudb_map_mutex, MY_MUTEX_INIT_FAST);
init_tree(&tokudb_map, 0, 0, 0, tokudb_map_pair_cmp, true, NULL, NULL);
#endif
+ if (tokudb::sysvars::strip_frm_data) {
+ r = tokudb::metadata::strip_frm_data(db_env);
+ if (r) {
+ DBUG_PRINT("info", ("env->open %d", r));
+ handle_ydb_error(r);
+ goto error;
+ }
+ }
+
//3938: succeeded, set the init status flag and unlock
tokudb_hton_initialized = 1;
- rw_unlock(&tokudb_hton_initialized_lock);
+ tokudb_hton_initialized_lock.unlock();
DBUG_RETURN(false);
error:
if (db_env) {
int rr= db_env->close(db_env, 0);
- assert(rr==0);
+ assert_always(rr==0);
db_env = 0;
}
// 3938: failed to initialized, drop the flag and lock
tokudb_hton_initialized = 0;
- rw_unlock(&tokudb_hton_initialized_lock);
+ tokudb_hton_initialized_lock.unlock();
DBUG_RETURN(true);
}
-static int tokudb_done_func(void *p) {
+static int tokudb_done_func(void* p) {
TOKUDB_DBUG_ENTER("");
- tokudb_my_free(toku_global_status_variables);
+ tokudb::memory::free(toku_global_status_variables);
toku_global_status_variables = NULL;
- tokudb_my_free(toku_global_status_rows);
+ tokudb::memory::free(toku_global_status_rows);
toku_global_status_rows = NULL;
- my_hash_free(&tokudb_open_tables);
- tokudb_pthread_mutex_destroy(&tokudb_mutex);
TOKUDB_DBUG_RETURN(0);
}
-static handler *tokudb_create_handler(handlerton * hton, TABLE_SHARE * table, MEM_ROOT * mem_root) {
+static handler* tokudb_create_handler(
+ handlerton* hton,
+ TABLE_SHARE* table,
+ MEM_ROOT* mem_root) {
return new(mem_root) ha_tokudb(hton, table);
}
-int tokudb_end(handlerton * hton, ha_panic_function type) {
+int tokudb_end(handlerton* hton, ha_panic_function type) {
TOKUDB_DBUG_ENTER("");
int error = 0;
@@ -596,41 +659,59 @@ int tokudb_end(handlerton * hton, ha_panic_function type) {
// initialized. grab a writer lock for the duration of the
// call, so we can drop the flag and destroy the mutexes
// in isolation.
- rw_wrlock(&tokudb_hton_initialized_lock);
- assert(tokudb_hton_initialized);
+ tokudb_hton_initialized_lock.lock_write();
+ assert_always(tokudb_hton_initialized);
+
+ tokudb::background::destroy();
+ TOKUDB_SHARE::static_destroy();
if (db_env) {
if (tokudb_init_flags & DB_INIT_LOG)
tokudb_cleanup_log_files();
- long total_prepared = 0; // count the total number of prepared txn's that we discard
+
+ // count the total number of prepared txn's that we discard
+ long total_prepared = 0;
#if TOKU_INCLUDE_XA
+ TOKUDB_TRACE_FOR_FLAGS(TOKUDB_DEBUG_XA, "begin XA cleanup");
while (1) {
// get xid's
const long n_xid = 1;
TOKU_XA_XID xids[n_xid];
long n_prepared = 0;
- error = db_env->txn_xa_recover(db_env, xids, n_xid, &n_prepared, total_prepared == 0 ? DB_FIRST : DB_NEXT);
- assert(error == 0);
+ error = db_env->txn_xa_recover(
+ db_env,
+ xids,
+ n_xid,
+ &n_prepared,
+ total_prepared == 0 ? DB_FIRST : DB_NEXT);
+ assert_always(error == 0);
if (n_prepared == 0)
break;
// discard xid's
for (long i = 0; i < n_xid; i++) {
DB_TXN *txn = NULL;
error = db_env->get_txn_from_xid(db_env, &xids[i], &txn);
- assert(error == 0);
+ assert_always(error == 0);
error = txn->discard(txn, 0);
- assert(error == 0);
+ assert_always(error == 0);
}
total_prepared += n_prepared;
}
+ TOKUDB_TRACE_FOR_FLAGS(TOKUDB_DEBUG_XA, "end XA cleanup");
#endif
- error = db_env->close(db_env, total_prepared > 0 ? TOKUFT_DIRTY_SHUTDOWN : 0);
+ error = db_env->close(
+ db_env,
+ total_prepared > 0 ? TOKUFT_DIRTY_SHUTDOWN : 0);
#if TOKU_INCLUDE_XA
if (error != 0 && total_prepared > 0) {
- sql_print_error("%s: %ld prepared txns still live, please shutdown, error %d", tokudb_hton_name, total_prepared, error);
+ sql_print_error(
+ "%s: %ld prepared txns still live, please shutdown, error %d",
+ tokudb_hton_name,
+ total_prepared,
+ error);
} else
#endif
- assert(error == 0);
+ assert_always(error == 0);
db_env = NULL;
}
@@ -640,33 +721,34 @@ int tokudb_end(handlerton * hton, ha_panic_function type) {
}
#if TOKU_THDVAR_MEMALLOC_BUG
- tokudb_pthread_mutex_destroy(&tokudb_map_mutex);
delete_tree(&tokudb_map);
#endif
// 3938: drop the initialized flag and unlock
tokudb_hton_initialized = 0;
- rw_unlock(&tokudb_hton_initialized_lock);
+ tokudb_hton_initialized_lock.unlock();
TOKUDB_DBUG_RETURN(error);
}
-static int tokudb_close_connection(handlerton * hton, THD * thd) {
+static int tokudb_close_connection(handlerton* hton, THD* thd) {
int error = 0;
- tokudb_trx_data* trx = (tokudb_trx_data *) thd_get_ha_data(thd, tokudb_hton);
+ tokudb_trx_data* trx = (tokudb_trx_data*)thd_get_ha_data(thd, tokudb_hton);
if (trx && trx->checkpoint_lock_taken) {
error = db_env->checkpointing_resume(db_env);
}
- tokudb_my_free(trx);
+ tokudb::memory::free(trx);
#if TOKU_THDVAR_MEMALLOC_BUG
- tokudb_pthread_mutex_lock(&tokudb_map_mutex);
+ tokudb_map_mutex.lock();
struct tokudb_map_pair key = { thd, NULL };
- struct tokudb_map_pair *found_key = (struct tokudb_map_pair *) tree_search(&tokudb_map, &key, NULL);
+ struct tokudb_map_pair* found_key =
+ (struct tokudb_map_pair*) tree_search(&tokudb_map, &key, NULL);
+
if (found_key) {
- tokudb_my_free(found_key->last_lock_timeout);
- tree_delete(&tokudb_map, found_key, sizeof *found_key, NULL);
+ tokudb::memory::free(found_key->last_lock_timeout);
+ tree_delete(&tokudb_map, found_key, sizeof(*found_key), NULL);
}
- tokudb_pthread_mutex_unlock(&tokudb_map_mutex);
+ tokudb_map_mutex.unlock();
#endif
return error;
}
@@ -676,7 +758,7 @@ bool tokudb_flush_logs(handlerton * hton) {
int error;
bool result = 0;
- if (tokudb_checkpoint_on_flush_logs) {
+ if (tokudb::sysvars::checkpoint_on_flush_logs) {
//
// take the checkpoint
//
@@ -689,7 +771,7 @@ bool tokudb_flush_logs(handlerton * hton) {
}
else {
error = db_env->log_flush(db_env, NULL);
- assert(error == 0);
+ assert_always(error == 0);
}
result = 0;
@@ -705,14 +787,14 @@ typedef struct txn_progress_info {
static void txn_progress_func(TOKU_TXN_PROGRESS progress, void* extra) {
TXN_PROGRESS_INFO progress_info = (TXN_PROGRESS_INFO)extra;
- int r = sprintf(progress_info->status,
- "%sprocessing %s of transaction, %" PRId64 " out of %" PRId64,
- progress->stalled_on_checkpoint ? "Writing committed changes to disk, " : "",
- progress->is_commit ? "commit" : "abort",
- progress->entries_processed,
- progress->entries_total
- );
- assert(r >= 0);
+ int r = sprintf(
+ progress_info->status,
+ "%sprocessing %s of transaction, %" PRId64 " out of %" PRId64,
+ progress->stalled_on_checkpoint ? "Writing committed changes to disk, " : "",
+ progress->is_commit ? "commit" : "abort",
+ progress->entries_processed,
+ progress->entries_total);
+ assert_always(r >= 0);
thd_proc_info(progress_info->thd, progress_info->status);
}
@@ -722,9 +804,13 @@ static void commit_txn_with_progress(DB_TXN* txn, uint32_t flags, THD* thd) {
info.thd = thd;
int r = txn->commit_with_progress(txn, flags, txn_progress_func, &info);
if (r != 0) {
- sql_print_error("%s: tried committing transaction %p and got error code %d", tokudb_hton_name, txn, r);
+ sql_print_error(
+ "%s: tried committing transaction %p and got error code %d",
+ tokudb_hton_name,
+ txn,
+ r);
}
- assert(r == 0);
+ assert_always(r == 0);
thd_proc_info(thd, orig_proc_info);
}
@@ -734,9 +820,13 @@ static void abort_txn_with_progress(DB_TXN* txn, THD* thd) {
info.thd = thd;
int r = txn->abort_with_progress(txn, txn_progress_func, &info);
if (r != 0) {
- sql_print_error("%s: tried aborting transaction %p and got error code %d", tokudb_hton_name, txn, r);
+ sql_print_error(
+ "%s: tried aborting transaction %p and got error code %d",
+ tokudb_hton_name,
+ txn,
+ r);
}
- assert(r == 0);
+ assert_always(r == 0);
thd_proc_info(thd, orig_proc_info);
}
@@ -750,11 +840,12 @@ static void tokudb_cleanup_handlers(tokudb_trx_data *trx, DB_TXN *txn) {
}
#if MYSQL_VERSION_ID >= 50600
-extern "C" enum durability_properties thd_get_durability_property(const MYSQL_THD thd);
+extern "C" enum durability_properties thd_get_durability_property(
+ const MYSQL_THD thd);
#endif
// Determine if an fsync is used when a transaction is committed.
-static bool tokudb_sync_on_commit(THD *thd, tokudb_trx_data *trx, DB_TXN *txn) {
+static bool tokudb_sync_on_commit(THD* thd, tokudb_trx_data* trx, DB_TXN* txn) {
#if MYSQL_VERSION_ID >= 50600
// Check the client durability property which is set during 2PC
if (thd_get_durability_property(thd) == HA_IGNORE_DURABILITY)
@@ -765,9 +856,9 @@ static bool tokudb_sync_on_commit(THD *thd, tokudb_trx_data *trx, DB_TXN *txn) {
if (txn->is_prepared(txn) && mysql_bin_log.is_open())
return false;
#endif
- if (tokudb_fsync_log_period > 0)
+ if (tokudb::sysvars::fsync_log_period > 0)
return false;
- return THDVAR(thd, commit_sync) != 0;
+ return tokudb::sysvars::commit_sync(thd) != 0;
}
static int tokudb_commit(handlerton * hton, THD * thd, bool all) {
@@ -777,10 +868,14 @@ static int tokudb_commit(handlerton * hton, THD * thd, bool all) {
DB_TXN **txn = all ? &trx->all : &trx->stmt;
DB_TXN *this_txn = *txn;
if (this_txn) {
- uint32_t syncflag = tokudb_sync_on_commit(thd, trx, this_txn) ? 0 : DB_TXN_NOSYNC;
- if (tokudb_debug & TOKUDB_DEBUG_TXN) {
- TOKUDB_TRACE("commit trx %u txn %p syncflag %u", all, this_txn, syncflag);
- }
+ uint32_t syncflag =
+ tokudb_sync_on_commit(thd, trx, this_txn) ? 0 : DB_TXN_NOSYNC;
+ TOKUDB_TRACE_FOR_FLAGS(
+ TOKUDB_DEBUG_TXN,
+ "commit trx %u txn %p syncflag %u",
+ all,
+ this_txn,
+ syncflag);
// test hook to induce a crash on a debug build
DBUG_EXECUTE_IF("tokudb_crash_commit_before", DBUG_SUICIDE(););
tokudb_cleanup_handlers(trx, this_txn);
@@ -792,9 +887,8 @@ static int tokudb_commit(handlerton * hton, THD * thd, bool all) {
if (this_txn == trx->sp_level || trx->all == NULL) {
trx->sp_level = NULL;
}
- }
- else if (tokudb_debug & TOKUDB_DEBUG_TXN) {
- TOKUDB_TRACE("nothing to commit %d", all);
+ } else {
+ TOKUDB_TRACE_FOR_FLAGS(TOKUDB_DEBUG_TXN, "nothing to commit %d", all);
}
reset_stmt_progress(&trx->stmt_progress);
TOKUDB_DBUG_RETURN(0);
@@ -807,9 +901,11 @@ static int tokudb_rollback(handlerton * hton, THD * thd, bool all) {
DB_TXN **txn = all ? &trx->all : &trx->stmt;
DB_TXN *this_txn = *txn;
if (this_txn) {
- if (tokudb_debug & TOKUDB_DEBUG_TXN) {
- TOKUDB_TRACE("rollback %u txn %p", all, this_txn);
- }
+ TOKUDB_TRACE_FOR_FLAGS(
+ TOKUDB_DEBUG_TXN,
+ "rollback %u txn %p",
+ all,
+ this_txn);
tokudb_cleanup_handlers(trx, this_txn);
abort_txn_with_progress(this_txn, thd);
*txn = NULL;
@@ -817,11 +913,8 @@ static int tokudb_rollback(handlerton * hton, THD * thd, bool all) {
if (this_txn == trx->sp_level || trx->all == NULL) {
trx->sp_level = NULL;
}
- }
- else {
- if (tokudb_debug & TOKUDB_DEBUG_TXN) {
- TOKUDB_TRACE("abort0");
- }
+ } else {
+ TOKUDB_TRACE_FOR_FLAGS(TOKUDB_DEBUG_TXN, "abort0");
}
reset_stmt_progress(&trx->stmt_progress);
TOKUDB_DBUG_RETURN(0);
@@ -829,19 +922,25 @@ static int tokudb_rollback(handlerton * hton, THD * thd, bool all) {
#if TOKU_INCLUDE_XA
static bool tokudb_sync_on_prepare(void) {
+ TOKUDB_TRACE_FOR_FLAGS(TOKUDB_DEBUG_XA, "enter");
// skip sync of log if fsync log period > 0
- if (tokudb_fsync_log_period > 0)
+ if (tokudb::sysvars::fsync_log_period > 0) {
+ TOKUDB_TRACE_FOR_FLAGS(TOKUDB_DEBUG_XA, "exit");
return false;
- else
+ } else {
+ TOKUDB_TRACE_FOR_FLAGS(TOKUDB_DEBUG_XA, "exit");
return true;
+ }
}
static int tokudb_xa_prepare(handlerton* hton, THD* thd, bool all) {
TOKUDB_DBUG_ENTER("");
+ TOKUDB_TRACE_FOR_FLAGS(TOKUDB_DEBUG_XA, "enter");
int r = 0;
// if tokudb_support_xa is disabled, just return
- if (!THDVAR(thd, support_xa)) {
+ if (!tokudb::sysvars::support_xa(thd)) {
+ TOKUDB_TRACE_FOR_FLAGS(TOKUDB_DEBUG_XA, "exit %d", r);
TOKUDB_DBUG_RETURN(r);
}
@@ -850,9 +949,11 @@ static int tokudb_xa_prepare(handlerton* hton, THD* thd, bool all) {
DB_TXN* txn = all ? trx->all : trx->stmt;
if (txn) {
uint32_t syncflag = tokudb_sync_on_prepare() ? 0 : DB_TXN_NOSYNC;
- if (tokudb_debug & TOKUDB_DEBUG_TXN) {
- TOKUDB_TRACE("doing txn prepare:%d:%p", all, txn);
- }
+ TOKUDB_TRACE_FOR_FLAGS(
+ TOKUDB_DEBUG_XA,
+ "doing txn prepare:%d:%p",
+ all,
+ txn);
// a TOKU_XA_XID is identical to a MYSQL_XID
TOKU_XA_XID thd_xid;
thd_get_xid(thd, (MYSQL_XID*) &thd_xid);
@@ -861,17 +962,19 @@ static int tokudb_xa_prepare(handlerton* hton, THD* thd, bool all) {
r = txn->xa_prepare(txn, &thd_xid, syncflag);
// test hook to induce a crash on a debug build
DBUG_EXECUTE_IF("tokudb_crash_prepare_after", DBUG_SUICIDE(););
- }
- else if (tokudb_debug & TOKUDB_DEBUG_TXN) {
- TOKUDB_TRACE("nothing to prepare %d", all);
+ } else {
+ TOKUDB_TRACE_FOR_FLAGS(TOKUDB_DEBUG_XA, "nothing to prepare %d", all);
}
+ TOKUDB_TRACE_FOR_FLAGS(TOKUDB_DEBUG_XA, "exit %d", r);
TOKUDB_DBUG_RETURN(r);
}
static int tokudb_xa_recover(handlerton* hton, XID* xid_list, uint len) {
TOKUDB_DBUG_ENTER("");
+ TOKUDB_TRACE_FOR_FLAGS(TOKUDB_DEBUG_XA, "enter");
int r = 0;
if (len == 0 || xid_list == NULL) {
+ TOKUDB_TRACE_FOR_FLAGS(TOKUDB_DEBUG_XA, "exit %d", 0);
TOKUDB_DBUG_RETURN(0);
}
long num_returned = 0;
@@ -880,14 +983,15 @@ static int tokudb_xa_recover(handlerton* hton, XID* xid_list, uint len) {
(TOKU_XA_XID*)xid_list,
len,
&num_returned,
- DB_NEXT
- );
- assert(r == 0);
+ DB_NEXT);
+ assert_always(r == 0);
+ TOKUDB_TRACE_FOR_FLAGS(TOKUDB_DEBUG_XA, "exit %ld", num_returned);
TOKUDB_DBUG_RETURN((int)num_returned);
}
static int tokudb_commit_by_xid(handlerton* hton, XID* xid) {
TOKUDB_DBUG_ENTER("");
+ TOKUDB_TRACE_FOR_FLAGS(TOKUDB_DEBUG_XA, "enter");
int r = 0;
DB_TXN* txn = NULL;
TOKU_XA_XID* toku_xid = (TOKU_XA_XID*)xid;
@@ -900,11 +1004,13 @@ static int tokudb_commit_by_xid(handlerton* hton, XID* xid) {
r = 0;
cleanup:
+ TOKUDB_TRACE_FOR_FLAGS(TOKUDB_DEBUG_XA, "exit %d", r);
TOKUDB_DBUG_RETURN(r);
}
static int tokudb_rollback_by_xid(handlerton* hton, XID* xid) {
TOKUDB_DBUG_ENTER("");
+ TOKUDB_TRACE_FOR_FLAGS(TOKUDB_DEBUG_XA, "enter");
int r = 0;
DB_TXN* txn = NULL;
TOKU_XA_XID* toku_xid = (TOKU_XA_XID*)xid;
@@ -917,6 +1023,7 @@ static int tokudb_rollback_by_xid(handlerton* hton, XID* xid) {
r = 0;
cleanup:
+ TOKUDB_TRACE_FOR_FLAGS(TOKUDB_DEBUG_XA, "exit %d", r);
TOKUDB_DBUG_RETURN(r);
}
@@ -928,43 +1035,55 @@ static int tokudb_savepoint(handlerton * hton, THD * thd, void *savepoint) {
SP_INFO save_info = (SP_INFO)savepoint;
tokudb_trx_data *trx = (tokudb_trx_data *) thd_get_ha_data(thd, hton);
if (thd->in_sub_stmt) {
- assert(trx->stmt);
- error = txn_begin(db_env, trx->sub_sp_level, &(save_info->txn), DB_INHERIT_ISOLATION, thd);
+ assert_always(trx->stmt);
+ error = txn_begin(
+ db_env,
+ trx->sub_sp_level,
+ &(save_info->txn),
+ DB_INHERIT_ISOLATION,
+ thd);
if (error) {
goto cleanup;
}
trx->sub_sp_level = save_info->txn;
save_info->in_sub_stmt = true;
- }
- else {
- error = txn_begin(db_env, trx->sp_level, &(save_info->txn), DB_INHERIT_ISOLATION, thd);
+ } else {
+ error = txn_begin(
+ db_env,
+ trx->sp_level,
+ &(save_info->txn),
+ DB_INHERIT_ISOLATION,
+ thd);
if (error) {
goto cleanup;
}
trx->sp_level = save_info->txn;
save_info->in_sub_stmt = false;
}
- if (tokudb_debug & TOKUDB_DEBUG_TXN) {
- TOKUDB_TRACE("begin txn %p", save_info->txn);
- }
+ TOKUDB_TRACE_FOR_FLAGS(TOKUDB_DEBUG_TXN, "begin txn %p", save_info->txn);
save_info->trx = trx;
error = 0;
cleanup:
TOKUDB_DBUG_RETURN(error);
}
-static int tokudb_rollback_to_savepoint(handlerton * hton, THD * thd, void *savepoint) {
+static int tokudb_rollback_to_savepoint(
+ handlerton* hton,
+ THD* thd,
+ void* savepoint) {
+
TOKUDB_DBUG_ENTER("%p", savepoint);
int error;
SP_INFO save_info = (SP_INFO)savepoint;
DB_TXN* parent = NULL;
DB_TXN* txn_to_rollback = save_info->txn;
- tokudb_trx_data *trx = (tokudb_trx_data *) thd_get_ha_data(thd, hton);
+ tokudb_trx_data* trx = (tokudb_trx_data*)thd_get_ha_data(thd, hton);
parent = txn_to_rollback->parent;
- if (tokudb_debug & TOKUDB_DEBUG_TXN) {
- TOKUDB_TRACE("rollback txn %p", txn_to_rollback);
- }
+ TOKUDB_TRACE_FOR_FLAGS(
+ TOKUDB_DEBUG_TXN,
+ "rollback txn %p",
+ txn_to_rollback);
if (!(error = txn_to_rollback->abort(txn_to_rollback))) {
if (save_info->in_sub_stmt) {
trx->sub_sp_level = parent;
@@ -977,7 +1096,11 @@ static int tokudb_rollback_to_savepoint(handlerton * hton, THD * thd, void *save
TOKUDB_DBUG_RETURN(error);
}
-static int tokudb_release_savepoint(handlerton * hton, THD * thd, void *savepoint) {
+static int tokudb_release_savepoint(
+ handlerton* hton,
+ THD* thd,
+ void* savepoint) {
+
TOKUDB_DBUG_ENTER("%p", savepoint);
int error = 0;
SP_INFO save_info = (SP_INFO)savepoint;
@@ -986,9 +1109,7 @@ static int tokudb_release_savepoint(handlerton * hton, THD * thd, void *savepoin
tokudb_trx_data *trx = (tokudb_trx_data *) thd_get_ha_data(thd, hton);
parent = txn_to_commit->parent;
- if (tokudb_debug & TOKUDB_DEBUG_TXN) {
- TOKUDB_TRACE("commit txn %p", txn_to_commit);
- }
+ TOKUDB_TRACE_FOR_FLAGS(TOKUDB_DEBUG_TXN, "commit txn %p", txn_to_commit);
DB_TXN *child = txn_to_commit->get_child(txn_to_commit);
if (child == NULL && !(error = txn_to_commit->commit(txn_to_commit, 0))) {
if (save_info->in_sub_stmt) {
@@ -1006,8 +1127,14 @@ static int tokudb_release_savepoint(handlerton * hton, THD * thd, void *savepoin
static int tokudb_discover_table(handlerton *hton, THD* thd, TABLE_SHARE *ts) {
uchar *frmblob = 0;
size_t frmlen;
- int res= tokudb_discover3(hton, thd, ts->db.str, ts->table_name.str,
- ts->normalized_path.str, &frmblob, &frmlen);
+ int res= tokudb_discover3(
+ hton,
+ thd,
+ ts->db.str,
+ ts->table_name.str,
+ ts->normalized_path.str,
+ &frmblob,
+ &frmlen);
if (!res)
res= ts->init_from_binary_frm_image(thd, true, frmblob, frmlen);
@@ -1016,7 +1143,11 @@ static int tokudb_discover_table(handlerton *hton, THD* thd, TABLE_SHARE *ts) {
return res == ENOENT ? HA_ERR_NO_SUCH_TABLE : res;
}
-static int tokudb_discover_table_existence(handlerton *hton, const char *db, const char *name) {
+static int tokudb_discover_table_existence(
+ handlerton* hton,
+ const char* db,
+ const char* name) {
+
uchar *frmblob = 0;
size_t frmlen;
int res= tokudb_discover(hton, current_thd, db, name, &frmblob, &frmlen);
@@ -1025,19 +1156,46 @@ static int tokudb_discover_table_existence(handlerton *hton, const char *db, con
}
#endif
-static int tokudb_discover(handlerton *hton, THD* thd, const char *db, const char *name, uchar **frmblob, size_t *frmlen) {
+static int tokudb_discover(
+ handlerton* hton,
+ THD* thd,
+ const char* db,
+ const char* name,
+ uchar** frmblob,
+ size_t* frmlen) {
+
return tokudb_discover2(hton, thd, db, name, true, frmblob, frmlen);
}
-static int tokudb_discover2(handlerton *hton, THD* thd, const char *db, const char *name, bool translate_name,
- uchar **frmblob, size_t *frmlen) {
+static int tokudb_discover2(
+ handlerton* hton,
+ THD* thd,
+ const char* db,
+ const char* name,
+ bool translate_name,
+ uchar** frmblob,
+ size_t* frmlen) {
+
char path[FN_REFLEN + 1];
- build_table_filename(path, sizeof(path) - 1, db, name, "", translate_name ? 0 : FN_IS_TMP);
+ build_table_filename(
+ path,
+ sizeof(path) - 1,
+ db,
+ name,
+ "",
+ translate_name ? 0 : FN_IS_TMP);
return tokudb_discover3(hton, thd, db, name, path, frmblob, frmlen);
}
-static int tokudb_discover3(handlerton *hton, THD* thd, const char *db, const char *name, char *path,
- uchar **frmblob, size_t *frmlen) {
+static int tokudb_discover3(
+ handlerton* hton,
+ THD* thd,
+ const char* db,
+ const char* name,
+ char* path,
+ uchar** frmblob,
+ size_t* frmlen) {
+
TOKUDB_DBUG_ENTER("%s %s %s", db, name, path);
int error;
DB* status_db = NULL;
@@ -1048,8 +1206,10 @@ static int tokudb_discover3(handlerton *hton, THD* thd, const char *db, const ch
bool do_commit = false;
#if 100000 <= MYSQL_VERSION_ID && MYSQL_VERSION_ID <= 100199
- tokudb_trx_data *trx = (tokudb_trx_data *) thd_get_ha_data(thd, tokudb_hton);
- if (thd_sql_command(thd) == SQLCOM_CREATE_TABLE && trx && trx->sub_sp_level) {
+ tokudb_trx_data* trx = (tokudb_trx_data*)thd_get_ha_data(thd, tokudb_hton);
+ if (thd_sql_command(thd) == SQLCOM_CREATE_TABLE &&
+ trx &&
+ trx->sub_sp_level) {
do_commit = false;
txn = trx->sub_sp_level;
} else {
@@ -1075,8 +1235,7 @@ static int tokudb_discover3(handlerton *hton, THD* thd, const char *db, const ch
0,
&key,
smart_dbt_callback_verify_frm,
- &value
- );
+ &value);
if (error) {
goto cleanup;
}
@@ -1092,19 +1251,23 @@ cleanup:
if (do_commit && txn) {
commit_txn(txn, 0);
}
- TOKUDB_DBUG_RETURN(error);
+ TOKUDB_DBUG_RETURN(error);
}
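
// A sketch of how the three discover entry points layer (the database and
// table names here are hypothetical): tokudb_discover() forwards to
// tokudb_discover2(), which resolves the on-disk path with
// build_table_filename() and calls tokudb_discover3(), which reads the frm
// blob back out of the status dictionary.
//
//   uchar* frmblob = NULL;
//   size_t frmlen = 0;
//   int r = tokudb_discover(hton, thd, "test", "t1", &frmblob, &frmlen);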
-#define STATPRINT(legend, val) if (legend != NULL && val != NULL) stat_print(thd, \
- tokudb_hton_name, \
- strlen(tokudb_hton_name), \
- legend, \
- strlen(legend), \
- val, \
- strlen(val))
+#define STATPRINT(legend, val) if (legend != NULL && val != NULL) \
+ stat_print(thd, \
+ tokudb_hton_name, \
+ strlen(tokudb_hton_name), \
+ legend, \
+ strlen(legend), \
+ val, \
+ strlen(val))
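
// For reference, a call such as STATPRINT("disk free space", buf) expands,
// per the macro above, to roughly:
//
//   if ("disk free space" != NULL && buf != NULL)
//       stat_print(thd, tokudb_hton_name, strlen(tokudb_hton_name),
//                  "disk free space", strlen("disk free space"),
//                  buf, strlen(buf));
//
// so a NULL legend or value is silently skipped.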
-extern sys_var *intern_find_sys_var(const char *str, uint length, bool no_error);
+extern sys_var* intern_find_sys_var(
+ const char* str,
+ uint length,
+ bool no_error);
static bool tokudb_show_engine_status(THD * thd, stat_print_fn * stat_print) {
TOKUDB_DBUG_ENTER("");
@@ -1120,14 +1283,29 @@ static bool tokudb_show_engine_status(THD * thd, stat_print_fn * stat_print) {
#if MYSQL_VERSION_ID < 50500
{
- sys_var * version = intern_find_sys_var("version", 0, false);
- snprintf(buf, bufsiz, "%s", version->value_ptr(thd, (enum_var_type)0, (LEX_STRING*)NULL));
+ sys_var* version = intern_find_sys_var("version", 0, false);
+ snprintf(
+ buf,
+ bufsiz,
+ "%s",
+ version->value_ptr(thd,
+ (enum_var_type)0,
+ (LEX_STRING*)NULL));
STATPRINT("Version", buf);
}
#endif
error = db_env->get_engine_status_num_rows (db_env, &max_rows);
TOKU_ENGINE_STATUS_ROW_S mystat[max_rows];
- error = db_env->get_engine_status (db_env, mystat, max_rows, &num_rows, &redzone_state, &panic, panic_string, panic_string_len, TOKU_ENGINE_STATUS);
+ error = db_env->get_engine_status(
+ db_env,
+ mystat,
+ max_rows,
+ &num_rows,
+ &redzone_state,
+ &panic,
+ panic_string,
+ panic_string_len,
+ TOKU_ENGINE_STATUS);
if (strlen(panic_string)) {
STATPRINT("Environment panic string", panic_string);
@@ -1139,20 +1317,35 @@ static bool tokudb_show_engine_status(THD * thd, stat_print_fn * stat_print) {
}
if(redzone_state == FS_BLOCKED) {
- STATPRINT("*** URGENT WARNING ***", "FILE SYSTEM IS COMPLETELY FULL");
+ STATPRINT(
+ "*** URGENT WARNING ***", "FILE SYSTEM IS COMPLETELY FULL");
snprintf(buf, bufsiz, "FILE SYSTEM IS COMPLETELY FULL");
- }
- else if (redzone_state == FS_GREEN) {
- snprintf(buf, bufsiz, "more than %d percent of total file system space", 2*tokudb_fs_reserve_percent);
- }
- else if (redzone_state == FS_YELLOW) {
- snprintf(buf, bufsiz, "*** WARNING *** FILE SYSTEM IS GETTING FULL (less than %d percent free)", 2*tokudb_fs_reserve_percent);
- }
- else if (redzone_state == FS_RED){
- snprintf(buf, bufsiz, "*** WARNING *** FILE SYSTEM IS GETTING VERY FULL (less than %d percent free): INSERTS ARE PROHIBITED", tokudb_fs_reserve_percent);
- }
- else {
- snprintf(buf, bufsiz, "information unavailable, unknown redzone state %d", redzone_state);
+ } else if (redzone_state == FS_GREEN) {
+ snprintf(
+ buf,
+ bufsiz,
+ "more than %d percent of total file system space",
+ 2 * tokudb::sysvars::fs_reserve_percent);
+ } else if (redzone_state == FS_YELLOW) {
+ snprintf(
+ buf,
+ bufsiz,
+ "*** WARNING *** FILE SYSTEM IS GETTING FULL (less than %d "
+ "percent free)",
+ 2 * tokudb::sysvars::fs_reserve_percent);
+ } else if (redzone_state == FS_RED) {
+ snprintf(
+ buf,
+ bufsiz,
+ "*** WARNING *** FILE SYSTEM IS GETTING VERY FULL (less than "
+ "%d percent free): INSERTS ARE PROHIBITED",
+ tokudb::sysvars::fs_reserve_percent);
+ } else {
+ snprintf(
+ buf,
+ bufsiz,
+ "information unavailable, unknown redzone state %d",
+ redzone_state);
}
STATPRINT ("disk free space", buf);
@@ -1179,7 +1372,8 @@ static bool tokudb_show_engine_status(THD * thd, stat_print_fn * stat_print) {
break;
}
case PARCOUNT: {
- uint64_t v = read_partitioned_counter(mystat[row].value.parcount);
+ uint64_t v = read_partitioned_counter(
+ mystat[row].value.parcount);
snprintf(buf, bufsiz, "%" PRIu64, v);
break;
}
@@ -1187,12 +1381,17 @@ static bool tokudb_show_engine_status(THD * thd, stat_print_fn * stat_print) {
snprintf(buf, bufsiz, "%.6f", mystat[row].value.dnum);
break;
default:
- snprintf(buf, bufsiz, "UNKNOWN STATUS TYPE: %d", mystat[row].type);
- break;
+ snprintf(
+ buf,
+ bufsiz,
+ "UNKNOWN STATUS TYPE: %d",
+ mystat[row].type);
+ break;
}
STATPRINT(mystat[row].legend, buf);
}
- uint64_t bytes_inserted = read_partitioned_counter(tokudb_primary_key_bytes_inserted);
+ uint64_t bytes_inserted = read_partitioned_counter(
+ tokudb_primary_key_bytes_inserted);
snprintf(buf, bufsiz, "%" PRIu64, bytes_inserted);
STATPRINT("handlerton: primary key bytes inserted", buf);
}
@@ -1200,16 +1399,16 @@ static bool tokudb_show_engine_status(THD * thd, stat_print_fn * stat_print) {
TOKUDB_DBUG_RETURN(error);
}
-static void tokudb_checkpoint_lock(THD * thd) {
+void tokudb_checkpoint_lock(THD * thd) {
int error;
const char *old_proc_info;
- tokudb_trx_data* trx = (tokudb_trx_data *) thd_get_ha_data(thd, tokudb_hton);
+ tokudb_trx_data* trx = (tokudb_trx_data*)thd_get_ha_data(thd, tokudb_hton);
if (!trx) {
error = create_tokudb_trx_data_instance(&trx);
//
// can only fail due to memory allocation, so ok to assert
//
- assert(!error);
+ assert_always(!error);
thd_set_ha_data(thd, tokudb_hton, trx);
}
@@ -1223,7 +1422,7 @@ static void tokudb_checkpoint_lock(THD * thd) {
old_proc_info = tokudb_thd_get_proc_info(thd);
thd_proc_info(thd, "Trying to grab checkpointing lock.");
error = db_env->checkpointing_postpone(db_env);
- assert(!error);
+ assert_always(!error);
thd_proc_info(thd, old_proc_info);
trx->checkpoint_lock_taken = true;
@@ -1231,10 +1430,10 @@ cleanup:
return;
}
-static void tokudb_checkpoint_unlock(THD * thd) {
+void tokudb_checkpoint_unlock(THD * thd) {
int error;
const char *old_proc_info;
- tokudb_trx_data* trx = (tokudb_trx_data *) thd_get_ha_data(thd, tokudb_hton);
+ tokudb_trx_data* trx = (tokudb_trx_data*)thd_get_ha_data(thd, tokudb_hton);
if (!trx) {
error = 0;
goto cleanup;
@@ -1249,7 +1448,7 @@ static void tokudb_checkpoint_unlock(THD * thd) {
old_proc_info = tokudb_thd_get_proc_info(thd);
thd_proc_info(thd, "Trying to release checkpointing lock.");
error = db_env->checkpointing_resume(db_env);
- assert(!error);
+ assert_always(!error);
thd_proc_info(thd, old_proc_info);
trx->checkpoint_lock_taken = false;
@@ -1258,7 +1457,12 @@ cleanup:
return;
}
-static bool tokudb_show_status(handlerton * hton, THD * thd, stat_print_fn * stat_print, enum ha_stat_type stat_type) {
+static bool tokudb_show_status(
+ handlerton* hton,
+ THD* thd,
+ stat_print_fn* stat_print,
+ enum ha_stat_type stat_type) {
+
switch (stat_type) {
case HA_ENGINE_STATUS:
return tokudb_show_engine_status(thd, stat_print);
@@ -1270,14 +1474,21 @@ static bool tokudb_show_status(handlerton * hton, THD * thd, stat_print_fn * sta
}
#if TOKU_INCLUDE_HANDLERTON_HANDLE_FATAL_SIGNAL
-static void tokudb_handle_fatal_signal(handlerton *hton __attribute__ ((__unused__)), THD *thd __attribute__ ((__unused__)), int sig) {
+static void tokudb_handle_fatal_signal(
+ TOKUDB_UNUSED(handlerton* hton),
+ TOKUDB_UNUSED(THD* thd),
+ int sig) {
+
if (tokudb_gdb_on_fatal) {
db_env_try_gdb_stack_trace(tokudb_gdb_path);
}
}
#endif
-static void tokudb_print_error(const DB_ENV * db_env, const char *db_errpfx, const char *buffer) {
+static void tokudb_print_error(
+ const DB_ENV* db_env,
+ const char* db_errpfx,
+ const char* buffer) {
sql_print_error("%s: %s", db_errpfx, buffer);
}
@@ -1299,7 +1510,7 @@ static void tokudb_cleanup_log_files(void) {
char **np;
for (np = names; *np; ++np) {
#if 1
- if (tokudb_debug)
+ if (TOKUDB_UNLIKELY(tokudb::sysvars::debug))
TOKUDB_TRACE("cleanup:%s", *np);
#else
my_delete(*np, MYF(MY_WME));
@@ -1312,246 +1523,13 @@ static void tokudb_cleanup_log_files(void) {
DBUG_VOID_RETURN;
}
-// options flags
-// PLUGIN_VAR_THDLOCAL Variable is per-connection
-// PLUGIN_VAR_READONLY Server variable is read only
-// PLUGIN_VAR_NOSYSVAR Not a server variable
-// PLUGIN_VAR_NOCMDOPT Not a command line option
-// PLUGIN_VAR_NOCMDARG No argument for cmd line
-// PLUGIN_VAR_RQCMDARG Argument required for cmd line
-// PLUGIN_VAR_OPCMDARG Argument optional for cmd line
-// PLUGIN_VAR_MEMALLOC String needs memory allocated
-
-
-// system variables
-static void tokudb_cleaner_period_update(THD* thd,
- struct st_mysql_sys_var* sys_var,
- void* var, const void * save) {
- ulong * cleaner_period = (ulong *) var;
- *cleaner_period = *(const ulonglong *) save;
- int r = db_env->cleaner_set_period(db_env, *cleaner_period);
- assert(r == 0);
-}
-
-#define DEFAULT_CLEANER_PERIOD 1
-
-static MYSQL_SYSVAR_ULONG(cleaner_period, tokudb_cleaner_period,
- 0, "TokuDB cleaner_period",
- NULL, tokudb_cleaner_period_update, DEFAULT_CLEANER_PERIOD,
- 0, ~0UL, 0);
-
-static void tokudb_cleaner_iterations_update(THD* thd,
- struct st_mysql_sys_var* sys_var,
- void* var, const void* save) {
- ulong * cleaner_iterations = (ulong *) var;
- *cleaner_iterations = *(const ulonglong *) save;
- int r = db_env->cleaner_set_iterations(db_env, *cleaner_iterations);
- assert(r == 0);
-}
-
-#define DEFAULT_CLEANER_ITERATIONS 5
-
-static MYSQL_SYSVAR_ULONG(cleaner_iterations, tokudb_cleaner_iterations,
- 0, "TokuDB cleaner_iterations",
- NULL, tokudb_cleaner_iterations_update, DEFAULT_CLEANER_ITERATIONS,
- 0, ~0UL, 0);
-
-static void tokudb_checkpointing_period_update(THD* thd,
- struct st_mysql_sys_var* sys_var,
- void* var, const void* save) {
- uint * checkpointing_period = (uint *) var;
- *checkpointing_period = *(const ulonglong *) save;
- int r = db_env->checkpointing_set_period(db_env, *checkpointing_period);
- assert(r == 0);
-}
-
-static MYSQL_SYSVAR_UINT(checkpointing_period, tokudb_checkpointing_period,
- 0, "TokuDB Checkpointing period",
- NULL, tokudb_checkpointing_period_update, 60,
- 0, ~0U, 0);
-
-static MYSQL_SYSVAR_BOOL(directio, tokudb_directio,
- PLUGIN_VAR_READONLY, "TokuDB Enable Direct I/O ",
- NULL, NULL, FALSE);
-
-static MYSQL_SYSVAR_BOOL(compress_buffers_before_eviction,
- tokudb_compress_buffers_before_eviction,
- PLUGIN_VAR_READONLY,
- "TokuDB Enable buffer compression before partial eviction",
- NULL, NULL, TRUE);
-
-static MYSQL_SYSVAR_BOOL(checkpoint_on_flush_logs, tokudb_checkpoint_on_flush_logs,
- 0, "TokuDB Checkpoint on Flush Logs ",
- NULL, NULL, FALSE);
-
-static MYSQL_SYSVAR_ULONGLONG(cache_size, tokudb_cache_size,
- PLUGIN_VAR_READONLY, "TokuDB cache table size",
- NULL, NULL, 0,
- 0, ~0ULL, 0);
-
-static MYSQL_SYSVAR_ULONGLONG(max_lock_memory, tokudb_max_lock_memory,
- PLUGIN_VAR_READONLY, "TokuDB max memory for locks",
- NULL, NULL, 0,
- 0, ~0ULL, 0);
-
-static MYSQL_SYSVAR_UINT(client_pool_threads, tokudb_client_pool_threads,
- PLUGIN_VAR_READONLY, "TokuDB client ops thread pool size", NULL, NULL, 0,
- 0, 1024, 0);
-
-static MYSQL_SYSVAR_UINT(cachetable_pool_threads, tokudb_cachetable_pool_threads,
- PLUGIN_VAR_READONLY, "TokuDB cachetable ops thread pool size", NULL, NULL, 0,
- 0, 1024, 0);
-
-static MYSQL_SYSVAR_UINT(checkpoint_pool_threads, tokudb_checkpoint_pool_threads,
- PLUGIN_VAR_READONLY, "TokuDB checkpoint ops thread pool size", NULL, NULL, 0,
- 0, 1024, 0);
-
-static void tokudb_enable_partial_eviction_update(THD* thd,
- struct st_mysql_sys_var* sys_var,
- void* var, const void* save) {
- my_bool * enable_partial_eviction = (my_bool *) var;
- *enable_partial_eviction = *(const my_bool *) save;
- int r = db_env->evictor_set_enable_partial_eviction(db_env, *enable_partial_eviction);
- assert(r == 0);
-}
-
-static MYSQL_SYSVAR_BOOL(enable_partial_eviction, tokudb_enable_partial_eviction,
- 0, "TokuDB enable partial node eviction",
- NULL, tokudb_enable_partial_eviction_update, TRUE);
-
-static MYSQL_SYSVAR_ULONG(debug, tokudb_debug,
- 0, "TokuDB Debug",
- NULL, NULL, 0,
- 0, ~0UL, 0);
-
-static MYSQL_SYSVAR_STR(log_dir, tokudb_log_dir,
- PLUGIN_VAR_READONLY, "TokuDB Log Directory",
- NULL, NULL, NULL);
-
-static MYSQL_SYSVAR_STR(data_dir, tokudb_data_dir,
- PLUGIN_VAR_READONLY, "TokuDB Data Directory",
- NULL, NULL, NULL);
-
-static MYSQL_SYSVAR_STR(version, tokudb_version,
- PLUGIN_VAR_READONLY, "TokuDB Version",
- NULL, NULL, NULL);
-
-static MYSQL_SYSVAR_UINT(write_status_frequency, tokudb_write_status_frequency,
- 0, "TokuDB frequency that show processlist updates status of writes",
- NULL, NULL, 1000,
- 0, ~0U, 0);
-
-static MYSQL_SYSVAR_UINT(read_status_frequency, tokudb_read_status_frequency,
- 0, "TokuDB frequency that show processlist updates status of reads",
- NULL, NULL, 10000,
- 0, ~0U, 0);
-
-static MYSQL_SYSVAR_INT(fs_reserve_percent, tokudb_fs_reserve_percent,
- PLUGIN_VAR_READONLY, "TokuDB file system space reserve (percent free required)",
- NULL, NULL, 5,
- 0, 100, 0);
-
-static MYSQL_SYSVAR_STR(tmp_dir, tokudb_tmp_dir,
- PLUGIN_VAR_READONLY, "Tokudb Tmp Dir",
- NULL, NULL, NULL);
-
-#if TOKU_INCLUDE_HANDLERTON_HANDLE_FATAL_SIGNAL
-static MYSQL_SYSVAR_STR(gdb_path, tokudb_gdb_path,
- PLUGIN_VAR_READONLY|PLUGIN_VAR_RQCMDARG, "TokuDB path to gdb for extra debug info on fatal signal",
- NULL, NULL, "/usr/bin/gdb");
-
-static MYSQL_SYSVAR_BOOL(gdb_on_fatal, tokudb_gdb_on_fatal,
- 0, "TokuDB enable gdb debug info on fatal signal",
- NULL, NULL, true);
-#endif
-
-static void tokudb_fsync_log_period_update(THD* thd,
- struct st_mysql_sys_var* sys_var,
- void* var, const void* save) {
- uint32 *period = (uint32 *) var;
- *period = *(const ulonglong *) save;
- db_env->change_fsync_log_period(db_env, *period);
-}
-
-static MYSQL_SYSVAR_UINT(fsync_log_period, tokudb_fsync_log_period,
- 0, "TokuDB fsync log period",
- NULL, tokudb_fsync_log_period_update, 0,
- 0, ~0U, 0);
-
-static struct st_mysql_sys_var *tokudb_system_variables[] = {
- MYSQL_SYSVAR(cache_size),
- MYSQL_SYSVAR(client_pool_threads),
- MYSQL_SYSVAR(cachetable_pool_threads),
- MYSQL_SYSVAR(checkpoint_pool_threads),
- MYSQL_SYSVAR(max_lock_memory),
- MYSQL_SYSVAR(enable_partial_eviction),
- MYSQL_SYSVAR(data_dir),
- MYSQL_SYSVAR(log_dir),
- MYSQL_SYSVAR(debug),
- MYSQL_SYSVAR(commit_sync),
- MYSQL_SYSVAR(lock_timeout),
- MYSQL_SYSVAR(cleaner_period),
- MYSQL_SYSVAR(cleaner_iterations),
- MYSQL_SYSVAR(pk_insert_mode),
- MYSQL_SYSVAR(load_save_space),
- MYSQL_SYSVAR(disable_slow_alter),
- MYSQL_SYSVAR(disable_hot_alter),
- MYSQL_SYSVAR(alter_print_error),
- MYSQL_SYSVAR(create_index_online),
- MYSQL_SYSVAR(disable_prefetching),
- MYSQL_SYSVAR(version),
- MYSQL_SYSVAR(checkpointing_period),
- MYSQL_SYSVAR(prelock_empty),
- MYSQL_SYSVAR(checkpoint_lock),
- MYSQL_SYSVAR(write_status_frequency),
- MYSQL_SYSVAR(read_status_frequency),
- MYSQL_SYSVAR(fs_reserve_percent),
- MYSQL_SYSVAR(tmp_dir),
- MYSQL_SYSVAR(block_size),
- MYSQL_SYSVAR(read_block_size),
- MYSQL_SYSVAR(read_buf_size),
- MYSQL_SYSVAR(fanout),
- MYSQL_SYSVAR(row_format),
- MYSQL_SYSVAR(directio),
- MYSQL_SYSVAR(compress_buffers_before_eviction),
- MYSQL_SYSVAR(checkpoint_on_flush_logs),
-#if TOKU_INCLUDE_UPSERT
- MYSQL_SYSVAR(disable_slow_update),
- MYSQL_SYSVAR(disable_slow_upsert),
-#endif
- MYSQL_SYSVAR(analyze_time),
- MYSQL_SYSVAR(analyze_delete_fraction),
- MYSQL_SYSVAR(fsync_log_period),
-#if TOKU_INCLUDE_HANDLERTON_HANDLE_FATAL_SIGNAL
- MYSQL_SYSVAR(gdb_path),
- MYSQL_SYSVAR(gdb_on_fatal),
-#endif
- MYSQL_SYSVAR(last_lock_timeout),
- MYSQL_SYSVAR(lock_timeout_debug),
- MYSQL_SYSVAR(loader_memory_size),
- MYSQL_SYSVAR(hide_default_row_format),
- MYSQL_SYSVAR(killed_time),
- MYSQL_SYSVAR(empty_scan),
-#if TOKUDB_CHECK_JEMALLOC
- MYSQL_SYSVAR(check_jemalloc),
-#endif
- MYSQL_SYSVAR(bulk_fetch),
-#if TOKU_INCLUDE_XA
- MYSQL_SYSVAR(support_xa),
-#endif
- MYSQL_SYSVAR(rpl_unique_checks),
- MYSQL_SYSVAR(rpl_unique_checks_delay),
- MYSQL_SYSVAR(rpl_lookup_rows),
- MYSQL_SYSVAR(rpl_lookup_rows_delay),
- MYSQL_SYSVAR(rpl_check_readonly),
- MYSQL_SYSVAR(optimize_index_name),
- MYSQL_SYSVAR(optimize_index_fraction),
- MYSQL_SYSVAR(optimize_throttle),
- NULL
-};
-
// Split ./database/table-dictionary into database, table and dictionary strings
-static void tokudb_split_dname(const char *dname, String &database_name, String &table_name, String &dictionary_name) {
+void tokudb_split_dname(
+ const char* dname,
+ String& database_name,
+ String& table_name,
+ String& dictionary_name) {
+
const char *splitter = strchr(dname, '/');
if (splitter) {
const char *database_ptr = splitter+1;
@@ -1564,482 +1542,18 @@ static void tokudb_split_dname(const char *dname, String &database_name, String
table_name.append(table_ptr, dictionary_ptr - table_ptr);
dictionary_ptr += 1;
dictionary_name.append(dictionary_ptr);
+ } else {
+ table_name.append(table_ptr);
}
- }
- }
-}
-
-struct st_mysql_storage_engine tokudb_storage_engine = { MYSQL_HANDLERTON_INTERFACE_VERSION };
-
-static struct st_mysql_information_schema tokudb_file_map_information_schema = { MYSQL_INFORMATION_SCHEMA_INTERFACE_VERSION };
-
-static ST_FIELD_INFO tokudb_file_map_field_info[] = {
- {"dictionary_name", 256, MYSQL_TYPE_STRING, 0, 0, NULL, SKIP_OPEN_TABLE },
- {"internal_file_name", 256, MYSQL_TYPE_STRING, 0, 0, NULL, SKIP_OPEN_TABLE },
- {"table_schema", 256, MYSQL_TYPE_STRING, 0, 0, NULL, SKIP_OPEN_TABLE },
- {"table_name", 256, MYSQL_TYPE_STRING, 0, 0, NULL, SKIP_OPEN_TABLE },
- {"table_dictionary_name", 256, MYSQL_TYPE_STRING, 0, 0, NULL, SKIP_OPEN_TABLE },
- {NULL, 0, MYSQL_TYPE_NULL, 0, 0, NULL, SKIP_OPEN_TABLE}
-};
-
-static int tokudb_file_map(TABLE *table, THD *thd) {
- int error;
- DB_TXN* txn = NULL;
- DBC* tmp_cursor = NULL;
- DBT curr_key;
- DBT curr_val;
- memset(&curr_key, 0, sizeof curr_key);
- memset(&curr_val, 0, sizeof curr_val);
- error = txn_begin(db_env, 0, &txn, DB_READ_UNCOMMITTED, thd);
- if (error) {
- goto cleanup;
- }
- error = db_env->get_cursor_for_directory(db_env, txn, &tmp_cursor);
- if (error) {
- goto cleanup;
- }
- while (error == 0) {
- error = tmp_cursor->c_get(tmp_cursor, &curr_key, &curr_val, DB_NEXT);
- if (!error) {
- // We store the NULL terminator in the directory so it's included in the size.
- // See #5789
- // Recalculate and check just to be safe.
- const char *dname = (const char *) curr_key.data;
- size_t dname_len = strlen(dname);
- assert(dname_len == curr_key.size - 1);
- table->field[0]->store(dname, dname_len, system_charset_info);
-
- const char *iname = (const char *) curr_val.data;
- size_t iname_len = strlen(iname);
- assert(iname_len == curr_val.size - 1);
- table->field[1]->store(iname, iname_len, system_charset_info);
-
- // split the dname
- String database_name, table_name, dictionary_name;
- tokudb_split_dname(dname, database_name, table_name, dictionary_name);
- table->field[2]->store(database_name.c_ptr(), database_name.length(), system_charset_info);
- table->field[3]->store(table_name.c_ptr(), table_name.length(), system_charset_info);
- table->field[4]->store(dictionary_name.c_ptr(), dictionary_name.length(), system_charset_info);
-
- error = schema_table_store_record(thd, table);
- }
- if (!error && thd_killed(thd))
- error = ER_QUERY_INTERRUPTED;
- }
- if (error == DB_NOTFOUND) {
- error = 0;
- }
-cleanup:
- if (tmp_cursor) {
- int r = tmp_cursor->c_close(tmp_cursor);
- assert(r == 0);
- }
- if (txn) {
- commit_txn(txn, 0);
- }
- return error;
-}
-
-#if MYSQL_VERSION_ID >= 50600
-static int tokudb_file_map_fill_table(THD *thd, TABLE_LIST *tables, Item *cond) {
-#else
-static int tokudb_file_map_fill_table(THD *thd, TABLE_LIST *tables, COND *cond) {
-#endif
- TOKUDB_DBUG_ENTER("");
- int error;
- TABLE *table = tables->table;
-
- rw_rdlock(&tokudb_hton_initialized_lock);
-
- if (!tokudb_hton_initialized) {
- error = ER_PLUGIN_IS_NOT_LOADED;
- my_error(error, MYF(0), tokudb_hton_name);
- } else {
- error = tokudb_file_map(table, thd);
- if (error)
- my_error(ER_GET_ERRNO, MYF(0), error, tokudb_hton_name);
- }
-
- rw_unlock(&tokudb_hton_initialized_lock);
- TOKUDB_DBUG_RETURN(error);
-}
-
-static int tokudb_file_map_init(void *p) {
- ST_SCHEMA_TABLE *schema = (ST_SCHEMA_TABLE *) p;
- schema->fields_info = tokudb_file_map_field_info;
- schema->fill_table = tokudb_file_map_fill_table;
- return 0;
-}
-
-static int tokudb_file_map_done(void *p) {
- return 0;
-}
-
-static struct st_mysql_information_schema tokudb_fractal_tree_info_information_schema = { MYSQL_INFORMATION_SCHEMA_INTERFACE_VERSION };
-
-static ST_FIELD_INFO tokudb_fractal_tree_info_field_info[] = {
- {"dictionary_name", 256, MYSQL_TYPE_STRING, 0, 0, NULL, SKIP_OPEN_TABLE },
- {"internal_file_name", 256, MYSQL_TYPE_STRING, 0, 0, NULL, SKIP_OPEN_TABLE },
- {"bt_num_blocks_allocated", 0, MYSQL_TYPE_LONGLONG, 0, 0, NULL, SKIP_OPEN_TABLE },
- {"bt_num_blocks_in_use", 0, MYSQL_TYPE_LONGLONG, 0, 0, NULL, SKIP_OPEN_TABLE },
- {"bt_size_allocated", 0, MYSQL_TYPE_LONGLONG, 0, 0, NULL, SKIP_OPEN_TABLE },
- {"bt_size_in_use", 0, MYSQL_TYPE_LONGLONG, 0, 0, NULL, SKIP_OPEN_TABLE },
- {"table_schema", 256, MYSQL_TYPE_STRING, 0, 0, NULL, SKIP_OPEN_TABLE },
- {"table_name", 256, MYSQL_TYPE_STRING, 0, 0, NULL, SKIP_OPEN_TABLE },
- {"table_dictionary_name", 256, MYSQL_TYPE_STRING, 0, 0, NULL, SKIP_OPEN_TABLE },
- {NULL, 0, MYSQL_TYPE_NULL, 0, 0, NULL, SKIP_OPEN_TABLE}
-};
-
-static int tokudb_report_fractal_tree_info_for_db(const DBT *dname, const DBT *iname, TABLE *table, THD *thd) {
- int error;
- uint64_t bt_num_blocks_allocated;
- uint64_t bt_num_blocks_in_use;
- uint64_t bt_size_allocated;
- uint64_t bt_size_in_use;
-
- DB *db = NULL;
- error = db_create(&db, db_env, 0);
- if (error) {
- goto exit;
- }
- error = db->open(db, NULL, (char *)dname->data, NULL, DB_BTREE, 0, 0666);
- if (error) {
- goto exit;
- }
- error = db->get_fractal_tree_info64(db,
- &bt_num_blocks_allocated, &bt_num_blocks_in_use,
- &bt_size_allocated, &bt_size_in_use);
- if (error) {
- goto exit;
- }
-
- // We store the NULL terminator in the directory so it's included in the size.
- // See #5789
- // Recalculate and check just to be safe.
- {
- size_t dname_len = strlen((const char *)dname->data);
- assert(dname_len == dname->size - 1);
- table->field[0]->store((char *)dname->data, dname_len, system_charset_info);
- size_t iname_len = strlen((const char *)iname->data);
- assert(iname_len == iname->size - 1);
- table->field[1]->store((char *)iname->data, iname_len, system_charset_info);
- }
- table->field[2]->store(bt_num_blocks_allocated, false);
- table->field[3]->store(bt_num_blocks_in_use, false);
- table->field[4]->store(bt_size_allocated, false);
- table->field[5]->store(bt_size_in_use, false);
-
- // split the dname
- {
- String database_name, table_name, dictionary_name;
- tokudb_split_dname((const char *)dname->data, database_name, table_name, dictionary_name);
- table->field[6]->store(database_name.c_ptr(), database_name.length(), system_charset_info);
- table->field[7]->store(table_name.c_ptr(), table_name.length(), system_charset_info);
- table->field[8]->store(dictionary_name.c_ptr(), dictionary_name.length(), system_charset_info);
- }
- error = schema_table_store_record(thd, table);
-
-exit:
- if (db) {
- int close_error = db->close(db, 0);
- if (error == 0)
- error = close_error;
- }
- return error;
-}
-
-static int tokudb_fractal_tree_info(TABLE *table, THD *thd) {
- int error;
- DB_TXN* txn = NULL;
- DBC* tmp_cursor = NULL;
- DBT curr_key;
- DBT curr_val;
- memset(&curr_key, 0, sizeof curr_key);
- memset(&curr_val, 0, sizeof curr_val);
- error = txn_begin(db_env, 0, &txn, DB_READ_UNCOMMITTED, thd);
- if (error) {
- goto cleanup;
- }
- error = db_env->get_cursor_for_directory(db_env, txn, &tmp_cursor);
- if (error) {
- goto cleanup;
- }
- while (error == 0) {
- error = tmp_cursor->c_get(tmp_cursor, &curr_key, &curr_val, DB_NEXT);
- if (!error) {
- error = tokudb_report_fractal_tree_info_for_db(&curr_key, &curr_val, table, thd);
- if (error)
- error = 0; // ignore read uncommitted errors
- }
- if (!error && thd_killed(thd))
- error = ER_QUERY_INTERRUPTED;
- }
- if (error == DB_NOTFOUND) {
- error = 0;
- }
-cleanup:
- if (tmp_cursor) {
- int r = tmp_cursor->c_close(tmp_cursor);
- assert(r == 0);
- }
- if (txn) {
- commit_txn(txn, 0);
- }
- return error;
-}
-
-#if MYSQL_VERSION_ID >= 50600
-static int tokudb_fractal_tree_info_fill_table(THD *thd, TABLE_LIST *tables, Item *cond) {
-#else
-static int tokudb_fractal_tree_info_fill_table(THD *thd, TABLE_LIST *tables, COND *cond) {
-#endif
- TOKUDB_DBUG_ENTER("");
- int error;
- TABLE *table = tables->table;
-
- // 3938: Get a read lock on the status flag, since we must
- // read it before safely proceeding
- rw_rdlock(&tokudb_hton_initialized_lock);
-
- if (!tokudb_hton_initialized) {
- error = ER_PLUGIN_IS_NOT_LOADED;
- my_error(error, MYF(0), tokudb_hton_name);
- } else {
- error = tokudb_fractal_tree_info(table, thd);
- if (error)
- my_error(ER_GET_ERRNO, MYF(0), error, tokudb_hton_name);
- }
-
- //3938: unlock the status flag lock
- rw_unlock(&tokudb_hton_initialized_lock);
- TOKUDB_DBUG_RETURN(error);
-}
-
-static int tokudb_fractal_tree_info_init(void *p) {
- ST_SCHEMA_TABLE *schema = (ST_SCHEMA_TABLE *) p;
- schema->fields_info = tokudb_fractal_tree_info_field_info;
- schema->fill_table = tokudb_fractal_tree_info_fill_table;
- return 0;
-}
-
-static int tokudb_fractal_tree_info_done(void *p) {
- return 0;
-}
-
-static struct st_mysql_information_schema tokudb_fractal_tree_block_map_information_schema = { MYSQL_INFORMATION_SCHEMA_INTERFACE_VERSION };
-
-static ST_FIELD_INFO tokudb_fractal_tree_block_map_field_info[] = {
- {"dictionary_name", 256, MYSQL_TYPE_STRING, 0, 0, NULL, SKIP_OPEN_TABLE },
- {"internal_file_name", 256, MYSQL_TYPE_STRING, 0, 0, NULL, SKIP_OPEN_TABLE },
- {"checkpoint_count", 0, MYSQL_TYPE_LONGLONG, 0, 0, NULL, SKIP_OPEN_TABLE },
- {"blocknum", 0, MYSQL_TYPE_LONGLONG, 0, 0, NULL, SKIP_OPEN_TABLE },
- {"offset", 0, MYSQL_TYPE_LONGLONG, 0, MY_I_S_MAYBE_NULL, NULL, SKIP_OPEN_TABLE },
- {"size", 0, MYSQL_TYPE_LONGLONG, 0, MY_I_S_MAYBE_NULL, NULL, SKIP_OPEN_TABLE },
- {"table_schema", 256, MYSQL_TYPE_STRING, 0, 0, NULL, SKIP_OPEN_TABLE },
- {"table_name", 256, MYSQL_TYPE_STRING, 0, 0, NULL, SKIP_OPEN_TABLE },
- {"table_dictionary_name", 256, MYSQL_TYPE_STRING, 0, 0, NULL, SKIP_OPEN_TABLE },
- {NULL, 0, MYSQL_TYPE_NULL, 0, 0, NULL, SKIP_OPEN_TABLE}
-};
-
-struct tokudb_report_fractal_tree_block_map_iterator_extra {
- int64_t num_rows;
- int64_t i;
- uint64_t *checkpoint_counts;
- int64_t *blocknums;
- int64_t *diskoffs;
- int64_t *sizes;
-};
-
-// This iterator is called while holding the blocktable lock. We should be as quick as possible.
-// We don't want to do one call to get the number of rows, release the blocktable lock, and then do another call to get all the rows because the number of rows may change if we don't hold the lock.
-// As a compromise, we'll do some mallocs inside the lock on the first call, but everything else should be fast.
-static int tokudb_report_fractal_tree_block_map_iterator(uint64_t checkpoint_count,
- int64_t num_rows,
- int64_t blocknum,
- int64_t diskoff,
- int64_t size,
- void *iter_extra) {
- struct tokudb_report_fractal_tree_block_map_iterator_extra *e = static_cast<struct tokudb_report_fractal_tree_block_map_iterator_extra *>(iter_extra);
-
- assert(num_rows > 0);
- if (e->num_rows == 0) {
- e->checkpoint_counts = (uint64_t *) tokudb_my_malloc(num_rows * (sizeof *e->checkpoint_counts), MYF(MY_WME|MY_ZEROFILL|MY_FAE));
- e->blocknums = (int64_t *) tokudb_my_malloc(num_rows * (sizeof *e->blocknums), MYF(MY_WME|MY_ZEROFILL|MY_FAE));
- e->diskoffs = (int64_t *) tokudb_my_malloc(num_rows * (sizeof *e->diskoffs), MYF(MY_WME|MY_ZEROFILL|MY_FAE));
- e->sizes = (int64_t *) tokudb_my_malloc(num_rows * (sizeof *e->sizes), MYF(MY_WME|MY_ZEROFILL|MY_FAE));
- e->num_rows = num_rows;
- }
-
- e->checkpoint_counts[e->i] = checkpoint_count;
- e->blocknums[e->i] = blocknum;
- e->diskoffs[e->i] = diskoff;
- e->sizes[e->i] = size;
- ++(e->i);
-
- return 0;
-}
-
-static int tokudb_report_fractal_tree_block_map_for_db(const DBT *dname, const DBT *iname, TABLE *table, THD *thd) {
- int error;
- DB *db;
- struct tokudb_report_fractal_tree_block_map_iterator_extra e = {}; // avoid struct initializers so that we can compile with older gcc versions
-
- error = db_create(&db, db_env, 0);
- if (error) {
- goto exit;
- }
- error = db->open(db, NULL, (char *)dname->data, NULL, DB_BTREE, 0, 0666);
- if (error) {
- goto exit;
- }
- error = db->iterate_fractal_tree_block_map(db, tokudb_report_fractal_tree_block_map_iterator, &e);
- {
- int close_error = db->close(db, 0);
- if (!error) {
- error = close_error;
- }
- }
- if (error) {
- goto exit;
- }
-
- // If not, we should have gotten an error and skipped this section of code
- assert(e.i == e.num_rows);
- for (int64_t i = 0; error == 0 && i < e.num_rows; ++i) {
- // We store the NULL terminator in the directory so it's included in the size.
- // See #5789
- // Recalculate and check just to be safe.
- size_t dname_len = strlen((const char *)dname->data);
- assert(dname_len == dname->size - 1);
- table->field[0]->store((char *)dname->data, dname_len, system_charset_info);
-
- size_t iname_len = strlen((const char *)iname->data);
- assert(iname_len == iname->size - 1);
- table->field[1]->store((char *)iname->data, iname_len, system_charset_info);
-
- table->field[2]->store(e.checkpoint_counts[i], false);
- table->field[3]->store(e.blocknums[i], false);
- static const int64_t freelist_null = -1;
- static const int64_t diskoff_unused = -2;
- if (e.diskoffs[i] == diskoff_unused || e.diskoffs[i] == freelist_null) {
- table->field[4]->set_null();
} else {
- table->field[4]->set_notnull();
- table->field[4]->store(e.diskoffs[i], false);
+ database_name.append(database_ptr);
}
- static const int64_t size_is_free = -1;
- if (e.sizes[i] == size_is_free) {
- table->field[5]->set_null();
- } else {
- table->field[5]->set_notnull();
- table->field[5]->store(e.sizes[i], false);
- }
-
- // split the dname
- String database_name, table_name, dictionary_name;
- tokudb_split_dname((const char *)dname->data, database_name, table_name,dictionary_name);
- table->field[6]->store(database_name.c_ptr(), database_name.length(), system_charset_info);
- table->field[7]->store(table_name.c_ptr(), table_name.length(), system_charset_info);
- table->field[8]->store(dictionary_name.c_ptr(), dictionary_name.length(), system_charset_info);
-
- error = schema_table_store_record(thd, table);
}
-
-exit:
- if (e.checkpoint_counts != NULL) {
- tokudb_my_free(e.checkpoint_counts);
- e.checkpoint_counts = NULL;
- }
- if (e.blocknums != NULL) {
- tokudb_my_free(e.blocknums);
- e.blocknums = NULL;
- }
- if (e.diskoffs != NULL) {
- tokudb_my_free(e.diskoffs);
- e.diskoffs = NULL;
- }
- if (e.sizes != NULL) {
- tokudb_my_free(e.sizes);
- e.sizes = NULL;
- }
- return error;
}
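
// A worked example (input hypothetical): for dname = "./test/t1-main" the
// function yields database_name = "test", table_name = "t1" and
// dictionary_name = "main"; when the table part carries no '-' suffix, the
// whole remainder becomes the table name, per the new else branch above.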
-static int tokudb_fractal_tree_block_map(TABLE *table, THD *thd) {
- int error;
- DB_TXN* txn = NULL;
- DBC* tmp_cursor = NULL;
- DBT curr_key;
- DBT curr_val;
- memset(&curr_key, 0, sizeof curr_key);
- memset(&curr_val, 0, sizeof curr_val);
- error = txn_begin(db_env, 0, &txn, DB_READ_UNCOMMITTED, thd);
- if (error) {
- goto cleanup;
- }
- error = db_env->get_cursor_for_directory(db_env, txn, &tmp_cursor);
- if (error) {
- goto cleanup;
- }
- while (error == 0) {
- error = tmp_cursor->c_get(tmp_cursor, &curr_key, &curr_val, DB_NEXT);
- if (!error) {
- error = tokudb_report_fractal_tree_block_map_for_db(&curr_key, &curr_val, table, thd);
- }
- if (!error && thd_killed(thd))
- error = ER_QUERY_INTERRUPTED;
- }
- if (error == DB_NOTFOUND) {
- error = 0;
- }
-cleanup:
- if (tmp_cursor) {
- int r = tmp_cursor->c_close(tmp_cursor);
- assert(r == 0);
- }
- if (txn) {
- commit_txn(txn, 0);
- }
- return error;
-}
-
-#if MYSQL_VERSION_ID >= 50600
-static int tokudb_fractal_tree_block_map_fill_table(THD *thd, TABLE_LIST *tables, Item *cond) {
-#else
-static int tokudb_fractal_tree_block_map_fill_table(THD *thd, TABLE_LIST *tables, COND *cond) {
-#endif
- TOKUDB_DBUG_ENTER("");
- int error;
- TABLE *table = tables->table;
-
- // 3938: Get a read lock on the status flag, since we must
- // read it before safely proceeding
- rw_rdlock(&tokudb_hton_initialized_lock);
-
- if (!tokudb_hton_initialized) {
- error = ER_PLUGIN_IS_NOT_LOADED;
- my_error(error, MYF(0), tokudb_hton_name);
- } else {
- error = tokudb_fractal_tree_block_map(table, thd);
- if (error)
- my_error(ER_GET_ERRNO, MYF(0), error, tokudb_hton_name);
- }
-
- //3938: unlock the status flag lock
- rw_unlock(&tokudb_hton_initialized_lock);
- TOKUDB_DBUG_RETURN(error);
-}
-
-static int tokudb_fractal_tree_block_map_init(void *p) {
- ST_SCHEMA_TABLE *schema = (ST_SCHEMA_TABLE *) p;
- schema->fields_info = tokudb_fractal_tree_block_map_field_info;
- schema->fill_table = tokudb_fractal_tree_block_map_fill_table;
- return 0;
-}
-
-static int tokudb_fractal_tree_block_map_done(void *p) {
- return 0;
-}
+struct st_mysql_storage_engine tokudb_storage_engine = {
+ MYSQL_HANDLERTON_INTERFACE_VERSION
+};
#if TOKU_INCLUDE_LOCK_TIMEOUT_QUERY_STRING
struct tokudb_search_txn_extra {
@@ -2048,10 +1562,16 @@ struct tokudb_search_txn_extra {
uint64_t match_client_id;
};
-static int tokudb_search_txn_callback(DB_TXN *txn, iterate_row_locks_callback iterate_locks, void *locks_extra, void *extra) {
+static int tokudb_search_txn_callback(
+ DB_TXN* txn,
+ iterate_row_locks_callback iterate_locks,
+ void* locks_extra,
+ void* extra) {
+
uint64_t txn_id = txn->id64(txn);
uint64_t client_id = txn->get_client_id(txn);
- struct tokudb_search_txn_extra *e = reinterpret_cast<struct tokudb_search_txn_extra *>(extra);
+ struct tokudb_search_txn_extra* e =
+ reinterpret_cast<struct tokudb_search_txn_extra*>(extra);
if (e->match_txn_id == txn_id) {
e->match_found = true;
e->match_client_id = client_id;
@@ -2060,9 +1580,17 @@ static int tokudb_search_txn_callback(DB_TXN *txn, iterate_row_locks_callback it
return 0;
}
-static bool tokudb_txn_id_to_client_id(THD *thd, uint64_t blocking_txnid, uint64_t *blocking_client_id) {
- struct tokudb_search_txn_extra e = { false, blocking_txnid, 0};
- (void) db_env->iterate_live_transactions(db_env, tokudb_search_txn_callback, &e);
+static bool tokudb_txn_id_to_client_id(
+ THD* thd,
+ uint64_t blocking_txnid,
+ uint64_t* blocking_client_id) {
+
+ struct tokudb_search_txn_extra e = {
+ false,
+ blocking_txnid,
+ 0
+ };
+ db_env->iterate_live_transactions(db_env, tokudb_search_txn_callback, &e);
if (e.match_found) {
*blocking_client_id = e.match_client_id;
}
@@ -2070,14 +1598,20 @@ static bool tokudb_txn_id_to_client_id(THD *thd, uint64_t blocking_txnid, uint64
}
#endif
-static void tokudb_pretty_key(const DB *db, const DBT *key, const char *default_key, String *out) {
+static void tokudb_pretty_key(
+ const DB* db,
+ const DBT* key,
+ const char* default_key,
+ String* out) {
+
if (key->data == NULL) {
out->append(default_key);
} else {
bool do_hexdump = true;
if (do_hexdump) {
// hexdump the key
- const unsigned char *data = reinterpret_cast<const unsigned char *>(key->data);
+ const unsigned char* data =
+ reinterpret_cast<const unsigned char*>(key->data);
for (size_t i = 0; i < key->size; i++) {
char str[3];
snprintf(str, sizeof str, "%2.2x", data[i]);
@@ -2087,15 +1621,15 @@ static void tokudb_pretty_key(const DB *db, const DBT *key, const char *default_
}
}
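
// Sketch of the resulting format: each key byte becomes two lowercase hex
// digits, so a hypothetical two-byte key { 0x01, 0xab } renders as "01ab",
// while a NULL key falls back to the default_key argument ("-infinity" or
// "+infinity" in the wrappers below).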
-static void tokudb_pretty_left_key(const DB *db, const DBT *key, String *out) {
+void tokudb_pretty_left_key(const DB* db, const DBT* key, String* out) {
tokudb_pretty_key(db, key, "-infinity", out);
}
-static void tokudb_pretty_right_key(const DB *db, const DBT *key, String *out) {
+void tokudb_pretty_right_key(const DB* db, const DBT* key, String* out) {
tokudb_pretty_key(db, key, "+infinity", out);
}
-static const char *tokudb_get_index_name(DB *db) {
+const char* tokudb_get_index_name(DB* db) {
if (db != NULL) {
return db->get_dname(db);
} else {
@@ -2104,17 +1638,24 @@ static const char *tokudb_get_index_name(DB *db) {
}
static int tokudb_equal_key(const DBT *left_key, const DBT *right_key) {
- if (left_key->data == NULL || right_key->data == NULL || left_key->size != right_key->size)
+ if (left_key->data == NULL || right_key->data == NULL ||
+ left_key->size != right_key->size)
return 0;
else
return memcmp(left_key->data, right_key->data, left_key->size) == 0;
}
-static void tokudb_lock_timeout_callback(DB *db, uint64_t requesting_txnid, const DBT *left_key, const DBT *right_key, uint64_t blocking_txnid) {
- THD *thd = current_thd;
+static void tokudb_lock_timeout_callback(
+ DB* db,
+ uint64_t requesting_txnid,
+ const DBT* left_key,
+ const DBT* right_key,
+ uint64_t blocking_txnid) {
+
+ THD* thd = current_thd;
if (!thd)
return;
- ulong lock_timeout_debug = THDVAR(thd, lock_timeout_debug);
+ ulong lock_timeout_debug = tokudb::sysvars::lock_timeout_debug(thd);
if (lock_timeout_debug != 0) {
// generate a JSON document with the lock timeout info
String log_str;
@@ -2123,7 +1664,9 @@ static void tokudb_lock_timeout_callback(DB *db, uint64_t requesting_txnid, cons
log_str.append("\"mysql_thread_id\":");
log_str.append_ulonglong(mysql_thread_id);
log_str.append(", \"dbname\":");
- log_str.append("\""); log_str.append(tokudb_get_index_name(db)); log_str.append("\"");
+ log_str.append("\"");
+ log_str.append(tokudb_get_index_name(db));
+ log_str.append("\"");
log_str.append(", \"requesting_txnid\":");
log_str.append_ulonglong(requesting_txnid);
log_str.append(", \"blocking_txnid\":");
@@ -2132,44 +1675,71 @@ static void tokudb_lock_timeout_callback(DB *db, uint64_t requesting_txnid, cons
String key_str;
tokudb_pretty_key(db, left_key, "?", &key_str);
log_str.append(", \"key\":");
- log_str.append("\""); log_str.append(key_str); log_str.append("\"");
+ log_str.append("\"");
+ log_str.append(key_str);
+ log_str.append("\"");
} else {
String left_str;
tokudb_pretty_left_key(db, left_key, &left_str);
log_str.append(", \"key_left\":");
- log_str.append("\""); log_str.append(left_str); log_str.append("\"");
+ log_str.append("\"");
+ log_str.append(left_str);
+ log_str.append("\"");
String right_str;
tokudb_pretty_right_key(db, right_key, &right_str);
log_str.append(", \"key_right\":");
- log_str.append("\""); log_str.append(right_str); log_str.append("\"");
+ log_str.append("\"");
+ log_str.append(right_str);
+ log_str.append("\"");
}
log_str.append("}");
// set last_lock_timeout
if (lock_timeout_debug & 1) {
- char *old_lock_timeout = THDVAR(thd, last_lock_timeout);
- char *new_lock_timeout = tokudb_my_strdup(log_str.c_ptr(), MY_FAE);
- THDVAR(thd, last_lock_timeout) = new_lock_timeout;
- tokudb_my_free(old_lock_timeout);
+ char* old_lock_timeout = tokudb::sysvars::last_lock_timeout(thd);
+ char* new_lock_timeout =
+ tokudb::memory::strdup(log_str.c_ptr(), MY_FAE);
+ tokudb::sysvars::set_last_lock_timeout(thd, new_lock_timeout);
#if TOKU_THDVAR_MEMALLOC_BUG
- tokudb_pthread_mutex_lock(&tokudb_map_mutex);
+ tokudb_map_mutex.lock();
struct tokudb_map_pair old_key = { thd, old_lock_timeout };
tree_delete(&tokudb_map, &old_key, sizeof old_key, NULL);
struct tokudb_map_pair new_key = { thd, new_lock_timeout };
tree_insert(&tokudb_map, &new_key, sizeof new_key, NULL);
- tokudb_pthread_mutex_unlock(&tokudb_map_mutex);
+ tokudb_map_mutex.unlock();
#endif
+ tokudb::memory::free(old_lock_timeout);
}
// dump to stderr
if (lock_timeout_debug & 2) {
- sql_print_error("%s: lock timeout %s", tokudb_hton_name, log_str.c_ptr());
+ sql_print_error(
+ "%s: lock timeout %s",
+ tokudb_hton_name,
+ log_str.c_ptr());
LEX_STRING *qs = thd_query_string(thd);
- sql_print_error("%s: requesting_thread_id:%" PRIu64 " q:%.*s", tokudb_hton_name, mysql_thread_id, (int) qs->length, qs->str);
+ sql_print_error(
+ "%s: requesting_thread_id:%" PRIu64 " q:%.*s",
+ tokudb_hton_name,
+ mysql_thread_id,
+ (int)qs->length,
+ qs->str);
#if TOKU_INCLUDE_LOCK_TIMEOUT_QUERY_STRING
uint64_t blocking_thread_id = 0;
- if (tokudb_txn_id_to_client_id(thd, blocking_txnid, &blocking_thread_id)) {
+ if (tokudb_txn_id_to_client_id(
+ thd,
+ blocking_txnid,
+ &blocking_thread_id)) {
+
String blocking_qs;
- if (get_thread_query_string(blocking_thread_id, blocking_qs) == 0) {
- sql_print_error("%s: blocking_thread_id:%" PRIu64 " q:%.*s", tokudb_hton_name, blocking_thread_id, blocking_qs.length(), blocking_qs.c_ptr());
+ if (get_thread_query_string(
+ blocking_thread_id,
+ blocking_qs) == 0) {
+
+ sql_print_error(
+ "%s: blocking_thread_id:%" PRIu64 " q:%.*s",
+ tokudb_hton_name,
+ blocking_thread_id,
+ blocking_qs.length(),
+ blocking_qs.c_ptr());
}
}
#endif
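
Read end to end, the callback above emits one JSON object per lock timeout: stored in the tokudb_last_lock_timeout session variable (bit 1 of tokudb_lock_timeout_debug), written to the error log (bit 2), or both. A minimal sketch of the document it assembles, reconstructed from the append calls in this hunk for the left_key == right_key case; the leading "{" is appended just before the visible lines, and all values below are placeholders:

#include <cstdint>
#include <cstdio>
#include <string>

// Illustrative reconstruction of the field order used by
// tokudb_lock_timeout_callback. The real code pulls these values from the
// DB handle, the DBTs, and the transaction ids passed to the callback.
int main() {
    uint64_t mysql_thread_id = 42;
    uint64_t requesting_txnid = 7001;
    uint64_t blocking_txnid = 7000;
    std::string log_str("{");
    log_str += "\"mysql_thread_id\":" + std::to_string(mysql_thread_id);
    log_str += ", \"dbname\":\"./test/t-main\"";   // tokudb_get_index_name()
    log_str += ", \"requesting_txnid\":" + std::to_string(requesting_txnid);
    log_str += ", \"blocking_txnid\":" + std::to_string(blocking_txnid);
    log_str += ", \"key\":\"01ab\"";               // tokudb_pretty_key()
    log_str += "}";
    puts(log_str.c_str());
    // {"mysql_thread_id":42, "dbname":"./test/t-main",
    //  "requesting_txnid":7001, "blocking_txnid":7000, "key":"01ab"}
    return 0;
}
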
@@ -2177,254 +1747,9 @@ static void tokudb_lock_timeout_callback(DB *db, uint64_t requesting_txnid, cons
}
}
-static struct st_mysql_information_schema tokudb_trx_information_schema = { MYSQL_INFORMATION_SCHEMA_INTERFACE_VERSION };
-
-static ST_FIELD_INFO tokudb_trx_field_info[] = {
- {"trx_id", 0, MYSQL_TYPE_LONGLONG, 0, 0, NULL, SKIP_OPEN_TABLE },
- {"trx_mysql_thread_id", 0, MYSQL_TYPE_LONGLONG, 0, 0, NULL, SKIP_OPEN_TABLE },
- {"trx_time", 0, MYSQL_TYPE_LONGLONG, 0, 0, NULL, SKIP_OPEN_TABLE },
- {NULL, 0, MYSQL_TYPE_NULL, 0, 0, NULL, SKIP_OPEN_TABLE}
-};
-
-struct tokudb_trx_extra {
- THD *thd;
- TABLE *table;
-};
-
-static int tokudb_trx_callback(DB_TXN *txn, iterate_row_locks_callback iterate_locks, void *locks_extra, void *extra) {
- uint64_t txn_id = txn->id64(txn);
- uint64_t client_id = txn->get_client_id(txn);
- uint64_t start_time = txn->get_start_time(txn);
- struct tokudb_trx_extra *e = reinterpret_cast<struct tokudb_trx_extra *>(extra);
- THD *thd = e->thd;
- TABLE *table = e->table;
- table->field[0]->store(txn_id, false);
- table->field[1]->store(client_id, false);
- uint64_t tnow = (uint64_t) time(NULL);
- table->field[2]->store(tnow >= start_time ? tnow - start_time : 0, false);
- int error = schema_table_store_record(thd, table);
- if (!error && thd_killed(thd))
- error = ER_QUERY_INTERRUPTED;
- return error;
-}
-
-#if MYSQL_VERSION_ID >= 50600
-static int tokudb_trx_fill_table(THD *thd, TABLE_LIST *tables, Item *cond) {
-#else
-static int tokudb_trx_fill_table(THD *thd, TABLE_LIST *tables, COND *cond) {
-#endif
- TOKUDB_DBUG_ENTER("");
- int error;
-
- rw_rdlock(&tokudb_hton_initialized_lock);
-
- if (!tokudb_hton_initialized) {
- error = ER_PLUGIN_IS_NOT_LOADED;
- my_error(error, MYF(0), tokudb_hton_name);
- } else {
- struct tokudb_trx_extra e = { thd, tables->table };
- error = db_env->iterate_live_transactions(db_env, tokudb_trx_callback, &e);
- if (error)
- my_error(ER_GET_ERRNO, MYF(0), error, tokudb_hton_name);
- }
-
- rw_unlock(&tokudb_hton_initialized_lock);
- TOKUDB_DBUG_RETURN(error);
-}
-
-static int tokudb_trx_init(void *p) {
- ST_SCHEMA_TABLE *schema = (ST_SCHEMA_TABLE *) p;
- schema->fields_info = tokudb_trx_field_info;
- schema->fill_table = tokudb_trx_fill_table;
- return 0;
-}
-
-static int tokudb_trx_done(void *p) {
- return 0;
-}
-
-static struct st_mysql_information_schema tokudb_lock_waits_information_schema = { MYSQL_INFORMATION_SCHEMA_INTERFACE_VERSION };
-
-static ST_FIELD_INFO tokudb_lock_waits_field_info[] = {
- {"requesting_trx_id", 0, MYSQL_TYPE_LONGLONG, 0, 0, NULL, SKIP_OPEN_TABLE },
- {"blocking_trx_id", 0, MYSQL_TYPE_LONGLONG, 0, 0, NULL, SKIP_OPEN_TABLE },
- {"lock_waits_dname", 256, MYSQL_TYPE_STRING, 0, 0, NULL, SKIP_OPEN_TABLE },
- {"lock_waits_key_left", 256, MYSQL_TYPE_STRING, 0, 0, NULL, SKIP_OPEN_TABLE },
- {"lock_waits_key_right", 256, MYSQL_TYPE_STRING, 0, 0, NULL, SKIP_OPEN_TABLE },
- {"lock_waits_start_time", 0, MYSQL_TYPE_LONGLONG, 0, 0, NULL, SKIP_OPEN_TABLE },
- {"lock_waits_table_schema", 256, MYSQL_TYPE_STRING, 0, 0, NULL, SKIP_OPEN_TABLE },
- {"lock_waits_table_name", 256, MYSQL_TYPE_STRING, 0, 0, NULL, SKIP_OPEN_TABLE },
- {"lock_waits_table_dictionary_name", 256, MYSQL_TYPE_STRING, 0, 0, NULL, SKIP_OPEN_TABLE },
- {NULL, 0, MYSQL_TYPE_NULL, 0, 0, NULL, SKIP_OPEN_TABLE}
-};
-
-struct tokudb_lock_waits_extra {
- THD *thd;
- TABLE *table;
-};
-
-static int tokudb_lock_waits_callback(DB *db, uint64_t requesting_txnid, const DBT *left_key, const DBT *right_key,
- uint64_t blocking_txnid, uint64_t start_time, void *extra) {
- struct tokudb_lock_waits_extra *e = reinterpret_cast<struct tokudb_lock_waits_extra *>(extra);
- THD *thd = e->thd;
- TABLE *table = e->table;
- table->field[0]->store(requesting_txnid, false);
- table->field[1]->store(blocking_txnid, false);
- const char *dname = tokudb_get_index_name(db);
- size_t dname_length = strlen(dname);
- table->field[2]->store(dname, dname_length, system_charset_info);
- String left_str;
- tokudb_pretty_left_key(db, left_key, &left_str);
- table->field[3]->store(left_str.ptr(), left_str.length(), system_charset_info);
- String right_str;
- tokudb_pretty_right_key(db, right_key, &right_str);
- table->field[4]->store(right_str.ptr(), right_str.length(), system_charset_info);
- table->field[5]->store(start_time, false);
-
- String database_name, table_name, dictionary_name;
- tokudb_split_dname(dname, database_name, table_name, dictionary_name);
- table->field[6]->store(database_name.c_ptr(), database_name.length(), system_charset_info);
- table->field[7]->store(table_name.c_ptr(), table_name.length(), system_charset_info);
- table->field[8]->store(dictionary_name.c_ptr(), dictionary_name.length(), system_charset_info);
-
- int error = schema_table_store_record(thd, table);
-
- if (!error && thd_killed(thd))
- error = ER_QUERY_INTERRUPTED;
-
- return error;
-}
-
-#if MYSQL_VERSION_ID >= 50600
-static int tokudb_lock_waits_fill_table(THD *thd, TABLE_LIST *tables, Item *cond) {
-#else
-static int tokudb_lock_waits_fill_table(THD *thd, TABLE_LIST *tables, COND *cond) {
-#endif
- TOKUDB_DBUG_ENTER("");
- int error;
-
- rw_rdlock(&tokudb_hton_initialized_lock);
-
- if (!tokudb_hton_initialized) {
- error = ER_PLUGIN_IS_NOT_LOADED;
- my_error(error, MYF(0), tokudb_hton_name);
- } else {
- struct tokudb_lock_waits_extra e = { thd, tables->table };
- error = db_env->iterate_pending_lock_requests(db_env, tokudb_lock_waits_callback, &e);
- if (error)
- my_error(ER_GET_ERRNO, MYF(0), error, tokudb_hton_name);
- }
-
- rw_unlock(&tokudb_hton_initialized_lock);
- TOKUDB_DBUG_RETURN(error);
-}
-
-static int tokudb_lock_waits_init(void *p) {
- ST_SCHEMA_TABLE *schema = (ST_SCHEMA_TABLE *) p;
- schema->fields_info = tokudb_lock_waits_field_info;
- schema->fill_table = tokudb_lock_waits_fill_table;
- return 0;
-}
-
-static int tokudb_lock_waits_done(void *p) {
- return 0;
-}
-
-static struct st_mysql_information_schema tokudb_locks_information_schema = { MYSQL_INFORMATION_SCHEMA_INTERFACE_VERSION };
-
-static ST_FIELD_INFO tokudb_locks_field_info[] = {
- {"locks_trx_id", 0, MYSQL_TYPE_LONGLONG, 0, 0, NULL, SKIP_OPEN_TABLE },
- {"locks_mysql_thread_id", 0, MYSQL_TYPE_LONGLONG, 0, 0, NULL, SKIP_OPEN_TABLE },
- {"locks_dname", 256, MYSQL_TYPE_STRING, 0, 0, NULL, SKIP_OPEN_TABLE },
- {"locks_key_left", 256, MYSQL_TYPE_STRING, 0, 0, NULL, SKIP_OPEN_TABLE },
- {"locks_key_right", 256, MYSQL_TYPE_STRING, 0, 0, NULL, SKIP_OPEN_TABLE },
- {"locks_table_schema", 256, MYSQL_TYPE_STRING, 0, 0, NULL, SKIP_OPEN_TABLE },
- {"locks_table_name", 256, MYSQL_TYPE_STRING, 0, 0, NULL, SKIP_OPEN_TABLE },
- {"locks_table_dictionary_name", 256, MYSQL_TYPE_STRING, 0, 0, NULL, SKIP_OPEN_TABLE },
- {NULL, 0, MYSQL_TYPE_NULL, 0, 0, NULL, SKIP_OPEN_TABLE}
-};
-
-struct tokudb_locks_extra {
- THD *thd;
- TABLE *table;
-};
-
-static int tokudb_locks_callback(DB_TXN *txn, iterate_row_locks_callback iterate_locks, void *locks_extra, void *extra) {
- uint64_t txn_id = txn->id64(txn);
- uint64_t client_id = txn->get_client_id(txn);
- struct tokudb_locks_extra *e = reinterpret_cast<struct tokudb_locks_extra *>(extra);
- THD *thd = e->thd;
- TABLE *table = e->table;
- int error = 0;
- DB *db;
- DBT left_key, right_key;
- while (error == 0 && iterate_locks(&db, &left_key, &right_key, locks_extra) == 0) {
- table->field[0]->store(txn_id, false);
- table->field[1]->store(client_id, false);
-
- const char *dname = tokudb_get_index_name(db);
- size_t dname_length = strlen(dname);
- table->field[2]->store(dname, dname_length, system_charset_info);
-
- String left_str;
- tokudb_pretty_left_key(db, &left_key, &left_str);
- table->field[3]->store(left_str.ptr(), left_str.length(), system_charset_info);
-
- String right_str;
- tokudb_pretty_right_key(db, &right_key, &right_str);
- table->field[4]->store(right_str.ptr(), right_str.length(), system_charset_info);
-
- String database_name, table_name, dictionary_name;
- tokudb_split_dname(dname, database_name, table_name, dictionary_name);
- table->field[5]->store(database_name.c_ptr(), database_name.length(), system_charset_info);
- table->field[6]->store(table_name.c_ptr(), table_name.length(), system_charset_info);
- table->field[7]->store(dictionary_name.c_ptr(), dictionary_name.length(), system_charset_info);
-
- error = schema_table_store_record(thd, table);
-
- if (!error && thd_killed(thd))
- error = ER_QUERY_INTERRUPTED;
- }
- return error;
-}
-
-#if MYSQL_VERSION_ID >= 50600
-static int tokudb_locks_fill_table(THD *thd, TABLE_LIST *tables, Item *cond) {
-#else
-static int tokudb_locks_fill_table(THD *thd, TABLE_LIST *tables, COND *cond) {
-#endif
- TOKUDB_DBUG_ENTER("");
- int error;
-
- rw_rdlock(&tokudb_hton_initialized_lock);
-
- if (!tokudb_hton_initialized) {
- error = ER_PLUGIN_IS_NOT_LOADED;
- my_error(error, MYF(0), tokudb_hton_name);
- } else {
- struct tokudb_locks_extra e = { thd, tables->table };
- error = db_env->iterate_live_transactions(db_env, tokudb_locks_callback, &e);
- if (error)
- my_error(ER_GET_ERRNO, MYF(0), error, tokudb_hton_name);
- }
-
- rw_unlock(&tokudb_hton_initialized_lock);
- TOKUDB_DBUG_RETURN(error);
-}
-
-static int tokudb_locks_init(void *p) {
- ST_SCHEMA_TABLE *schema = (ST_SCHEMA_TABLE *) p;
- schema->fields_info = tokudb_locks_field_info;
- schema->fill_table = tokudb_locks_fill_table;
- return 0;
-}
-
-static int tokudb_locks_done(void *p) {
- return 0;
-}
-
// Retrieves variables for information_schema.global_status.
-// Names (columnname) are automatically converted to upper case, and prefixed with "TOKUDB_"
+// Names (columnname) are automatically converted to upper case,
+// and prefixed with "TOKUDB_"
static int show_tokudb_vars(THD *thd, SHOW_VAR *var, char *buff) {
TOKUDB_DBUG_ENTER("");
@@ -2435,13 +1760,23 @@ static int show_tokudb_vars(THD *thd, SHOW_VAR *var, char *buff) {
fs_redzone_state redzone_state;
uint64_t num_rows;
- error = db_env->get_engine_status (db_env, toku_global_status_rows, toku_global_status_max_rows, &num_rows, &redzone_state, &panic, panic_string, panic_string_len, TOKU_GLOBAL_STATUS);
+ error = db_env->get_engine_status(
+ db_env,
+ toku_global_status_rows,
+ toku_global_status_max_rows,
+ &num_rows,
+ &redzone_state,
+ &panic,
+ panic_string,
+ panic_string_len,
+ TOKU_GLOBAL_STATUS);
//TODO: Maybe do something with the panic output?
if (error == 0) {
- assert(num_rows <= toku_global_status_max_rows);
+ assert_always(num_rows <= toku_global_status_max_rows);
//TODO: Maybe enable some of the items here: (copied from engine status
- //TODO: (optionally) add redzone state, panic, panic string, etc. Right now it's being ignored.
+ //TODO: (optionally) add redzone state, panic, panic string, etc.
+ //Right now it's being ignored.
for (uint64_t row = 0; row < num_rows; row++) {
SHOW_VAR &status_var = toku_global_status_variables[row];
@@ -2463,7 +1798,11 @@ static int show_tokudb_vars(THD *thd, SHOW_VAR *var, char *buff) {
time_t t = status_row.value.num;
char tbuf[26];
// Reuse the memory in status_row. (It belongs to us).
- snprintf(status_row.value.datebuf, sizeof(status_row.value.datebuf), "%.24s", ctime_r(&t, tbuf));
+ snprintf(
+ status_row.value.datebuf,
+ sizeof(status_row.value.datebuf),
+ "%.24s",
+ ctime_r(&t, tbuf));
status_var.value = (char*)&status_row.value.datebuf[0];
break;
}
@@ -2489,7 +1828,11 @@ static int show_tokudb_vars(THD *thd, SHOW_VAR *var, char *buff) {
status_var.type = SHOW_CHAR;
// Reuse the memory in status_row.datebuf. (It belongs to us).
// UNKNOWN TYPE: %d fits in 26 bytes (sizeof datebuf) for any integer.
- snprintf(status_row.value.datebuf, sizeof(status_row.value.datebuf), "UNKNOWN TYPE: %d", status_row.type);
+ snprintf(
+ status_row.value.datebuf,
+ sizeof(status_row.value.datebuf),
+ "UNKNOWN TYPE: %d",
+ status_row.type);
status_var.value = (char*)&status_row.value.datebuf[0];
break;
}
@@ -2521,157 +1864,38 @@ static void tokudb_backtrace(void) {
}
#endif
-#if defined(TOKUDB_VERSION_MAJOR) && defined(TOKUDB_VERSION_MINOR)
-#define TOKUDB_PLUGIN_VERSION ((TOKUDB_VERSION_MAJOR << 8) + TOKUDB_VERSION_MINOR)
-#else
-#define TOKUDB_PLUGIN_VERSION 0
-#endif
-
#ifdef MARIA_PLUGIN_INTERFACE_VERSION
maria_declare_plugin(tokudb)
#else
mysql_declare_plugin(tokudb)
#endif
-{
- MYSQL_STORAGE_ENGINE_PLUGIN,
- &tokudb_storage_engine,
- tokudb_hton_name,
- "Percona",
- "Percona TokuDB Storage Engine with Fractal Tree(tm) Technology",
- PLUGIN_LICENSE_GPL,
- tokudb_init_func, /* plugin init */
- tokudb_done_func, /* plugin deinit */
- TOKUDB_PLUGIN_VERSION,
- toku_global_status_variables_export, /* status variables */
- tokudb_system_variables, /* system variables */
-#ifdef MARIA_PLUGIN_INTERFACE_VERSION
- tokudb_version,
- MariaDB_PLUGIN_MATURITY_STABLE /* maturity */
-#else
- NULL, /* config options */
- 0, /* flags */
-#endif
-},
-{
- MYSQL_INFORMATION_SCHEMA_PLUGIN,
- &tokudb_trx_information_schema,
- "TokuDB_trx",
- "Percona",
- "Percona TokuDB Storage Engine with Fractal Tree(tm) Technology",
- PLUGIN_LICENSE_GPL,
- tokudb_trx_init, /* plugin init */
- tokudb_trx_done, /* plugin deinit */
- TOKUDB_PLUGIN_VERSION,
- NULL, /* status variables */
- NULL, /* system variables */
-#ifdef MARIA_PLUGIN_INTERFACE_VERSION
- tokudb_version,
- MariaDB_PLUGIN_MATURITY_STABLE /* maturity */
-#else
- NULL, /* config options */
- 0, /* flags */
-#endif
-},
-{
- MYSQL_INFORMATION_SCHEMA_PLUGIN,
- &tokudb_lock_waits_information_schema,
- "TokuDB_lock_waits",
- "Percona",
- "Percona TokuDB Storage Engine with Fractal Tree(tm) Technology",
- PLUGIN_LICENSE_GPL,
- tokudb_lock_waits_init, /* plugin init */
- tokudb_lock_waits_done, /* plugin deinit */
- TOKUDB_PLUGIN_VERSION,
- NULL, /* status variables */
- NULL, /* system variables */
-#ifdef MARIA_PLUGIN_INTERFACE_VERSION
- tokudb_version,
- MariaDB_PLUGIN_MATURITY_STABLE /* maturity */
-#else
- NULL, /* config options */
- 0, /* flags */
-#endif
-},
-{
- MYSQL_INFORMATION_SCHEMA_PLUGIN,
- &tokudb_locks_information_schema,
- "TokuDB_locks",
- "Percona",
- "Percona TokuDB Storage Engine with Fractal Tree(tm) Technology",
- PLUGIN_LICENSE_GPL,
- tokudb_locks_init, /* plugin init */
- tokudb_locks_done, /* plugin deinit */
- TOKUDB_PLUGIN_VERSION,
- NULL, /* status variables */
- NULL, /* system variables */
-#ifdef MARIA_PLUGIN_INTERFACE_VERSION
- tokudb_version,
- MariaDB_PLUGIN_MATURITY_STABLE /* maturity */
-#else
- NULL, /* config options */
- 0, /* flags */
-#endif
-},
-{
- MYSQL_INFORMATION_SCHEMA_PLUGIN,
- &tokudb_file_map_information_schema,
- "TokuDB_file_map",
- "Percona",
- "Percona TokuDB Storage Engine with Fractal Tree(tm) Technology",
- PLUGIN_LICENSE_GPL,
- tokudb_file_map_init, /* plugin init */
- tokudb_file_map_done, /* plugin deinit */
- TOKUDB_PLUGIN_VERSION,
- NULL, /* status variables */
- NULL, /* system variables */
-#ifdef MARIA_PLUGIN_INTERFACE_VERSION
- tokudb_version,
- MariaDB_PLUGIN_MATURITY_STABLE /* maturity */
-#else
- NULL, /* config options */
- 0, /* flags */
-#endif
-},
-{
- MYSQL_INFORMATION_SCHEMA_PLUGIN,
- &tokudb_fractal_tree_info_information_schema,
- "TokuDB_fractal_tree_info",
- "Percona",
- "Percona TokuDB Storage Engine with Fractal Tree(tm) Technology",
- PLUGIN_LICENSE_GPL,
- tokudb_fractal_tree_info_init, /* plugin init */
- tokudb_fractal_tree_info_done, /* plugin deinit */
- TOKUDB_PLUGIN_VERSION,
- NULL, /* status variables */
- NULL, /* system variables */
-#ifdef MARIA_PLUGIN_INTERFACE_VERSION
- tokudb_version,
- MariaDB_PLUGIN_MATURITY_STABLE /* maturity */
-#else
- NULL, /* config options */
- 0, /* flags */
-#endif
-},
-{
- MYSQL_INFORMATION_SCHEMA_PLUGIN,
- &tokudb_fractal_tree_block_map_information_schema,
- "TokuDB_fractal_tree_block_map",
- "Percona",
- "Percona TokuDB Storage Engine with Fractal Tree(tm) Technology",
- PLUGIN_LICENSE_GPL,
- tokudb_fractal_tree_block_map_init, /* plugin init */
- tokudb_fractal_tree_block_map_done, /* plugin deinit */
- TOKUDB_PLUGIN_VERSION,
- NULL, /* status variables */
- NULL, /* system variables */
+ {
+ MYSQL_STORAGE_ENGINE_PLUGIN,
+ &tokudb_storage_engine,
+ tokudb_hton_name,
+ "Percona",
+ "Percona TokuDB Storage Engine with Fractal Tree(tm) Technology",
+ PLUGIN_LICENSE_GPL,
+ tokudb_init_func, /* plugin init */
+ tokudb_done_func, /* plugin deinit */
+ TOKUDB_PLUGIN_VERSION,
+ toku_global_status_variables_export, /* status variables */
+ tokudb::sysvars::system_variables, /* system variables */
#ifdef MARIA_PLUGIN_INTERFACE_VERSION
- tokudb_version,
- MariaDB_PLUGIN_MATURITY_STABLE /* maturity */
+ tokudb::sysvars::version,
+ MariaDB_PLUGIN_MATURITY_STABLE /* maturity */
#else
- NULL, /* config options */
- 0, /* flags */
+ NULL, /* config options */
+ 0, /* flags */
#endif
-}
+ },
+ tokudb::information_schema::trx,
+ tokudb::information_schema::lock_waits,
+ tokudb::information_schema::locks,
+ tokudb::information_schema::file_map,
+ tokudb::information_schema::fractal_tree_info,
+ tokudb::information_schema::fractal_tree_block_map,
+ tokudb::information_schema::background_job_status
#ifdef MARIA_PLUGIN_INTERFACE_VERSION
maria_declare_plugin_end;
#else
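
Two patterns dominate the hatoku_hton.cc rewrite above: direct THDVAR() reads are replaced by accessor functions in tokudb::sysvars, and the hand-written information-schema plugin declarations collapse into prebuilt entries under tokudb::information_schema. Neither tokudb_sysvars.* nor tokudb_information_schema.* appears in this excerpt, so the following is only a plausible sketch of the accessor shape, inferred from the call sites; the names match the calls above, but the bodies are assumptions:

// Hypothetical sketch -- the real definitions live in tokudb_sysvars.h/cc,
// which this commit adds but this excerpt does not show.
namespace tokudb {
namespace sysvars {

ulong lock_timeout_debug(THD* thd) {
    return THDVAR(thd, lock_timeout_debug);      // session-scoped read
}
char* last_lock_timeout(THD* thd) {
    return THDVAR(thd, last_lock_timeout);
}
void set_last_lock_timeout(THD* thd, char* new_value) {
    THDVAR(thd, last_lock_timeout) = new_value;  // caller frees the old value
}
uint64_t lock_timeout(THD* thd) {
    return THDVAR(thd, lock_timeout);
}

} // namespace sysvars
} // namespace tokudb

The payoff is indirection: call sites such as tokudb_lock_timeout_callback no longer need the MYSQL_THDVAR_* definitions in scope, which is what lets the header drop them wholesale in the hatoku_hton.h hunks below.
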
diff --git a/storage/tokudb/hatoku_hton.h b/storage/tokudb/hatoku_hton.h
index 2237047ef75..ade7be128a5 100644
--- a/storage/tokudb/hatoku_hton.h
+++ b/storage/tokudb/hatoku_hton.h
@@ -26,434 +26,177 @@ Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
#ifndef _HATOKU_HTON_H
#define _HATOKU_HTON_H
-#include "db.h"
+#include "hatoku_defines.h"
+#include "tokudb_debug.h"
+#include "tokudb_information_schema.h"
+#include "tokudb_memory.h"
+#include "tokudb_thread.h"
+#include "tokudb_time.h"
+#include "tokudb_txn.h"
+#include "tokudb_sysvars.h"
-extern handlerton *tokudb_hton;
+extern handlerton* tokudb_hton;
-extern DB_ENV *db_env;
+extern DB_ENV* db_env;
-enum srv_row_format_enum {
- SRV_ROW_FORMAT_UNCOMPRESSED = 0,
- SRV_ROW_FORMAT_ZLIB = 1,
- SRV_ROW_FORMAT_SNAPPY = 2,
- SRV_ROW_FORMAT_QUICKLZ = 3,
- SRV_ROW_FORMAT_LZMA = 4,
- SRV_ROW_FORMAT_FAST = 5,
- SRV_ROW_FORMAT_SMALL = 6,
- SRV_ROW_FORMAT_DEFAULT = 7
-};
-typedef enum srv_row_format_enum srv_row_format_t;
+inline tokudb::sysvars::row_format_t toku_compression_method_to_row_format(
+ toku_compression_method method) {
-static inline srv_row_format_t toku_compression_method_to_row_format(toku_compression_method method) {
switch (method) {
case TOKU_NO_COMPRESSION:
- return SRV_ROW_FORMAT_UNCOMPRESSED;
+ return tokudb::sysvars::SRV_ROW_FORMAT_UNCOMPRESSED;
case TOKU_ZLIB_WITHOUT_CHECKSUM_METHOD:
case TOKU_ZLIB_METHOD:
- return SRV_ROW_FORMAT_ZLIB;
+ return tokudb::sysvars::SRV_ROW_FORMAT_ZLIB;
case TOKU_SNAPPY_METHOD:
- return SRV_ROW_FORMAT_SNAPPY;
+ return tokudb::sysvars::SRV_ROW_FORMAT_SNAPPY;
case TOKU_QUICKLZ_METHOD:
- return SRV_ROW_FORMAT_QUICKLZ;
+ return tokudb::sysvars::SRV_ROW_FORMAT_QUICKLZ;
case TOKU_LZMA_METHOD:
- return SRV_ROW_FORMAT_LZMA;
+ return tokudb::sysvars::SRV_ROW_FORMAT_LZMA;
case TOKU_DEFAULT_COMPRESSION_METHOD:
- return SRV_ROW_FORMAT_DEFAULT;
+ return tokudb::sysvars::SRV_ROW_FORMAT_DEFAULT;
case TOKU_FAST_COMPRESSION_METHOD:
- return SRV_ROW_FORMAT_FAST;
+ return tokudb::sysvars::SRV_ROW_FORMAT_FAST;
case TOKU_SMALL_COMPRESSION_METHOD:
- return SRV_ROW_FORMAT_SMALL;
+ return tokudb::sysvars::SRV_ROW_FORMAT_SMALL;
default:
- assert(0);
+ assert_unreachable();
}
}
-static inline toku_compression_method row_format_to_toku_compression_method(srv_row_format_t row_format) {
+inline toku_compression_method row_format_to_toku_compression_method(
+ tokudb::sysvars::row_format_t row_format) {
+
switch (row_format) {
- case SRV_ROW_FORMAT_UNCOMPRESSED:
+ case tokudb::sysvars::SRV_ROW_FORMAT_UNCOMPRESSED:
return TOKU_NO_COMPRESSION;
- case SRV_ROW_FORMAT_QUICKLZ:
- case SRV_ROW_FORMAT_FAST:
+ case tokudb::sysvars::SRV_ROW_FORMAT_QUICKLZ:
+ case tokudb::sysvars::SRV_ROW_FORMAT_FAST:
return TOKU_QUICKLZ_METHOD;
- case SRV_ROW_FORMAT_SNAPPY:
+ case tokudb::sysvars::SRV_ROW_FORMAT_SNAPPY:
return TOKU_SNAPPY_METHOD;
- case SRV_ROW_FORMAT_ZLIB:
- case SRV_ROW_FORMAT_DEFAULT:
+ case tokudb::sysvars::SRV_ROW_FORMAT_ZLIB:
+ case tokudb::sysvars::SRV_ROW_FORMAT_DEFAULT:
return TOKU_ZLIB_WITHOUT_CHECKSUM_METHOD;
- case SRV_ROW_FORMAT_LZMA:
- case SRV_ROW_FORMAT_SMALL:
+ case tokudb::sysvars::SRV_ROW_FORMAT_LZMA:
+ case tokudb::sysvars::SRV_ROW_FORMAT_SMALL:
return TOKU_LZMA_METHOD;
default:
- assert(0);
+ assert_unreachable();
}
}
-// thread variables
-
-static MYSQL_THDVAR_BOOL(commit_sync,
- PLUGIN_VAR_THDLOCAL,
- "sync on txn commit",
- /* check */ NULL,
- /* update */ NULL,
- /* default*/ true
-);
-
-static MYSQL_THDVAR_UINT(pk_insert_mode,
- 0,
- "set the primary key insert mode",
- NULL,
- NULL,
- 1, // default
- 0, // min?
- 2, // max
- 1 // blocksize
-);
-
-static uint get_pk_insert_mode(THD* thd) {
- return THDVAR(thd, pk_insert_mode);
-}
-
-static MYSQL_THDVAR_BOOL(load_save_space,
- 0,
- "compress intermediate bulk loader files to save space",
- NULL,
- NULL,
- true
-);
-
-static bool get_load_save_space(THD* thd) {
- return (THDVAR(thd, load_save_space) != 0);
-}
-
-static MYSQL_THDVAR_BOOL(disable_slow_alter,
- 0,
- "if on, alter tables that require copy are disabled",
- NULL,
- NULL,
- false
-);
-
-static bool get_disable_slow_alter(THD* thd) {
- return (THDVAR(thd, disable_slow_alter) != 0);
-}
-
-static MYSQL_THDVAR_BOOL(disable_hot_alter,
- 0,
- "if on, hot alter table is disabled",
- NULL,
- NULL,
- false
-);
-
-static bool get_disable_hot_alter(THD* thd) {
- return THDVAR(thd, disable_hot_alter) != 0;
-}
-
-static MYSQL_THDVAR_BOOL(create_index_online,
- 0,
- "if on, create index done online",
- NULL,
- NULL,
- true
-);
-
-static bool get_create_index_online(THD* thd) {
- return (THDVAR(thd, create_index_online) != 0);
-}
-
-static MYSQL_THDVAR_BOOL(alter_print_error,
- 0,
- "Print errors for alter table operations",
- NULL,
- NULL,
- false
-);
-
-static MYSQL_THDVAR_BOOL(disable_prefetching,
- 0,
- "if on, prefetching disabled",
- NULL,
- NULL,
- false
-);
-
-static bool get_disable_prefetching(THD* thd) {
- return (THDVAR(thd, disable_prefetching) != 0);
-}
-
-static MYSQL_THDVAR_BOOL(prelock_empty,
- 0,
- "Tokudb Prelock Empty Table",
- NULL,
- NULL,
- true
-);
-
-static bool get_prelock_empty(THD* thd) {
- return (THDVAR(thd, prelock_empty) != 0);
-}
-
-static MYSQL_THDVAR_UINT(block_size,
- 0,
- "fractal tree block size",
- NULL,
- NULL,
- 4<<20, // default
- 4096, // min
- ~0U, // max
- 1 // blocksize???
-);
-
-static uint get_tokudb_block_size(THD* thd) {
- return THDVAR(thd, block_size);
-}
-
-static MYSQL_THDVAR_UINT(read_block_size,
- 0,
- "fractal tree read block size",
- NULL,
- NULL,
- 64*1024, // default
- 4096, // min
- ~0U, // max
- 1 // blocksize???
-);
-
-static uint get_tokudb_read_block_size(THD* thd) {
- return THDVAR(thd, read_block_size);
-}
-
-static MYSQL_THDVAR_UINT(read_buf_size,
- 0,
- "fractal tree read block size", //TODO: Is this a typo?
- NULL,
- NULL,
- 128*1024, // default
- 0, // min
- 1*1024*1024, // max
- 1 // blocksize???
-);
-
-static uint get_tokudb_read_buf_size(THD* thd) {
- return THDVAR(thd, read_buf_size);
-}
-
-#if TOKU_INCLUDE_UPSERT
-static MYSQL_THDVAR_BOOL(disable_slow_update,
- PLUGIN_VAR_THDLOCAL,
- "disable slow update",
- NULL, // check
- NULL, // update
- false // default
-);
-
-static MYSQL_THDVAR_BOOL(disable_slow_upsert,
- PLUGIN_VAR_THDLOCAL,
- "disable slow upsert",
- NULL, // check
- NULL, // update
- false // default
-);
-#endif
-
-static MYSQL_THDVAR_UINT(fanout,
- 0,
- "fractal tree fanout",
- NULL,
- NULL,
- 16, // default
- 2, // min
- 16*1024, // max
- 1 // blocksize???
-);
-
-static uint get_tokudb_fanout(THD* thd) {
- return THDVAR(thd, fanout);
-}
-
-static MYSQL_THDVAR_UINT(analyze_time, 0, "analyze time (seconds)", NULL /*check*/, NULL /*update*/, 5 /*default*/, 0 /*min*/, ~0U /*max*/, 1 /*blocksize*/);
-
-static MYSQL_THDVAR_DOUBLE(analyze_delete_fraction, 0, "fraction of rows allowed to be deleted", NULL /*check*/, NULL /*update*/, 1.0 /*def*/, 0 /*min*/, 1.0 /*max*/, 1);
-
-static void tokudb_checkpoint_lock(THD * thd);
-static void tokudb_checkpoint_unlock(THD * thd);
-
-static void tokudb_checkpoint_lock_update(
- THD* thd,
- struct st_mysql_sys_var* var,
- void* var_ptr,
- const void* save)
-{
- my_bool* val = (my_bool *) var_ptr;
- *val= *(my_bool *) save ? true : false;
- if (*val) {
- tokudb_checkpoint_lock(thd);
+inline enum row_type row_format_to_row_type(
+ tokudb::sysvars::row_format_t row_format) {
+#if TOKU_INCLUDE_ROW_TYPE_COMPRESSION
+ switch (row_format) {
+ case tokudb::sysvars::SRV_ROW_FORMAT_UNCOMPRESSED:
+ return ROW_TYPE_TOKU_UNCOMPRESSED;
+ case tokudb::sysvars::SRV_ROW_FORMAT_ZLIB:
+ return ROW_TYPE_TOKU_ZLIB;
+ case tokudb::sysvars::SRV_ROW_FORMAT_SNAPPY:
+ return ROW_TYPE_TOKU_SNAPPY;
+ case tokudb::sysvars::SRV_ROW_FORMAT_QUICKLZ:
+ return ROW_TYPE_TOKU_QUICKLZ;
+ case tokudb::sysvars::SRV_ROW_FORMAT_LZMA:
+ return ROW_TYPE_TOKU_LZMA;
+ case tokudb::sysvars::SRV_ROW_FORMAT_SMALL:
+ return ROW_TYPE_TOKU_SMALL;
+ case tokudb::sysvars::SRV_ROW_FORMAT_FAST:
+ return ROW_TYPE_TOKU_FAST;
+ case tokudb::sysvars::SRV_ROW_FORMAT_DEFAULT:
+ return ROW_TYPE_DEFAULT;
}
- else {
- tokudb_checkpoint_unlock(thd);
+#endif
+ return ROW_TYPE_DEFAULT;
+}
+
+inline tokudb::sysvars::row_format_t row_type_to_row_format(
+ enum row_type type) {
+#if TOKU_INCLUDE_ROW_TYPE_COMPRESSION
+ switch (type) {
+ case ROW_TYPE_TOKU_UNCOMPRESSED:
+ return tokudb::sysvars::SRV_ROW_FORMAT_UNCOMPRESSED;
+ case ROW_TYPE_TOKU_ZLIB:
+ return tokudb::sysvars::SRV_ROW_FORMAT_ZLIB;
+ case ROW_TYPE_TOKU_SNAPPY:
+ return tokudb::sysvars::SRV_ROW_FORMAT_SNAPPY;
+ case ROW_TYPE_TOKU_QUICKLZ:
+ return tokudb::sysvars::SRV_ROW_FORMAT_QUICKLZ;
+ case ROW_TYPE_TOKU_LZMA:
+ return tokudb::sysvars::SRV_ROW_FORMAT_LZMA;
+ case ROW_TYPE_TOKU_SMALL:
+ return tokudb::sysvars::SRV_ROW_FORMAT_SMALL;
+ case ROW_TYPE_TOKU_FAST:
+ return tokudb::sysvars::SRV_ROW_FORMAT_FAST;
+ case ROW_TYPE_DEFAULT:
+ return tokudb::sysvars::SRV_ROW_FORMAT_DEFAULT;
+ default:
+ return tokudb::sysvars::SRV_ROW_FORMAT_DEFAULT;
}
-}
-
-static MYSQL_THDVAR_BOOL(checkpoint_lock,
- 0,
- "Tokudb Checkpoint Lock",
- NULL,
- tokudb_checkpoint_lock_update,
- false
-);
-
-static const char *tokudb_row_format_names[] = {
- "tokudb_uncompressed",
- "tokudb_zlib",
- "tokudb_snappy",
- "tokudb_quicklz",
- "tokudb_lzma",
- "tokudb_fast",
- "tokudb_small",
- "tokudb_default",
- NullS
-};
-
-static TYPELIB tokudb_row_format_typelib = {
- array_elements(tokudb_row_format_names) - 1,
- "tokudb_row_format_typelib",
- tokudb_row_format_names,
- NULL
-};
-
-static MYSQL_THDVAR_ENUM(row_format, PLUGIN_VAR_OPCMDARG,
- "Specifies the compression method for a table during this session. "
- "Possible values are TOKUDB_UNCOMPRESSED, TOKUDB_ZLIB, TOKUDB_SNAPPY, "
- "TOKUDB_QUICKLZ, TOKUDB_LZMA, TOKUDB_FAST, TOKUDB_SMALL and TOKUDB_DEFAULT",
- NULL, NULL, SRV_ROW_FORMAT_ZLIB, &tokudb_row_format_typelib);
-
-static inline srv_row_format_t get_row_format(THD *thd) {
- return (srv_row_format_t) THDVAR(thd, row_format);
+#endif
+ return tokudb::sysvars::SRV_ROW_FORMAT_DEFAULT;
}
-static MYSQL_THDVAR_UINT(lock_timeout_debug, 0, "TokuDB lock timeout debug", NULL /*check*/, NULL /*update*/, 1 /*default*/, 0 /*min*/, ~0U /*max*/, 1);
+inline enum row_type toku_compression_method_to_row_type(
+ toku_compression_method method) {
-static MYSQL_THDVAR_STR(last_lock_timeout, PLUGIN_VAR_MEMALLOC, "last TokuDB lock timeout", NULL /*check*/, NULL /*update*/, NULL /*default*/);
+ return row_format_to_row_type(
+ toku_compression_method_to_row_format(method));
+}
-static MYSQL_THDVAR_BOOL(hide_default_row_format, 0, "hide the default row format", NULL /*check*/, NULL /*update*/, true);
+inline toku_compression_method row_type_to_toku_compression_method(
+ enum row_type type) {
-static const uint64_t DEFAULT_TOKUDB_LOCK_TIMEOUT = 4000; /*milliseconds*/
+ return row_format_to_toku_compression_method(row_type_to_row_format(type));
+}
-static MYSQL_THDVAR_ULONGLONG(lock_timeout, 0, "TokuDB lock timeout", NULL, NULL, DEFAULT_TOKUDB_LOCK_TIMEOUT, 0 /*min*/, ~0ULL /*max*/, 1 /*blocksize*/);
+void tokudb_checkpoint_lock(THD * thd);
+void tokudb_checkpoint_unlock(THD * thd);
-static uint64_t tokudb_get_lock_wait_time_callback(uint64_t default_wait_time) {
+inline uint64_t tokudb_get_lock_wait_time_callback(uint64_t default_wait_time) {
THD *thd = current_thd;
- uint64_t wait_time = THDVAR(thd, lock_timeout);
- return wait_time;
+ return tokudb::sysvars::lock_timeout(thd);
}
-static MYSQL_THDVAR_ULONGLONG(loader_memory_size,
- 0,
- "TokuDB loader memory size",
- NULL,
- NULL,
- 100*1000*1000, /*default*/
- 0, /*min*/
- ~0ULL, /*max*/
- 1 /*blocksize*/
-);
-
-static uint64_t tokudb_get_loader_memory_size_callback(void) {
+inline uint64_t tokudb_get_loader_memory_size_callback(void) {
THD *thd = current_thd;
- uint64_t memory_size = THDVAR(thd, loader_memory_size);
- return memory_size;
+ return tokudb::sysvars::loader_memory_size(thd);
}
-static const uint64_t DEFAULT_TOKUDB_KILLED_TIME = 4000;
-
-static MYSQL_THDVAR_ULONGLONG(killed_time, 0, "TokuDB killed time", NULL, NULL, DEFAULT_TOKUDB_KILLED_TIME, 0 /*min*/, ~0ULL /*max*/, 1 /*blocksize*/);
-
-static uint64_t tokudb_get_killed_time_callback(uint64_t default_killed_time) {
+inline uint64_t tokudb_get_killed_time_callback(uint64_t default_killed_time) {
THD *thd = current_thd;
- uint64_t killed_time = THDVAR(thd, killed_time);
- return killed_time;
+ return tokudb::sysvars::killed_time(thd);
}
-static int tokudb_killed_callback(void) {
+inline int tokudb_killed_callback(void) {
THD *thd = current_thd;
return thd_killed(thd);
}
-static bool tokudb_killed_thd_callback(void *extra, uint64_t deleted_rows) {
+inline bool tokudb_killed_thd_callback(void *extra, uint64_t deleted_rows) {
THD *thd = static_cast<THD *>(extra);
return thd_killed(thd) != 0;
}
-enum {
- TOKUDB_EMPTY_SCAN_DISABLED = 0,
- TOKUDB_EMPTY_SCAN_LR = 1,
- TOKUDB_EMPTY_SCAN_RL = 2,
-};
-
-static const char *tokudb_empty_scan_names[] = {
- "disabled",
- "lr",
- "rl",
- NullS
-};
-
-static TYPELIB tokudb_empty_scan_typelib = {
- array_elements(tokudb_empty_scan_names) - 1,
- "tokudb_empty_scan_typelib",
- tokudb_empty_scan_names,
- NULL
-};
-
-static MYSQL_THDVAR_ENUM(empty_scan, PLUGIN_VAR_OPCMDARG,
- "TokuDB algorithm to check if the table is empty when opened. ",
- NULL, NULL, TOKUDB_EMPTY_SCAN_RL, &tokudb_empty_scan_typelib
-);
-
-#if TOKUDB_CHECK_JEMALLOC
-static uint tokudb_check_jemalloc;
-static MYSQL_SYSVAR_UINT(check_jemalloc, tokudb_check_jemalloc, 0, "Check if jemalloc is linked",
- NULL, NULL, 1, 0, 1, 0);
-#endif
-
-static MYSQL_THDVAR_BOOL(bulk_fetch, PLUGIN_VAR_THDLOCAL, "enable bulk fetch",
- NULL /*check*/, NULL /*update*/, true /*default*/);
-
-#if TOKU_INCLUDE_XA
-static MYSQL_THDVAR_BOOL(support_xa,
- PLUGIN_VAR_OPCMDARG,
- "Enable TokuDB support for the XA two-phase commit",
- NULL, // check
- NULL, // update
- true // default
-);
-#endif
-
-static MYSQL_THDVAR_BOOL(rpl_unique_checks, PLUGIN_VAR_THDLOCAL, "enable unique checks on replication slave",
- NULL /*check*/, NULL /*update*/, true /*default*/);
-
-static MYSQL_THDVAR_ULONGLONG(rpl_unique_checks_delay, PLUGIN_VAR_THDLOCAL, "time in milliseconds to add to unique checks test on replication slave",
- NULL, NULL, 0 /*default*/, 0 /*min*/, ~0ULL /*max*/, 1 /*blocksize*/);
-
-static MYSQL_THDVAR_BOOL(rpl_lookup_rows, PLUGIN_VAR_THDLOCAL, "lookup a row on rpl slave",
- NULL /*check*/, NULL /*update*/, true /*default*/);
-
-static MYSQL_THDVAR_ULONGLONG(rpl_lookup_rows_delay, PLUGIN_VAR_THDLOCAL, "time in milliseconds to add to lookups on replication slave",
- NULL, NULL, 0 /*default*/, 0 /*min*/, ~0ULL /*max*/, 1 /*blocksize*/);
-
-static MYSQL_THDVAR_BOOL(rpl_check_readonly, PLUGIN_VAR_THDLOCAL, "check if the slave is read only",
- NULL /*check*/, NULL /*update*/, true /*default*/);
-
-static MYSQL_THDVAR_STR(optimize_index_name, PLUGIN_VAR_THDLOCAL + PLUGIN_VAR_MEMALLOC, "optimize index name (default all indexes)", NULL /*check*/, NULL /*update*/, NULL /*default*/);
-
-static MYSQL_THDVAR_DOUBLE(optimize_index_fraction, 0, "optimize index fraction (default 1.0 all)", NULL /*check*/, NULL /*update*/, 1.0 /*def*/, 0 /*min*/, 1.0 /*max*/, 1);
-
-static MYSQL_THDVAR_ULONGLONG(optimize_throttle, 0, "optimize throttle (default no throttle)", NULL /*check*/, NULL /*update*/, 0 /*def*/, 0 /*min*/, ~0ULL /*max*/, 1);
extern HASH tokudb_open_tables;
-extern pthread_mutex_t tokudb_mutex;
-extern uint32_t tokudb_write_status_frequency;
-extern uint32_t tokudb_read_status_frequency;
+extern tokudb::thread::mutex_t tokudb_mutex;
+extern const char* tokudb_hton_name;
+extern int tokudb_hton_initialized;
+extern tokudb::thread::rwlock_t tokudb_hton_initialized_lock;
void toku_hton_update_primary_key_bytes_inserted(uint64_t row_size);
+void tokudb_split_dname(
+ const char* dname,
+ String& database_name,
+ String& table_name,
+ String& dictionary_name);
+
+void tokudb_pretty_left_key(const DB* db, const DBT* key, String* out);
+void tokudb_pretty_right_key(const DB* db, const DBT* key, String* out);
+const char *tokudb_get_index_name(DB* db);
+
#endif //#ifdef _HATOKU_HTON
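
Since every conversion in the rewritten header now routes through tokudb::sysvars::row_format_t, the four inline helpers compose into round trips. A small example, with results read directly off the switch statements above (note that row_format_to_row_type() and row_type_to_row_format() only distinguish formats when TOKU_INCLUDE_ROW_TYPE_COMPRESSION is enabled; otherwise both collapse to the default):

// Composition example using only the inline helpers declared above;
// assumes TOKU_INCLUDE_ROW_TYPE_COMPRESSION is enabled.
static void row_format_round_trip_example() {
    // ROW_TYPE_TOKU_ZLIB -> SRV_ROW_FORMAT_ZLIB -> zlib without checksum.
    toku_compression_method m =
        row_type_to_toku_compression_method(ROW_TYPE_TOKU_ZLIB);
    assert_always(m == TOKU_ZLIB_WITHOUT_CHECKSUM_METHOD);

    // Going back is lossy by design: both zlib variants map to the same
    // row format, and "fast"/"small" alias quicklz/lzma respectively.
    enum row_type t = toku_compression_method_to_row_type(TOKU_ZLIB_METHOD);
    assert_always(t == ROW_TYPE_TOKU_ZLIB);
}
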
diff --git a/storage/tokudb/mysql-test/rpl/disabled.def b/storage/tokudb/mysql-test/rpl/disabled.def
index 25c44654676..4c1a9a3e785 100644
--- a/storage/tokudb/mysql-test/rpl/disabled.def
+++ b/storage/tokudb/mysql-test/rpl/disabled.def
@@ -13,4 +13,3 @@ rpl_tokudb_write_unique_uc1: unreliable, uses timestamp differences
rpl_tokudb_read_only_ff: unreliable, uses timestamp differences
rpl_tokudb_read_only_tf: unreliable, uses timestamp differences
rpl_tokudb_read_only_tt: unreliable, uses timestamp differences
-rpl_row_sp002_tokudb : tokudb does not support foreign keys
diff --git a/storage/tokudb/mysql-test/rpl/r/rpl_parallel_tokudb_delete_pk.result b/storage/tokudb/mysql-test/rpl/r/rpl_parallel_tokudb_delete_pk.result
index cd8608f4387..5935e5ddcbd 100644
--- a/storage/tokudb/mysql-test/rpl/r/rpl_parallel_tokudb_delete_pk.result
+++ b/storage/tokudb/mysql-test/rpl/r/rpl_parallel_tokudb_delete_pk.result
@@ -1,6 +1,13 @@
include/master-slave.inc
[connection master]
drop table if exists t;
+show variables like 'tokudb_rpl_%';
+Variable_name Value
+tokudb_rpl_check_readonly ON
+tokudb_rpl_lookup_rows OFF
+tokudb_rpl_lookup_rows_delay 10000
+tokudb_rpl_unique_checks OFF
+tokudb_rpl_unique_checks_delay 10000
create table t (a bigint not null, primary key(a)) engine=tokudb;
insert into t values (1);
insert into t values (2),(3);
diff --git a/storage/tokudb/mysql-test/rpl/r/rpl_parallel_tokudb_update_pk_uc0_lookup0.result b/storage/tokudb/mysql-test/rpl/r/rpl_parallel_tokudb_update_pk_uc0_lookup0.result
index 1ba442ecde2..daa04e5f43e 100644
--- a/storage/tokudb/mysql-test/rpl/r/rpl_parallel_tokudb_update_pk_uc0_lookup0.result
+++ b/storage/tokudb/mysql-test/rpl/r/rpl_parallel_tokudb_update_pk_uc0_lookup0.result
@@ -1,6 +1,13 @@
include/master-slave.inc
[connection master]
drop table if exists t;
+show variables like 'tokudb_rpl_%';
+Variable_name Value
+tokudb_rpl_check_readonly ON
+tokudb_rpl_lookup_rows OFF
+tokudb_rpl_lookup_rows_delay 10000
+tokudb_rpl_unique_checks OFF
+tokudb_rpl_unique_checks_delay 10000
create table t (a bigint not null, b bigint not null, primary key(a)) engine=tokudb;
insert into t values (1,0);
insert into t values (2,0),(3,0);
diff --git a/storage/tokudb/mysql-test/rpl/r/rpl_parallel_tokudb_write_pk.result b/storage/tokudb/mysql-test/rpl/r/rpl_parallel_tokudb_write_pk.result
index 608e85249fb..518d3318710 100644
--- a/storage/tokudb/mysql-test/rpl/r/rpl_parallel_tokudb_write_pk.result
+++ b/storage/tokudb/mysql-test/rpl/r/rpl_parallel_tokudb_write_pk.result
@@ -1,6 +1,10 @@
include/master-slave.inc
[connection master]
drop table if exists t;
+show variables like 'tokudb_rpl_unique_checks%';
+Variable_name Value
+tokudb_rpl_unique_checks OFF
+tokudb_rpl_unique_checks_delay 5000
create table t (a bigint not null, primary key(a)) engine=tokudb;
select unix_timestamp() into @tstart;
insert into t values (1);
diff --git a/storage/tokudb/mysql-test/rpl/r/rpl_tokudb_delete_pk.result b/storage/tokudb/mysql-test/rpl/r/rpl_tokudb_delete_pk.result
index c77cbbc71c9..6fab29177d7 100644
--- a/storage/tokudb/mysql-test/rpl/r/rpl_tokudb_delete_pk.result
+++ b/storage/tokudb/mysql-test/rpl/r/rpl_tokudb_delete_pk.result
@@ -4,6 +4,13 @@ Note #### Sending passwords in plain text without SSL/TLS is extremely insecure.
Note #### Storing MySQL user name or password information in the master info repository is not secure and is therefore not recommended. Please consider using the USER and PASSWORD connection options for START SLAVE; see the 'START SLAVE Syntax' in the MySQL Manual for more information.
[connection master]
drop table if exists t;
+show variables like 'tokudb_rpl_%';
+Variable_name Value
+tokudb_rpl_check_readonly ON
+tokudb_rpl_lookup_rows OFF
+tokudb_rpl_lookup_rows_delay 10000
+tokudb_rpl_unique_checks OFF
+tokudb_rpl_unique_checks_delay 10000
create table t (a bigint not null, primary key(a)) engine=tokudb;
insert into t values (1);
insert into t values (2),(3);
diff --git a/storage/tokudb/mysql-test/rpl/r/rpl_tokudb_delete_pk_lookup1.result b/storage/tokudb/mysql-test/rpl/r/rpl_tokudb_delete_pk_lookup1.result
index e178b8ad137..f8efd5e04ee 100644
--- a/storage/tokudb/mysql-test/rpl/r/rpl_tokudb_delete_pk_lookup1.result
+++ b/storage/tokudb/mysql-test/rpl/r/rpl_tokudb_delete_pk_lookup1.result
@@ -4,6 +4,13 @@ Note #### Sending passwords in plain text without SSL/TLS is extremely insecure.
Note #### Storing MySQL user name or password information in the master info repository is not secure and is therefore not recommended. Please consider using the USER and PASSWORD connection options for START SLAVE; see the 'START SLAVE Syntax' in the MySQL Manual for more information.
[connection master]
drop table if exists t;
+show variables like 'tokudb_rpl_%';
+Variable_name Value
+tokudb_rpl_check_readonly ON
+tokudb_rpl_lookup_rows ON
+tokudb_rpl_lookup_rows_delay 10000
+tokudb_rpl_unique_checks ON
+tokudb_rpl_unique_checks_delay 0
create table t (a bigint not null, primary key(a)) engine=tokudb;
insert into t values (1);
insert into t values (2),(3);
diff --git a/storage/tokudb/mysql-test/rpl/r/rpl_tokudb_mixed_dml.result b/storage/tokudb/mysql-test/rpl/r/rpl_tokudb_mixed_dml.result
index 30c3d188f9a..59b05ea4dfb 100644
--- a/storage/tokudb/mysql-test/rpl/r/rpl_tokudb_mixed_dml.result
+++ b/storage/tokudb/mysql-test/rpl/r/rpl_tokudb_mixed_dml.result
@@ -1,3 +1,6 @@
+SET SESSION tokudb_pk_insert_mode = 2;
+Warnings:
+Warning 131 Using tokudb_pk_insert_mode is deprecated and the parameter may be removed in future releases.
include/master-slave.inc
[connection master]
==========MASTER==========
diff --git a/storage/tokudb/mysql-test/rpl/r/rpl_tokudb_read_only_ff.result b/storage/tokudb/mysql-test/rpl/r/rpl_tokudb_read_only_ff.result
index 5926cdda565..76a5e31b969 100644
--- a/storage/tokudb/mysql-test/rpl/r/rpl_tokudb_read_only_ff.result
+++ b/storage/tokudb/mysql-test/rpl/r/rpl_tokudb_read_only_ff.result
@@ -4,6 +4,13 @@ Note #### Sending passwords in plain text without SSL/TLS is extremely insecure.
Note #### Storing MySQL user name or password information in the master info repository is not secure and is therefore not recommended. Please consider using the USER and PASSWORD connection options for START SLAVE; see the 'START SLAVE Syntax' in the MySQL Manual for more information.
[connection master]
drop table if exists t;
+show variables like 'tokudb_rpl%';
+Variable_name Value
+tokudb_rpl_check_readonly OFF
+tokudb_rpl_lookup_rows ON
+tokudb_rpl_lookup_rows_delay 0
+tokudb_rpl_unique_checks OFF
+tokudb_rpl_unique_checks_delay 5000
create table t (a bigint not null, primary key(a)) engine=tokudb;
select unix_timestamp() into @tstart;
insert into t values (1);
diff --git a/storage/tokudb/mysql-test/rpl/r/rpl_tokudb_read_only_ft.result b/storage/tokudb/mysql-test/rpl/r/rpl_tokudb_read_only_ft.result
index 3bcd3e8ccdd..c81a2ebac44 100644
--- a/storage/tokudb/mysql-test/rpl/r/rpl_tokudb_read_only_ft.result
+++ b/storage/tokudb/mysql-test/rpl/r/rpl_tokudb_read_only_ft.result
@@ -1,6 +1,13 @@
include/master-slave.inc
[connection master]
drop table if exists t;
+show variables like 'tokudb_rpl%';
+Variable_name Value
+tokudb_rpl_check_readonly ON
+tokudb_rpl_lookup_rows ON
+tokudb_rpl_lookup_rows_delay 0
+tokudb_rpl_unique_checks OFF
+tokudb_rpl_unique_checks_delay 5000
create table t (a bigint not null, primary key(a)) engine=tokudb;
select unix_timestamp() into @tstart;
insert into t values (1);
diff --git a/storage/tokudb/mysql-test/rpl/r/rpl_tokudb_read_only_tf.result b/storage/tokudb/mysql-test/rpl/r/rpl_tokudb_read_only_tf.result
index 5926cdda565..76a5e31b969 100644
--- a/storage/tokudb/mysql-test/rpl/r/rpl_tokudb_read_only_tf.result
+++ b/storage/tokudb/mysql-test/rpl/r/rpl_tokudb_read_only_tf.result
@@ -4,6 +4,13 @@ Note #### Sending passwords in plain text without SSL/TLS is extremely insecure.
Note #### Storing MySQL user name or password information in the master info repository is not secure and is therefore not recommended. Please consider using the USER and PASSWORD connection options for START SLAVE; see the 'START SLAVE Syntax' in the MySQL Manual for more information.
[connection master]
drop table if exists t;
+show variables like 'tokudb_rpl%';
+Variable_name Value
+tokudb_rpl_check_readonly OFF
+tokudb_rpl_lookup_rows ON
+tokudb_rpl_lookup_rows_delay 0
+tokudb_rpl_unique_checks OFF
+tokudb_rpl_unique_checks_delay 5000
create table t (a bigint not null, primary key(a)) engine=tokudb;
select unix_timestamp() into @tstart;
insert into t values (1);
diff --git a/storage/tokudb/mysql-test/rpl/r/rpl_tokudb_read_only_tt.result b/storage/tokudb/mysql-test/rpl/r/rpl_tokudb_read_only_tt.result
index 5926cdda565..0cba2a1cddb 100644
--- a/storage/tokudb/mysql-test/rpl/r/rpl_tokudb_read_only_tt.result
+++ b/storage/tokudb/mysql-test/rpl/r/rpl_tokudb_read_only_tt.result
@@ -4,6 +4,13 @@ Note #### Sending passwords in plain text without SSL/TLS is extremely insecure.
Note #### Storing MySQL user name or password information in the master info repository is not secure and is therefore not recommended. Please consider using the USER and PASSWORD connection options for START SLAVE; see the 'START SLAVE Syntax' in the MySQL Manual for more information.
[connection master]
drop table if exists t;
+show variables like 'tokudb_rpl%';
+Variable_name Value
+tokudb_rpl_check_readonly ON
+tokudb_rpl_lookup_rows ON
+tokudb_rpl_lookup_rows_delay 0
+tokudb_rpl_unique_checks OFF
+tokudb_rpl_unique_checks_delay 5000
create table t (a bigint not null, primary key(a)) engine=tokudb;
select unix_timestamp() into @tstart;
insert into t values (1);
diff --git a/storage/tokudb/mysql-test/rpl/r/rpl_tokudb_update_pk_uc0_lookup0.result b/storage/tokudb/mysql-test/rpl/r/rpl_tokudb_update_pk_uc0_lookup0.result
index 162655f9896..50f43ebe5bf 100644
--- a/storage/tokudb/mysql-test/rpl/r/rpl_tokudb_update_pk_uc0_lookup0.result
+++ b/storage/tokudb/mysql-test/rpl/r/rpl_tokudb_update_pk_uc0_lookup0.result
@@ -4,6 +4,13 @@ Note #### Sending passwords in plain text without SSL/TLS is extremely insecure.
Note #### Storing MySQL user name or password information in the master info repository is not secure and is therefore not recommended. Please consider using the USER and PASSWORD connection options for START SLAVE; see the 'START SLAVE Syntax' in the MySQL Manual for more information.
[connection master]
drop table if exists t;
+show variables like 'tokudb_rpl_%';
+Variable_name Value
+tokudb_rpl_check_readonly ON
+tokudb_rpl_lookup_rows OFF
+tokudb_rpl_lookup_rows_delay 10000
+tokudb_rpl_unique_checks OFF
+tokudb_rpl_unique_checks_delay 10000
create table t (a bigint not null, b bigint not null, primary key(a)) engine=tokudb;
insert into t values (1,0);
insert into t values (2,0),(3,0);
diff --git a/storage/tokudb/mysql-test/rpl/r/rpl_tokudb_update_pk_uc0_lookup1.result b/storage/tokudb/mysql-test/rpl/r/rpl_tokudb_update_pk_uc0_lookup1.result
index 3c909718426..9e7f932a3c9 100644
--- a/storage/tokudb/mysql-test/rpl/r/rpl_tokudb_update_pk_uc0_lookup1.result
+++ b/storage/tokudb/mysql-test/rpl/r/rpl_tokudb_update_pk_uc0_lookup1.result
@@ -4,6 +4,13 @@ Note #### Sending passwords in plain text without SSL/TLS is extremely insecure.
Note #### Storing MySQL user name or password information in the master info repository is not secure and is therefore not recommended. Please consider using the USER and PASSWORD connection options for START SLAVE; see the 'START SLAVE Syntax' in the MySQL Manual for more information.
[connection master]
drop table if exists t;
+show variables like 'tokudb_rpl_%';
+Variable_name Value
+tokudb_rpl_check_readonly ON
+tokudb_rpl_lookup_rows ON
+tokudb_rpl_lookup_rows_delay 10000
+tokudb_rpl_unique_checks OFF
+tokudb_rpl_unique_checks_delay 10000
create table t (a bigint not null, b bigint not null, primary key(a)) engine=tokudb;
insert into t values (1,0);
insert into t values (2,0),(3,0);
diff --git a/storage/tokudb/mysql-test/rpl/r/rpl_tokudb_update_pk_uc1_lookup0.result b/storage/tokudb/mysql-test/rpl/r/rpl_tokudb_update_pk_uc1_lookup0.result
index 3c909718426..348734b206d 100644
--- a/storage/tokudb/mysql-test/rpl/r/rpl_tokudb_update_pk_uc1_lookup0.result
+++ b/storage/tokudb/mysql-test/rpl/r/rpl_tokudb_update_pk_uc1_lookup0.result
@@ -4,6 +4,13 @@ Note #### Sending passwords in plain text without SSL/TLS is extremely insecure.
Note #### Storing MySQL user name or password information in the master info repository is not secure and is therefore not recommended. Please consider using the USER and PASSWORD connection options for START SLAVE; see the 'START SLAVE Syntax' in the MySQL Manual for more information.
[connection master]
drop table if exists t;
+show variables like 'tokudb_rpl_%';
+Variable_name Value
+tokudb_rpl_check_readonly ON
+tokudb_rpl_lookup_rows OFF
+tokudb_rpl_lookup_rows_delay 10000
+tokudb_rpl_unique_checks ON
+tokudb_rpl_unique_checks_delay 10000
create table t (a bigint not null, b bigint not null, primary key(a)) engine=tokudb;
insert into t values (1,0);
insert into t values (2,0),(3,0);
diff --git a/storage/tokudb/mysql-test/rpl/r/rpl_tokudb_update_pk_uc1_lookup1.result b/storage/tokudb/mysql-test/rpl/r/rpl_tokudb_update_pk_uc1_lookup1.result
index 3c909718426..bfd640e52e9 100644
--- a/storage/tokudb/mysql-test/rpl/r/rpl_tokudb_update_pk_uc1_lookup1.result
+++ b/storage/tokudb/mysql-test/rpl/r/rpl_tokudb_update_pk_uc1_lookup1.result
@@ -4,6 +4,13 @@ Note #### Sending passwords in plain text without SSL/TLS is extremely insecure.
Note #### Storing MySQL user name or password information in the master info repository is not secure and is therefore not recommended. Please consider using the USER and PASSWORD connection options for START SLAVE; see the 'START SLAVE Syntax' in the MySQL Manual for more information.
[connection master]
drop table if exists t;
+show variables like 'tokudb_rpl_%';
+Variable_name Value
+tokudb_rpl_check_readonly ON
+tokudb_rpl_lookup_rows ON
+tokudb_rpl_lookup_rows_delay 10000
+tokudb_rpl_unique_checks ON
+tokudb_rpl_unique_checks_delay 10000
create table t (a bigint not null, b bigint not null, primary key(a)) engine=tokudb;
insert into t values (1,0);
insert into t values (2,0),(3,0);
diff --git a/storage/tokudb/mysql-test/rpl/r/rpl_tokudb_update_unique_uc0_lookup0.result b/storage/tokudb/mysql-test/rpl/r/rpl_tokudb_update_unique_uc0_lookup0.result
index 941010071ae..faf969f851a 100644
--- a/storage/tokudb/mysql-test/rpl/r/rpl_tokudb_update_unique_uc0_lookup0.result
+++ b/storage/tokudb/mysql-test/rpl/r/rpl_tokudb_update_unique_uc0_lookup0.result
@@ -4,6 +4,13 @@ Note #### Sending passwords in plain text without SSL/TLS is extremely insecure.
Note #### Storing MySQL user name or password information in the master info repository is not secure and is therefore not recommended. Please consider using the USER and PASSWORD connection options for START SLAVE; see the 'START SLAVE Syntax' in the MySQL Manual for more information.
[connection master]
drop table if exists t;
+show variables like 'tokudb_rpl_%';
+Variable_name Value
+tokudb_rpl_check_readonly ON
+tokudb_rpl_lookup_rows OFF
+tokudb_rpl_lookup_rows_delay 10000
+tokudb_rpl_unique_checks OFF
+tokudb_rpl_unique_checks_delay 10000
create table t (a bigint not null, b bigint not null, c bigint not null, primary key(a), unique key(c)) engine=tokudb;
insert into t values (1,0,-1);
insert into t values (2,0,-2),(3,0,-3);
diff --git a/storage/tokudb/mysql-test/rpl/r/rpl_tokudb_update_unique_uc0_lookup1.result b/storage/tokudb/mysql-test/rpl/r/rpl_tokudb_update_unique_uc0_lookup1.result
index 6a0b1126710..9ac87512f80 100644
--- a/storage/tokudb/mysql-test/rpl/r/rpl_tokudb_update_unique_uc0_lookup1.result
+++ b/storage/tokudb/mysql-test/rpl/r/rpl_tokudb_update_unique_uc0_lookup1.result
@@ -4,6 +4,13 @@ Note #### Sending passwords in plain text without SSL/TLS is extremely insecure.
Note #### Storing MySQL user name or password information in the master info repository is not secure and is therefore not recommended. Please consider using the USER and PASSWORD connection options for START SLAVE; see the 'START SLAVE Syntax' in the MySQL Manual for more information.
[connection master]
drop table if exists t;
+show variables like 'tokudb_rpl_%';
+Variable_name Value
+tokudb_rpl_check_readonly ON
+tokudb_rpl_lookup_rows ON
+tokudb_rpl_lookup_rows_delay 10000
+tokudb_rpl_unique_checks OFF
+tokudb_rpl_unique_checks_delay 10000
create table t (a bigint not null, b bigint not null, c bigint not null, primary key(a), unique key(c)) engine=tokudb;
insert into t values (1,0,-1);
insert into t values (2,0,-2),(3,0,-3);
diff --git a/storage/tokudb/mysql-test/rpl/r/rpl_tokudb_write_pk.result b/storage/tokudb/mysql-test/rpl/r/rpl_tokudb_write_pk.result
index 5926cdda565..ea1b84f8138 100644
--- a/storage/tokudb/mysql-test/rpl/r/rpl_tokudb_write_pk.result
+++ b/storage/tokudb/mysql-test/rpl/r/rpl_tokudb_write_pk.result
@@ -4,6 +4,10 @@ Note #### Sending passwords in plain text without SSL/TLS is extremely insecure.
Note #### Storing MySQL user name or password information in the master info repository is not secure and is therefore not recommended. Please consider using the USER and PASSWORD connection options for START SLAVE; see the 'START SLAVE Syntax' in the MySQL Manual for more information.
[connection master]
drop table if exists t;
+show variables like 'tokudb_rpl_unique_checks%';
+Variable_name Value
+tokudb_rpl_unique_checks OFF
+tokudb_rpl_unique_checks_delay 5000
create table t (a bigint not null, primary key(a)) engine=tokudb;
select unix_timestamp() into @tstart;
insert into t values (1);
diff --git a/storage/tokudb/mysql-test/rpl/r/rpl_tokudb_write_pk_uc1.result b/storage/tokudb/mysql-test/rpl/r/rpl_tokudb_write_pk_uc1.result
index 0b5ac77481f..c846ec81612 100644
--- a/storage/tokudb/mysql-test/rpl/r/rpl_tokudb_write_pk_uc1.result
+++ b/storage/tokudb/mysql-test/rpl/r/rpl_tokudb_write_pk_uc1.result
@@ -4,6 +4,10 @@ Note #### Sending passwords in plain text without SSL/TLS is extremely insecure.
Note #### Storing MySQL user name or password information in the master info repository is not secure and is therefore not recommended. Please consider using the USER and PASSWORD connection options for START SLAVE; see the 'START SLAVE Syntax' in the MySQL Manual for more information.
[connection master]
drop table if exists t;
+show variables like 'tokudb_rpl_unique_checks%';
+Variable_name Value
+tokudb_rpl_unique_checks ON
+tokudb_rpl_unique_checks_delay 10000
create table t (a bigint not null, primary key(a)) engine=tokudb;
select unix_timestamp() into @tstart;
insert into t values (1);
diff --git a/storage/tokudb/mysql-test/rpl/r/rpl_tokudb_write_unique.result b/storage/tokudb/mysql-test/rpl/r/rpl_tokudb_write_unique.result
index 60ffc0d0530..808303387d5 100644
--- a/storage/tokudb/mysql-test/rpl/r/rpl_tokudb_write_unique.result
+++ b/storage/tokudb/mysql-test/rpl/r/rpl_tokudb_write_unique.result
@@ -4,6 +4,10 @@ Note #### Sending passwords in plain text without SSL/TLS is extremely insecure.
Note #### Storing MySQL user name or password information in the master info repository is not secure and is therefore not recommended. Please consider using the USER and PASSWORD connection options for START SLAVE; see the 'START SLAVE Syntax' in the MySQL Manual for more information.
[connection master]
drop table if exists t;
+show variables like 'tokudb_rpl_unique_checks%';
+Variable_name Value
+tokudb_rpl_unique_checks OFF
+tokudb_rpl_unique_checks_delay 5000
create table t (a bigint not null, b bigint not null, primary key(a), unique key(b)) engine=tokudb;
select unix_timestamp() into @tstart;
insert into t values (1,2);
diff --git a/storage/tokudb/mysql-test/rpl/r/rpl_tokudb_write_unique_uc1.result b/storage/tokudb/mysql-test/rpl/r/rpl_tokudb_write_unique_uc1.result
index 2f2cf58032e..a40548cec62 100644
--- a/storage/tokudb/mysql-test/rpl/r/rpl_tokudb_write_unique_uc1.result
+++ b/storage/tokudb/mysql-test/rpl/r/rpl_tokudb_write_unique_uc1.result
@@ -4,6 +4,10 @@ Note #### Sending passwords in plain text without SSL/TLS is extremely insecure.
Note #### Storing MySQL user name or password information in the master info repository is not secure and is therefore not recommended. Please consider using the USER and PASSWORD connection options for START SLAVE; see the 'START SLAVE Syntax' in the MySQL Manual for more information.
[connection master]
drop table if exists t;
+show variables like 'tokudb_rpl_unique_checks%';
+Variable_name Value
+tokudb_rpl_unique_checks ON
+tokudb_rpl_unique_checks_delay 5000
create table t (a bigint not null, b bigint not null, primary key(a), unique key(b)) engine=tokudb;
select unix_timestamp() into @tstart;
insert into t values (1,2);
diff --git a/storage/tokudb/mysql-test/rpl/t/disabled.def b/storage/tokudb/mysql-test/rpl/t/disabled.def
new file mode 100644
index 00000000000..30bc9fea7c5
--- /dev/null
+++ b/storage/tokudb/mysql-test/rpl/t/disabled.def
@@ -0,0 +1 @@
+rpl_row_sp002_tokudb : tokudb does not support foreign keys
diff --git a/storage/tokudb/mysql-test/rpl/t/rpl_parallel_tokudb.test b/storage/tokudb/mysql-test/rpl/t/rpl_parallel_tokudb.test
index efb60fac2ba..23d2d6cdf51 100644
--- a/storage/tokudb/mysql-test/rpl/t/rpl_parallel_tokudb.test
+++ b/storage/tokudb/mysql-test/rpl/t/rpl_parallel_tokudb.test
@@ -1,3 +1,4 @@
+--source include/have_tokudb.inc
#Want to skip this test from daily Valgrind execution
--source include/no_valgrind_without_big.inc
#
diff --git a/storage/tokudb/mysql-test/rpl/t/rpl_parallel_tokudb_delete_pk-slave.opt b/storage/tokudb/mysql-test/rpl/t/rpl_parallel_tokudb_delete_pk-slave.opt
index 98240735800..0aa48174807 100644
--- a/storage/tokudb/mysql-test/rpl/t/rpl_parallel_tokudb_delete_pk-slave.opt
+++ b/storage/tokudb/mysql-test/rpl/t/rpl_parallel_tokudb_delete_pk-slave.opt
@@ -1,6 +1,5 @@
--read-only=ON
---tokudb-rpl-unique-checks-delay=10000
---tokudb-rpl-unique-checks=OFF
---tokudb-rpl-lookup-rows-delay=10000
---tokudb-rpl-lookup-rows=OFF
-
+--loose-tokudb-rpl-unique-checks-delay=10000
+--loose-tokudb-rpl-unique-checks=OFF
+--loose-tokudb-rpl-lookup-rows-delay=10000
+--loose-tokudb-rpl-lookup-rows=OFF
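Why the loose- prefix: mysqld aborts at startup on an unknown option, and the tokudb-rpl-* options only exist once the TokuDB plugin is actually loaded. Prefixing them with loose- turns "unknown option" into a warning, so the slave still starts on builds where the plugin is unavailable. A minimal sketch of the difference (hypothetical my.cnf fragment, not part of this patch):

    [mysqld]
    tokudb-rpl-unique-checks=OFF        # plugin absent: unknown variable, server aborts
    loose-tokudb-rpl-unique-checks=OFF  # plugin absent: warning only, startup continues

The same substitution is applied to every rpl *.opt file below.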
diff --git a/storage/tokudb/mysql-test/rpl/t/rpl_parallel_tokudb_delete_pk.test b/storage/tokudb/mysql-test/rpl/t/rpl_parallel_tokudb_delete_pk.test
index fb42f40bb62..bedeb9513be 100644
--- a/storage/tokudb/mysql-test/rpl/t/rpl_parallel_tokudb_delete_pk.test
+++ b/storage/tokudb/mysql-test/rpl/t/rpl_parallel_tokudb_delete_pk.test
@@ -14,7 +14,7 @@ enable_warnings;
connection slave;
# show variables like 'read_only';
-# show variables like 'tokudb_rpl_%';
+show variables like 'tokudb_rpl_%';
# insert some rows
connection master;
diff --git a/storage/tokudb/mysql-test/rpl/t/rpl_parallel_tokudb_update_pk_uc0_lookup0-slave.opt b/storage/tokudb/mysql-test/rpl/t/rpl_parallel_tokudb_update_pk_uc0_lookup0-slave.opt
index 8d0e95decb2..0aa48174807 100644
--- a/storage/tokudb/mysql-test/rpl/t/rpl_parallel_tokudb_update_pk_uc0_lookup0-slave.opt
+++ b/storage/tokudb/mysql-test/rpl/t/rpl_parallel_tokudb_update_pk_uc0_lookup0-slave.opt
@@ -1,5 +1,5 @@
--read-only=ON
---tokudb-rpl-unique-checks-delay=10000
---tokudb-rpl-unique-checks=OFF
---tokudb-rpl-lookup-rows-delay=10000
---tokudb-rpl-lookup-rows=OFF
+--loose-tokudb-rpl-unique-checks-delay=10000
+--loose-tokudb-rpl-unique-checks=OFF
+--loose-tokudb-rpl-lookup-rows-delay=10000
+--loose-tokudb-rpl-lookup-rows=OFF
diff --git a/storage/tokudb/mysql-test/rpl/t/rpl_parallel_tokudb_update_pk_uc0_lookup0.test b/storage/tokudb/mysql-test/rpl/t/rpl_parallel_tokudb_update_pk_uc0_lookup0.test
index 62e123d5a04..d6ccf2487ae 100644
--- a/storage/tokudb/mysql-test/rpl/t/rpl_parallel_tokudb_update_pk_uc0_lookup0.test
+++ b/storage/tokudb/mysql-test/rpl/t/rpl_parallel_tokudb_update_pk_uc0_lookup0.test
@@ -13,7 +13,7 @@ enable_warnings;
connection slave;
# show variables like 'read_only';
-# show variables like 'tokudb_rpl_%';
+show variables like 'tokudb_rpl_%';
# insert some rows
connection master;
diff --git a/storage/tokudb/mysql-test/rpl/t/rpl_parallel_tokudb_write_pk-slave.opt b/storage/tokudb/mysql-test/rpl/t/rpl_parallel_tokudb_write_pk-slave.opt
index c11308c89bf..7f49776e4e8 100644
--- a/storage/tokudb/mysql-test/rpl/t/rpl_parallel_tokudb_write_pk-slave.opt
+++ b/storage/tokudb/mysql-test/rpl/t/rpl_parallel_tokudb_write_pk-slave.opt
@@ -1,3 +1,3 @@
--read-only=ON
---tokudb-rpl-unique-checks-delay=5000
---tokudb-rpl-unique-checks=OFF
+--loose-tokudb-rpl-unique-checks-delay=5000
+--loose-tokudb-rpl-unique-checks=OFF
diff --git a/storage/tokudb/mysql-test/rpl/t/rpl_parallel_tokudb_write_pk.test b/storage/tokudb/mysql-test/rpl/t/rpl_parallel_tokudb_write_pk.test
index bd27ceb2c2b..db96ac321c0 100644
--- a/storage/tokudb/mysql-test/rpl/t/rpl_parallel_tokudb_write_pk.test
+++ b/storage/tokudb/mysql-test/rpl/t/rpl_parallel_tokudb_write_pk.test
@@ -14,7 +14,7 @@ enable_warnings;
connection slave;
# show variables like 'read_only';
-# show variables like 'tokudb_rpl_unique_checks%';
+show variables like 'tokudb_rpl_unique_checks%';
# insert some rows
connection master;
diff --git a/storage/tokudb/mysql-test/rpl/t/rpl_tokudb_delete_pk-slave.opt b/storage/tokudb/mysql-test/rpl/t/rpl_tokudb_delete_pk-slave.opt
index dc139282dc4..93a2685e847 100644
--- a/storage/tokudb/mysql-test/rpl/t/rpl_tokudb_delete_pk-slave.opt
+++ b/storage/tokudb/mysql-test/rpl/t/rpl_tokudb_delete_pk-slave.opt
@@ -1 +1 @@
---read-only=ON --tokudb-rpl-unique-checks-delay=10000 --tokudb-rpl-unique-checks=OFF --tokudb-rpl-lookup-rows-delay=10000 --tokudb-rpl-lookup-rows=OFF
+--read-only=ON --loose-tokudb-rpl-unique-checks-delay=10000 --loose-tokudb-rpl-unique-checks=OFF --loose-tokudb-rpl-lookup-rows-delay=10000 --loose-tokudb-rpl-lookup-rows=OFF
diff --git a/storage/tokudb/mysql-test/rpl/t/rpl_tokudb_delete_pk.test b/storage/tokudb/mysql-test/rpl/t/rpl_tokudb_delete_pk.test
index fb42f40bb62..bedeb9513be 100644
--- a/storage/tokudb/mysql-test/rpl/t/rpl_tokudb_delete_pk.test
+++ b/storage/tokudb/mysql-test/rpl/t/rpl_tokudb_delete_pk.test
@@ -14,7 +14,7 @@ enable_warnings;
connection slave;
# show variables like 'read_only';
-# show variables like 'tokudb_rpl_%';
+show variables like 'tokudb_rpl_%';
# insert some rows
connection master;
diff --git a/storage/tokudb/mysql-test/rpl/t/rpl_tokudb_delete_pk_lookup1-slave.opt b/storage/tokudb/mysql-test/rpl/t/rpl_tokudb_delete_pk_lookup1-slave.opt
index 4675b07763d..9a2fec628f9 100644
--- a/storage/tokudb/mysql-test/rpl/t/rpl_tokudb_delete_pk_lookup1-slave.opt
+++ b/storage/tokudb/mysql-test/rpl/t/rpl_tokudb_delete_pk_lookup1-slave.opt
@@ -1 +1 @@
---read-only=ON --tokudb-rpl-unique-checks-delay=0 --tokudb-rpl-unique-checks=ON --tokudb-rpl-lookup-rows-delay=10000 --tokudb-rpl-lookup-rows=ON
+--read-only=ON --loose-tokudb-rpl-unique-checks-delay=0 --loose-tokudb-rpl-unique-checks=ON --loose-tokudb-rpl-lookup-rows-delay=10000 --loose-tokudb-rpl-lookup-rows=ON
diff --git a/storage/tokudb/mysql-test/rpl/t/rpl_tokudb_delete_pk_lookup1.test b/storage/tokudb/mysql-test/rpl/t/rpl_tokudb_delete_pk_lookup1.test
index bf5edbd2c1b..9e9aaede416 100644
--- a/storage/tokudb/mysql-test/rpl/t/rpl_tokudb_delete_pk_lookup1.test
+++ b/storage/tokudb/mysql-test/rpl/t/rpl_tokudb_delete_pk_lookup1.test
@@ -15,7 +15,7 @@ enable_warnings;
connection slave;
# show variables like 'read_only';
-# show variables like 'tokudb_rpl_%';
+show variables like 'tokudb_rpl_%';
# insert some rows
connection master;
diff --git a/storage/tokudb/mysql-test/rpl/t/rpl_tokudb_mixed_dml-master.opt b/storage/tokudb/mysql-test/rpl/t/rpl_tokudb_mixed_dml-master.opt
deleted file mode 100644
index fd3de58d816..00000000000
--- a/storage/tokudb/mysql-test/rpl/t/rpl_tokudb_mixed_dml-master.opt
+++ /dev/null
@@ -1,2 +0,0 @@
---tokudb_pk_insert_mode=2
-
diff --git a/storage/tokudb/mysql-test/rpl/t/rpl_tokudb_mixed_dml.test b/storage/tokudb/mysql-test/rpl/t/rpl_tokudb_mixed_dml.test
index c27625abd7f..6147d321784 100644
--- a/storage/tokudb/mysql-test/rpl/t/rpl_tokudb_mixed_dml.test
+++ b/storage/tokudb/mysql-test/rpl/t/rpl_tokudb_mixed_dml.test
@@ -6,4 +6,7 @@
--source include/have_binlog_format_mixed.inc
--source include/have_tokudb.inc
let $engine_type=TokuDB;
+
+SET SESSION tokudb_pk_insert_mode = 2;
+
--source suite/rpl/include/rpl_mixed_dml.inc
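The deleted -master.opt above carried tokudb_pk_insert_mode as a server start option; the test now sets it as a session variable instead, which avoids a dedicated server restart and keeps the setting local to this test's connection. A sketch of the two forms (illustrative only):

    SET SESSION tokudb_pk_insert_mode = 2;  -- affects only this connection, as the test does
    SET GLOBAL  tokudb_pk_insert_mode = 2;  -- would affect new connections; not needed here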
diff --git a/storage/tokudb/mysql-test/rpl/t/rpl_tokudb_read_only_ff-slave.opt b/storage/tokudb/mysql-test/rpl/t/rpl_tokudb_read_only_ff-slave.opt
index b9eb687d8d5..0240c9d6ae4 100644
--- a/storage/tokudb/mysql-test/rpl/t/rpl_tokudb_read_only_ff-slave.opt
+++ b/storage/tokudb/mysql-test/rpl/t/rpl_tokudb_read_only_ff-slave.opt
@@ -1 +1 @@
---read-only=OFF --tokudb-rpl-check-readonly=OFF --tokudb-rpl-unique-checks-delay=5000 --tokudb-rpl-unique-checks=OFF
+--read-only=OFF --loose-tokudb-rpl-check-readonly=OFF --loose-tokudb-rpl-unique-checks-delay=5000 --loose-tokudb-rpl-unique-checks=OFF
diff --git a/storage/tokudb/mysql-test/rpl/t/rpl_tokudb_read_only_ff.test b/storage/tokudb/mysql-test/rpl/t/rpl_tokudb_read_only_ff.test
index c77e4b49605..7b3e8f0c0d3 100644
--- a/storage/tokudb/mysql-test/rpl/t/rpl_tokudb_read_only_ff.test
+++ b/storage/tokudb/mysql-test/rpl/t/rpl_tokudb_read_only_ff.test
@@ -14,7 +14,7 @@ enable_warnings;
connection slave;
# show variables like 'read_only';
-# show variables like 'tokudb_rpl_unique_checks%';
+show variables like 'tokudb_rpl%';
# insert some rows
connection master;
diff --git a/storage/tokudb/mysql-test/rpl/t/rpl_tokudb_read_only_ft-slave.opt b/storage/tokudb/mysql-test/rpl/t/rpl_tokudb_read_only_ft-slave.opt
index 8283875e8a7..e1ee193d4c1 100644
--- a/storage/tokudb/mysql-test/rpl/t/rpl_tokudb_read_only_ft-slave.opt
+++ b/storage/tokudb/mysql-test/rpl/t/rpl_tokudb_read_only_ft-slave.opt
@@ -1 +1 @@
---read-only=OFF --tokudb-rpl-check-readonly=ON --tokudb-rpl-unique-checks-delay=5000 --tokudb-rpl-unique-checks=OFF
+--read-only=OFF --loose-tokudb-rpl-check-readonly=ON --loose-tokudb-rpl-unique-checks-delay=5000 --loose-tokudb-rpl-unique-checks=OFF
diff --git a/storage/tokudb/mysql-test/rpl/t/rpl_tokudb_read_only_ft.test b/storage/tokudb/mysql-test/rpl/t/rpl_tokudb_read_only_ft.test
index c77e4b49605..7b3e8f0c0d3 100644
--- a/storage/tokudb/mysql-test/rpl/t/rpl_tokudb_read_only_ft.test
+++ b/storage/tokudb/mysql-test/rpl/t/rpl_tokudb_read_only_ft.test
@@ -14,7 +14,7 @@ enable_warnings;
connection slave;
# show variables like 'read_only';
-# show variables like 'tokudb_rpl_unique_checks%';
+show variables like 'tokudb_rpl%';
# insert some rows
connection master;
diff --git a/storage/tokudb/mysql-test/rpl/t/rpl_tokudb_read_only_tf-slave.opt b/storage/tokudb/mysql-test/rpl/t/rpl_tokudb_read_only_tf-slave.opt
index 21e57d27c17..e60afd0380a 100644
--- a/storage/tokudb/mysql-test/rpl/t/rpl_tokudb_read_only_tf-slave.opt
+++ b/storage/tokudb/mysql-test/rpl/t/rpl_tokudb_read_only_tf-slave.opt
@@ -1 +1 @@
---read-only=ON --tokudb-rpl-check-readonly=OFF --tokudb-rpl-unique-checks-delay=5000 --tokudb-rpl-unique-checks=OFF
+--read-only=ON --loose-tokudb-rpl-check-readonly=OFF --loose-tokudb-rpl-unique-checks-delay=5000 --loose-tokudb-rpl-unique-checks=OFF
diff --git a/storage/tokudb/mysql-test/rpl/t/rpl_tokudb_read_only_tf.test b/storage/tokudb/mysql-test/rpl/t/rpl_tokudb_read_only_tf.test
index c77e4b49605..7b3e8f0c0d3 100644
--- a/storage/tokudb/mysql-test/rpl/t/rpl_tokudb_read_only_tf.test
+++ b/storage/tokudb/mysql-test/rpl/t/rpl_tokudb_read_only_tf.test
@@ -14,7 +14,7 @@ enable_warnings;
connection slave;
# show variables like 'read_only';
-# show variables like 'tokudb_rpl_unique_checks%';
+show variables like 'tokudb_rpl%';
# insert some rows
connection master;
diff --git a/storage/tokudb/mysql-test/rpl/t/rpl_tokudb_read_only_tt-slave.opt b/storage/tokudb/mysql-test/rpl/t/rpl_tokudb_read_only_tt-slave.opt
index fd77ee0da9c..f6658646e65 100644
--- a/storage/tokudb/mysql-test/rpl/t/rpl_tokudb_read_only_tt-slave.opt
+++ b/storage/tokudb/mysql-test/rpl/t/rpl_tokudb_read_only_tt-slave.opt
@@ -1 +1 @@
---read-only=ON --tokudb-rpl-check-readonly=ON --tokudb-rpl-unique-checks-delay=5000 --tokudb-rpl-unique-checks=OFF
+--read-only=ON --loose-tokudb-rpl-check-readonly=ON --loose-tokudb-rpl-unique-checks-delay=5000 --loose-tokudb-rpl-unique-checks=OFF
diff --git a/storage/tokudb/mysql-test/rpl/t/rpl_tokudb_read_only_tt.test b/storage/tokudb/mysql-test/rpl/t/rpl_tokudb_read_only_tt.test
index c77e4b49605..7b3e8f0c0d3 100644
--- a/storage/tokudb/mysql-test/rpl/t/rpl_tokudb_read_only_tt.test
+++ b/storage/tokudb/mysql-test/rpl/t/rpl_tokudb_read_only_tt.test
@@ -14,7 +14,7 @@ enable_warnings;
connection slave;
# show variables like 'read_only';
-# show variables like 'tokudb_rpl_unique_checks%';
+show variables like 'tokudb_rpl%';
# insert some rows
connection master;
diff --git a/storage/tokudb/mysql-test/rpl/t/rpl_tokudb_update_pk_uc0_lookup0-slave.opt b/storage/tokudb/mysql-test/rpl/t/rpl_tokudb_update_pk_uc0_lookup0-slave.opt
index dc139282dc4..93a2685e847 100644
--- a/storage/tokudb/mysql-test/rpl/t/rpl_tokudb_update_pk_uc0_lookup0-slave.opt
+++ b/storage/tokudb/mysql-test/rpl/t/rpl_tokudb_update_pk_uc0_lookup0-slave.opt
@@ -1 +1 @@
---read-only=ON --tokudb-rpl-unique-checks-delay=10000 --tokudb-rpl-unique-checks=OFF --tokudb-rpl-lookup-rows-delay=10000 --tokudb-rpl-lookup-rows=OFF
+--read-only=ON --loose-tokudb-rpl-unique-checks-delay=10000 --loose-tokudb-rpl-unique-checks=OFF --loose-tokudb-rpl-lookup-rows-delay=10000 --loose-tokudb-rpl-lookup-rows=OFF
diff --git a/storage/tokudb/mysql-test/rpl/t/rpl_tokudb_update_pk_uc0_lookup0.test b/storage/tokudb/mysql-test/rpl/t/rpl_tokudb_update_pk_uc0_lookup0.test
index 998987349c7..6dd9b660eed 100644
--- a/storage/tokudb/mysql-test/rpl/t/rpl_tokudb_update_pk_uc0_lookup0.test
+++ b/storage/tokudb/mysql-test/rpl/t/rpl_tokudb_update_pk_uc0_lookup0.test
@@ -13,7 +13,7 @@ enable_warnings;
connection slave;
# show variables like 'read_only';
-# show variables like 'tokudb_rpl_%';
+show variables like 'tokudb_rpl_%';
# insert some rows
connection master;
diff --git a/storage/tokudb/mysql-test/rpl/t/rpl_tokudb_update_pk_uc0_lookup1-slave.opt b/storage/tokudb/mysql-test/rpl/t/rpl_tokudb_update_pk_uc0_lookup1-slave.opt
index d546dd00669..a4ca1104425 100644
--- a/storage/tokudb/mysql-test/rpl/t/rpl_tokudb_update_pk_uc0_lookup1-slave.opt
+++ b/storage/tokudb/mysql-test/rpl/t/rpl_tokudb_update_pk_uc0_lookup1-slave.opt
@@ -1 +1 @@
---read-only=ON --tokudb-rpl-unique-checks-delay=10000 --tokudb-rpl-unique-checks=OFF --tokudb-rpl-lookup-rows-delay=10000 --tokudb-rpl-lookup-rows=ON
+--read-only=ON --loose-tokudb-rpl-unique-checks-delay=10000 --loose-tokudb-rpl-unique-checks=OFF --loose-tokudb-rpl-lookup-rows-delay=10000 --loose-tokudb-rpl-lookup-rows=ON
diff --git a/storage/tokudb/mysql-test/rpl/t/rpl_tokudb_update_pk_uc0_lookup1.test b/storage/tokudb/mysql-test/rpl/t/rpl_tokudb_update_pk_uc0_lookup1.test
index 998987349c7..6dd9b660eed 100644
--- a/storage/tokudb/mysql-test/rpl/t/rpl_tokudb_update_pk_uc0_lookup1.test
+++ b/storage/tokudb/mysql-test/rpl/t/rpl_tokudb_update_pk_uc0_lookup1.test
@@ -13,7 +13,7 @@ enable_warnings;
connection slave;
# show variables like 'read_only';
-# show variables like 'tokudb_rpl_%';
+show variables like 'tokudb_rpl_%';
# insert some rows
connection master;
diff --git a/storage/tokudb/mysql-test/rpl/t/rpl_tokudb_update_pk_uc1_lookup0-slave.opt b/storage/tokudb/mysql-test/rpl/t/rpl_tokudb_update_pk_uc1_lookup0-slave.opt
index 5cfe5f83a91..4b1d21daf5f 100644
--- a/storage/tokudb/mysql-test/rpl/t/rpl_tokudb_update_pk_uc1_lookup0-slave.opt
+++ b/storage/tokudb/mysql-test/rpl/t/rpl_tokudb_update_pk_uc1_lookup0-slave.opt
@@ -1 +1 @@
---read-only=ON --tokudb-rpl-unique-checks-delay=10000 --tokudb-rpl-unique-checks=ON --tokudb-rpl-lookup-rows-delay=10000 --tokudb-rpl-lookup-rows=OFF
+--read-only=ON --loose-tokudb-rpl-unique-checks-delay=10000 --loose-tokudb-rpl-unique-checks=ON --loose-tokudb-rpl-lookup-rows-delay=10000 --loose-tokudb-rpl-lookup-rows=OFF
diff --git a/storage/tokudb/mysql-test/rpl/t/rpl_tokudb_update_pk_uc1_lookup0.test b/storage/tokudb/mysql-test/rpl/t/rpl_tokudb_update_pk_uc1_lookup0.test
index 998987349c7..6dd9b660eed 100644
--- a/storage/tokudb/mysql-test/rpl/t/rpl_tokudb_update_pk_uc1_lookup0.test
+++ b/storage/tokudb/mysql-test/rpl/t/rpl_tokudb_update_pk_uc1_lookup0.test
@@ -13,7 +13,7 @@ enable_warnings;
connection slave;
# show variables like 'read_only';
-# show variables like 'tokudb_rpl_%';
+show variables like 'tokudb_rpl_%';
# insert some rows
connection master;
diff --git a/storage/tokudb/mysql-test/rpl/t/rpl_tokudb_update_pk_uc1_lookup1-slave.opt b/storage/tokudb/mysql-test/rpl/t/rpl_tokudb_update_pk_uc1_lookup1-slave.opt
index 7cd575c52bb..239e19ac040 100644
--- a/storage/tokudb/mysql-test/rpl/t/rpl_tokudb_update_pk_uc1_lookup1-slave.opt
+++ b/storage/tokudb/mysql-test/rpl/t/rpl_tokudb_update_pk_uc1_lookup1-slave.opt
@@ -1 +1 @@
---read-only=ON --tokudb-rpl-unique-checks-delay=10000 --tokudb-rpl-unique-checks=ON --tokudb-rpl-lookup-rows-delay=10000 --tokudb-rpl-lookup-rows=ON
+--read-only=ON --loose-tokudb-rpl-unique-checks-delay=10000 --loose-tokudb-rpl-unique-checks=ON --loose-tokudb-rpl-lookup-rows-delay=10000 --loose-tokudb-rpl-lookup-rows=ON
diff --git a/storage/tokudb/mysql-test/rpl/t/rpl_tokudb_update_pk_uc1_lookup1.test b/storage/tokudb/mysql-test/rpl/t/rpl_tokudb_update_pk_uc1_lookup1.test
index 998987349c7..6dd9b660eed 100644
--- a/storage/tokudb/mysql-test/rpl/t/rpl_tokudb_update_pk_uc1_lookup1.test
+++ b/storage/tokudb/mysql-test/rpl/t/rpl_tokudb_update_pk_uc1_lookup1.test
@@ -13,7 +13,7 @@ enable_warnings;
connection slave;
# show variables like 'read_only';
-# show variables like 'tokudb_rpl_%';
+show variables like 'tokudb_rpl_%';
# insert some rows
connection master;
diff --git a/storage/tokudb/mysql-test/rpl/t/rpl_tokudb_update_unique_uc0_lookup0-slave.opt b/storage/tokudb/mysql-test/rpl/t/rpl_tokudb_update_unique_uc0_lookup0-slave.opt
index dc139282dc4..93a2685e847 100644
--- a/storage/tokudb/mysql-test/rpl/t/rpl_tokudb_update_unique_uc0_lookup0-slave.opt
+++ b/storage/tokudb/mysql-test/rpl/t/rpl_tokudb_update_unique_uc0_lookup0-slave.opt
@@ -1 +1 @@
---read-only=ON --tokudb-rpl-unique-checks-delay=10000 --tokudb-rpl-unique-checks=OFF --tokudb-rpl-lookup-rows-delay=10000 --tokudb-rpl-lookup-rows=OFF
+--read-only=ON --loose-tokudb-rpl-unique-checks-delay=10000 --loose-tokudb-rpl-unique-checks=OFF --loose-tokudb-rpl-lookup-rows-delay=10000 --loose-tokudb-rpl-lookup-rows=OFF
diff --git a/storage/tokudb/mysql-test/rpl/t/rpl_tokudb_update_unique_uc0_lookup0.test b/storage/tokudb/mysql-test/rpl/t/rpl_tokudb_update_unique_uc0_lookup0.test
index 11401ac0ce0..93fef3699d9 100644
--- a/storage/tokudb/mysql-test/rpl/t/rpl_tokudb_update_unique_uc0_lookup0.test
+++ b/storage/tokudb/mysql-test/rpl/t/rpl_tokudb_update_unique_uc0_lookup0.test
@@ -13,7 +13,7 @@ enable_warnings;
connection slave;
# show variables like 'read_only';
-# show variables like 'tokudb_rpl_%';
+show variables like 'tokudb_rpl_%';
# insert some rows
connection master;
diff --git a/storage/tokudb/mysql-test/rpl/t/rpl_tokudb_update_unique_uc0_lookup1-slave.opt b/storage/tokudb/mysql-test/rpl/t/rpl_tokudb_update_unique_uc0_lookup1-slave.opt
index d546dd00669..a4ca1104425 100644
--- a/storage/tokudb/mysql-test/rpl/t/rpl_tokudb_update_unique_uc0_lookup1-slave.opt
+++ b/storage/tokudb/mysql-test/rpl/t/rpl_tokudb_update_unique_uc0_lookup1-slave.opt
@@ -1 +1 @@
---read-only=ON --tokudb-rpl-unique-checks-delay=10000 --tokudb-rpl-unique-checks=OFF --tokudb-rpl-lookup-rows-delay=10000 --tokudb-rpl-lookup-rows=ON
+--read-only=ON --loose-tokudb-rpl-unique-checks-delay=10000 --loose-tokudb-rpl-unique-checks=OFF --loose-tokudb-rpl-lookup-rows-delay=10000 --loose-tokudb-rpl-lookup-rows=ON
diff --git a/storage/tokudb/mysql-test/rpl/t/rpl_tokudb_update_unique_uc0_lookup1.test b/storage/tokudb/mysql-test/rpl/t/rpl_tokudb_update_unique_uc0_lookup1.test
index ea77447bc75..c8976db8ccd 100644
--- a/storage/tokudb/mysql-test/rpl/t/rpl_tokudb_update_unique_uc0_lookup1.test
+++ b/storage/tokudb/mysql-test/rpl/t/rpl_tokudb_update_unique_uc0_lookup1.test
@@ -13,7 +13,7 @@ enable_warnings;
connection slave;
# show variables like 'read_only';
-# show variables like 'tokudb_rpl_%';
+show variables like 'tokudb_rpl_%';
# insert some rows
connection master;
diff --git a/storage/tokudb/mysql-test/rpl/t/rpl_tokudb_write_pk-slave.opt b/storage/tokudb/mysql-test/rpl/t/rpl_tokudb_write_pk-slave.opt
index 9baf0d65ecf..19b40f86454 100644
--- a/storage/tokudb/mysql-test/rpl/t/rpl_tokudb_write_pk-slave.opt
+++ b/storage/tokudb/mysql-test/rpl/t/rpl_tokudb_write_pk-slave.opt
@@ -1 +1 @@
---read-only=ON --tokudb-rpl-unique-checks-delay=5000 --tokudb-rpl-unique-checks=OFF
+--read-only=ON --loose-tokudb-rpl-unique-checks-delay=5000 --loose-tokudb-rpl-unique-checks=OFF
diff --git a/storage/tokudb/mysql-test/rpl/t/rpl_tokudb_write_pk.test b/storage/tokudb/mysql-test/rpl/t/rpl_tokudb_write_pk.test
index c77e4b49605..0ed12b34e1f 100644
--- a/storage/tokudb/mysql-test/rpl/t/rpl_tokudb_write_pk.test
+++ b/storage/tokudb/mysql-test/rpl/t/rpl_tokudb_write_pk.test
@@ -14,7 +14,7 @@ enable_warnings;
connection slave;
# show variables like 'read_only';
-# show variables like 'tokudb_rpl_unique_checks%';
+show variables like 'tokudb_rpl_unique_checks%';
# insert some rows
connection master;
diff --git a/storage/tokudb/mysql-test/rpl/t/rpl_tokudb_write_pk_uc1-slave.opt b/storage/tokudb/mysql-test/rpl/t/rpl_tokudb_write_pk_uc1-slave.opt
index b1df0b6daf0..646a9991753 100644
--- a/storage/tokudb/mysql-test/rpl/t/rpl_tokudb_write_pk_uc1-slave.opt
+++ b/storage/tokudb/mysql-test/rpl/t/rpl_tokudb_write_pk_uc1-slave.opt
@@ -1 +1 @@
---read-only=ON --tokudb-rpl-unique-checks-delay=10000 --tokudb-rpl-unique-checks=ON
+--read-only=ON --loose-tokudb-rpl-unique-checks-delay=10000 --loose-tokudb-rpl-unique-checks=ON
diff --git a/storage/tokudb/mysql-test/rpl/t/rpl_tokudb_write_pk_uc1.test b/storage/tokudb/mysql-test/rpl/t/rpl_tokudb_write_pk_uc1.test
index c77e4b49605..0ed12b34e1f 100644
--- a/storage/tokudb/mysql-test/rpl/t/rpl_tokudb_write_pk_uc1.test
+++ b/storage/tokudb/mysql-test/rpl/t/rpl_tokudb_write_pk_uc1.test
@@ -14,7 +14,7 @@ enable_warnings;
connection slave;
# show variables like 'read_only';
-# show variables like 'tokudb_rpl_unique_checks%';
+show variables like 'tokudb_rpl_unique_checks%';
# insert some rows
connection master;
diff --git a/storage/tokudb/mysql-test/rpl/t/rpl_tokudb_write_unique-slave.opt b/storage/tokudb/mysql-test/rpl/t/rpl_tokudb_write_unique-slave.opt
index 9baf0d65ecf..19b40f86454 100644
--- a/storage/tokudb/mysql-test/rpl/t/rpl_tokudb_write_unique-slave.opt
+++ b/storage/tokudb/mysql-test/rpl/t/rpl_tokudb_write_unique-slave.opt
@@ -1 +1 @@
---read-only=ON --tokudb-rpl-unique-checks-delay=5000 --tokudb-rpl-unique-checks=OFF
+--read-only=ON --loose-tokudb-rpl-unique-checks-delay=5000 --loose-tokudb-rpl-unique-checks=OFF
diff --git a/storage/tokudb/mysql-test/rpl/t/rpl_tokudb_write_unique.test b/storage/tokudb/mysql-test/rpl/t/rpl_tokudb_write_unique.test
index cf6a26b423d..fc4c9597dac 100644
--- a/storage/tokudb/mysql-test/rpl/t/rpl_tokudb_write_unique.test
+++ b/storage/tokudb/mysql-test/rpl/t/rpl_tokudb_write_unique.test
@@ -14,7 +14,7 @@ enable_warnings;
connection slave;
# show variables like 'read_only';
-# show variables like 'tokudb_rpl_unique_checks%';
+show variables like 'tokudb_rpl_unique_checks%';
# insert some rows
connection master;
diff --git a/storage/tokudb/mysql-test/rpl/t/rpl_tokudb_write_unique_uc1-slave.opt b/storage/tokudb/mysql-test/rpl/t/rpl_tokudb_write_unique_uc1-slave.opt
index 0518efd3da5..9139a370e57 100644
--- a/storage/tokudb/mysql-test/rpl/t/rpl_tokudb_write_unique_uc1-slave.opt
+++ b/storage/tokudb/mysql-test/rpl/t/rpl_tokudb_write_unique_uc1-slave.opt
@@ -1 +1 @@
---read-only=ON --tokudb-rpl-unique-checks-delay=5000 --tokudb-rpl-unique-checks=ON
+--read-only=ON --loose-tokudb-rpl-unique-checks-delay=5000 --loose-tokudb-rpl-unique-checks=ON
diff --git a/storage/tokudb/mysql-test/rpl/t/rpl_tokudb_write_unique_uc1.test b/storage/tokudb/mysql-test/rpl/t/rpl_tokudb_write_unique_uc1.test
index cf6a26b423d..fc4c9597dac 100644
--- a/storage/tokudb/mysql-test/rpl/t/rpl_tokudb_write_unique_uc1.test
+++ b/storage/tokudb/mysql-test/rpl/t/rpl_tokudb_write_unique_uc1.test
@@ -14,7 +14,7 @@ enable_warnings;
connection slave;
# show variables like 'read_only';
-# show variables like 'tokudb_rpl_unique_checks%';
+show variables like 'tokudb_rpl_unique_checks%';
# insert some rows
connection master;
diff --git a/storage/tokudb/mysql-test/rpl/t/suite.opt b/storage/tokudb/mysql-test/rpl/t/suite.opt
new file mode 100644
index 00000000000..23511b05020
--- /dev/null
+++ b/storage/tokudb/mysql-test/rpl/t/suite.opt
@@ -0,0 +1 @@
+$TOKUDB_OPT $TOKUDB_LOAD_ADD --loose-tokudb-check-jemalloc=0
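A suite.opt file is picked up by mysql-test-run for every test in the suite. $TOKUDB_OPT and $TOKUDB_LOAD_ADD are mtr-provided variables that expand, roughly, to the plugin directory and a --plugin-load-add for ha_tokudb, while --loose-tokudb-check-jemalloc=0 disables TokuDB's startup check that jemalloc is the allocator, so the suite also runs on servers not linked against jemalloc. Approximately what the expansion looks like (illustrative; exact paths and names vary by build):

    mysqld ... --plugin-dir=<builddir>/storage/tokudb --plugin-load-add=ha_tokudb.so --loose-tokudb-check-jemalloc=0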
diff --git a/storage/tokudb/mysql-test/tokudb/disabled.def b/storage/tokudb/mysql-test/tokudb/disabled.def
index 5b7afa1753e..c98a8aa622a 100644
--- a/storage/tokudb/mysql-test/tokudb/disabled.def
+++ b/storage/tokudb/mysql-test/tokudb/disabled.def
@@ -26,9 +26,5 @@ type_temporal_fractional:
type_temporal_upgrade:
type_timestamp_explicit:
cluster_key_part: engine options on partitioned tables
-#i_s_tokudb_lock_waits_released: unstable, race conditions
-#i_s_tokudb_locks_released: unstable, race conditions
-mvcc-19: tokutek
-mvcc-20: tokutek
-mvcc-27: tokutek
-storage_engine_default: tokudb is not the default storage engine
+i_s_tokudb_lock_waits_released: unstable, race conditions
+i_s_tokudb_locks_released: unstable, race conditions
diff --git a/storage/tokudb/mysql-test/tokudb/include/cluster_key.inc b/storage/tokudb/mysql-test/tokudb/include/cluster_key.inc
new file mode 100644
index 00000000000..d637b46e8fc
--- /dev/null
+++ b/storage/tokudb/mysql-test/tokudb/include/cluster_key.inc
@@ -0,0 +1,138 @@
+# test for TokuDB clustering keys.
+# test assumes that a table 't1' exists with the following columns:
+# a int, b int, c int, d int
+insert into t1 values (1,10,100,1000),(2,20,200,2000),(3,30,300,3000),(4,40,400,4000),(5,50,500,5000),(6,60,600,6000),(7,70,700,7000),(8,80,800,8000),(9,90,900,9000);
+
+
+#normal queries
+
+# ignore rows column
+--replace_column 9 NULL;
+explain select * from t1 where a > 5;
+select * from t1 where a > 5;
+
+# ignore rows column
+--replace_column 9 NULL;
+explain select * from t1 where b > 30;
+select * from t1 where b > 30;
+
+# ignore rows column
+--replace_column 9 NULL;
+explain select * from t1 where c > 750;
+select * from t1 where c > 750;
+
+#covering indexes
+
+# ignore rows column
+--replace_column 9 NULL;
+explain select a from t1 where a > 8;
+select a from t1 where a > 8;
+
+# ignore rows column
+--replace_column 9 NULL;
+explain select a,b from t1 where b > 30;
+select a,b from t1 where b > 30;
+
+# ignore rows column
+--replace_column 9 NULL;
+explain select a,b from t1 where c > 750;
+select a,c from t1 where c > 750;
+
+
+alter table t1 add index bdca(b,d,c,a) clustering=yes;
+insert into t1 values (10,10,10,10);
+alter table t1 drop index bdca;
+
+alter table t1 drop primary key;
+# ignore rows column
+--replace_column 9 NULL;
+explain select * from t1 where a > 5;
+select * from t1 where a > 5;
+
+# ignore rows column
+--replace_column 9 NULL;
+explain select * from t1 where b > 30;
+select * from t1 where b > 30;
+
+# ignore rows column
+--replace_column 9 NULL;
+explain select * from t1 where c > 750;
+select * from t1 where c > 750;
+
+#covering indexes
+
+# ignore rows column
+--replace_column 9 NULL;
+explain select b from t1 where b > 30;
+select b from t1 where b > 30;
+
+# ignore rows column
+--replace_column 9 NULL;
+explain select b from t1 where c > 750;
+select c from t1 where c > 750;
+
+alter table t1 add e varchar(20);
+
+alter table t1 add primary key (a,b,c);
+
+# ignore rows column
+--replace_column 9 NULL;
+explain select * from t1 where a > 5;
+select * from t1 where a > 5;
+
+# ignore rows column
+--replace_column 9 NULL;
+explain select * from t1 where b > 30;
+select * from t1 where b > 30;
+
+# ignore rows column
+--replace_column 9 NULL;
+explain select * from t1 where c > 750;
+select * from t1 where c > 750;
+
+#covering indexes
+
+# ignore rows column
+--replace_column 9 NULL;
+explain select a from t1 where a > 8;
+select a from t1 where a > 8;
+
+# ignore rows column
+--replace_column 9 NULL;
+explain select a,b from t1 where b > 30;
+select a,b from t1 where b > 30;
+
+# ignore rows column
+--replace_column 9 NULL;
+explain select a,b from t1 where c > 750;
+select a,c from t1 where c > 750;
+
+
+alter table t1 drop primary key;
+# ignore rows column
+--replace_column 9 NULL;
+explain select * from t1 where a > 5;
+select * from t1 where a > 5;
+
+# ignore rows column
+--replace_column 9 NULL;
+explain select * from t1 where b > 30;
+select * from t1 where b > 30;
+
+# ignore rows column
+--replace_column 9 NULL;
+explain select * from t1 where c > 750;
+select * from t1 where c > 750;
+
+#covering indexes
+# ignore rows column
+--replace_column 9 NULL;
+explain select b from t1 where b > 30;
+select b from t1 where b > 30;
+
+# ignore rows column
+--replace_column 9 NULL;
+explain select b from t1 where c > 750;
+select c from t1 where c > 750;
+
+
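cluster_key.inc is written to be sourced after the caller has created t1 with columns a, b, c, d, and it exercises TokuDB's clustering index extension: clustering=yes stores the whole row in the secondary index, making it covering for every column. One plausible caller, assuming a TokuDB build (key names hypothetical):

    create table t1 (a int, b int, c int, d int,
                     primary key(a),
                     key kb(b) clustering=yes,
                     key kc(c) clustering=yes) engine=tokudb;
    --source include/cluster_key.inc
    drop table t1;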
diff --git a/storage/tokudb/mysql-test/tokudb/r/background_job_manager.result b/storage/tokudb/mysql-test/tokudb/r/background_job_manager.result
new file mode 100644
index 00000000000..5769ee74071
--- /dev/null
+++ b/storage/tokudb/mysql-test/tokudb/r/background_job_manager.result
@@ -0,0 +1,122 @@
+set @orig_auto_analyze = @@session.tokudb_auto_analyze;
+set @orig_in_background = @@session.tokudb_analyze_in_background;
+set @orig_mode = @@session.tokudb_analyze_mode;
+set @orig_throttle = @@session.tokudb_analyze_throttle;
+set @orig_time = @@session.tokudb_analyze_time;
+set @orig_scale_percent = @@global.tokudb_cardinality_scale_percent;
+set @orig_default_storage_engine = @@session.default_storage_engine;
+set @orig_pause_background_job_manager = @@global.tokudb_debug_pause_background_job_manager;
+set session default_storage_engine='tokudb';
+set session tokudb_auto_analyze=1;
+set session tokudb_analyze_in_background=1;
+set session tokudb_analyze_mode=tokudb_analyze_standard;
+set session tokudb_analyze_throttle=0;
+set session tokudb_analyze_time=0;
+set global tokudb_cardinality_scale_percent=DEFAULT;
+set global tokudb_debug_pause_background_job_manager=TRUE;
+show create table information_schema.tokudb_background_job_status;
+Table Create Table
+TokuDB_background_job_status CREATE TEMPORARY TABLE `TokuDB_background_job_status` (
+ `id` bigint(0) NOT NULL DEFAULT '0',
+ `database_name` varchar(256) NOT NULL DEFAULT '',
+ `table_name` varchar(256) NOT NULL DEFAULT '',
+ `job_type` varchar(256) NOT NULL DEFAULT '',
+ `job_params` varchar(256) NOT NULL DEFAULT '',
+ `scheduler` varchar(32) NOT NULL DEFAULT '',
+ `scheduled_time` datetime NOT NULL DEFAULT '0000-00-00 00:00:00',
+ `started_time` datetime DEFAULT NULL,
+ `status` varchar(256) DEFAULT NULL
+) ENGINE=MEMORY DEFAULT CHARSET=utf8
+create table t1 (a int not null auto_increment, b int, c int, primary key(a), key kb(b), key kc(c), key kabc(a,b,c), key kab(a,b), key kbc(b,c));
+insert into t1(b,c) values(0,0), (1,1), (2,2), (3,3);
+select database_name, table_name, job_type, job_params, scheduler from information_schema.tokudb_background_job_status;
+database_name table_name job_type job_params scheduler
+test t1 TOKUDB_ANALYZE_MODE_STANDARD TOKUDB_ANALYZE_DELETE_FRACTION=1.000000; TOKUDB_ANALYZE_TIME=0; TOKUDB_ANALYZE_THROTTLE=0; AUTO
+create table t2 like t1;
+create table t3 like t1;
+create table t4 like t1;
+analyze table t1;
+Table Op Msg_type Msg_text
+test.t1 analyze status Operation failed
+analyze table t2;
+Table Op Msg_type Msg_text
+test.t2 analyze status OK
+analyze table t3;
+Table Op Msg_type Msg_text
+test.t3 analyze status OK
+analyze table t4;
+Table Op Msg_type Msg_text
+test.t4 analyze status OK
+select database_name, table_name, job_type, job_params, scheduler from information_schema.tokudb_background_job_status;
+database_name table_name job_type job_params scheduler
+test t1 TOKUDB_ANALYZE_MODE_STANDARD TOKUDB_ANALYZE_DELETE_FRACTION=1.000000; TOKUDB_ANALYZE_TIME=0; TOKUDB_ANALYZE_THROTTLE=0; AUTO
+test t2 TOKUDB_ANALYZE_MODE_STANDARD TOKUDB_ANALYZE_DELETE_FRACTION=1.000000; TOKUDB_ANALYZE_TIME=0; TOKUDB_ANALYZE_THROTTLE=0; USER
+test t3 TOKUDB_ANALYZE_MODE_STANDARD TOKUDB_ANALYZE_DELETE_FRACTION=1.000000; TOKUDB_ANALYZE_TIME=0; TOKUDB_ANALYZE_THROTTLE=0; USER
+test t4 TOKUDB_ANALYZE_MODE_STANDARD TOKUDB_ANALYZE_DELETE_FRACTION=1.000000; TOKUDB_ANALYZE_TIME=0; TOKUDB_ANALYZE_THROTTLE=0; USER
+set global tokudb_debug_pause_background_job_manager=FALSE;
+set global tokudb_debug_pause_background_job_manager=TRUE;
+analyze table t1;
+Table Op Msg_type Msg_text
+test.t1 analyze status OK
+analyze table t2;
+Table Op Msg_type Msg_text
+test.t2 analyze status OK
+analyze table t3;
+Table Op Msg_type Msg_text
+test.t3 analyze status OK
+analyze table t4;
+Table Op Msg_type Msg_text
+test.t4 analyze status OK
+select database_name, table_name, job_type, job_params, scheduler from information_schema.tokudb_background_job_status;
+database_name table_name job_type job_params scheduler
+test t1 TOKUDB_ANALYZE_MODE_STANDARD TOKUDB_ANALYZE_DELETE_FRACTION=1.000000; TOKUDB_ANALYZE_TIME=0; TOKUDB_ANALYZE_THROTTLE=0; USER
+test t2 TOKUDB_ANALYZE_MODE_STANDARD TOKUDB_ANALYZE_DELETE_FRACTION=1.000000; TOKUDB_ANALYZE_TIME=0; TOKUDB_ANALYZE_THROTTLE=0; USER
+test t3 TOKUDB_ANALYZE_MODE_STANDARD TOKUDB_ANALYZE_DELETE_FRACTION=1.000000; TOKUDB_ANALYZE_TIME=0; TOKUDB_ANALYZE_THROTTLE=0; USER
+test t4 TOKUDB_ANALYZE_MODE_STANDARD TOKUDB_ANALYZE_DELETE_FRACTION=1.000000; TOKUDB_ANALYZE_TIME=0; TOKUDB_ANALYZE_THROTTLE=0; USER
+alter table t1 add column d int;
+select database_name, table_name, job_type, job_params, scheduler from information_schema.tokudb_background_job_status;
+database_name table_name job_type job_params scheduler
+test t2 TOKUDB_ANALYZE_MODE_STANDARD TOKUDB_ANALYZE_DELETE_FRACTION=1.000000; TOKUDB_ANALYZE_TIME=0; TOKUDB_ANALYZE_THROTTLE=0; USER
+test t3 TOKUDB_ANALYZE_MODE_STANDARD TOKUDB_ANALYZE_DELETE_FRACTION=1.000000; TOKUDB_ANALYZE_TIME=0; TOKUDB_ANALYZE_THROTTLE=0; USER
+test t4 TOKUDB_ANALYZE_MODE_STANDARD TOKUDB_ANALYZE_DELETE_FRACTION=1.000000; TOKUDB_ANALYZE_TIME=0; TOKUDB_ANALYZE_THROTTLE=0; USER
+set session tokudb_analyze_mode=tokudb_analyze_cancel;
+analyze table t2;
+Table Op Msg_type Msg_text
+test.t2 analyze status OK
+select database_name, table_name, job_type, job_params, scheduler from information_schema.tokudb_background_job_status;
+database_name table_name job_type job_params scheduler
+test t3 TOKUDB_ANALYZE_MODE_STANDARD TOKUDB_ANALYZE_DELETE_FRACTION=1.000000; TOKUDB_ANALYZE_TIME=0; TOKUDB_ANALYZE_THROTTLE=0; USER
+test t4 TOKUDB_ANALYZE_MODE_STANDARD TOKUDB_ANALYZE_DELETE_FRACTION=1.000000; TOKUDB_ANALYZE_TIME=0; TOKUDB_ANALYZE_THROTTLE=0; USER
+set session tokudb_analyze_mode=tokudb_analyze_recount_rows;
+analyze table t1;
+Table Op Msg_type Msg_text
+test.t1 analyze status OK
+select database_name, table_name, job_type, job_params, scheduler from information_schema.tokudb_background_job_status;
+database_name table_name job_type job_params scheduler
+test t3 TOKUDB_ANALYZE_MODE_STANDARD TOKUDB_ANALYZE_DELETE_FRACTION=1.000000; TOKUDB_ANALYZE_TIME=0; TOKUDB_ANALYZE_THROTTLE=0; USER
+test t4 TOKUDB_ANALYZE_MODE_STANDARD TOKUDB_ANALYZE_DELETE_FRACTION=1.000000; TOKUDB_ANALYZE_TIME=0; TOKUDB_ANALYZE_THROTTLE=0; USER
+test t1 TOKUDB_ANALYZE_MODE_RECOUNT_ROWS TOKUDB_ANALYZE_THROTTLE=0; USER
+set session tokudb_analyze_mode=tokudb_analyze_standard;
+set session tokudb_analyze_in_background=0;
+analyze table t3;
+Table Op Msg_type Msg_text
+test.t3 analyze status OK
+select database_name, table_name, job_type, job_params, scheduler from information_schema.tokudb_background_job_status;
+database_name table_name job_type job_params scheduler
+test t4 TOKUDB_ANALYZE_MODE_STANDARD TOKUDB_ANALYZE_DELETE_FRACTION=1.000000; TOKUDB_ANALYZE_TIME=0; TOKUDB_ANALYZE_THROTTLE=0; USER
+test t1 TOKUDB_ANALYZE_MODE_RECOUNT_ROWS TOKUDB_ANALYZE_THROTTLE=0; USER
+drop table t1;
+drop table t2;
+drop table t3;
+drop table t4;
+select database_name, table_name, job_type, job_params, scheduler from information_schema.tokudb_background_job_status;
+database_name table_name job_type job_params scheduler
+set global tokudb_debug_pause_background_job_manager=FALSE;
+set session tokudb_auto_analyze = @orig_auto_analyze;
+set session tokudb_analyze_in_background = @orig_in_background;
+set session tokudb_analyze_mode = @orig_mode;
+set session tokudb_analyze_throttle = @orig_throttle;
+set session tokudb_analyze_time = @orig_time;
+set global tokudb_cardinality_scale_percent = @orig_scale_percent;
+set session default_storage_engine = @orig_default_storage_engine;
+set global tokudb_debug_pause_background_job_manager = @orig_pause_background_job_manager;
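The flow in this expected output: with tokudb_debug_pause_background_job_manager=TRUE the job manager accepts work but does not run it, so the queue can be inspected deterministically; the first explicit ANALYZE of t1 reports "Operation failed", presumably because the auto-scheduled job for t1 is still queued and a duplicate job for the same table is refused. Briefly unpausing drains the queue, after which the same ANALYZE statements enqueue fresh USER jobs. The monitoring query used throughout:

    select database_name, table_name, job_type, job_params, scheduler
    from information_schema.tokudb_background_job_status;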
diff --git a/storage/tokudb/mysql-test/tokudb/r/card_add_drop.result b/storage/tokudb/mysql-test/tokudb/r/card_add_drop.result
index 431f0200a7a..71a39eb1f3e 100644
--- a/storage/tokudb/mysql-test/tokudb/r/card_add_drop.result
+++ b/storage/tokudb/mysql-test/tokudb/r/card_add_drop.result
@@ -4,35 +4,35 @@ create table tt (a int, b int, c int, d int, key(a), key(b), key(c));
insert into tt values (0,0,0,0),(1,0,0,0),(2,0,1,0),(3,0,1,0);
show indexes from tt;
Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
-tt 1 a 1 a A NULL NULL NULL YES BTREE
-tt 1 b 1 b A NULL NULL NULL YES BTREE
-tt 1 c 1 c A NULL NULL NULL YES BTREE
+tt 1 a 1 a A 4 NULL NULL YES BTREE
+tt 1 b 1 b A 4 NULL NULL YES BTREE
+tt 1 c 1 c A 4 NULL NULL YES BTREE
analyze table tt;
Table Op Msg_type Msg_text
test.tt analyze status OK
show indexes from tt;
Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
tt 1 a 1 a A 4 NULL NULL YES BTREE
-tt 1 b 1 b A 1 NULL NULL YES BTREE
-tt 1 c 1 c A 2 NULL NULL YES BTREE
+tt 1 b 1 b A 2 NULL NULL YES BTREE
+tt 1 c 1 c A 4 NULL NULL YES BTREE
alter table tt drop key b, add key (d);
show indexes from tt;
Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
tt 1 a 1 a A 4 NULL NULL YES BTREE
-tt 1 c 1 c A 2 NULL NULL YES BTREE
-tt 1 d 1 d A NULL NULL NULL YES BTREE
+tt 1 c 1 c A 4 NULL NULL YES BTREE
+tt 1 d 1 d A 4 NULL NULL YES BTREE
analyze table tt;
Table Op Msg_type Msg_text
test.tt analyze status OK
show indexes from tt;
Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
tt 1 a 1 a A 4 NULL NULL YES BTREE
-tt 1 c 1 c A 2 NULL NULL YES BTREE
-tt 1 d 1 d A 1 NULL NULL YES BTREE
+tt 1 c 1 c A 4 NULL NULL YES BTREE
+tt 1 d 1 d A 2 NULL NULL YES BTREE
flush tables;
show indexes from tt;
Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
tt 1 a 1 a A 4 NULL NULL YES BTREE
-tt 1 c 1 c A 2 NULL NULL YES BTREE
-tt 1 d 1 d A 1 NULL NULL YES BTREE
+tt 1 c 1 c A 4 NULL NULL YES BTREE
+tt 1 d 1 d A 2 NULL NULL YES BTREE
drop table tt;
diff --git a/storage/tokudb/mysql-test/tokudb/r/card_add_index.result b/storage/tokudb/mysql-test/tokudb/r/card_add_index.result
index f5ba5b58bed..9a929b19a80 100644
--- a/storage/tokudb/mysql-test/tokudb/r/card_add_index.result
+++ b/storage/tokudb/mysql-test/tokudb/r/card_add_index.result
@@ -15,32 +15,32 @@ alter table tt add key (b);
show indexes from tt;
Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
tt 0 PRIMARY 1 a A 4 NULL NULL BTREE
-tt 1 b 1 b A NULL NULL NULL YES BTREE
+tt 1 b 1 b A 4 NULL NULL YES BTREE
analyze table tt;
Table Op Msg_type Msg_text
test.tt analyze status OK
show indexes from tt;
Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
tt 0 PRIMARY 1 a A 4 NULL NULL BTREE
-tt 1 b 1 b A 1 NULL NULL YES BTREE
+tt 1 b 1 b A 2 NULL NULL YES BTREE
alter table tt add key (c);
show indexes from tt;
Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
tt 0 PRIMARY 1 a A 4 NULL NULL BTREE
-tt 1 b 1 b A 1 NULL NULL YES BTREE
-tt 1 c 1 c A NULL NULL NULL YES BTREE
+tt 1 b 1 b A 2 NULL NULL YES BTREE
+tt 1 c 1 c A 4 NULL NULL YES BTREE
analyze table tt;
Table Op Msg_type Msg_text
test.tt analyze status OK
show indexes from tt;
Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
tt 0 PRIMARY 1 a A 4 NULL NULL BTREE
-tt 1 b 1 b A 1 NULL NULL YES BTREE
-tt 1 c 1 c A 2 NULL NULL YES BTREE
+tt 1 b 1 b A 2 NULL NULL YES BTREE
+tt 1 c 1 c A 4 NULL NULL YES BTREE
flush tables;
show indexes from tt;
Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
tt 0 PRIMARY 1 a A 4 NULL NULL BTREE
-tt 1 b 1 b A 1 NULL NULL YES BTREE
-tt 1 c 1 c A 2 NULL NULL YES BTREE
+tt 1 b 1 b A 2 NULL NULL YES BTREE
+tt 1 c 1 c A 4 NULL NULL YES BTREE
drop table tt;
diff --git a/storage/tokudb/mysql-test/tokudb/r/card_auto_analyze_lots.result b/storage/tokudb/mysql-test/tokudb/r/card_auto_analyze_lots.result
new file mode 100644
index 00000000000..662ffbade25
--- /dev/null
+++ b/storage/tokudb/mysql-test/tokudb/r/card_auto_analyze_lots.result
@@ -0,0 +1,805 @@
+SHOW INDEX FROM ar_200;
+Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
+ar_200 0 PRIMARY 1 a A 1 NULL NULL BTREE
+ar_200 1 bkey 1 b A 1 NULL NULL YES BTREE
+SHOW INDEX FROM ar_199;
+Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
+ar_199 0 PRIMARY 1 a A 1 NULL NULL BTREE
+ar_199 1 bkey 1 b A 1 NULL NULL YES BTREE
+SHOW INDEX FROM ar_198;
+Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
+ar_198 0 PRIMARY 1 a A 1 NULL NULL BTREE
+ar_198 1 bkey 1 b A 1 NULL NULL YES BTREE
+SHOW INDEX FROM ar_197;
+Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
+ar_197 0 PRIMARY 1 a A 1 NULL NULL BTREE
+ar_197 1 bkey 1 b A 1 NULL NULL YES BTREE
+SHOW INDEX FROM ar_196;
+Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
+ar_196 0 PRIMARY 1 a A 1 NULL NULL BTREE
+ar_196 1 bkey 1 b A 1 NULL NULL YES BTREE
+SHOW INDEX FROM ar_195;
+Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
+ar_195 0 PRIMARY 1 a A 1 NULL NULL BTREE
+ar_195 1 bkey 1 b A 1 NULL NULL YES BTREE
+SHOW INDEX FROM ar_194;
+Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
+ar_194 0 PRIMARY 1 a A 1 NULL NULL BTREE
+ar_194 1 bkey 1 b A 1 NULL NULL YES BTREE
+SHOW INDEX FROM ar_193;
+Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
+ar_193 0 PRIMARY 1 a A 1 NULL NULL BTREE
+ar_193 1 bkey 1 b A 1 NULL NULL YES BTREE
+SHOW INDEX FROM ar_192;
+Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
+ar_192 0 PRIMARY 1 a A 1 NULL NULL BTREE
+ar_192 1 bkey 1 b A 1 NULL NULL YES BTREE
+SHOW INDEX FROM ar_191;
+Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
+ar_191 0 PRIMARY 1 a A 1 NULL NULL BTREE
+ar_191 1 bkey 1 b A 1 NULL NULL YES BTREE
+SHOW INDEX FROM ar_190;
+Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
+ar_190 0 PRIMARY 1 a A 1 NULL NULL BTREE
+ar_190 1 bkey 1 b A 1 NULL NULL YES BTREE
+SHOW INDEX FROM ar_189;
+Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
+ar_189 0 PRIMARY 1 a A 1 NULL NULL BTREE
+ar_189 1 bkey 1 b A 1 NULL NULL YES BTREE
+SHOW INDEX FROM ar_188;
+Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
+ar_188 0 PRIMARY 1 a A 1 NULL NULL BTREE
+ar_188 1 bkey 1 b A 1 NULL NULL YES BTREE
+SHOW INDEX FROM ar_187;
+Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
+ar_187 0 PRIMARY 1 a A 1 NULL NULL BTREE
+ar_187 1 bkey 1 b A 1 NULL NULL YES BTREE
+SHOW INDEX FROM ar_186;
+Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
+ar_186 0 PRIMARY 1 a A 1 NULL NULL BTREE
+ar_186 1 bkey 1 b A 1 NULL NULL YES BTREE
+SHOW INDEX FROM ar_185;
+Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
+ar_185 0 PRIMARY 1 a A 1 NULL NULL BTREE
+ar_185 1 bkey 1 b A 1 NULL NULL YES BTREE
+SHOW INDEX FROM ar_184;
+Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
+ar_184 0 PRIMARY 1 a A 1 NULL NULL BTREE
+ar_184 1 bkey 1 b A 1 NULL NULL YES BTREE
+SHOW INDEX FROM ar_183;
+Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
+ar_183 0 PRIMARY 1 a A 1 NULL NULL BTREE
+ar_183 1 bkey 1 b A 1 NULL NULL YES BTREE
+SHOW INDEX FROM ar_182;
+Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
+ar_182 0 PRIMARY 1 a A 1 NULL NULL BTREE
+ar_182 1 bkey 1 b A 1 NULL NULL YES BTREE
+SHOW INDEX FROM ar_181;
+Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
+ar_181 0 PRIMARY 1 a A 1 NULL NULL BTREE
+ar_181 1 bkey 1 b A 1 NULL NULL YES BTREE
+SHOW INDEX FROM ar_180;
+Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
+ar_180 0 PRIMARY 1 a A 1 NULL NULL BTREE
+ar_180 1 bkey 1 b A 1 NULL NULL YES BTREE
+SHOW INDEX FROM ar_179;
+Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
+ar_179 0 PRIMARY 1 a A 1 NULL NULL BTREE
+ar_179 1 bkey 1 b A 1 NULL NULL YES BTREE
+SHOW INDEX FROM ar_178;
+Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
+ar_178 0 PRIMARY 1 a A 1 NULL NULL BTREE
+ar_178 1 bkey 1 b A 1 NULL NULL YES BTREE
+SHOW INDEX FROM ar_177;
+Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
+ar_177 0 PRIMARY 1 a A 1 NULL NULL BTREE
+ar_177 1 bkey 1 b A 1 NULL NULL YES BTREE
+SHOW INDEX FROM ar_176;
+Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
+ar_176 0 PRIMARY 1 a A 1 NULL NULL BTREE
+ar_176 1 bkey 1 b A 1 NULL NULL YES BTREE
+SHOW INDEX FROM ar_175;
+Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
+ar_175 0 PRIMARY 1 a A 1 NULL NULL BTREE
+ar_175 1 bkey 1 b A 1 NULL NULL YES BTREE
+SHOW INDEX FROM ar_174;
+Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
+ar_174 0 PRIMARY 1 a A 1 NULL NULL BTREE
+ar_174 1 bkey 1 b A 1 NULL NULL YES BTREE
+SHOW INDEX FROM ar_173;
+Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
+ar_173 0 PRIMARY 1 a A 1 NULL NULL BTREE
+ar_173 1 bkey 1 b A 1 NULL NULL YES BTREE
+SHOW INDEX FROM ar_172;
+Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
+ar_172 0 PRIMARY 1 a A 1 NULL NULL BTREE
+ar_172 1 bkey 1 b A 1 NULL NULL YES BTREE
+SHOW INDEX FROM ar_171;
+Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
+ar_171 0 PRIMARY 1 a A 1 NULL NULL BTREE
+ar_171 1 bkey 1 b A 1 NULL NULL YES BTREE
+SHOW INDEX FROM ar_170;
+Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
+ar_170 0 PRIMARY 1 a A 1 NULL NULL BTREE
+ar_170 1 bkey 1 b A 1 NULL NULL YES BTREE
+SHOW INDEX FROM ar_169;
+Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
+ar_169 0 PRIMARY 1 a A 1 NULL NULL BTREE
+ar_169 1 bkey 1 b A 1 NULL NULL YES BTREE
+SHOW INDEX FROM ar_168;
+Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
+ar_168 0 PRIMARY 1 a A 1 NULL NULL BTREE
+ar_168 1 bkey 1 b A 1 NULL NULL YES BTREE
+SHOW INDEX FROM ar_167;
+Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
+ar_167 0 PRIMARY 1 a A 1 NULL NULL BTREE
+ar_167 1 bkey 1 b A 1 NULL NULL YES BTREE
+SHOW INDEX FROM ar_166;
+Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
+ar_166 0 PRIMARY 1 a A 1 NULL NULL BTREE
+ar_166 1 bkey 1 b A 1 NULL NULL YES BTREE
+SHOW INDEX FROM ar_165;
+Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
+ar_165 0 PRIMARY 1 a A 1 NULL NULL BTREE
+ar_165 1 bkey 1 b A 1 NULL NULL YES BTREE
+SHOW INDEX FROM ar_164;
+Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
+ar_164 0 PRIMARY 1 a A 1 NULL NULL BTREE
+ar_164 1 bkey 1 b A 1 NULL NULL YES BTREE
+SHOW INDEX FROM ar_163;
+Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
+ar_163 0 PRIMARY 1 a A 1 NULL NULL BTREE
+ar_163 1 bkey 1 b A 1 NULL NULL YES BTREE
+SHOW INDEX FROM ar_162;
+Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
+ar_162 0 PRIMARY 1 a A 1 NULL NULL BTREE
+ar_162 1 bkey 1 b A 1 NULL NULL YES BTREE
+SHOW INDEX FROM ar_161;
+Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
+ar_161 0 PRIMARY 1 a A 1 NULL NULL BTREE
+ar_161 1 bkey 1 b A 1 NULL NULL YES BTREE
+SHOW INDEX FROM ar_160;
+Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
+ar_160 0 PRIMARY 1 a A 1 NULL NULL BTREE
+ar_160 1 bkey 1 b A 1 NULL NULL YES BTREE
+SHOW INDEX FROM ar_159;
+Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
+ar_159 0 PRIMARY 1 a A 1 NULL NULL BTREE
+ar_159 1 bkey 1 b A 1 NULL NULL YES BTREE
+SHOW INDEX FROM ar_158;
+Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
+ar_158 0 PRIMARY 1 a A 1 NULL NULL BTREE
+ar_158 1 bkey 1 b A 1 NULL NULL YES BTREE
+SHOW INDEX FROM ar_157;
+Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
+ar_157 0 PRIMARY 1 a A 1 NULL NULL BTREE
+ar_157 1 bkey 1 b A 1 NULL NULL YES BTREE
+SHOW INDEX FROM ar_156;
+Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
+ar_156 0 PRIMARY 1 a A 1 NULL NULL BTREE
+ar_156 1 bkey 1 b A 1 NULL NULL YES BTREE
+SHOW INDEX FROM ar_155;
+Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
+ar_155 0 PRIMARY 1 a A 1 NULL NULL BTREE
+ar_155 1 bkey 1 b A 1 NULL NULL YES BTREE
+SHOW INDEX FROM ar_154;
+Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
+ar_154 0 PRIMARY 1 a A 1 NULL NULL BTREE
+ar_154 1 bkey 1 b A 1 NULL NULL YES BTREE
+SHOW INDEX FROM ar_153;
+Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
+ar_153 0 PRIMARY 1 a A 1 NULL NULL BTREE
+ar_153 1 bkey 1 b A 1 NULL NULL YES BTREE
+SHOW INDEX FROM ar_152;
+Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
+ar_152 0 PRIMARY 1 a A 1 NULL NULL BTREE
+ar_152 1 bkey 1 b A 1 NULL NULL YES BTREE
+SHOW INDEX FROM ar_151;
+Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
+ar_151 0 PRIMARY 1 a A 1 NULL NULL BTREE
+ar_151 1 bkey 1 b A 1 NULL NULL YES BTREE
+SHOW INDEX FROM ar_150;
+Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
+ar_150 0 PRIMARY 1 a A 1 NULL NULL BTREE
+ar_150 1 bkey 1 b A 1 NULL NULL YES BTREE
+SHOW INDEX FROM ar_149;
+Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
+ar_149 0 PRIMARY 1 a A 1 NULL NULL BTREE
+ar_149 1 bkey 1 b A 1 NULL NULL YES BTREE
+SHOW INDEX FROM ar_148;
+Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
+ar_148 0 PRIMARY 1 a A 1 NULL NULL BTREE
+ar_148 1 bkey 1 b A 1 NULL NULL YES BTREE
+SHOW INDEX FROM ar_147;
+Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
+ar_147 0 PRIMARY 1 a A 1 NULL NULL BTREE
+ar_147 1 bkey 1 b A 1 NULL NULL YES BTREE
+SHOW INDEX FROM ar_146;
+Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
+ar_146 0 PRIMARY 1 a A 1 NULL NULL BTREE
+ar_146 1 bkey 1 b A 1 NULL NULL YES BTREE
+SHOW INDEX FROM ar_145;
+Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
+ar_145 0 PRIMARY 1 a A 1 NULL NULL BTREE
+ar_145 1 bkey 1 b A 1 NULL NULL YES BTREE
+SHOW INDEX FROM ar_144;
+Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
+ar_144 0 PRIMARY 1 a A 1 NULL NULL BTREE
+ar_144 1 bkey 1 b A 1 NULL NULL YES BTREE
+SHOW INDEX FROM ar_143;
+Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
+ar_143 0 PRIMARY 1 a A 1 NULL NULL BTREE
+ar_143 1 bkey 1 b A 1 NULL NULL YES BTREE
+SHOW INDEX FROM ar_142;
+Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
+ar_142 0 PRIMARY 1 a A 1 NULL NULL BTREE
+ar_142 1 bkey 1 b A 1 NULL NULL YES BTREE
+SHOW INDEX FROM ar_141;
+Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
+ar_141 0 PRIMARY 1 a A 1 NULL NULL BTREE
+ar_141 1 bkey 1 b A 1 NULL NULL YES BTREE
+SHOW INDEX FROM ar_140;
+Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
+ar_140 0 PRIMARY 1 a A 1 NULL NULL BTREE
+ar_140 1 bkey 1 b A 1 NULL NULL YES BTREE
+SHOW INDEX FROM ar_139;
+Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
+ar_139 0 PRIMARY 1 a A 1 NULL NULL BTREE
+ar_139 1 bkey 1 b A 1 NULL NULL YES BTREE
+SHOW INDEX FROM ar_138;
+Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
+ar_138 0 PRIMARY 1 a A 1 NULL NULL BTREE
+ar_138 1 bkey 1 b A 1 NULL NULL YES BTREE
+SHOW INDEX FROM ar_137;
+Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
+ar_137 0 PRIMARY 1 a A 1 NULL NULL BTREE
+ar_137 1 bkey 1 b A 1 NULL NULL YES BTREE
+SHOW INDEX FROM ar_136;
+Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
+ar_136 0 PRIMARY 1 a A 1 NULL NULL BTREE
+ar_136 1 bkey 1 b A 1 NULL NULL YES BTREE
+SHOW INDEX FROM ar_135;
+Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
+ar_135 0 PRIMARY 1 a A 1 NULL NULL BTREE
+ar_135 1 bkey 1 b A 1 NULL NULL YES BTREE
+SHOW INDEX FROM ar_134;
+Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
+ar_134 0 PRIMARY 1 a A 1 NULL NULL BTREE
+ar_134 1 bkey 1 b A 1 NULL NULL YES BTREE
+SHOW INDEX FROM ar_133;
+Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
+ar_133 0 PRIMARY 1 a A 1 NULL NULL BTREE
+ar_133 1 bkey 1 b A 1 NULL NULL YES BTREE
+SHOW INDEX FROM ar_132;
+Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
+ar_132 0 PRIMARY 1 a A 1 NULL NULL BTREE
+ar_132 1 bkey 1 b A 1 NULL NULL YES BTREE
+SHOW INDEX FROM ar_131;
+Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
+ar_131 0 PRIMARY 1 a A 1 NULL NULL BTREE
+ar_131 1 bkey 1 b A 1 NULL NULL YES BTREE
+SHOW INDEX FROM ar_130;
+Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
+ar_130 0 PRIMARY 1 a A 1 NULL NULL BTREE
+ar_130 1 bkey 1 b A 1 NULL NULL YES BTREE
+SHOW INDEX FROM ar_129;
+Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
+ar_129 0 PRIMARY 1 a A 1 NULL NULL BTREE
+ar_129 1 bkey 1 b A 1 NULL NULL YES BTREE
+SHOW INDEX FROM ar_128;
+Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
+ar_128 0 PRIMARY 1 a A 1 NULL NULL BTREE
+ar_128 1 bkey 1 b A 1 NULL NULL YES BTREE
+SHOW INDEX FROM ar_127;
+Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
+ar_127 0 PRIMARY 1 a A 1 NULL NULL BTREE
+ar_127 1 bkey 1 b A 1 NULL NULL YES BTREE
+SHOW INDEX FROM ar_126;
+Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
+ar_126 0 PRIMARY 1 a A 1 NULL NULL BTREE
+ar_126 1 bkey 1 b A 1 NULL NULL YES BTREE
+SHOW INDEX FROM ar_125;
+Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
+ar_125 0 PRIMARY 1 a A 1 NULL NULL BTREE
+ar_125 1 bkey 1 b A 1 NULL NULL YES BTREE
+SHOW INDEX FROM ar_124;
+Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
+ar_124 0 PRIMARY 1 a A 1 NULL NULL BTREE
+ar_124 1 bkey 1 b A 1 NULL NULL YES BTREE
+SHOW INDEX FROM ar_123;
+Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
+ar_123 0 PRIMARY 1 a A 1 NULL NULL BTREE
+ar_123 1 bkey 1 b A 1 NULL NULL YES BTREE
+SHOW INDEX FROM ar_122;
+Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
+ar_122 0 PRIMARY 1 a A 1 NULL NULL BTREE
+ar_122 1 bkey 1 b A 1 NULL NULL YES BTREE
+SHOW INDEX FROM ar_121;
+Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
+ar_121 0 PRIMARY 1 a A 1 NULL NULL BTREE
+ar_121 1 bkey 1 b A 1 NULL NULL YES BTREE
+SHOW INDEX FROM ar_120;
+Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
+ar_120 0 PRIMARY 1 a A 1 NULL NULL BTREE
+ar_120 1 bkey 1 b A 1 NULL NULL YES BTREE
+SHOW INDEX FROM ar_119;
+Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
+ar_119 0 PRIMARY 1 a A 1 NULL NULL BTREE
+ar_119 1 bkey 1 b A 1 NULL NULL YES BTREE
+SHOW INDEX FROM ar_118;
+Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
+ar_118 0 PRIMARY 1 a A 1 NULL NULL BTREE
+ar_118 1 bkey 1 b A 1 NULL NULL YES BTREE
+SHOW INDEX FROM ar_117;
+Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
+ar_117 0 PRIMARY 1 a A 1 NULL NULL BTREE
+ar_117 1 bkey 1 b A 1 NULL NULL YES BTREE
+SHOW INDEX FROM ar_116;
+Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
+ar_116 0 PRIMARY 1 a A 1 NULL NULL BTREE
+ar_116 1 bkey 1 b A 1 NULL NULL YES BTREE
+SHOW INDEX FROM ar_115;
+Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
+ar_115 0 PRIMARY 1 a A 1 NULL NULL BTREE
+ar_115 1 bkey 1 b A 1 NULL NULL YES BTREE
+SHOW INDEX FROM ar_114;
+Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
+ar_114 0 PRIMARY 1 a A 1 NULL NULL BTREE
+ar_114 1 bkey 1 b A 1 NULL NULL YES BTREE
+SHOW INDEX FROM ar_113;
+Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
+ar_113 0 PRIMARY 1 a A 1 NULL NULL BTREE
+ar_113 1 bkey 1 b A 1 NULL NULL YES BTREE
+SHOW INDEX FROM ar_112;
+Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
+ar_112 0 PRIMARY 1 a A 1 NULL NULL BTREE
+ar_112 1 bkey 1 b A 1 NULL NULL YES BTREE
+SHOW INDEX FROM ar_111;
+Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
+ar_111 0 PRIMARY 1 a A 1 NULL NULL BTREE
+ar_111 1 bkey 1 b A 1 NULL NULL YES BTREE
+SHOW INDEX FROM ar_110;
+Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
+ar_110 0 PRIMARY 1 a A 1 NULL NULL BTREE
+ar_110 1 bkey 1 b A 1 NULL NULL YES BTREE
+SHOW INDEX FROM ar_109;
+Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
+ar_109 0 PRIMARY 1 a A 1 NULL NULL BTREE
+ar_109 1 bkey 1 b A 1 NULL NULL YES BTREE
+SHOW INDEX FROM ar_108;
+Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
+ar_108 0 PRIMARY 1 a A 1 NULL NULL BTREE
+ar_108 1 bkey 1 b A 1 NULL NULL YES BTREE
+SHOW INDEX FROM ar_107;
+Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
+ar_107 0 PRIMARY 1 a A 1 NULL NULL BTREE
+ar_107 1 bkey 1 b A 1 NULL NULL YES BTREE
+SHOW INDEX FROM ar_106;
+Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
+ar_106 0 PRIMARY 1 a A 1 NULL NULL BTREE
+ar_106 1 bkey 1 b A 1 NULL NULL YES BTREE
+SHOW INDEX FROM ar_105;
+Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
+ar_105 0 PRIMARY 1 a A 1 NULL NULL BTREE
+ar_105 1 bkey 1 b A 1 NULL NULL YES BTREE
+SHOW INDEX FROM ar_104;
+Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
+ar_104 0 PRIMARY 1 a A 1 NULL NULL BTREE
+ar_104 1 bkey 1 b A 1 NULL NULL YES BTREE
+SHOW INDEX FROM ar_103;
+Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
+ar_103 0 PRIMARY 1 a A 1 NULL NULL BTREE
+ar_103 1 bkey 1 b A 1 NULL NULL YES BTREE
+SHOW INDEX FROM ar_102;
+Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
+ar_102 0 PRIMARY 1 a A 1 NULL NULL BTREE
+ar_102 1 bkey 1 b A 1 NULL NULL YES BTREE
+SHOW INDEX FROM ar_101;
+Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
+ar_101 0 PRIMARY 1 a A 1 NULL NULL BTREE
+ar_101 1 bkey 1 b A 1 NULL NULL YES BTREE
+SHOW INDEX FROM ar_100;
+Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
+ar_100 0 PRIMARY 1 a A 1 NULL NULL BTREE
+ar_100 1 bkey 1 b A 1 NULL NULL YES BTREE
+SHOW INDEX FROM ar_99;
+Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
+ar_99 0 PRIMARY 1 a A 1 NULL NULL BTREE
+ar_99 1 bkey 1 b A 1 NULL NULL YES BTREE
+SHOW INDEX FROM ar_98;
+Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
+ar_98 0 PRIMARY 1 a A 1 NULL NULL BTREE
+ar_98 1 bkey 1 b A 1 NULL NULL YES BTREE
+SHOW INDEX FROM ar_97;
+Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
+ar_97 0 PRIMARY 1 a A 1 NULL NULL BTREE
+ar_97 1 bkey 1 b A 1 NULL NULL YES BTREE
+SHOW INDEX FROM ar_96;
+Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
+ar_96 0 PRIMARY 1 a A 1 NULL NULL BTREE
+ar_96 1 bkey 1 b A 1 NULL NULL YES BTREE
+SHOW INDEX FROM ar_95;
+Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
+ar_95 0 PRIMARY 1 a A 1 NULL NULL BTREE
+ar_95 1 bkey 1 b A 1 NULL NULL YES BTREE
+SHOW INDEX FROM ar_94;
+Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
+ar_94 0 PRIMARY 1 a A 1 NULL NULL BTREE
+ar_94 1 bkey 1 b A 1 NULL NULL YES BTREE
+SHOW INDEX FROM ar_93;
+Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
+ar_93 0 PRIMARY 1 a A 1 NULL NULL BTREE
+ar_93 1 bkey 1 b A 1 NULL NULL YES BTREE
+SHOW INDEX FROM ar_92;
+Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
+ar_92 0 PRIMARY 1 a A 1 NULL NULL BTREE
+ar_92 1 bkey 1 b A 1 NULL NULL YES BTREE
+SHOW INDEX FROM ar_91;
+Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
+ar_91 0 PRIMARY 1 a A 1 NULL NULL BTREE
+ar_91 1 bkey 1 b A 1 NULL NULL YES BTREE
+SHOW INDEX FROM ar_90;
+Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
+ar_90 0 PRIMARY 1 a A 1 NULL NULL BTREE
+ar_90 1 bkey 1 b A 1 NULL NULL YES BTREE
+SHOW INDEX FROM ar_89;
+Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
+ar_89 0 PRIMARY 1 a A 1 NULL NULL BTREE
+ar_89 1 bkey 1 b A 1 NULL NULL YES BTREE
+SHOW INDEX FROM ar_88;
+Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
+ar_88 0 PRIMARY 1 a A 1 NULL NULL BTREE
+ar_88 1 bkey 1 b A 1 NULL NULL YES BTREE
+SHOW INDEX FROM ar_87;
+Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
+ar_87 0 PRIMARY 1 a A 1 NULL NULL BTREE
+ar_87 1 bkey 1 b A 1 NULL NULL YES BTREE
+SHOW INDEX FROM ar_86;
+Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
+ar_86 0 PRIMARY 1 a A 1 NULL NULL BTREE
+ar_86 1 bkey 1 b A 1 NULL NULL YES BTREE
+SHOW INDEX FROM ar_85;
+Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
+ar_85 0 PRIMARY 1 a A 1 NULL NULL BTREE
+ar_85 1 bkey 1 b A 1 NULL NULL YES BTREE
+SHOW INDEX FROM ar_84;
+Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
+ar_84 0 PRIMARY 1 a A 1 NULL NULL BTREE
+ar_84 1 bkey 1 b A 1 NULL NULL YES BTREE
+SHOW INDEX FROM ar_83;
+Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
+ar_83 0 PRIMARY 1 a A 1 NULL NULL BTREE
+ar_83 1 bkey 1 b A 1 NULL NULL YES BTREE
+SHOW INDEX FROM ar_82;
+Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
+ar_82 0 PRIMARY 1 a A 1 NULL NULL BTREE
+ar_82 1 bkey 1 b A 1 NULL NULL YES BTREE
+SHOW INDEX FROM ar_81;
+Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
+ar_81 0 PRIMARY 1 a A 1 NULL NULL BTREE
+ar_81 1 bkey 1 b A 1 NULL NULL YES BTREE
+SHOW INDEX FROM ar_80;
+Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
+ar_80 0 PRIMARY 1 a A 1 NULL NULL BTREE
+ar_80 1 bkey 1 b A 1 NULL NULL YES BTREE
+SHOW INDEX FROM ar_79;
+Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
+ar_79 0 PRIMARY 1 a A 1 NULL NULL BTREE
+ar_79 1 bkey 1 b A 1 NULL NULL YES BTREE
+SHOW INDEX FROM ar_78;
+Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
+ar_78 0 PRIMARY 1 a A 1 NULL NULL BTREE
+ar_78 1 bkey 1 b A 1 NULL NULL YES BTREE
+SHOW INDEX FROM ar_77;
+Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
+ar_77 0 PRIMARY 1 a A 1 NULL NULL BTREE
+ar_77 1 bkey 1 b A 1 NULL NULL YES BTREE
+SHOW INDEX FROM ar_76;
+Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
+ar_76 0 PRIMARY 1 a A 1 NULL NULL BTREE
+ar_76 1 bkey 1 b A 1 NULL NULL YES BTREE
+SHOW INDEX FROM ar_75;
+Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
+ar_75 0 PRIMARY 1 a A 1 NULL NULL BTREE
+ar_75 1 bkey 1 b A 1 NULL NULL YES BTREE
+SHOW INDEX FROM ar_74;
+Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
+ar_74 0 PRIMARY 1 a A 1 NULL NULL BTREE
+ar_74 1 bkey 1 b A 1 NULL NULL YES BTREE
+SHOW INDEX FROM ar_73;
+Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
+ar_73 0 PRIMARY 1 a A 1 NULL NULL BTREE
+ar_73 1 bkey 1 b A 1 NULL NULL YES BTREE
+SHOW INDEX FROM ar_72;
+Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
+ar_72 0 PRIMARY 1 a A 1 NULL NULL BTREE
+ar_72 1 bkey 1 b A 1 NULL NULL YES BTREE
+SHOW INDEX FROM ar_71;
+Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
+ar_71 0 PRIMARY 1 a A 1 NULL NULL BTREE
+ar_71 1 bkey 1 b A 1 NULL NULL YES BTREE
+SHOW INDEX FROM ar_70;
+Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
+ar_70 0 PRIMARY 1 a A 1 NULL NULL BTREE
+ar_70 1 bkey 1 b A 1 NULL NULL YES BTREE
+SHOW INDEX FROM ar_69;
+Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
+ar_69 0 PRIMARY 1 a A 1 NULL NULL BTREE
+ar_69 1 bkey 1 b A 1 NULL NULL YES BTREE
+SHOW INDEX FROM ar_68;
+Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
+ar_68 0 PRIMARY 1 a A 1 NULL NULL BTREE
+ar_68 1 bkey 1 b A 1 NULL NULL YES BTREE
+SHOW INDEX FROM ar_67;
+Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
+ar_67 0 PRIMARY 1 a A 1 NULL NULL BTREE
+ar_67 1 bkey 1 b A 1 NULL NULL YES BTREE
+SHOW INDEX FROM ar_66;
+Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
+ar_66 0 PRIMARY 1 a A 1 NULL NULL BTREE
+ar_66 1 bkey 1 b A 1 NULL NULL YES BTREE
+SHOW INDEX FROM ar_65;
+Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
+ar_65 0 PRIMARY 1 a A 1 NULL NULL BTREE
+ar_65 1 bkey 1 b A 1 NULL NULL YES BTREE
+SHOW INDEX FROM ar_64;
+Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
+ar_64 0 PRIMARY 1 a A 1 NULL NULL BTREE
+ar_64 1 bkey 1 b A 1 NULL NULL YES BTREE
+SHOW INDEX FROM ar_63;
+Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
+ar_63 0 PRIMARY 1 a A 1 NULL NULL BTREE
+ar_63 1 bkey 1 b A 1 NULL NULL YES BTREE
+SHOW INDEX FROM ar_62;
+Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
+ar_62 0 PRIMARY 1 a A 1 NULL NULL BTREE
+ar_62 1 bkey 1 b A 1 NULL NULL YES BTREE
+SHOW INDEX FROM ar_61;
+Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
+ar_61 0 PRIMARY 1 a A 1 NULL NULL BTREE
+ar_61 1 bkey 1 b A 1 NULL NULL YES BTREE
+SHOW INDEX FROM ar_60;
+Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
+ar_60 0 PRIMARY 1 a A 1 NULL NULL BTREE
+ar_60 1 bkey 1 b A 1 NULL NULL YES BTREE
+SHOW INDEX FROM ar_59;
+Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
+ar_59 0 PRIMARY 1 a A 1 NULL NULL BTREE
+ar_59 1 bkey 1 b A 1 NULL NULL YES BTREE
+SHOW INDEX FROM ar_58;
+Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
+ar_58 0 PRIMARY 1 a A 1 NULL NULL BTREE
+ar_58 1 bkey 1 b A 1 NULL NULL YES BTREE
+SHOW INDEX FROM ar_57;
+Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
+ar_57 0 PRIMARY 1 a A 1 NULL NULL BTREE
+ar_57 1 bkey 1 b A 1 NULL NULL YES BTREE
+SHOW INDEX FROM ar_56;
+Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
+ar_56 0 PRIMARY 1 a A 1 NULL NULL BTREE
+ar_56 1 bkey 1 b A 1 NULL NULL YES BTREE
+SHOW INDEX FROM ar_55;
+Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
+ar_55 0 PRIMARY 1 a A 1 NULL NULL BTREE
+ar_55 1 bkey 1 b A 1 NULL NULL YES BTREE
+SHOW INDEX FROM ar_54;
+Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
+ar_54 0 PRIMARY 1 a A 1 NULL NULL BTREE
+ar_54 1 bkey 1 b A 1 NULL NULL YES BTREE
+SHOW INDEX FROM ar_53;
+Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
+ar_53 0 PRIMARY 1 a A 1 NULL NULL BTREE
+ar_53 1 bkey 1 b A 1 NULL NULL YES BTREE
+SHOW INDEX FROM ar_52;
+Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
+ar_52 0 PRIMARY 1 a A 1 NULL NULL BTREE
+ar_52 1 bkey 1 b A 1 NULL NULL YES BTREE
+SHOW INDEX FROM ar_51;
+Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
+ar_51 0 PRIMARY 1 a A 1 NULL NULL BTREE
+ar_51 1 bkey 1 b A 1 NULL NULL YES BTREE
+SHOW INDEX FROM ar_50;
+Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
+ar_50 0 PRIMARY 1 a A 1 NULL NULL BTREE
+ar_50 1 bkey 1 b A 1 NULL NULL YES BTREE
+SHOW INDEX FROM ar_49;
+Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
+ar_49 0 PRIMARY 1 a A 1 NULL NULL BTREE
+ar_49 1 bkey 1 b A 1 NULL NULL YES BTREE
+SHOW INDEX FROM ar_48;
+Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
+ar_48 0 PRIMARY 1 a A 1 NULL NULL BTREE
+ar_48 1 bkey 1 b A 1 NULL NULL YES BTREE
+SHOW INDEX FROM ar_47;
+Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
+ar_47 0 PRIMARY 1 a A 1 NULL NULL BTREE
+ar_47 1 bkey 1 b A 1 NULL NULL YES BTREE
+SHOW INDEX FROM ar_46;
+Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
+ar_46 0 PRIMARY 1 a A 1 NULL NULL BTREE
+ar_46 1 bkey 1 b A 1 NULL NULL YES BTREE
+SHOW INDEX FROM ar_45;
+Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
+ar_45 0 PRIMARY 1 a A 1 NULL NULL BTREE
+ar_45 1 bkey 1 b A 1 NULL NULL YES BTREE
+SHOW INDEX FROM ar_44;
+Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
+ar_44 0 PRIMARY 1 a A 1 NULL NULL BTREE
+ar_44 1 bkey 1 b A 1 NULL NULL YES BTREE
+SHOW INDEX FROM ar_43;
+Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
+ar_43 0 PRIMARY 1 a A 1 NULL NULL BTREE
+ar_43 1 bkey 1 b A 1 NULL NULL YES BTREE
+SHOW INDEX FROM ar_42;
+Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
+ar_42 0 PRIMARY 1 a A 1 NULL NULL BTREE
+ar_42 1 bkey 1 b A 1 NULL NULL YES BTREE
+SHOW INDEX FROM ar_41;
+Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
+ar_41 0 PRIMARY 1 a A 1 NULL NULL BTREE
+ar_41 1 bkey 1 b A 1 NULL NULL YES BTREE
+SHOW INDEX FROM ar_40;
+Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
+ar_40 0 PRIMARY 1 a A 1 NULL NULL BTREE
+ar_40 1 bkey 1 b A 1 NULL NULL YES BTREE
+SHOW INDEX FROM ar_39;
+Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
+ar_39 0 PRIMARY 1 a A 1 NULL NULL BTREE
+ar_39 1 bkey 1 b A 1 NULL NULL YES BTREE
+SHOW INDEX FROM ar_38;
+Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
+ar_38 0 PRIMARY 1 a A 1 NULL NULL BTREE
+ar_38 1 bkey 1 b A 1 NULL NULL YES BTREE
+SHOW INDEX FROM ar_37;
+Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
+ar_37 0 PRIMARY 1 a A 1 NULL NULL BTREE
+ar_37 1 bkey 1 b A 1 NULL NULL YES BTREE
+SHOW INDEX FROM ar_36;
+Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
+ar_36 0 PRIMARY 1 a A 1 NULL NULL BTREE
+ar_36 1 bkey 1 b A 1 NULL NULL YES BTREE
+SHOW INDEX FROM ar_35;
+Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
+ar_35 0 PRIMARY 1 a A 1 NULL NULL BTREE
+ar_35 1 bkey 1 b A 1 NULL NULL YES BTREE
+SHOW INDEX FROM ar_34;
+Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
+ar_34 0 PRIMARY 1 a A 1 NULL NULL BTREE
+ar_34 1 bkey 1 b A 1 NULL NULL YES BTREE
+SHOW INDEX FROM ar_33;
+Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
+ar_33 0 PRIMARY 1 a A 1 NULL NULL BTREE
+ar_33 1 bkey 1 b A 1 NULL NULL YES BTREE
+SHOW INDEX FROM ar_32;
+Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
+ar_32 0 PRIMARY 1 a A 1 NULL NULL BTREE
+ar_32 1 bkey 1 b A 1 NULL NULL YES BTREE
+SHOW INDEX FROM ar_31;
+Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
+ar_31 0 PRIMARY 1 a A 1 NULL NULL BTREE
+ar_31 1 bkey 1 b A 1 NULL NULL YES BTREE
+SHOW INDEX FROM ar_30;
+Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
+ar_30 0 PRIMARY 1 a A 1 NULL NULL BTREE
+ar_30 1 bkey 1 b A 1 NULL NULL YES BTREE
+SHOW INDEX FROM ar_29;
+Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
+ar_29 0 PRIMARY 1 a A 1 NULL NULL BTREE
+ar_29 1 bkey 1 b A 1 NULL NULL YES BTREE
+SHOW INDEX FROM ar_28;
+Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
+ar_28 0 PRIMARY 1 a A 1 NULL NULL BTREE
+ar_28 1 bkey 1 b A 1 NULL NULL YES BTREE
+SHOW INDEX FROM ar_27;
+Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
+ar_27 0 PRIMARY 1 a A 1 NULL NULL BTREE
+ar_27 1 bkey 1 b A 1 NULL NULL YES BTREE
+SHOW INDEX FROM ar_26;
+Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
+ar_26 0 PRIMARY 1 a A 1 NULL NULL BTREE
+ar_26 1 bkey 1 b A 1 NULL NULL YES BTREE
+SHOW INDEX FROM ar_25;
+Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
+ar_25 0 PRIMARY 1 a A 1 NULL NULL BTREE
+ar_25 1 bkey 1 b A 1 NULL NULL YES BTREE
+SHOW INDEX FROM ar_24;
+Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
+ar_24 0 PRIMARY 1 a A 1 NULL NULL BTREE
+ar_24 1 bkey 1 b A 1 NULL NULL YES BTREE
+SHOW INDEX FROM ar_23;
+Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
+ar_23 0 PRIMARY 1 a A 1 NULL NULL BTREE
+ar_23 1 bkey 1 b A 1 NULL NULL YES BTREE
+SHOW INDEX FROM ar_22;
+Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
+ar_22 0 PRIMARY 1 a A 1 NULL NULL BTREE
+ar_22 1 bkey 1 b A 1 NULL NULL YES BTREE
+SHOW INDEX FROM ar_21;
+Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
+ar_21 0 PRIMARY 1 a A 1 NULL NULL BTREE
+ar_21 1 bkey 1 b A 1 NULL NULL YES BTREE
+SHOW INDEX FROM ar_20;
+Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
+ar_20 0 PRIMARY 1 a A 1 NULL NULL BTREE
+ar_20 1 bkey 1 b A 1 NULL NULL YES BTREE
+SHOW INDEX FROM ar_19;
+Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
+ar_19 0 PRIMARY 1 a A 1 NULL NULL BTREE
+ar_19 1 bkey 1 b A 1 NULL NULL YES BTREE
+SHOW INDEX FROM ar_18;
+Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
+ar_18 0 PRIMARY 1 a A 1 NULL NULL BTREE
+ar_18 1 bkey 1 b A 1 NULL NULL YES BTREE
+SHOW INDEX FROM ar_17;
+Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
+ar_17 0 PRIMARY 1 a A 1 NULL NULL BTREE
+ar_17 1 bkey 1 b A 1 NULL NULL YES BTREE
+SHOW INDEX FROM ar_16;
+Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
+ar_16 0 PRIMARY 1 a A 1 NULL NULL BTREE
+ar_16 1 bkey 1 b A 1 NULL NULL YES BTREE
+SHOW INDEX FROM ar_15;
+Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
+ar_15 0 PRIMARY 1 a A 1 NULL NULL BTREE
+ar_15 1 bkey 1 b A 1 NULL NULL YES BTREE
+SHOW INDEX FROM ar_14;
+Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
+ar_14 0 PRIMARY 1 a A 1 NULL NULL BTREE
+ar_14 1 bkey 1 b A 1 NULL NULL YES BTREE
+SHOW INDEX FROM ar_13;
+Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
+ar_13 0 PRIMARY 1 a A 1 NULL NULL BTREE
+ar_13 1 bkey 1 b A 1 NULL NULL YES BTREE
+SHOW INDEX FROM ar_12;
+Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
+ar_12 0 PRIMARY 1 a A 1 NULL NULL BTREE
+ar_12 1 bkey 1 b A 1 NULL NULL YES BTREE
+SHOW INDEX FROM ar_11;
+Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
+ar_11 0 PRIMARY 1 a A 1 NULL NULL BTREE
+ar_11 1 bkey 1 b A 1 NULL NULL YES BTREE
+SHOW INDEX FROM ar_10;
+Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
+ar_10 0 PRIMARY 1 a A 1 NULL NULL BTREE
+ar_10 1 bkey 1 b A 1 NULL NULL YES BTREE
+SHOW INDEX FROM ar_9;
+Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
+ar_9 0 PRIMARY 1 a A 1 NULL NULL BTREE
+ar_9 1 bkey 1 b A 1 NULL NULL YES BTREE
+SHOW INDEX FROM ar_8;
+Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
+ar_8 0 PRIMARY 1 a A 1 NULL NULL BTREE
+ar_8 1 bkey 1 b A 1 NULL NULL YES BTREE
+SHOW INDEX FROM ar_7;
+Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
+ar_7 0 PRIMARY 1 a A 1 NULL NULL BTREE
+ar_7 1 bkey 1 b A 1 NULL NULL YES BTREE
+SHOW INDEX FROM ar_6;
+Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
+ar_6 0 PRIMARY 1 a A 1 NULL NULL BTREE
+ar_6 1 bkey 1 b A 1 NULL NULL YES BTREE
+SHOW INDEX FROM ar_5;
+Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
+ar_5 0 PRIMARY 1 a A 1 NULL NULL BTREE
+ar_5 1 bkey 1 b A 1 NULL NULL YES BTREE
+SHOW INDEX FROM ar_4;
+Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
+ar_4 0 PRIMARY 1 a A 1 NULL NULL BTREE
+ar_4 1 bkey 1 b A 1 NULL NULL YES BTREE
+SHOW INDEX FROM ar_3;
+Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
+ar_3 0 PRIMARY 1 a A 1 NULL NULL BTREE
+ar_3 1 bkey 1 b A 1 NULL NULL YES BTREE
+SHOW INDEX FROM ar_2;
+Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
+ar_2 0 PRIMARY 1 a A 1 NULL NULL BTREE
+ar_2 1 bkey 1 b A 1 NULL NULL YES BTREE
+SHOW INDEX FROM ar_1;
+Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
+ar_1 0 PRIMARY 1 a A 1 NULL NULL BTREE
+ar_1 1 bkey 1 b A 1 NULL NULL YES BTREE
+SET SESSION tokudb_auto_analyze = @orig_auto_analyze;
+SET SESSION tokudb_analyze_in_background = @orig_in_background;
+SET SESSION tokudb_analyze_mode = @orig_mode;
+SET SESSION tokudb_analyze_throttle = @orig_throttle;
+SET SESSION tokudb_analyze_time = @orig_time;
diff --git a/storage/tokudb/mysql-test/tokudb/r/card_drop_index.result b/storage/tokudb/mysql-test/tokudb/r/card_drop_index.result
index 9fc8fb6a6b8..2cfdfe11296 100644
--- a/storage/tokudb/mysql-test/tokudb/r/card_drop_index.result
+++ b/storage/tokudb/mysql-test/tokudb/r/card_drop_index.result
@@ -5,21 +5,21 @@ insert into tt values (1,0,0),(2,0,0),(3,0,1),(4,0,1);
show indexes from tt;
Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
tt 0 PRIMARY 1 a A 4 NULL NULL BTREE
-tt 1 b 1 b A NULL NULL NULL YES BTREE
-tt 1 c 1 c A NULL NULL NULL YES BTREE
+tt 1 b 1 b A 4 NULL NULL YES BTREE
+tt 1 c 1 c A 4 NULL NULL YES BTREE
analyze table tt;
Table Op Msg_type Msg_text
test.tt analyze status OK
show indexes from tt;
Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
tt 0 PRIMARY 1 a A 4 NULL NULL BTREE
-tt 1 b 1 b A 1 NULL NULL YES BTREE
-tt 1 c 1 c A 2 NULL NULL YES BTREE
+tt 1 b 1 b A 2 NULL NULL YES BTREE
+tt 1 c 1 c A 4 NULL NULL YES BTREE
alter table tt drop key b;
show indexes from tt;
Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
tt 0 PRIMARY 1 a A 4 NULL NULL BTREE
-tt 1 c 1 c A 2 NULL NULL YES BTREE
+tt 1 c 1 c A 4 NULL NULL YES BTREE
alter table tt drop key c;
show indexes from tt;
Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
diff --git a/storage/tokudb/mysql-test/tokudb/r/card_drop_index_2.result b/storage/tokudb/mysql-test/tokudb/r/card_drop_index_2.result
index 4103a37a6ed..ed28d2a3226 100644
--- a/storage/tokudb/mysql-test/tokudb/r/card_drop_index_2.result
+++ b/storage/tokudb/mysql-test/tokudb/r/card_drop_index_2.result
@@ -132,21 +132,21 @@ count(*)
show indexes from tt;
Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
tt 0 PRIMARY 1 a A 500 NULL NULL BTREE
-tt 1 b 1 b A NULL NULL NULL YES BTREE
-tt 1 c 1 c A NULL NULL NULL YES BTREE
+tt 1 b 1 b A 500 NULL NULL YES BTREE
+tt 1 c 1 c A 500 NULL NULL YES BTREE
analyze table tt;
Table Op Msg_type Msg_text
test.tt analyze status OK
show indexes from tt;
Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
tt 0 PRIMARY 1 a A 500 NULL NULL BTREE
-tt 1 b 1 b A 125 NULL NULL YES BTREE
-tt 1 c 1 c A 1 NULL NULL YES BTREE
+tt 1 b 1 b A 250 NULL NULL YES BTREE
+tt 1 c 1 c A 2 NULL NULL YES BTREE
alter table tt drop key b;
show indexes from tt;
Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
tt 0 PRIMARY 1 a A 500 NULL NULL BTREE
-tt 1 c 1 c A 1 NULL NULL YES BTREE
+tt 1 c 1 c A 2 NULL NULL YES BTREE
alter table tt drop key c;
show indexes from tt;
Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
diff --git a/storage/tokudb/mysql-test/tokudb/r/card_drop_pk.result b/storage/tokudb/mysql-test/tokudb/r/card_drop_pk.result
index bdbd9a3f097..2369d88c274 100644
--- a/storage/tokudb/mysql-test/tokudb/r/card_drop_pk.result
+++ b/storage/tokudb/mysql-test/tokudb/r/card_drop_pk.result
@@ -5,24 +5,24 @@ insert into tt values (1,0,0),(2,0,0),(3,0,1),(4,0,1);
show indexes from tt;
Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
tt 0 PRIMARY 1 a A 4 NULL NULL BTREE
-tt 1 b 1 b A NULL NULL NULL YES BTREE
-tt 1 c 1 c A NULL NULL NULL YES BTREE
+tt 1 b 1 b A 4 NULL NULL YES BTREE
+tt 1 c 1 c A 4 NULL NULL YES BTREE
analyze table tt;
Table Op Msg_type Msg_text
test.tt analyze status OK
show indexes from tt;
Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
tt 0 PRIMARY 1 a A 4 NULL NULL BTREE
-tt 1 b 1 b A 1 NULL NULL YES BTREE
-tt 1 c 1 c A 2 NULL NULL YES BTREE
+tt 1 b 1 b A 2 NULL NULL YES BTREE
+tt 1 c 1 c A 4 NULL NULL YES BTREE
alter table tt drop primary key;
show indexes from tt;
Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
-tt 1 b 1 b A NULL NULL NULL YES BTREE
-tt 1 c 1 c A NULL NULL NULL YES BTREE
+tt 1 b 1 b A 4 NULL NULL YES BTREE
+tt 1 c 1 c A 4 NULL NULL YES BTREE
flush tables;
show indexes from tt;
Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
-tt 1 b 1 b A NULL NULL NULL YES BTREE
-tt 1 c 1 c A NULL NULL NULL YES BTREE
+tt 1 b 1 b A 4 NULL NULL YES BTREE
+tt 1 c 1 c A 4 NULL NULL YES BTREE
drop table tt;
diff --git a/storage/tokudb/mysql-test/tokudb/r/card_pk_2.result b/storage/tokudb/mysql-test/tokudb/r/card_pk_2.result
index dd850df9a89..3c1b652db15 100644
--- a/storage/tokudb/mysql-test/tokudb/r/card_pk_2.result
+++ b/storage/tokudb/mysql-test/tokudb/r/card_pk_2.result
@@ -4,18 +4,18 @@ create table tt (a int, b int, primary key(a,b));
insert into tt values (0,0),(0,1),(1,0),(1,1);
show indexes from tt;
Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
-tt 0 PRIMARY 1 a A NULL NULL NULL BTREE
+tt 0 PRIMARY 1 a A 4 NULL NULL BTREE
tt 0 PRIMARY 2 b A 4 NULL NULL BTREE
analyze table tt;
Table Op Msg_type Msg_text
test.tt analyze status OK
show indexes from tt;
Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
-tt 0 PRIMARY 1 a A 2 NULL NULL BTREE
+tt 0 PRIMARY 1 a A 4 NULL NULL BTREE
tt 0 PRIMARY 2 b A 4 NULL NULL BTREE
flush tables;
show indexes from tt;
Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
-tt 0 PRIMARY 1 a A 2 NULL NULL BTREE
+tt 0 PRIMARY 1 a A 4 NULL NULL BTREE
tt 0 PRIMARY 2 b A 4 NULL NULL BTREE
drop table tt;
diff --git a/storage/tokudb/mysql-test/tokudb/r/card_pk_sk.result b/storage/tokudb/mysql-test/tokudb/r/card_pk_sk.result
index 5458f19de32..02c8d1f8218 100644
--- a/storage/tokudb/mysql-test/tokudb/r/card_pk_sk.result
+++ b/storage/tokudb/mysql-test/tokudb/r/card_pk_sk.result
@@ -1004,7 +1004,7 @@ insert into tt values (4*999,4*999+1),(4*999+1,4*999+2),(4*999+2,4*999+3),(4*999
show indexes from tt;
Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
tt 0 PRIMARY 1 a A 4000 NULL NULL BTREE
-tt 1 b 1 b A NULL NULL NULL YES BTREE
+tt 1 b 1 b A 4000 NULL NULL YES BTREE
analyze table tt;
Table Op Msg_type Msg_text
test.tt analyze status OK
@@ -2022,17 +2022,17 @@ insert into tt values (4*999,0),(4*999+1,0),(4*999+2,0),(4*999+3,0);
show indexes from tt;
Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
tt 0 PRIMARY 1 a A 4000 NULL NULL BTREE
-tt 1 b 1 b A NULL NULL NULL YES BTREE
+tt 1 b 1 b A 4000 NULL NULL YES BTREE
analyze table tt;
Table Op Msg_type Msg_text
test.tt analyze status OK
show indexes from tt;
Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
tt 0 PRIMARY 1 a A 4000 NULL NULL BTREE
-tt 1 b 1 b A 1 NULL NULL YES BTREE
+tt 1 b 1 b A 2 NULL NULL YES BTREE
flush tables;
show indexes from tt;
Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
tt 0 PRIMARY 1 a A 4000 NULL NULL BTREE
-tt 1 b 1 b A 1 NULL NULL YES BTREE
+tt 1 b 1 b A 2 NULL NULL YES BTREE
drop table tt;
diff --git a/storage/tokudb/mysql-test/tokudb/r/card_scale_percent.result b/storage/tokudb/mysql-test/tokudb/r/card_scale_percent.result
new file mode 100644
index 00000000000..cfd7e38179c
--- /dev/null
+++ b/storage/tokudb/mysql-test/tokudb/r/card_scale_percent.result
@@ -0,0 +1,42 @@
+set global tokudb_cardinality_scale_percent = 10;
+analyze table tt;
+Table Op Msg_type Msg_text
+test.tt analyze status OK
+show indexes from tt;
+Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
+tt 0 PRIMARY 1 a A 4000 NULL NULL BTREE
+tt 1 b 1 b A 4000 NULL NULL YES BTREE
+tt 1 c 1 c A 4000 NULL NULL YES BTREE
+tt 1 d 1 d A 4000 NULL NULL YES BTREE
+set global tokudb_cardinality_scale_percent = 50;
+analyze table tt;
+Table Op Msg_type Msg_text
+test.tt analyze status OK
+show indexes from tt;
+Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
+tt 0 PRIMARY 1 a A 4000 NULL NULL BTREE
+tt 1 b 1 b A 4000 NULL NULL YES BTREE
+tt 1 c 1 c A 4000 NULL NULL YES BTREE
+tt 1 d 1 d A 2000 NULL NULL YES BTREE
+set global tokudb_cardinality_scale_percent = 100;
+analyze table tt;
+Table Op Msg_type Msg_text
+test.tt analyze status OK
+show indexes from tt;
+Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
+tt 0 PRIMARY 1 a A 4000 NULL NULL BTREE
+tt 1 b 1 b A 4000 NULL NULL YES BTREE
+tt 1 c 1 c A 2000 NULL NULL YES BTREE
+tt 1 d 1 d A 1000 NULL NULL YES BTREE
+set global tokudb_cardinality_scale_percent = 200;
+Warnings:
+Warning 1292 Truncated incorrect tokudb_cardinality_scale_percent value: '200'
+analyze table tt;
+Table Op Msg_type Msg_text
+test.tt analyze status OK
+show indexes from tt;
+Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
+tt 0 PRIMARY 1 a A 4000 NULL NULL BTREE
+tt 1 b 1 b A 4000 NULL NULL YES BTREE
+tt 1 c 1 c A 2000 NULL NULL YES BTREE
+tt 1 d 1 d A 1000 NULL NULL YES BTREE
diff --git a/storage/tokudb/mysql-test/tokudb/r/card_sk.result b/storage/tokudb/mysql-test/tokudb/r/card_sk.result
index 1846b4e82bc..310fc863a9b 100644
--- a/storage/tokudb/mysql-test/tokudb/r/card_sk.result
+++ b/storage/tokudb/mysql-test/tokudb/r/card_sk.result
@@ -5,15 +5,15 @@ insert into tt values (1,0),(2,1),(3,2),(4,3);
insert into tt values (5,0),(6,1),(7,2),(8,3);
show indexes from tt;
Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
-tt 1 b 1 b A NULL NULL NULL YES BTREE
+tt 1 b 1 b A 8 NULL NULL YES BTREE
analyze table tt;
Table Op Msg_type Msg_text
test.tt analyze status OK
show indexes from tt;
Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
-tt 1 b 1 b A 4 NULL NULL YES BTREE
+tt 1 b 1 b A 8 NULL NULL YES BTREE
flush tables;
show indexes from tt;
Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
-tt 1 b 1 b A 4 NULL NULL YES BTREE
+tt 1 b 1 b A 8 NULL NULL YES BTREE
drop table tt;
diff --git a/storage/tokudb/mysql-test/tokudb/r/card_sk_2.result b/storage/tokudb/mysql-test/tokudb/r/card_sk_2.result
index c087bad3b18..8ff57b63e5d 100644
--- a/storage/tokudb/mysql-test/tokudb/r/card_sk_2.result
+++ b/storage/tokudb/mysql-test/tokudb/r/card_sk_2.result
@@ -4,18 +4,18 @@ create table tt (a int, b int, key(a,b));
insert into tt values (0,0),(0,1),(1,0),(1,1);
show indexes from tt;
Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
-tt 1 a 1 a A NULL NULL NULL YES BTREE
-tt 1 a 2 b A NULL NULL NULL YES BTREE
+tt 1 a 1 a A 4 NULL NULL YES BTREE
+tt 1 a 2 b A 4 NULL NULL YES BTREE
analyze table tt;
Table Op Msg_type Msg_text
test.tt analyze status OK
show indexes from tt;
Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
-tt 1 a 1 a A 2 NULL NULL YES BTREE
+tt 1 a 1 a A 4 NULL NULL YES BTREE
tt 1 a 2 b A 4 NULL NULL YES BTREE
flush tables;
show indexes from tt;
Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
-tt 1 a 1 a A 2 NULL NULL YES BTREE
+tt 1 a 1 a A 4 NULL NULL YES BTREE
tt 1 a 2 b A 4 NULL NULL YES BTREE
drop table tt;
diff --git a/storage/tokudb/mysql-test/tokudb/r/cluster_2968-0.result b/storage/tokudb/mysql-test/tokudb/r/cluster_2968-0.result
index 1e0935543dc..9e524df3276 100644
--- a/storage/tokudb/mysql-test/tokudb/r/cluster_2968-0.result
+++ b/storage/tokudb/mysql-test/tokudb/r/cluster_2968-0.result
@@ -1042,5 +1042,5 @@ t CREATE TABLE `t` (
explain select straight_join * from s,t where s.b = t.b;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE s index b b 5 NULL 1000 Using where; Using index
-1 SIMPLE t ref b b 5 test.s.b 11 Using index
+1 SIMPLE t ref b b 5 test.s.b 1 Using index
drop table s,t;
diff --git a/storage/tokudb/mysql-test/tokudb/r/cluster_2968-1.result b/storage/tokudb/mysql-test/tokudb/r/cluster_2968-1.result
index 9337718ce35..1aeb40d182a 100644
--- a/storage/tokudb/mysql-test/tokudb/r/cluster_2968-1.result
+++ b/storage/tokudb/mysql-test/tokudb/r/cluster_2968-1.result
@@ -1042,7 +1042,7 @@ t CREATE TABLE `t` (
explain select straight_join * from s,t where s.b = t.b;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE s ALL b NULL NULL NULL 1000 Using where
-1 SIMPLE t ref b b 5 test.s.b 11
+1 SIMPLE t ref b b 5 test.s.b 1
alter table s add key(b) clustering=yes;
Warnings:
Note 1831 Duplicate index 'b_2' defined on the table 'test.s'. This is deprecated and will be disallowed in a future release.
@@ -1092,7 +1092,7 @@ t CREATE TABLE `t` (
explain select straight_join * from s,t where s.b = t.b;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE s index b_2 b_2 5 NULL 1000 Using where; Using index
-1 SIMPLE t ref b_2 b_2 5 test.s.b 11 Using index
+1 SIMPLE t ref b_2 b_2 5 test.s.b 1 Using index
alter table s add key(b);
Warnings:
Note 1831 Duplicate index 'b' defined on the table 'test.s'. This is deprecated and will be disallowed in a future release.
diff --git a/storage/tokudb/mysql-test/tokudb/r/cluster_2968-2.result b/storage/tokudb/mysql-test/tokudb/r/cluster_2968-2.result
index 710509b2acf..9fdb28404c7 100644
--- a/storage/tokudb/mysql-test/tokudb/r/cluster_2968-2.result
+++ b/storage/tokudb/mysql-test/tokudb/r/cluster_2968-2.result
@@ -1042,7 +1042,7 @@ t CREATE TABLE `t` (
explain select straight_join s.a,t.a from s,t where s.b = t.b;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE s ALL b NULL NULL NULL 1000 Using where
-1 SIMPLE t ref b b 5 test.s.b 11
+1 SIMPLE t ref b b 5 test.s.b 1
alter table s add key(b,a);
alter table t add key(b,a);
show create table s;
@@ -1066,7 +1066,7 @@ t CREATE TABLE `t` (
explain select straight_join s.a,t.a from s,t where s.b = t.b;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE s index b,b_2 b_2 10 NULL 1000 Using where; Using index
-1 SIMPLE t ref b,b_2 b_2 5 test.s.b 11 Using index
+1 SIMPLE t ref b,b_2 b_2 5 test.s.b 1 Using index
alter table s add key(b) clustering=yes;
Warnings:
Note 1831 Duplicate index 'b_3' defined on the table 'test.s'. This is deprecated and will be disallowed in a future release.
@@ -1096,7 +1096,7 @@ t CREATE TABLE `t` (
explain select straight_join s.a,t.a from s,t where s.b = t.b;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE s index b,b_2,b_3 b_2 10 NULL 1000 Using where; Using index
-1 SIMPLE t ref b,b_2,b_3 b_3 5 test.s.b 1 Using index
+1 SIMPLE t ref b,b_2,b_3 b_2 5 test.s.b 1 Using index
alter table s drop key b_2;
alter table t drop key b_2;
show create table s;
diff --git a/storage/tokudb/mysql-test/tokudb/r/cluster_2968-3.result b/storage/tokudb/mysql-test/tokudb/r/cluster_2968-3.result
index 660d63c54c8..eec0e67ef30 100644
--- a/storage/tokudb/mysql-test/tokudb/r/cluster_2968-3.result
+++ b/storage/tokudb/mysql-test/tokudb/r/cluster_2968-3.result
@@ -1062,8 +1062,8 @@ u CREATE TABLE `u` (
explain select straight_join * from s,t,u where s.b = t.b and s.c = u.c;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE s ALL b NULL NULL NULL 1000 Using where
-1 SIMPLE t ref b b 5 test.s.b 11
-1 SIMPLE u ref c c 5 test.s.c 11
+1 SIMPLE t ref b b 5 test.s.b 1
+1 SIMPLE u ref c c 5 test.s.c 1
alter table s add key (b) clustering=yes;
Warnings:
Note 1831 Duplicate index 'b_2' defined on the table 'test.s'. This is deprecated and will be disallowed in a future release.
diff --git a/storage/tokudb/mysql-test/tokudb/r/cluster_key.result b/storage/tokudb/mysql-test/tokudb/r/cluster_key.result
index 74298d69114..fab288047be 100644
--- a/storage/tokudb/mysql-test/tokudb/r/cluster_key.result
+++ b/storage/tokudb/mysql-test/tokudb/r/cluster_key.result
@@ -1,6 +1,6 @@
SET DEFAULT_STORAGE_ENGINE='tokudb';
DROP TABLE IF EXISTS t1;
-create table t1(a int, b int, c int, d int, primary key(a), key(b) clustering=yes, key (c))engine=tokudb;
+create table t1(a int, b int, c int, d int, primary key(a,b,c), key(b) clustering=yes, key (c))engine=tokudb;
insert into t1 values (1,10,100,1000),(2,20,200,2000),(3,30,300,3000),(4,40,400,4000),(5,50,500,5000),(6,60,600,6000),(7,70,700,7000),(8,80,800,8000),(9,90,900,9000);
explain select * from t1 where a > 5;
id select_type table type possible_keys key key_len ref rows Extra
@@ -13,7 +13,7 @@ a b c d
9 90 900 9000
explain select * from t1 where b > 30;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 range b b 5 NULL NULL; Using where; Using index
+1 SIMPLE t1 range b b 4 NULL NULL; Using where; Using index
select * from t1 where b > 30;
a b c d
4 40 400 4000
@@ -24,7 +24,7 @@ a b c d
9 90 900 9000
explain select * from t1 where c > 750;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 range c c 5 NULL NULL; Using where
+1 SIMPLE t1 range c c 4 NULL NULL; Using where
select * from t1 where c > 750;
a b c d
8 80 800 8000
@@ -37,7 +37,7 @@ a
9
explain select a,b from t1 where b > 30;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 range b b 5 NULL NULL; Using where; Using index
+1 SIMPLE t1 index b c 4 NULL NULL; Using where; Using index
select a,b from t1 where b > 30;
a b
4 40
@@ -48,7 +48,7 @@ a b
9 90
explain select a,b from t1 where c > 750;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 range c c 5 NULL NULL; Using where
+1 SIMPLE t1 index c c 4 NULL NULL; Using where; Using index
select a,c from t1 where c > 750;
a c
8 800
@@ -69,7 +69,7 @@ a b c d
10 10 10 10
explain select * from t1 where b > 30;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 range b b 5 NULL NULL; Using where; Using index
+1 SIMPLE t1 range b b 4 NULL NULL; Using where; Using index
select * from t1 where b > 30;
a b c d
4 40 400 4000
@@ -80,14 +80,14 @@ a b c d
9 90 900 9000
explain select * from t1 where c > 750;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 range c c 5 NULL NULL; Using where
+1 SIMPLE t1 range c c 4 NULL NULL; Using where
select * from t1 where c > 750;
a b c d
8 80 800 8000
9 90 900 9000
explain select b from t1 where b > 30;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 range b b 5 NULL NULL; Using where; Using index
+1 SIMPLE t1 range b b 4 NULL NULL; Using where; Using index
select b from t1 where b > 30;
b
40
@@ -98,13 +98,13 @@ b
90
explain select b from t1 where c > 750;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 range c c 5 NULL NULL; Using where
+1 SIMPLE t1 range c c 4 NULL NULL; Using where
select c from t1 where c > 750;
c
800
900
alter table t1 add e varchar(20);
-alter table t1 add primary key (a);
+alter table t1 add primary key (a,b,c);
explain select * from t1 where a > 5;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 range PRIMARY PRIMARY 4 NULL NULL; Using where
@@ -117,7 +117,7 @@ a b c d e
10 10 10 10 NULL
explain select * from t1 where b > 30;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 range b b 5 NULL NULL; Using where; Using index
+1 SIMPLE t1 range b b 4 NULL NULL; Using where; Using index
select * from t1 where b > 30;
a b c d e
4 40 400 4000 NULL
@@ -128,7 +128,7 @@ a b c d e
9 90 900 9000 NULL
explain select * from t1 where c > 750;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 range c c 5 NULL NULL; Using where
+1 SIMPLE t1 range c c 4 NULL NULL; Using where
select * from t1 where c > 750;
a b c d e
8 80 800 8000 NULL
@@ -142,7 +142,7 @@ a
10
explain select a,b from t1 where b > 30;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 range b b 5 NULL NULL; Using where; Using index
+1 SIMPLE t1 index b c 4 NULL NULL; Using where; Using index
select a,b from t1 where b > 30;
a b
4 40
@@ -153,7 +153,7 @@ a b
9 90
explain select a,b from t1 where c > 750;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 range c c 5 NULL NULL; Using where
+1 SIMPLE t1 index c c 4 NULL NULL; Using where; Using index
select a,c from t1 where c > 750;
a c
8 800
@@ -171,7 +171,7 @@ a b c d e
10 10 10 10 NULL
explain select * from t1 where b > 30;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 range b b 5 NULL NULL; Using where; Using index
+1 SIMPLE t1 range b b 4 NULL NULL; Using where; Using index
select * from t1 where b > 30;
a b c d e
4 40 400 4000 NULL
@@ -182,14 +182,14 @@ a b c d e
9 90 900 9000 NULL
explain select * from t1 where c > 750;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 range c c 5 NULL NULL; Using where
+1 SIMPLE t1 range c c 4 NULL NULL; Using where
select * from t1 where c > 750;
a b c d e
8 80 800 8000 NULL
9 90 900 9000 NULL
explain select b from t1 where b > 30;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 range b b 5 NULL NULL; Using where; Using index
+1 SIMPLE t1 range b b 4 NULL NULL; Using where; Using index
select b from t1 where b > 30;
b
40
@@ -200,7 +200,7 @@ b
90
explain select b from t1 where c > 750;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 range c c 5 NULL NULL; Using where
+1 SIMPLE t1 range c c 4 NULL NULL; Using where
select c from t1 where c > 750;
c
800
diff --git a/storage/tokudb/mysql-test/tokudb/r/cluster_key_part.result b/storage/tokudb/mysql-test/tokudb/r/cluster_key_part.result
index cd8fc340314..6df54cac05a 100644
--- a/storage/tokudb/mysql-test/tokudb/r/cluster_key_part.result
+++ b/storage/tokudb/mysql-test/tokudb/r/cluster_key_part.result
@@ -1,11 +1,22 @@
set default_storage_engine='tokudb';
-drop table if exists t;
-create table t (
-x int not null,
-y int not null,
-primary key(x))
-partition by hash(x) partitions 2;
-show create table t;
+DROP TABLE IF EXISTS t;
+CREATE TABLE t (a INT NOT NULL AUTO_INCREMENT, b INT, PRIMARY KEY(a), CLUSTERING KEY b(b)) ENGINE=TokuDB
+PARTITION BY RANGE(a) (PARTITION p0 VALUES LESS THAN (100) ENGINE = TokuDB, PARTITION p2 VALUES LESS THAN MAXVALUE ENGINE = TokuDB);
+SHOW CREATE TABLE t;
+Table Create Table
+t CREATE TABLE `t` (
+ `a` int(11) NOT NULL AUTO_INCREMENT,
+ `b` int(11) DEFAULT NULL,
+ PRIMARY KEY (`a`),
+ CLUSTERING KEY `b` (`b`)
+) ENGINE=TokuDB DEFAULT CHARSET=latin1
+/*!50100 PARTITION BY RANGE (a)
+(PARTITION p0 VALUES LESS THAN (100) ENGINE = TokuDB,
+ PARTITION p2 VALUES LESS THAN MAXVALUE ENGINE = TokuDB) */
+DROP TABLE t;
+CREATE TABLE t (x INT NOT NULL, y INT NOT NULL, PRIMARY KEY(x))
+PARTITION BY HASH(x) PARTITIONS 2;
+SHOW CREATE TABLE t;
Table Create Table
t CREATE TABLE `t` (
`x` int(11) NOT NULL,
@@ -14,8 +25,8 @@ t CREATE TABLE `t` (
) ENGINE=TokuDB DEFAULT CHARSET=latin1
/*!50100 PARTITION BY HASH (x)
PARTITIONS 2 */
-alter table t add clustering key(y);
-show create table t;
+ALTER TABLE t ADD CLUSTERING KEY(y);
+SHOW CREATE TABLE t;
Table Create Table
t CREATE TABLE `t` (
`x` int(11) NOT NULL,
@@ -25,4 +36,625 @@ t CREATE TABLE `t` (
) ENGINE=TokuDB DEFAULT CHARSET=latin1
/*!50100 PARTITION BY HASH (x)
PARTITIONS 2 */
-drop table t;
+DROP TABLE t;
+CREATE TABLE t1(a INT, b INT, c INT, d INT, PRIMARY KEY(a,b,c), CLUSTERING KEY(b), KEY (c)) ENGINE=TOKUDB
+PARTITION BY RANGE(a) (PARTITION p0 VALUES LESS THAN (5) ENGINE = TOKUDB, PARTITION p2 VALUES LESS THAN MAXVALUE ENGINE = TOKUDB);
+insert into t1 values (1,10,100,1000),(2,20,200,2000),(3,30,300,3000),(4,40,400,4000),(5,50,500,5000),(6,60,600,6000),(7,70,700,7000),(8,80,800,8000),(9,90,900,9000);
+explain select * from t1 where a > 5;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 range PRIMARY PRIMARY 4 NULL NULL; Using where
+select * from t1 where a > 5;
+a b c d
+6 60 600 6000
+7 70 700 7000
+8 80 800 8000
+9 90 900 9000
+explain select * from t1 where b > 30;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 range b b 4 NULL NULL; Using where
+select * from t1 where b > 30;
+a b c d
+4 40 400 4000
+5 50 500 5000
+6 60 600 6000
+7 70 700 7000
+8 80 800 8000
+9 90 900 9000
+explain select * from t1 where c > 750;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 range c c 4 NULL NULL; Using where
+select * from t1 where c > 750;
+a b c d
+8 80 800 8000
+9 90 900 9000
+explain select a from t1 where a > 8;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 range PRIMARY PRIMARY 4 NULL NULL; Using where; Using index
+select a from t1 where a > 8;
+a
+9
+explain select a,b from t1 where b > 30;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 range b b 4 NULL NULL; Using where; Using index
+select a,b from t1 where b > 30;
+a b
+4 40
+5 50
+6 60
+7 70
+8 80
+9 90
+explain select a,b from t1 where c > 750;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 index c b 4 NULL NULL; Using where; Using index
+select a,c from t1 where c > 750;
+a c
+8 800
+9 900
+alter table t1 add clustering index bdca(b,d,c,a);
+insert into t1 values (10,10,10,10);
+alter table t1 drop index bdca;
+alter table t1 drop primary key;
+explain select * from t1 where a > 5;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 ALL NULL NULL NULL NULL NULL; Using where
+select * from t1 where a > 5;
+a b c d
+6 60 600 6000
+7 70 700 7000
+8 80 800 8000
+9 90 900 9000
+10 10 10 10
+explain select * from t1 where b > 30;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 range b b 4 NULL NULL; Using where
+select * from t1 where b > 30;
+a b c d
+4 40 400 4000
+5 50 500 5000
+6 60 600 6000
+7 70 700 7000
+8 80 800 8000
+9 90 900 9000
+explain select * from t1 where c > 750;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 range c c 4 NULL NULL; Using where
+select * from t1 where c > 750;
+a b c d
+8 80 800 8000
+9 90 900 9000
+explain select b from t1 where b > 30;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 range b b 4 NULL NULL; Using where; Using index
+select b from t1 where b > 30;
+b
+40
+50
+60
+70
+80
+90
+explain select b from t1 where c > 750;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 range c c 4 NULL NULL; Using where
+select c from t1 where c > 750;
+c
+800
+900
+alter table t1 add e varchar(20);
+alter table t1 add primary key (a,b,c);
+explain select * from t1 where a > 5;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 range PRIMARY PRIMARY 4 NULL NULL; Using where
+select * from t1 where a > 5;
+a b c d e
+6 60 600 6000 NULL
+7 70 700 7000 NULL
+8 80 800 8000 NULL
+9 90 900 9000 NULL
+10 10 10 10 NULL
+explain select * from t1 where b > 30;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 range b b 4 NULL NULL; Using where
+select * from t1 where b > 30;
+a b c d e
+4 40 400 4000 NULL
+5 50 500 5000 NULL
+6 60 600 6000 NULL
+7 70 700 7000 NULL
+8 80 800 8000 NULL
+9 90 900 9000 NULL
+explain select * from t1 where c > 750;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 range c c 4 NULL NULL; Using where
+select * from t1 where c > 750;
+a b c d e
+8 80 800 8000 NULL
+9 90 900 9000 NULL
+explain select a from t1 where a > 8;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 range PRIMARY PRIMARY 4 NULL NULL; Using where; Using index
+select a from t1 where a > 8;
+a
+9
+10
+explain select a,b from t1 where b > 30;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 range b b 4 NULL NULL; Using where; Using index
+select a,b from t1 where b > 30;
+a b
+4 40
+5 50
+6 60
+7 70
+8 80
+9 90
+explain select a,b from t1 where c > 750;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 range c c 4 NULL NULL; Using where; Using index
+select a,c from t1 where c > 750;
+a c
+8 800
+9 900
+alter table t1 drop primary key;
+explain select * from t1 where a > 5;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 ALL NULL NULL NULL NULL NULL; Using where
+select * from t1 where a > 5;
+a b c d e
+6 60 600 6000 NULL
+7 70 700 7000 NULL
+8 80 800 8000 NULL
+9 90 900 9000 NULL
+10 10 10 10 NULL
+explain select * from t1 where b > 30;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 range b b 4 NULL NULL; Using where
+select * from t1 where b > 30;
+a b c d e
+4 40 400 4000 NULL
+5 50 500 5000 NULL
+6 60 600 6000 NULL
+7 70 700 7000 NULL
+8 80 800 8000 NULL
+9 90 900 9000 NULL
+explain select * from t1 where c > 750;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 range c c 4 NULL NULL; Using where
+select * from t1 where c > 750;
+a b c d e
+8 80 800 8000 NULL
+9 90 900 9000 NULL
+explain select b from t1 where b > 30;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 range b b 4 NULL NULL; Using where; Using index
+select b from t1 where b > 30;
+b
+40
+50
+60
+70
+80
+90
+explain select b from t1 where c > 750;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 range c c 4 NULL NULL; Using where
+select c from t1 where c > 750;
+c
+800
+900
+DROP TABLE t1;
+CREATE TABLE t1(a INT, b INT, c INT, d INT, PRIMARY KEY(a,b,c), CLUSTERING KEY(b), KEY (c)) ENGINE=TOKUDB
+PARTITION BY RANGE(b) (PARTITION p0 VALUES LESS THAN (50) ENGINE = TOKUDB, PARTITION p2 VALUES LESS THAN MAXVALUE ENGINE = TOKUDB);
+insert into t1 values (1,10,100,1000),(2,20,200,2000),(3,30,300,3000),(4,40,400,4000),(5,50,500,5000),(6,60,600,6000),(7,70,700,7000),(8,80,800,8000),(9,90,900,9000);
+explain select * from t1 where a > 5;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 range PRIMARY PRIMARY 4 NULL NULL; Using where
+select * from t1 where a > 5;
+a b c d
+6 60 600 6000
+7 70 700 7000
+8 80 800 8000
+9 90 900 9000
+explain select * from t1 where b > 30;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 range b b 4 NULL NULL; Using where
+select * from t1 where b > 30;
+a b c d
+4 40 400 4000
+5 50 500 5000
+6 60 600 6000
+7 70 700 7000
+8 80 800 8000
+9 90 900 9000
+explain select * from t1 where c > 750;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 range c c 4 NULL NULL; Using where
+select * from t1 where c > 750;
+a b c d
+8 80 800 8000
+9 90 900 9000
+explain select a from t1 where a > 8;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 range PRIMARY PRIMARY 4 NULL NULL; Using where; Using index
+select a from t1 where a > 8;
+a
+9
+explain select a,b from t1 where b > 30;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 range b b 4 NULL NULL; Using where; Using index
+select a,b from t1 where b > 30;
+a b
+4 40
+5 50
+6 60
+7 70
+8 80
+9 90
+explain select a,b from t1 where c > 750;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 index c b 4 NULL NULL; Using where; Using index
+select a,c from t1 where c > 750;
+a c
+8 800
+9 900
+alter table t1 add clustering index bdca(b,d,c,a);
+insert into t1 values (10,10,10,10);
+alter table t1 drop index bdca;
+alter table t1 drop primary key;
+explain select * from t1 where a > 5;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 ALL NULL NULL NULL NULL NULL; Using where
+select * from t1 where a > 5;
+a b c d
+10 10 10 10
+6 60 600 6000
+7 70 700 7000
+8 80 800 8000
+9 90 900 9000
+explain select * from t1 where b > 30;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 range b b 4 NULL NULL; Using where
+select * from t1 where b > 30;
+a b c d
+4 40 400 4000
+5 50 500 5000
+6 60 600 6000
+7 70 700 7000
+8 80 800 8000
+9 90 900 9000
+explain select * from t1 where c > 750;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 range c c 4 NULL NULL; Using where
+select * from t1 where c > 750;
+a b c d
+8 80 800 8000
+9 90 900 9000
+explain select b from t1 where b > 30;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 range b b 4 NULL NULL; Using where; Using index
+select b from t1 where b > 30;
+b
+40
+50
+60
+70
+80
+90
+explain select b from t1 where c > 750;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 range c c 4 NULL NULL; Using where
+select c from t1 where c > 750;
+c
+800
+900
+alter table t1 add e varchar(20);
+alter table t1 add primary key (a,b,c);
+explain select * from t1 where a > 5;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 range PRIMARY PRIMARY 4 NULL NULL; Using where
+select * from t1 where a > 5;
+a b c d e
+10 10 10 10 NULL
+6 60 600 6000 NULL
+7 70 700 7000 NULL
+8 80 800 8000 NULL
+9 90 900 9000 NULL
+explain select * from t1 where b > 30;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 range b b 4 NULL NULL; Using where
+select * from t1 where b > 30;
+a b c d e
+4 40 400 4000 NULL
+5 50 500 5000 NULL
+6 60 600 6000 NULL
+7 70 700 7000 NULL
+8 80 800 8000 NULL
+9 90 900 9000 NULL
+explain select * from t1 where c > 750;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 range c c 4 NULL NULL; Using where
+select * from t1 where c > 750;
+a b c d e
+8 80 800 8000 NULL
+9 90 900 9000 NULL
+explain select a from t1 where a > 8;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 range PRIMARY PRIMARY 4 NULL NULL; Using where; Using index
+select a from t1 where a > 8;
+a
+10
+9
+explain select a,b from t1 where b > 30;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 range b b 4 NULL NULL; Using where; Using index
+select a,b from t1 where b > 30;
+a b
+4 40
+5 50
+6 60
+7 70
+8 80
+9 90
+explain select a,b from t1 where c > 750;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 index c b 4 NULL NULL; Using where; Using index
+select a,c from t1 where c > 750;
+a c
+8 800
+9 900
+alter table t1 drop primary key;
+explain select * from t1 where a > 5;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 ALL NULL NULL NULL NULL NULL; Using where
+select * from t1 where a > 5;
+a b c d e
+10 10 10 10 NULL
+6 60 600 6000 NULL
+7 70 700 7000 NULL
+8 80 800 8000 NULL
+9 90 900 9000 NULL
+explain select * from t1 where b > 30;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 range b b 4 NULL NULL; Using where
+select * from t1 where b > 30;
+a b c d e
+4 40 400 4000 NULL
+5 50 500 5000 NULL
+6 60 600 6000 NULL
+7 70 700 7000 NULL
+8 80 800 8000 NULL
+9 90 900 9000 NULL
+explain select * from t1 where c > 750;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 range c c 4 NULL NULL; Using where
+select * from t1 where c > 750;
+a b c d e
+8 80 800 8000 NULL
+9 90 900 9000 NULL
+explain select b from t1 where b > 30;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 range b b 4 NULL NULL; Using where; Using index
+select b from t1 where b > 30;
+b
+40
+50
+60
+70
+80
+90
+explain select b from t1 where c > 750;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 range c c 4 NULL NULL; Using where
+select c from t1 where c > 750;
+c
+800
+900
+DROP TABLE t1;
+CREATE TABLE t1(a INT, b INT, c INT, d INT, PRIMARY KEY(a,b,c), CLUSTERING KEY(b), KEY (c)) ENGINE=TOKUDB
+PARTITION BY RANGE(c) (PARTITION p0 VALUES LESS THAN (500) ENGINE = TOKUDB, PARTITION p2 VALUES LESS THAN MAXVALUE ENGINE = TOKUDB);
+insert into t1 values (1,10,100,1000),(2,20,200,2000),(3,30,300,3000),(4,40,400,4000),(5,50,500,5000),(6,60,600,6000),(7,70,700,7000),(8,80,800,8000),(9,90,900,9000);
+explain select * from t1 where a > 5;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 range PRIMARY PRIMARY 4 NULL NULL; Using where
+select * from t1 where a > 5;
+a b c d
+6 60 600 6000
+7 70 700 7000
+8 80 800 8000
+9 90 900 9000
+explain select * from t1 where b > 30;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 range b b 4 NULL NULL; Using where
+select * from t1 where b > 30;
+a b c d
+4 40 400 4000
+5 50 500 5000
+6 60 600 6000
+7 70 700 7000
+8 80 800 8000
+9 90 900 9000
+explain select * from t1 where c > 750;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 range c c 4 NULL NULL; Using where
+select * from t1 where c > 750;
+a b c d
+8 80 800 8000
+9 90 900 9000
+explain select a from t1 where a > 8;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 range PRIMARY PRIMARY 4 NULL NULL; Using where; Using index
+select a from t1 where a > 8;
+a
+9
+explain select a,b from t1 where b > 30;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 range b b 4 NULL NULL; Using where; Using index
+select a,b from t1 where b > 30;
+a b
+4 40
+5 50
+6 60
+7 70
+8 80
+9 90
+explain select a,b from t1 where c > 750;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 index c b 4 NULL NULL; Using where; Using index
+select a,c from t1 where c > 750;
+a c
+8 800
+9 900
+alter table t1 add clustering index bdca(b,d,c,a);
+insert into t1 values (10,10,10,10);
+alter table t1 drop index bdca;
+alter table t1 drop primary key;
+explain select * from t1 where a > 5;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 ALL NULL NULL NULL NULL NULL; Using where
+select * from t1 where a > 5;
+a b c d
+10 10 10 10
+6 60 600 6000
+7 70 700 7000
+8 80 800 8000
+9 90 900 9000
+explain select * from t1 where b > 30;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 range b b 4 NULL NULL; Using where
+select * from t1 where b > 30;
+a b c d
+4 40 400 4000
+5 50 500 5000
+6 60 600 6000
+7 70 700 7000
+8 80 800 8000
+9 90 900 9000
+explain select * from t1 where c > 750;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 range c c 4 NULL NULL; Using where
+select * from t1 where c > 750;
+a b c d
+8 80 800 8000
+9 90 900 9000
+explain select b from t1 where b > 30;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 range b b 4 NULL NULL; Using where; Using index
+select b from t1 where b > 30;
+b
+40
+50
+60
+70
+80
+90
+explain select b from t1 where c > 750;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 range c c 4 NULL NULL; Using where
+select c from t1 where c > 750;
+c
+800
+900
+alter table t1 add e varchar(20);
+alter table t1 add primary key (a,b,c);
+explain select * from t1 where a > 5;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 range PRIMARY PRIMARY 4 NULL NULL; Using where
+select * from t1 where a > 5;
+a b c d e
+10 10 10 10 NULL
+6 60 600 6000 NULL
+7 70 700 7000 NULL
+8 80 800 8000 NULL
+9 90 900 9000 NULL
+explain select * from t1 where b > 30;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 range b b 4 NULL NULL; Using where
+select * from t1 where b > 30;
+a b c d e
+4 40 400 4000 NULL
+5 50 500 5000 NULL
+6 60 600 6000 NULL
+7 70 700 7000 NULL
+8 80 800 8000 NULL
+9 90 900 9000 NULL
+explain select * from t1 where c > 750;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 range c c 4 NULL NULL; Using where
+select * from t1 where c > 750;
+a b c d e
+8 80 800 8000 NULL
+9 90 900 9000 NULL
+explain select a from t1 where a > 8;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 range PRIMARY PRIMARY 4 NULL NULL; Using where; Using index
+select a from t1 where a > 8;
+a
+10
+9
+explain select a,b from t1 where b > 30;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 range b b 4 NULL NULL; Using where; Using index
+select a,b from t1 where b > 30;
+a b
+4 40
+5 50
+6 60
+7 70
+8 80
+9 90
+explain select a,b from t1 where c > 750;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 index c b 4 NULL NULL; Using where; Using index
+select a,c from t1 where c > 750;
+a c
+8 800
+9 900
+alter table t1 drop primary key;
+explain select * from t1 where a > 5;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 ALL NULL NULL NULL NULL NULL; Using where
+select * from t1 where a > 5;
+a b c d e
+10 10 10 10 NULL
+6 60 600 6000 NULL
+7 70 700 7000 NULL
+8 80 800 8000 NULL
+9 90 900 9000 NULL
+explain select * from t1 where b > 30;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 range b b 4 NULL NULL; Using where
+select * from t1 where b > 30;
+a b c d e
+4 40 400 4000 NULL
+5 50 500 5000 NULL
+6 60 600 6000 NULL
+7 70 700 7000 NULL
+8 80 800 8000 NULL
+9 90 900 9000 NULL
+explain select * from t1 where c > 750;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 range c c 4 NULL NULL; Using where
+select * from t1 where c > 750;
+a b c d e
+8 80 800 8000 NULL
+9 90 900 9000 NULL
+explain select b from t1 where b > 30;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 range b b 4 NULL NULL; Using where; Using index
+select b from t1 where b > 30;
+b
+40
+50
+60
+70
+80
+90
+explain select b from t1 where c > 750;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 range c c 4 NULL NULL; Using where
+select c from t1 where c > 750;
+c
+800
+900
+DROP TABLE t1;
diff --git a/storage/tokudb/mysql-test/tokudb/r/i_s_tokudb_lock_waits_released.result b/storage/tokudb/mysql-test/tokudb/r/i_s_tokudb_lock_waits_released.result
index 190581eddae..6f9592ddc1f 100644
--- a/storage/tokudb/mysql-test/tokudb/r/i_s_tokudb_lock_waits_released.result
+++ b/storage/tokudb/mysql-test/tokudb/r/i_s_tokudb_lock_waits_released.result
@@ -10,8 +10,10 @@ select * from information_schema.tokudb_lock_waits;
requesting_trx_id blocking_trx_id lock_waits_dname lock_waits_key_left lock_waits_key_right lock_waits_start_time lock_waits_table_schema lock_waits_table_name lock_waits_table_dictionary_name
set autocommit=0;
set tokudb_prelock_empty=OFF;
+set tokudb_lock_timeout=600000;
insert into t values (1);
set autocommit=0;
+set tokudb_lock_timeout=600000;
insert into t values (1);
select * from information_schema.tokudb_locks;
locks_trx_id locks_mysql_thread_id locks_dname locks_key_left locks_key_right locks_table_schema locks_table_name locks_table_dictionary_name
@@ -38,9 +40,11 @@ locks_trx_id locks_mysql_thread_id locks_dname locks_key_left locks_key_right lo
select * from information_schema.tokudb_lock_waits;
requesting_trx_id blocking_trx_id lock_waits_dname lock_waits_key_left lock_waits_key_right lock_waits_start_time lock_waits_table_schema lock_waits_table_name lock_waits_table_dictionary_name
set autocommit=0;
+set tokudb_lock_timeout=600000;
set tokudb_prelock_empty=OFF;
replace into t values (1);
set autocommit=0;
+set tokudb_lock_timeout=600000;
replace into t values (1);
select * from information_schema.tokudb_locks;
locks_trx_id locks_mysql_thread_id locks_dname locks_key_left locks_key_right locks_table_schema locks_table_name locks_table_dictionary_name
diff --git a/storage/tokudb/mysql-test/tokudb/r/i_s_tokudb_lock_waits_timeout.result b/storage/tokudb/mysql-test/tokudb/r/i_s_tokudb_lock_waits_timeout.result
index 13cdad7a438..ce8f7d2d7ec 100644
--- a/storage/tokudb/mysql-test/tokudb/r/i_s_tokudb_lock_waits_timeout.result
+++ b/storage/tokudb/mysql-test/tokudb/r/i_s_tokudb_lock_waits_timeout.result
@@ -12,7 +12,9 @@ set autocommit=0;
set tokudb_prelock_empty=OFF;
insert into t values (1);
set autocommit=0;
-insert into t values (1);
+set tokudb_prelock_empty=OFF;
+set tokudb_lock_timeout=60000;
+replace into t values (1);
select * from information_schema.tokudb_locks;
locks_trx_id locks_mysql_thread_id locks_dname locks_key_left locks_key_right locks_table_schema locks_table_name locks_table_dictionary_name
TRX_ID MYSQL_ID ./test/t-main 0001000000 0001000000 test t main
diff --git a/storage/tokudb/mysql-test/tokudb/r/i_s_tokudb_locks.result b/storage/tokudb/mysql-test/tokudb/r/i_s_tokudb_locks.result
index a07f7ba52fe..070f42b30de 100644
--- a/storage/tokudb/mysql-test/tokudb/r/i_s_tokudb_locks.result
+++ b/storage/tokudb/mysql-test/tokudb/r/i_s_tokudb_locks.result
@@ -3,8 +3,8 @@ set tokudb_prelock_empty=false;
drop table if exists t;
create table t (id int primary key);
set autocommit=0;
-select * from information_schema.tokudb_locks;
-locks_trx_id locks_mysql_thread_id locks_dname locks_key_left locks_key_right locks_table_schema locks_table_name locks_table_dictionary_name
+select locks_dname,locks_key_left,locks_key_right,locks_table_schema,locks_table_name,locks_table_dictionary_name from information_schema.tokudb_locks where locks_table_schema='test' and locks_table_name='t' and locks_table_dictionary_name='main' order by locks_key_left, locks_key_right;
+locks_dname locks_key_left locks_key_right locks_table_schema locks_table_name locks_table_dictionary_name
insert into t values (1);
insert into t values (3);
insert into t values (5);
@@ -12,17 +12,17 @@ set autocommit=0;
insert into t values (2);
insert into t values (4);
insert into t values (6);
-select * from information_schema.tokudb_locks order by locks_trx_id,locks_key_left;
-locks_trx_id locks_mysql_thread_id locks_dname locks_key_left locks_key_right locks_table_schema locks_table_name locks_table_dictionary_name
-TRX_ID MYSQL_ID ./test/t-main 0001000000 0001000000 test t main
-TRX_ID MYSQL_ID ./test/t-main 0003000000 0003000000 test t main
-TRX_ID MYSQL_ID ./test/t-main 0005000000 0005000000 test t main
-TRX_ID MYSQL_ID ./test/t-main 0002000000 0002000000 test t main
-TRX_ID MYSQL_ID ./test/t-main 0004000000 0004000000 test t main
-TRX_ID MYSQL_ID ./test/t-main 0006000000 0006000000 test t main
+select locks_dname,locks_key_left,locks_key_right,locks_table_schema,locks_table_name,locks_table_dictionary_name from information_schema.tokudb_locks where locks_table_schema='test' and locks_table_name='t' and locks_table_dictionary_name='main' order by locks_key_left, locks_key_right;
+locks_dname locks_key_left locks_key_right locks_table_schema locks_table_name locks_table_dictionary_name
+./test/t-main 0001000000 0001000000 test t main
+./test/t-main 0002000000 0002000000 test t main
+./test/t-main 0003000000 0003000000 test t main
+./test/t-main 0004000000 0004000000 test t main
+./test/t-main 0005000000 0005000000 test t main
+./test/t-main 0006000000 0006000000 test t main
commit;
commit;
-select * from information_schema.tokudb_locks;
-locks_trx_id locks_mysql_thread_id locks_dname locks_key_left locks_key_right locks_table_schema locks_table_name locks_table_dictionary_name
+select locks_dname,locks_key_left,locks_key_right,locks_table_schema,locks_table_name,locks_table_dictionary_name from information_schema.tokudb_locks where locks_table_schema='test' and locks_table_name='t' and locks_table_dictionary_name='main' order by locks_key_left, locks_key_right;
+locks_dname locks_key_left locks_key_right locks_table_schema locks_table_name locks_table_dictionary_name
commit;
drop table t;
diff --git a/storage/tokudb/mysql-test/tokudb/r/i_s_tokudb_locks_released.result b/storage/tokudb/mysql-test/tokudb/r/i_s_tokudb_locks_released.result
index 0a5862e9322..aa58437fc69 100644
--- a/storage/tokudb/mysql-test/tokudb/r/i_s_tokudb_locks_released.result
+++ b/storage/tokudb/mysql-test/tokudb/r/i_s_tokudb_locks_released.result
@@ -9,6 +9,8 @@ set autocommit=0;
set tokudb_prelock_empty=OFF;
insert into t values (1);
set autocommit=0;
+set tokudb_prelock_empty=OFF;
+set tokudb_lock_timeout=600000;
insert into t values (1);
select * from information_schema.tokudb_locks;
locks_trx_id locks_mysql_thread_id locks_dname locks_key_left locks_key_right locks_table_schema locks_table_name locks_table_dictionary_name
diff --git a/storage/tokudb/mysql-test/tokudb/r/i_s_tokudb_trx.result b/storage/tokudb/mysql-test/tokudb/r/i_s_tokudb_trx.result
index 63e4816e16e..3a9a936a7a6 100644
--- a/storage/tokudb/mysql-test/tokudb/r/i_s_tokudb_trx.result
+++ b/storage/tokudb/mysql-test/tokudb/r/i_s_tokudb_trx.result
@@ -1,23 +1,26 @@
set default_storage_engine='tokudb';
set tokudb_prelock_empty=false;
drop table if exists t;
-select trx_id,trx_mysql_thread_id from information_schema.tokudb_trx;
+select trx_id,trx_mysql_thread_id from information_schema.tokudb_trx where trx_mysql_thread_id in(connection_id());
trx_id trx_mysql_thread_id
set autocommit=0;
create table t (id int primary key);
insert into t values (1);
-select trx_id,trx_mysql_thread_id from information_schema.tokudb_trx;
-trx_id trx_mysql_thread_id
-TXN_ID_DEFAULT CLIENT_ID_DEFAULT
+select count(trx_mysql_thread_id) from information_schema.tokudb_trx where trx_mysql_thread_id in(connection_id());
+count(trx_mysql_thread_id)
+1
commit;
-select trx_id,trx_mysql_thread_id from information_schema.tokudb_trx;
+select trx_id,trx_mysql_thread_id from information_schema.tokudb_trx where trx_mysql_thread_id in(connection_id());
trx_id trx_mysql_thread_id
set autocommit=0;
insert into t values (2);
-select trx_id,trx_mysql_thread_id from information_schema.tokudb_trx;
-trx_id trx_mysql_thread_id
-TXN_ID_A CLIENT_ID_A
+select count(trx_mysql_thread_id) from information_schema.tokudb_trx where trx_mysql_thread_id in(connection_id());
+count(trx_mysql_thread_id)
+1
+select count(trx_mysql_thread_id) from information_schema.tokudb_trx where trx_mysql_thread_id in(connection_id());
+count(trx_mysql_thread_id)
+0
commit;
-select trx_id,trx_mysql_thread_id from information_schema.tokudb_trx;
+select trx_id,trx_mysql_thread_id from information_schema.tokudb_trx where trx_mysql_thread_id in(connection_id());
trx_id trx_mysql_thread_id
drop table t;
diff --git a/storage/tokudb/mysql-test/tokudb/r/type_bit.result b/storage/tokudb/mysql-test/tokudb/r/type_bit.result
index a5587de9e54..c147c203d43 100644
--- a/storage/tokudb/mysql-test/tokudb/r/type_bit.result
+++ b/storage/tokudb/mysql-test/tokudb/r/type_bit.result
@@ -675,7 +675,7 @@ INSERT INTO t1(a) VALUES
(65535),(65525),(65535),(65535),(65535),(65535),(65535),(65535),(65535),(65535);
EXPLAIN SELECT 1 FROM t1 GROUP BY a;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 range NULL a 3 NULL 6 Using index for group-by
+1 SIMPLE t1 index NULL a 3 NULL 10 Using index
SELECT 1 FROM t1 GROUP BY a;
1
1
diff --git a/storage/tokudb/mysql-test/tokudb/t/background_job_manager.opt b/storage/tokudb/mysql-test/tokudb/t/background_job_manager.opt
new file mode 100644
index 00000000000..3cc9ea3009e
--- /dev/null
+++ b/storage/tokudb/mysql-test/tokudb/t/background_job_manager.opt
@@ -0,0 +1 @@
+--tokudb-background-job-status
diff --git a/storage/tokudb/mysql-test/tokudb/t/background_job_manager.test b/storage/tokudb/mysql-test/tokudb/t/background_job_manager.test
new file mode 100644
index 00000000000..933814442e0
--- /dev/null
+++ b/storage/tokudb/mysql-test/tokudb/t/background_job_manager.test
@@ -0,0 +1,139 @@
+# This is a comprehensive test for the background job manager and
+# the information_schema.tokudb_background_job_status table.
+#
+# This test validates that analyze table in various modes operates as expected
+# for both foreground and background jobs.
+#
+# This test is NOT intended to test the actual results of an analysis.
+#
+# This test makes use of a global, debug-only tokudb variable
+# tokudb_debug_pause_background_job_manager in order to control the bjm and
+# prevent it from acting on any queued jobs.
+# This variable was necessary since the debug_sync facility requires any thread
+# that is syncing to have a valid THD associated with it, which a background
+# thread would not have. This variable is compiled out of release builds.
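+#
+# The control pattern, as a minimal sketch (a debug build is assumed, since
+# the pause variable is compiled out of release builds):
+#   set global tokudb_debug_pause_background_job_manager=TRUE;  # hold queued jobs
+#   analyze table t;                       # the job sits in the queue
+#   select * from information_schema.tokudb_background_job_status;
+#   set global tokudb_debug_pause_background_job_manager=FALSE; # drain the queue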
+
+-- source include/have_tokudb.inc
+-- source include/have_debug.inc
+
+-- enable_query_log
+
+set @orig_auto_analyze = @@session.tokudb_auto_analyze;
+set @orig_in_background = @@session.tokudb_analyze_in_background;
+set @orig_mode = @@session.tokudb_analyze_mode;
+set @orig_throttle = @@session.tokudb_analyze_throttle;
+set @orig_time = @@session.tokudb_analyze_time;
+set @orig_scale_percent = @@global.tokudb_cardinality_scale_percent;
+set @orig_default_storage_engine = @@session.default_storage_engine;
+set @orig_pause_background_job_manager = @@global.tokudb_debug_pause_background_job_manager;
+
+# first, let's set up to auto analyze in the background on just about any activity
+set session default_storage_engine='tokudb';
+set session tokudb_auto_analyze=1;
+set session tokudb_analyze_in_background=1;
+set session tokudb_analyze_mode=tokudb_analyze_standard;
+set session tokudb_analyze_throttle=0;
+set session tokudb_analyze_time=0;
+set global tokudb_cardinality_scale_percent=DEFAULT;
+
+# in a debug build, we can prevent the background job manager from running;
+# let's do that so we can see that an analyze was scheduled by the first
+# insert
+set global tokudb_debug_pause_background_job_manager=TRUE;
+
+# let's see how the i_s table is laid out
+show create table information_schema.tokudb_background_job_status;
+
+create table t1 (a int not null auto_increment, b int, c int, primary key(a), key kb(b), key kc(c), key kabc(a,b,c), key kab(a,b), key kbc(b,c));
+
+insert into t1(b,c) values(0,0), (1,1), (2,2), (3,3);
+
+# the insert above should have triggered an analyze, but since the bjm is paused,
+# we will see it sitting in the queue
+select database_name, table_name, job_type, job_params, scheduler from information_schema.tokudb_background_job_status;
+
+# some more tables
+create table t2 like t1;
+create table t3 like t1;
+create table t4 like t1;
+
+# manually analyze; t1 should be rejected because there is already a job
+# pending. t2, t3 and t4 should get queued.
+analyze table t1;
+analyze table t2;
+analyze table t3;
+analyze table t4;
+
+select database_name, table_name, job_type, job_params, scheduler from information_schema.tokudb_background_job_status;
+
+# let the bjm go to clear the jobs
+set global tokudb_debug_pause_background_job_manager=FALSE;
+
+# wait for the bjm queue to empty
+-- disable_query_log
+let $wait_condition=select count(*)=0 from information_schema.tokudb_background_job_status;
+-- source include/wait_condition.inc
+-- enable_query_log
+
+# pause the bjm again
+set global tokudb_debug_pause_background_job_manager=TRUE;
+
+# add some new jobs
+analyze table t1;
+analyze table t2;
+analyze table t3;
+analyze table t4;
+
+select database_name, table_name, job_type, job_params, scheduler from information_schema.tokudb_background_job_status;
+
+# alter a table; this should kill the job for t1
+alter table t1 add column d int;
+
+select database_name, table_name, job_type, job_params, scheduler from information_schema.tokudb_background_job_status;
+
+# try an explicit cancel on t2
+set session tokudb_analyze_mode=tokudb_analyze_cancel;
+analyze table t2;
+
+select database_name, table_name, job_type, job_params, scheduler from information_schema.tokudb_background_job_status;
+
+# try a recount on t1; it should schedule a new job
+set session tokudb_analyze_mode=tokudb_analyze_recount_rows;
+analyze table t1;
+
+select database_name, table_name, job_type, job_params, scheduler from information_schema.tokudb_background_job_status;
+
+# do a foreground analysis that clashes with a background job; it should
+# kill the background job for t3 and perform the analysis immediately
+set session tokudb_analyze_mode=tokudb_analyze_standard;
+set session tokudb_analyze_in_background=0;
+analyze table t3;
+
+select database_name, table_name, job_type, job_params, scheduler from information_schema.tokudb_background_job_status;
+
+# drop the tables; this should kill the remaining jobs for t1 and t4
+drop table t1;
+drop table t2;
+drop table t3;
+drop table t4;
+
+select database_name, table_name, job_type, job_params, scheduler from information_schema.tokudb_background_job_status;
+
+# let the bjm go
+set global tokudb_debug_pause_background_job_manager=FALSE;
+
+# cleanup
+-- disable_query_log
+let $wait_condition=select count(*)=0 from information_schema.tokudb_background_job_status;
+-- source include/wait_condition.inc
+
+set session tokudb_auto_analyze = @orig_auto_analyze;
+set session tokudb_analyze_in_background = @orig_in_background;
+set session tokudb_analyze_mode = @orig_mode;
+set session tokudb_analyze_throttle = @orig_throttle;
+set session tokudb_analyze_time = @orig_time;
+set global tokudb_cardinality_scale_percent = @orig_scale_percent;
+set session default_storage_engine = @orig_default_storage_engine;
+set global tokudb_debug_pause_background_job_manager = @orig_pause_background_job_manager;
+
+-- enable_query_log
diff --git a/storage/tokudb/mysql-test/tokudb/t/card_auto_analyze_lots.test b/storage/tokudb/mysql-test/tokudb/t/card_auto_analyze_lots.test
new file mode 100644
index 00000000000..ec74a4a28bc
--- /dev/null
+++ b/storage/tokudb/mysql-test/tokudb/t/card_auto_analyze_lots.test
@@ -0,0 +1,82 @@
+# Test the auto analyze on lots of tables
+-- source include/have_tokudb.inc
+
+-- disable_query_log
+let $max = 200;
+
+SET @orig_auto_analyze = @@session.tokudb_auto_analyze;
+SET @orig_in_background = @@session.tokudb_analyze_in_background;
+SET @orig_mode = @@session.tokudb_analyze_mode;
+SET @orig_throttle = @@session.tokudb_analyze_throttle;
+SET @orig_time = @@session.tokudb_analyze_time;
+
+SET SESSION tokudb_auto_analyze = 1;
+SET SESSION tokudb_analyze_in_background = 0;
+SET SESSION tokudb_analyze_mode = TOKUDB_ANALYZE_STANDARD;
+SET SESSION tokudb_analyze_throttle = 0;
+SET SESSION tokudb_analyze_time = 0;
+
+let $i = $max;
+while ($i > 0) {
+ eval CREATE TABLE ar_$i (a INT, b INT, PRIMARY KEY (a), KEY bkey (b)) ENGINE=TOKUDB;
+ dec $i;
+}
+
+# check that the one-row insertion triggered auto analyze within the calling
+# client context; the cardinality should go from NULL to 1
+let $i = $max;
+while ($i > 0) {
+ eval INSERT INTO ar_$i VALUES (0, 0);
+ dec $i;
+}
+-- enable_query_log
+let $i = $max;
+while ($i > 0) {
+ eval SHOW INDEX FROM ar_$i;
+ dec $i;
+}
+
+
+-- disable_query_log
+# check that lots of background analyses get scheduled and run
+# cleanly and serially in the background
+SET SESSION tokudb_auto_analyze = 1;
+SET SESSION tokudb_analyze_in_background = 1;
+SET SESSION tokudb_analyze_mode = TOKUDB_ANALYZE_STANDARD;
+SET SESSION tokudb_analyze_throttle = 0;
+SET SESSION tokudb_analyze_time = 0;
+
+let $i = $max;
+while ($i > 0) {
+ eval INSERT INTO ar_$i VALUES (1, 1), (2, 1), (3, 2), (4, 2);
+ dec $i;
+}
+
+let $i = $max;
+while ($i > 0) {
+ eval INSERT INTO ar_$i VALUES (5, 3), (6, 3), (7, 4), (8, 4);
+ dec $i;
+}
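+
+# after these two rounds each table holds 9 rows and b has 5 distinct
+# values (0..4); the test deliberately does not verify the resulting
+# cardinality (see the note below)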
+
+# it would take too long to wait for the stats to become up to date here, and
+# checking is quite non-deterministic; the InnoDB test does the same thing
+
+# dropping tables should cancel any running background jobs
+let $i = $max;
+while ($i > 0) {
+ eval DROP TABLE ar_$i;
+ dec $i;
+}
+
+# wait for the bjm queue to empty
+-- disable_query_log
+let $wait_condition=select count(*)=0 from information_schema.tokudb_background_job_status;
+-- source include/wait_condition.inc
+
+SET SESSION tokudb_auto_analyze = @orig_auto_analyze;
+SET SESSION tokudb_analyze_in_background = @orig_in_background;
+SET SESSION tokudb_analyze_mode = @orig_mode;
+SET SESSION tokudb_analyze_throttle = @orig_throttle;
+SET SESSION tokudb_analyze_time = @orig_time;
+
+-- enable_query_log
diff --git a/storage/tokudb/mysql-test/tokudb/t/card_scale_percent.test b/storage/tokudb/mysql-test/tokudb/t/card_scale_percent.test
new file mode 100644
index 00000000000..47f1eb37989
--- /dev/null
+++ b/storage/tokudb/mysql-test/tokudb/t/card_scale_percent.test
@@ -0,0 +1,56 @@
+-- source include/have_tokudb.inc
+
+-- disable_query_log
+
+set @orig_throttle = @@session.tokudb_analyze_throttle;
+set @orig_time = @@session.tokudb_analyze_time;
+set @orig_scale_percent = @@global.tokudb_cardinality_scale_percent;
+
+create table tt (a int, b int, c int, d int, primary key(a), key(b), key(c), key(d)) engine=tokudb;
+let $i=0;
+while ($i < 1000) {
+ eval insert into tt values ($i, $i, $i, $i);
+ inc $i;
+}
+while ($i < 2000) {
+ eval insert into tt values ($i, $i, $i, 0);
+ inc $i;
+}
+while ($i < 3000) {
+ eval insert into tt values ($i, $i, 0, 0);
+ inc $i;
+}
+while ($i < 4000) {
+ eval insert into tt values ($i, 0, 0, 0);
+ inc $i;
+}
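+
+# data shape: a has 4000 distinct values, b 3000, c 2000 and d 1000, so the
+# key on d is the most skewed; a rough expectation is that the cardinality
+# estimates sharpen toward those counts as the scale percent grows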
+
+set session tokudb_analyze_time=0;
+set session tokudb_analyze_throttle=0;
+
+-- enable_query_log
+
+set global tokudb_cardinality_scale_percent = 10;
+analyze table tt;
+show indexes from tt;
+
+set global tokudb_cardinality_scale_percent = 50;
+analyze table tt;
+show indexes from tt;
+
+set global tokudb_cardinality_scale_percent = 100;
+analyze table tt;
+show indexes from tt;
+
+set global tokudb_cardinality_scale_percent = 200;
+analyze table tt;
+show indexes from tt;
+
+-- disable_query_log
+
+drop table tt;
+set session tokudb_analyze_throttle = @orig_throttle;
+set session tokudb_analyze_time = @orig_time;
+set global tokudb_cardinality_scale_percent = @orig_scale_percent;
+
+-- enable_query_log
diff --git a/storage/tokudb/mysql-test/tokudb/t/cluster_key.test b/storage/tokudb/mysql-test/tokudb/t/cluster_key.test
index 9280324b8cc..d491784577e 100644
--- a/storage/tokudb/mysql-test/tokudb/t/cluster_key.test
+++ b/storage/tokudb/mysql-test/tokudb/t/cluster_key.test
@@ -5,140 +5,8 @@ SET DEFAULT_STORAGE_ENGINE='tokudb';
DROP TABLE IF EXISTS t1;
--enable_warnings
-create table t1(a int, b int, c int, d int, primary key(a), key(b) clustering=yes, key (c))engine=tokudb;
+create table t1(a int, b int, c int, d int, primary key(a,b,c), key(b) clustering=yes, key (c))engine=tokudb;
-insert into t1 values (1,10,100,1000),(2,20,200,2000),(3,30,300,3000),(4,40,400,4000),(5,50,500,5000),(6,60,600,6000),(7,70,700,7000),(8,80,800,8000),(9,90,900,9000);
-
-
-#normal queries
-
-# ignore rows column
---replace_column 9 NULL;
-explain select * from t1 where a > 5;
-select * from t1 where a > 5;
-
-# ignore rows column
---replace_column 9 NULL;
-explain select * from t1 where b > 30;
-select * from t1 where b > 30;
-
-# ignore rows column
---replace_column 9 NULL;
-explain select * from t1 where c > 750;
-select * from t1 where c > 750;
-
-#covering indexes
-
-# ignore rows column
---replace_column 9 NULL;
-explain select a from t1 where a > 8;
-select a from t1 where a > 8;
-
-# ignore rows column
---replace_column 9 NULL;
-explain select a,b from t1 where b > 30;
-select a,b from t1 where b > 30;
-
-# ignore rows column
---replace_column 9 NULL;
-explain select a,b from t1 where c > 750;
-select a,c from t1 where c > 750;
-
-
-alter table t1 add index bdca(b,d,c,a) clustering=yes;
-insert into t1 values (10,10,10,10);
-alter table t1 drop index bdca;
-
-alter table t1 drop primary key;
-# ignore rows column
---replace_column 9 NULL;
-explain select * from t1 where a > 5;
-select * from t1 where a > 5;
-
-# ignore rows column
---replace_column 9 NULL;
-explain select * from t1 where b > 30;
-select * from t1 where b > 30;
-
-# ignore rows column
---replace_column 9 NULL;
-explain select * from t1 where c > 750;
-select * from t1 where c > 750;
-
-#covering indexes
-
-# ignore rows column
---replace_column 9 NULL;
-explain select b from t1 where b > 30;
-select b from t1 where b > 30;
-
-# ignore rows column
---replace_column 9 NULL;
-explain select b from t1 where c > 750;
-select c from t1 where c > 750;
-
-alter table t1 add e varchar(20);
-
-alter table t1 add primary key (a);
-
-# ignore rows column
---replace_column 9 NULL;
-explain select * from t1 where a > 5;
-select * from t1 where a > 5;
-
-# ignore rows column
---replace_column 9 NULL;
-explain select * from t1 where b > 30;
-select * from t1 where b > 30;
-
-# ignore rows column
---replace_column 9 NULL;
-explain select * from t1 where c > 750;
-select * from t1 where c > 750;
-
-#covering indexes
-
-# ignore rows column
---replace_column 9 NULL;
-explain select a from t1 where a > 8;
-select a from t1 where a > 8;
-
-# ignore rows column
---replace_column 9 NULL;
-explain select a,b from t1 where b > 30;
-select a,b from t1 where b > 30;
-
-# ignore rows column
---replace_column 9 NULL;
-explain select a,b from t1 where c > 750;
-select a,c from t1 where c > 750;
-
-
-alter table t1 drop primary key;
-# ignore rows column
---replace_column 9 NULL;
-explain select * from t1 where a > 5;
-select * from t1 where a > 5;
-
-# ignore rows column
---replace_column 9 NULL;
-explain select * from t1 where b > 30;
-select * from t1 where b > 30;
-
-# ignore rows column
---replace_column 9 NULL;
-explain select * from t1 where c > 750;
-select * from t1 where c > 750;
-
-#covering indexes
-# ignore rows column
---replace_column 9 NULL;
-explain select b from t1 where b > 30;
-select b from t1 where b > 30;
-
-# ignore rows column
---replace_column 9 NULL;
-explain select b from t1 where c > 750;
-select c from t1 where c > 750;
+--source ../include/cluster_key.inc
drop table t1;
diff --git a/storage/tokudb/mysql-test/tokudb/t/cluster_key_part.test b/storage/tokudb/mysql-test/tokudb/t/cluster_key_part.test
index 8da7aa1078f..310866d14bc 100644
--- a/storage/tokudb/mysql-test/tokudb/t/cluster_key_part.test
+++ b/storage/tokudb/mysql-test/tokudb/t/cluster_key_part.test
@@ -5,19 +5,47 @@ source include/have_partition.inc;
set default_storage_engine='tokudb';
disable_warnings;
-drop table if exists t;
+DROP TABLE IF EXISTS t;
enable_warnings;
-create table t (
- x int not null,
- y int not null,
- primary key(x))
-partition by hash(x) partitions 2;
+CREATE TABLE t (a INT NOT NULL AUTO_INCREMENT, b INT, PRIMARY KEY(a), KEY b(b) CLUSTERING=YES) ENGINE=TokuDB
+PARTITION BY RANGE(a) (PARTITION p0 VALUES LESS THAN (100) ENGINE = TokuDB, PARTITION p2 VALUES LESS THAN MAXVALUE ENGINE = TokuDB);
-show create table t;
+SHOW CREATE TABLE t;
-alter table t add key(y) clustering=yes;
+DROP TABLE t;
-show create table t;
-drop table t;
+
+CREATE TABLE t (x INT NOT NULL, y INT NOT NULL, PRIMARY KEY(x))
+PARTITION BY HASH(x) PARTITIONS 2;
+
+SHOW CREATE TABLE t;
+
+ALTER TABLE t ADD KEY(y) CLUSTERING=YES;
+
+SHOW CREATE TABLE t;
+
+DROP TABLE t;
+
+
+CREATE TABLE t1(a INT, b INT, c INT, d INT, PRIMARY KEY(a,b,c), KEY(b) CLUSTERING=YES, KEY (c)) ENGINE=TOKUDB
+PARTITION BY RANGE(a) (PARTITION p0 VALUES LESS THAN (5) ENGINE = TOKUDB, PARTITION p2 VALUES LESS THAN MAXVALUE ENGINE = TOKUDB);
+
+--source ../include/cluster_key.inc
+
+DROP TABLE t1;
+
+CREATE TABLE t1(a INT, b INT, c INT, d INT, PRIMARY KEY(a,b,c), KEY(b) CLUSTERING=YES, KEY (c)) ENGINE=TOKUDB
+PARTITION BY RANGE(b) (PARTITION p0 VALUES LESS THAN (50) ENGINE = TOKUDB, PARTITION p2 VALUES LESS THAN MAXVALUE ENGINE = TOKUDB);
+
+--source ../include/cluster_key.inc
+
+DROP TABLE t1;
+
+CREATE TABLE t1(a INT, b INT, c INT, d INT, PRIMARY KEY(a,b,c), KEY(b) CLUSTERING=YES, KEY (c)) ENGINE=TOKUDB
+PARTITION BY RANGE(c) (PARTITION p0 VALUES LESS THAN (500) ENGINE = TOKUDB, PARTITION p2 VALUES LESS THAN MAXVALUE ENGINE = TOKUDB);
+
+--source ../include/cluster_key.inc
+
+DROP TABLE t1;
diff --git a/storage/tokudb/mysql-test/tokudb/t/disabled.def b/storage/tokudb/mysql-test/tokudb/t/disabled.def
new file mode 100644
index 00000000000..f7413a0edc5
--- /dev/null
+++ b/storage/tokudb/mysql-test/tokudb/t/disabled.def
@@ -0,0 +1,28 @@
+mvcc-19: tokutek
+mvcc-20: tokutek
+mvcc-27: tokutek
+storage_engine_default: tokudb is not the default storage engine
+fast_update_binlog_mixed : https://tokutek.atlassian.net/browse/DB-871
+fast_update_binlog_row : https://tokutek.atlassian.net/browse/DB-871
+fast_update_binlog_statement : https://tokutek.atlassian.net/browse/DB-871
+fast_update_blobs_fixed_varchar : https://tokutek.atlassian.net/browse/DB-871
+fast_update_blobs : https://tokutek.atlassian.net/browse/DB-871
+fast_update_blobs_with_varchar : https://tokutek.atlassian.net/browse/DB-871
+fast_update_char : https://tokutek.atlassian.net/browse/DB-871
+fast_update_deadlock : https://tokutek.atlassian.net/browse/DB-871
+fast_update_decr_floor : https://tokutek.atlassian.net/browse/DB-871
+fast_update_disable_slow_update : https://tokutek.atlassian.net/browse/DB-871
+fast_update_error : https://tokutek.atlassian.net/browse/DB-871
+fast_update_int_bounds : https://tokutek.atlassian.net/browse/DB-871
+fast_update_int : https://tokutek.atlassian.net/browse/DB-871
+fast_update_key : https://tokutek.atlassian.net/browse/DB-871
+fast_update_sqlmode : https://tokutek.atlassian.net/browse/DB-871
+fast_update_uint_bounds : https://tokutek.atlassian.net/browse/DB-871
+fast_update_varchar : https://tokutek.atlassian.net/browse/DB-871
+fast_upsert_bin_pad : https://tokutek.atlassian.net/browse/DB-871
+fast_upsert_char : https://tokutek.atlassian.net/browse/DB-871
+fast_upsert_deadlock : https://tokutek.atlassian.net/browse/DB-871
+fast_upsert_int : https://tokutek.atlassian.net/browse/DB-871
+fast_upsert_key : https://tokutek.atlassian.net/browse/DB-871
+fast_upsert_sqlmode : https://tokutek.atlassian.net/browse/DB-871
+fast_upsert_values : https://tokutek.atlassian.net/browse/DB-871
diff --git a/storage/tokudb/mysql-test/tokudb/t/i_s_tokudb_lock_waits_released.test b/storage/tokudb/mysql-test/tokudb/t/i_s_tokudb_lock_waits_released.test
index 3fd6ddb88fe..012c0af63b5 100644
--- a/storage/tokudb/mysql-test/tokudb/t/i_s_tokudb_lock_waits_released.test
+++ b/storage/tokudb/mysql-test/tokudb/t/i_s_tokudb_lock_waits_released.test
@@ -25,10 +25,12 @@ select * from information_schema.tokudb_lock_waits;
connect (conn_a,localhost,root,,);
set autocommit=0;
set tokudb_prelock_empty=OFF; # disable the bulk loader
+set tokudb_lock_timeout=600000; # set lock wait timeout to 10 minutes
insert into t values (1);
connect (conn_b,localhost,root,,);
set autocommit=0;
+set tokudb_lock_timeout=600000; # set lock wait timeout to 10 minutes
send insert into t values (1);
# should find the presence of a lock on 1st transaction
@@ -74,11 +76,13 @@ select * from information_schema.tokudb_lock_waits;
connect (conn_a,localhost,root,,);
set autocommit=0;
+set tokudb_lock_timeout=600000; # set lock wait timeout to 10 minutes
set tokudb_prelock_empty=OFF; # disable the bulk loader
replace into t values (1);
connect (conn_b,localhost,root,,);
set autocommit=0;
+set tokudb_lock_timeout=600000; # set lock wait timeout to 10 minutes
send replace into t values (1);
# should find the presence of a lock on 1st transaction
diff --git a/storage/tokudb/mysql-test/tokudb/t/i_s_tokudb_lock_waits_timeout.test b/storage/tokudb/mysql-test/tokudb/t/i_s_tokudb_lock_waits_timeout.test
index 06923d4ca58..42fb548814f 100644
--- a/storage/tokudb/mysql-test/tokudb/t/i_s_tokudb_lock_waits_timeout.test
+++ b/storage/tokudb/mysql-test/tokudb/t/i_s_tokudb_lock_waits_timeout.test
@@ -21,11 +21,14 @@ insert into t values (1);
connect (conn_b,localhost,root,,);
set autocommit=0;
-send insert into t values (1);
+set tokudb_prelock_empty=OFF;
+set tokudb_lock_timeout=60000; # set lock wait timeout to 1 minute
+
+send replace into t values (1);
# should find the presence of a lock on 1st transaction
connection default;
-let $wait_condition= select count(*)=1 from information_schema.processlist where info='insert into t values (1)' and state='update';
+let $wait_condition= select count(*)=1 from information_schema.processlist where info='replace into t values (1)' and state='update';
source include/wait_condition.inc;
real_sleep 1; # delay a little to shorten the update -> write row -> lock wait race
@@ -41,7 +44,9 @@ replace_column 1 TRX_ID 2 MYSQL_ID;
select trx_id,trx_mysql_thread_id from information_schema.tokudb_trx;
connection conn_a;
-sleep 5; # sleep longer than the lock timer to force a lock timeout on txn_b
+real_sleep 45; # sleep until we get close to the lock timeout, since wait_condition times out at 30 seconds
+let $wait_condition= select count(*)=0 from information_schema.processlist where info='replace into t values (1)' and state='update';
+source include/wait_condition.inc;
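+# timing sketch: conn_b's lock wait times out at 60s (tokudb_lock_timeout
+# above); real_sleep burns ~45s, then wait_condition polls (30s budget)
+# until the replace has timed out, and only then does conn_a commit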
commit;
# verify that the lock on the 1st transaction is released and replaced by the lock for the 2nd transaction
diff --git a/storage/tokudb/mysql-test/tokudb/t/i_s_tokudb_locks.test b/storage/tokudb/mysql-test/tokudb/t/i_s_tokudb_locks.test
index e5a67559b1a..8f205ad7f45 100644
--- a/storage/tokudb/mysql-test/tokudb/t/i_s_tokudb_locks.test
+++ b/storage/tokudb/mysql-test/tokudb/t/i_s_tokudb_locks.test
@@ -12,7 +12,7 @@ set autocommit=0;
let $default_id=`select connection_id()`;
# should be empty
-select * from information_schema.tokudb_locks;
+select locks_dname,locks_key_left,locks_key_right,locks_table_schema,locks_table_name,locks_table_dictionary_name from information_schema.tokudb_locks where locks_table_schema='test' and locks_table_name='t' and locks_table_dictionary_name='main' order by locks_key_left, locks_key_right;
insert into t values (1);
insert into t values (3);
@@ -28,8 +28,7 @@ insert into t values (6);
# should find 3 locks for 2 transactions
connection default;
-replace_column 1 TRX_ID 2 MYSQL_ID;
-eval select * from information_schema.tokudb_locks order by locks_trx_id,locks_key_left;
+eval select locks_dname,locks_key_left,locks_key_right,locks_table_schema,locks_table_name,locks_table_dictionary_name from information_schema.tokudb_locks where locks_table_schema='test' and locks_table_name='t' and locks_table_dictionary_name='main' order by locks_key_left, locks_key_right;
connection conn_a;
commit;
@@ -37,9 +36,9 @@ connection default;
commit;
# should be empty
-select * from information_schema.tokudb_locks;
+select locks_dname,locks_key_left,locks_key_right,locks_table_schema,locks_table_name,locks_table_dictionary_name from information_schema.tokudb_locks where locks_table_schema='test' and locks_table_name='t' and locks_table_dictionary_name='main' order by locks_key_left, locks_key_right;
commit;
disconnect conn_a;
-drop table t;
\ No newline at end of file
+drop table t;
diff --git a/storage/tokudb/mysql-test/tokudb/t/i_s_tokudb_locks_released.test b/storage/tokudb/mysql-test/tokudb/t/i_s_tokudb_locks_released.test
index 19f413e79f9..bbb0533e784 100644
--- a/storage/tokudb/mysql-test/tokudb/t/i_s_tokudb_locks_released.test
+++ b/storage/tokudb/mysql-test/tokudb/t/i_s_tokudb_locks_released.test
@@ -27,6 +27,8 @@ insert into t values (1);
connect (conn_b,localhost,root,,);
set autocommit=0;
+set tokudb_prelock_empty=OFF;
+set tokudb_lock_timeout=600000; # set lock wait timeout to 10 minutes
send insert into t values (1);
diff --git a/storage/tokudb/mysql-test/tokudb/t/i_s_tokudb_trx.test b/storage/tokudb/mysql-test/tokudb/t/i_s_tokudb_trx.test
index d3c2636ba54..517280391c4 100644
--- a/storage/tokudb/mysql-test/tokudb/t/i_s_tokudb_trx.test
+++ b/storage/tokudb/mysql-test/tokudb/t/i_s_tokudb_trx.test
@@ -8,35 +8,32 @@ drop table if exists t;
enable_warnings;
# should be empty
-select trx_id,trx_mysql_thread_id from information_schema.tokudb_trx;
+select trx_id,trx_mysql_thread_id from information_schema.tokudb_trx where trx_mysql_thread_id in(connection_id());
# should have my txn
-let $default_id=`select connection_id()`;
set autocommit=0;
create table t (id int primary key);
insert into t values (1);
-replace_column 1 TXN_ID_DEFAULT 2 CLIENT_ID_DEFAULT;
-eval select trx_id,trx_mysql_thread_id from information_schema.tokudb_trx;
+select count(trx_mysql_thread_id) from information_schema.tokudb_trx where trx_mysql_thread_id in(connection_id());
# should be empty
commit;
-select trx_id,trx_mysql_thread_id from information_schema.tokudb_trx;
+select trx_id,trx_mysql_thread_id from information_schema.tokudb_trx where trx_mysql_thread_id in(connection_id());
connect(conn_a,localhost,root,,);
-let a_id=`select connection_id()`;
set autocommit=0;
insert into t values (2);
+select count(trx_mysql_thread_id) from information_schema.tokudb_trx where trx_mysql_thread_id in(connection_id());
connection default;
-replace_column 1 TXN_ID_A 2 CLIENT_ID_A;
-eval select trx_id,trx_mysql_thread_id from information_schema.tokudb_trx;
+select count(trx_mysql_thread_id) from information_schema.tokudb_trx where trx_mysql_thread_id in(connection_id());
connection conn_a;
commit;
connection default;
# should be empty
-select trx_id,trx_mysql_thread_id from information_schema.tokudb_trx;
+select trx_id,trx_mysql_thread_id from information_schema.tokudb_trx where trx_mysql_thread_id in(connection_id());
disconnect conn_a;
-drop table t;
\ No newline at end of file
+drop table t;
diff --git a/storage/tokudb/mysql-test/tokudb/t/suite.opt b/storage/tokudb/mysql-test/tokudb/t/suite.opt
new file mode 100644
index 00000000000..23511b05020
--- /dev/null
+++ b/storage/tokudb/mysql-test/tokudb/t/suite.opt
@@ -0,0 +1 @@
+$TOKUDB_OPT $TOKUDB_LOAD_ADD --loose-tokudb-check-jemalloc=0
diff --git a/storage/tokudb/mysql-test/tokudb_add_index/disabled.def b/storage/tokudb/mysql-test/tokudb_add_index/disabled.def
deleted file mode 100644
index e69de29bb2d..00000000000
--- a/storage/tokudb/mysql-test/tokudb_add_index/disabled.def
+++ /dev/null
diff --git a/storage/tokudb/mysql-test/tokudb_add_index/t/suite.opt b/storage/tokudb/mysql-test/tokudb_add_index/t/suite.opt
new file mode 100644
index 00000000000..23511b05020
--- /dev/null
+++ b/storage/tokudb/mysql-test/tokudb_add_index/t/suite.opt
@@ -0,0 +1 @@
+$TOKUDB_OPT $TOKUDB_LOAD_ADD --loose-tokudb-check-jemalloc=0
diff --git a/storage/tokudb/mysql-test/tokudb_alter_table/disabled.def b/storage/tokudb/mysql-test/tokudb_alter_table/disabled.def
deleted file mode 100644
index 828c5ee216b..00000000000
--- a/storage/tokudb/mysql-test/tokudb_alter_table/disabled.def
+++ /dev/null
@@ -1,6 +0,0 @@
-# alter table
-hcad_tmp_tables: tmp tables bypass hot alter, we run hcad_tmp_tables_56 instead
-hcad_with_locks: how this could work, if alter needs an exclusive mdl lock?
-
-frm_discover_partition: No partition discovery in MariaDB 5.5
-
diff --git a/storage/tokudb/mysql-test/tokudb_alter_table/t/disabled.def b/storage/tokudb/mysql-test/tokudb_alter_table/t/disabled.def
new file mode 100644
index 00000000000..3ea96dac116
--- /dev/null
+++ b/storage/tokudb/mysql-test/tokudb_alter_table/t/disabled.def
@@ -0,0 +1,8 @@
+# alter table
+hcad_tmp_tables: tmp tables bypass hot alter, we run hcad_tmp_tables_56 instead
+hcad_with_locks: can't alter a table concurrently with reading it in 5.5
+
+# virtual columns
+virtual_columns: no virtual columns in MySQL 5.5
+
+frm_discover_partition: needs the tokutek patch for frm discovery for partitioned tables
diff --git a/storage/tokudb/mysql-test/tokudb_alter_table/t/suite.opt b/storage/tokudb/mysql-test/tokudb_alter_table/t/suite.opt
new file mode 100644
index 00000000000..23511b05020
--- /dev/null
+++ b/storage/tokudb/mysql-test/tokudb_alter_table/t/suite.opt
@@ -0,0 +1 @@
+$TOKUDB_OPT $TOKUDB_LOAD_ADD --loose-tokudb-check-jemalloc=0
diff --git a/storage/tokudb/mysql-test/tokudb_backup/r/tokudb_backup_exclude.result b/storage/tokudb/mysql-test/tokudb_backup/r/tokudb_backup_exclude.result
new file mode 100644
index 00000000000..0bec62e54f6
--- /dev/null
+++ b/storage/tokudb/mysql-test/tokudb_backup/r/tokudb_backup_exclude.result
@@ -0,0 +1,31 @@
+create table t1(a INT, b INT, c INT, KEY(a), KEY(b), KEY(c)) engine='tokudb';
+create table t1a(a INT, b INT, c INT, KEY(a), KEY(b), KEY(c)) engine='tokudb';
+create table t1b(a INT, b INT, c INT, KEY(a), KEY(b), KEY(c)) engine='tokudb';
+create table t1c(a INT, b INT, c INT, KEY(a), KEY(b), KEY(c)) engine='tokudb';
+select @@session.tokudb_backup_last_error;
+@@session.tokudb_backup_last_error
+0
+select @@session.tokudb_backup_last_error_string;
+@@session.tokudb_backup_last_error_string
+NULL
+20
+set session tokudb_backup_exclude='(t1a|t1c)+';
+select @@session.tokudb_backup_last_error;
+@@session.tokudb_backup_last_error
+0
+select @@session.tokudb_backup_last_error_string;
+@@session.tokudb_backup_last_error_string
+NULL
+10
+set session tokudb_backup_exclude='t1[abc]+';
+select @@session.tokudb_backup_last_error;
+@@session.tokudb_backup_last_error
+0
+select @@session.tokudb_backup_last_error_string;
+@@session.tokudb_backup_last_error_string
+NULL
+5
+drop table t1;
+drop table t1a;
+drop table t1b;
+drop table t1c;
diff --git a/storage/tokudb/mysql-test/tokudb_backup/r/tokudb_backup_set_last_error.result b/storage/tokudb/mysql-test/tokudb_backup/r/tokudb_backup_set_last_error.result
new file mode 100644
index 00000000000..e1baf8418ad
--- /dev/null
+++ b/storage/tokudb/mysql-test/tokudb_backup/r/tokudb_backup_set_last_error.result
@@ -0,0 +1,20 @@
+create table t1(a INT, b INT, c INT, KEY(a), KEY(b), KEY(c)) engine='tokudb';
+set session tokudb_backup_dir='/aint/no/way/this/exists/here';
+ERROR 42000: Variable 'tokudb_backup_dir' can't be set to the value of '/aint/no/way/this/exists/here'
+select @@session.tokudb_backup_last_error;
+@@session.tokudb_backup_last_error
+2
+select @@session.tokudb_backup_last_error_string;
+@@session.tokudb_backup_last_error_string
+Could not get real path for /aint/no/way/this/exists/here
+set session tokudb_backup_last_error_string='this should not crash the server';
+select @@session.tokudb_backup_last_error_string;
+@@session.tokudb_backup_last_error_string
+this should not crash the server
+set session tokudb_backup_dir='/aint/no/way/this/exists/here';
+ERROR 42000: Variable 'tokudb_backup_dir' can't be set to the value of '/aint/no/way/this/exists/here'
+select @@session.tokudb_backup_last_error_string;
+@@session.tokudb_backup_last_error_string
+Could not get real path for /aint/no/way/this/exists/here
+set session tokudb_backup_last_error_string = @old_backup_last_error_string;
+drop table t1;
diff --git a/storage/tokudb/mysql-test/tokudb_backup/t/suite.opt b/storage/tokudb/mysql-test/tokudb_backup/t/suite.opt
new file mode 100644
index 00000000000..e52bd6327e0
--- /dev/null
+++ b/storage/tokudb/mysql-test/tokudb_backup/t/suite.opt
@@ -0,0 +1 @@
+$TOKUDB_OPT $TOKUDB_LOAD_ADD $TOKUDB_BACKUP_OPT $TOKUDB_BACKUP_LOAD_ADD --loose-tokudb-check-jemalloc=0
diff --git a/storage/tokudb/mysql-test/tokudb_backup/t/tokudb_backup_exclude.test b/storage/tokudb/mysql-test/tokudb_backup/t/tokudb_backup_exclude.test
new file mode 100644
index 00000000000..a0ff5152f66
--- /dev/null
+++ b/storage/tokudb/mysql-test/tokudb_backup/t/tokudb_backup_exclude.test
@@ -0,0 +1,69 @@
+# This test specifically exercises the TokuDB backup exclude functionality.
+# This is _not_ an illustration of how to exclude tables from a TokuDB backup;
+# if you exclude TokuDB database files in this way, you will end up with a
+# useless backup.
+source include/have_tokudb_backup.inc;
+
+disable_query_log;
+
+set @old_backup_exclude = @@session.tokudb_backup_exclude;
+
+enable_query_log;
+
+# This should create 20 files prefixed with '_test_t1'
+create table t1(a INT, b INT, c INT, KEY(a), KEY(b), KEY(c)) engine='tokudb';
+create table t1a(a INT, b INT, c INT, KEY(a), KEY(b), KEY(c)) engine='tokudb';
+create table t1b(a INT, b INT, c INT, KEY(a), KEY(b), KEY(c)) engine='tokudb';
+create table t1c(a INT, b INT, c INT, KEY(a), KEY(b), KEY(c)) engine='tokudb';
+
+# This should not filter any files
+disable_query_log;
+--exec mkdir $MYSQLTEST_VARDIR/tmp/backup
+--eval set session tokudb_backup_dir='$MYSQLTEST_VARDIR/tmp/backup'
+enable_query_log;
+
+select @@session.tokudb_backup_last_error;
+select @@session.tokudb_backup_last_error_string;
+
+# 20 files should be in the backup set
+--exec ls $MYSQLTEST_VARDIR/tmp/backup/mysql_data_dir | grep -c _test_t1
+
+--exec rm -rf $MYSQLTEST_VARDIR/tmp/backup
+
+
+# This should filter all files for the t1a and t1c tables
+set session tokudb_backup_exclude='(t1a|t1c)+';
+
+disable_query_log;
+--exec mkdir $MYSQLTEST_VARDIR/tmp/backup
+--eval set session tokudb_backup_dir='$MYSQLTEST_VARDIR/tmp/backup'
+enable_query_log;
+
+select @@session.tokudb_backup_last_error;
+select @@session.tokudb_backup_last_error_string;
+
+# 10 files should be in the backup set
+--exec ls $MYSQLTEST_VARDIR/tmp/backup/mysql_data_dir | grep -c _test_t1
+
+--exec rm -rf $MYSQLTEST_VARDIR/tmp/backup
+
+# This should filter all files for the t1a, t1b, and t1c tables
+set session tokudb_backup_exclude='t1[abc]+';
+
+disable_query_log;
+--exec mkdir $MYSQLTEST_VARDIR/tmp/backup
+--eval set session tokudb_backup_dir='$MYSQLTEST_VARDIR/tmp/backup'
+enable_query_log;
+
+select @@session.tokudb_backup_last_error;
+select @@session.tokudb_backup_last_error_string;
+
+# 5 files should be in the backup set
+--exec ls $MYSQLTEST_VARDIR/tmp/backup/mysql_data_dir | grep -c _test_t1
+
+--exec rm -rf $MYSQLTEST_VARDIR/tmp/backup
+
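+# A minimal sketch following the same pattern, with one more exclude value;
+# the expected count of 15 assumes the five-dictionaries-per-table layout
+# (main, status, one file per key) implied by the 20/10/5 counts above.
+set session tokudb_backup_exclude='t1b';
+
+disable_query_log;
+--exec mkdir $MYSQLTEST_VARDIR/tmp/backup
+--eval set session tokudb_backup_dir='$MYSQLTEST_VARDIR/tmp/backup'
+enable_query_log;
+
+# 15 files should be in the backup set (everything except t1b's dictionaries)
+--exec ls $MYSQLTEST_VARDIR/tmp/backup/mysql_data_dir | grep -c _test_t1
+
+--exec rm -rf $MYSQLTEST_VARDIR/tmp/backup
+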
+drop table t1;
+drop table t1a;
+drop table t1b;
+drop table t1c;
diff --git a/storage/tokudb/mysql-test/tokudb_backup/t/tokudb_backup_set_last_error.test b/storage/tokudb/mysql-test/tokudb_backup/t/tokudb_backup_set_last_error.test
new file mode 100644
index 00000000000..18fc8cc5efb
--- /dev/null
+++ b/storage/tokudb/mysql-test/tokudb_backup/t/tokudb_backup_set_last_error.test
@@ -0,0 +1,32 @@
+# This test validates that the plugin will not crash if a user sets
+# tokudb_backup_last_error_string after performing a backup.
+source include/have_tokudb_backup.inc;
+
+disable_query_log;
+
+set @old_backup_last_error_string = @@session.tokudb_backup_last_error_string;
+
+enable_query_log;
+
+create table t1(a INT, b INT, c INT, KEY(a), KEY(b), KEY(c)) engine='tokudb';
+
+# this should fail and set the error string since the dummy directory
+# doesn't exist
+--error ER_WRONG_VALUE_FOR_VAR
+--eval set session tokudb_backup_dir='/aint/no/way/this/exists/here'
+
+select @@session.tokudb_backup_last_error;
+select @@session.tokudb_backup_last_error_string;
+
+set session tokudb_backup_last_error_string='this should not crash the server';
+select @@session.tokudb_backup_last_error_string;
+
+# this should fail again and set the error string since the dummy directory
+# doesn't exist
+--error ER_WRONG_VALUE_FOR_VAR
+--eval set session tokudb_backup_dir='/aint/no/way/this/exists/here'
+select @@session.tokudb_backup_last_error_string;
+
+set session tokudb_backup_last_error_string = @old_backup_last_error_string;
+
+drop table t1;
diff --git a/storage/tokudb/mysql-test/tokudb_bugs/disabled.def b/storage/tokudb/mysql-test/tokudb_bugs/disabled.def
index 068cbb413e0..d0d1a47e006 100644
--- a/storage/tokudb/mysql-test/tokudb_bugs/disabled.def
+++ b/storage/tokudb/mysql-test/tokudb_bugs/disabled.def
@@ -1,14 +1,2 @@
-memcache_dirty : #4609 memcache patch from facebook not ported to mysql 5.5
-4472 : #4521 MDL for alter table in 5.5 prohibits this test from having any chance of working
-
-tokudb_drop_part_table_668: no tokudb test data in mariadb tree
-tokudb_drop_simple_table_668: no tokudb test data in mariadb tree
5585: times out, too many huge insert...selects
-fileops-3: how this could work, if alter needs an exclusive mdl lock?
-6053: tokudb is not the default storage engine
-
-checkpoint_lock_2: test can not work when the checkpoint_safe_lock is a fair rwlock
-
-# this test was added in 7.5.4 and fails in 10.0
-# but running this very test in 7.5.3 fails in exactly the same manner
-db768: never worked. tokutek was informed, 2015-01-14
+db233: different execution path in mariadb, debug_sync point is not hit
diff --git a/storage/tokudb/mysql-test/tokudb_bugs/r/5585.result b/storage/tokudb/mysql-test/tokudb_bugs/r/5585.result
index 608afa00370..1008764148b 100644
--- a/storage/tokudb/mysql-test/tokudb_bugs/r/5585.result
+++ b/storage/tokudb/mysql-test/tokudb_bugs/r/5585.result
@@ -10,12 +10,6 @@ insert into t1 select t1.file_id+40, t1.file_number+40 from t1;
insert into t1 select t1.file_id+100, t1.file_number+100 from t1;
insert into t1 select t1.file_id+200, t1.file_number+200 from t1;
insert into t1 select t1.file_id+400, t1.file_number+400 from t1;
-insert into t1 select t1.file_id+1000, t1.file_number+1000 from t1;
-insert into t1 select t1.file_id+10000, t1.file_number+10000 from t1;
-insert into t1 select t1.file_id+100000, t1.file_number+100000 from t1;
-insert into t1 select t1.file_id+1000000, t1.file_number+1000000 from t1;
-insert into t1 select t1.file_id+10000000, t1.file_number+10000000 from t1;
-insert into t1 select t1.file_id+100000000, t1.file_number+100000000 from t1;
create table t2 (
file_id bigint unsigned not null,
country char(2) not null,
diff --git a/storage/tokudb/mysql-test/tokudb_bugs/r/db233.result b/storage/tokudb/mysql-test/tokudb_bugs/r/db233.result
new file mode 100644
index 00000000000..e5808f52e69
--- /dev/null
+++ b/storage/tokudb/mysql-test/tokudb_bugs/r/db233.result
@@ -0,0 +1,37 @@
+SET SESSION tokudb_auto_analyze = 0;
+SET SESSION tokudb_analyze_in_background = 0;
+CREATE TABLE t1(
+`id` int(10) unsigned NOT NULL,
+`k` int(10) unsigned NOT NULL DEFAULT '0',
+`c` char(120) NOT NULL DEFAULT '',
+`pad` char(60) NOT NULL DEFAULT '',
+KEY `xid` (`id`),
+KEY `k` (`k`)
+) ENGINE=TokuDB DEFAULT CHARSET=latin1;
+INSERT INTO t1 VALUES(1, 1, '1', '1'), (2, 2, '2', '2'), (3, 3, '3', '3'), (4, 4, '4', '4'),
+(5, 5, '5', '5'), (6, 6, '6', '6'), (6, 6, '6', '6'), (7, 7, '7', '7'),
+(8, 8, '8', '8'), (9, 9, '9', '9'), (10, 10, '10', '10'), (11, 11, '11', '11');
+ANALYZE TABLE t1;
+Table Op Msg_type Msg_text
+test.t1 analyze status OK
+set DEBUG_SYNC = 'tokudb_icp_desc_scan_invalidate SIGNAL hit1 WAIT_FOR done1';
+SELECT c FROM t1 WHERE id BETWEEN 5 AND 8 ORDER BY id DESC;
+set DEBUG_SYNC = 'now WAIT_FOR hit1';
+set DEBUG_SYNC = 'now SIGNAL done1';
+c
+8
+7
+6
+6
+5
+set DEBUG_SYNC = 'tokudb_icp_asc_scan_out_of_range SIGNAL hit2 WAIT_FOR done2';
+SELECT c FROM t1 WHERE id BETWEEN 5 AND 8 ORDER BY id ASC;
+set DEBUG_SYNC = 'now WAIT_FOR hit2';
+set DEBUG_SYNC = 'now SIGNAL done2';
+c
+5
+6
+6
+7
+8
+drop table t1;
diff --git a/storage/tokudb/mysql-test/tokudb_bugs/r/db397_delete_trigger.result b/storage/tokudb/mysql-test/tokudb_bugs/r/db397_delete_trigger.result
index da82fa445e8..c8565fb4b2b 100644
--- a/storage/tokudb/mysql-test/tokudb_bugs/r/db397_delete_trigger.result
+++ b/storage/tokudb/mysql-test/tokudb_bugs/r/db397_delete_trigger.result
@@ -25,11 +25,11 @@ select col1,action from t1_audit;
col1 action
0 DUMMY
1 BEFORE DEL
-select locks_dname,locks_key_left,locks_key_right from information_schema.tokudb_locks;
+select locks_dname,locks_key_left,locks_key_right from information_schema.tokudb_locks order by locks_dname,locks_key_left,locks_key_right;
locks_dname locks_key_left locks_key_right
-./test/t1_audit-main 0200000000000000 0200000000000000
-./test/t1-main ff01000000 0101000000
./test/t1-main 0001000000 0001000000
+./test/t1-main ff01000000 0101000000
+./test/t1_audit-main 0200000000000000 0200000000000000
commit;
drop trigger t1_trigger;
create trigger t1_trigger after delete on t1
@@ -46,11 +46,11 @@ col1 action
0 DUMMY
1 BEFORE DEL
2 AFTER DELE
-select locks_dname,locks_key_left,locks_key_right from information_schema.tokudb_locks;
+select locks_dname,locks_key_left,locks_key_right from information_schema.tokudb_locks order by locks_dname,locks_key_left,locks_key_right;
locks_dname locks_key_left locks_key_right
-./test/t1_audit-main 0300000000000000 0300000000000000
-./test/t1-main ff02000000 0102000000
./test/t1-main 0002000000 0002000000
+./test/t1-main ff02000000 0102000000
+./test/t1_audit-main 0300000000000000 0300000000000000
commit;
drop trigger t1_trigger;
drop table t1;
diff --git a/storage/tokudb/mysql-test/tokudb_bugs/r/db397_insert_trigger.result b/storage/tokudb/mysql-test/tokudb_bugs/r/db397_insert_trigger.result
index 41765a6fcd3..aef99a9adcd 100644
--- a/storage/tokudb/mysql-test/tokudb_bugs/r/db397_insert_trigger.result
+++ b/storage/tokudb/mysql-test/tokudb_bugs/r/db397_insert_trigger.result
@@ -25,10 +25,10 @@ select col1,action from t1_audit;
col1 action
0 DUMMY
1 BEFORE INSERT
-select locks_dname,locks_key_left,locks_key_right from information_schema.tokudb_locks;
+select locks_dname,locks_key_left,locks_key_right from information_schema.tokudb_locks order by locks_dname,locks_key_left,locks_key_right;
locks_dname locks_key_left locks_key_right
-./test/t1_audit-main 0200000000000000 0200000000000000
./test/t1-main 0001000000 0001000000
+./test/t1_audit-main 0200000000000000 0200000000000000
commit;
drop trigger t1_trigger;
create trigger t1_trigger after insert on t1
@@ -46,10 +46,10 @@ col1 action
0 DUMMY
1 BEFORE INSERT
2 AFTER INSERT
-select locks_dname,locks_key_left,locks_key_right from information_schema.tokudb_locks;
+select locks_dname,locks_key_left,locks_key_right from information_schema.tokudb_locks order by locks_dname,locks_key_left,locks_key_right;
locks_dname locks_key_left locks_key_right
-./test/t1_audit-main 0300000000000000 0300000000000000
./test/t1-main 0002000000 0002000000
+./test/t1_audit-main 0300000000000000 0300000000000000
commit;
drop trigger t1_trigger;
drop table t1;
diff --git a/storage/tokudb/mysql-test/tokudb_bugs/r/db397_update_trigger.result b/storage/tokudb/mysql-test/tokudb_bugs/r/db397_update_trigger.result
index c197430ad25..d9b944d3849 100644
--- a/storage/tokudb/mysql-test/tokudb_bugs/r/db397_update_trigger.result
+++ b/storage/tokudb/mysql-test/tokudb_bugs/r/db397_update_trigger.result
@@ -25,11 +25,11 @@ select col1,action from t1_audit;
col1 action
0 DUMMY
1 BEFORE UPDATE
-select locks_dname,locks_key_left,locks_key_right from information_schema.tokudb_locks;
+select locks_dname,locks_key_left,locks_key_right from information_schema.tokudb_locks order by locks_dname,locks_key_left,locks_key_right;
locks_dname locks_key_left locks_key_right
-./test/t1_audit-main 0200000000000000 0200000000000000
-./test/t1-main ff01000000 0101000000
./test/t1-main 0001000000 0001000000
+./test/t1-main ff01000000 0101000000
+./test/t1_audit-main 0200000000000000 0200000000000000
commit;
drop trigger t1_trigger;
create trigger t1_trigger after update on t1
@@ -48,11 +48,11 @@ col1 action
0 DUMMY
1 BEFORE UPDATE
2 AFTER UPDATE
-select locks_dname,locks_key_left,locks_key_right from information_schema.tokudb_locks;
+select locks_dname,locks_key_left,locks_key_right from information_schema.tokudb_locks order by locks_dname,locks_key_left,locks_key_right;
locks_dname locks_key_left locks_key_right
-./test/t1_audit-main 0300000000000000 0300000000000000
-./test/t1-main ff02000000 0102000000
./test/t1-main 0002000000 0002000000
+./test/t1-main ff02000000 0102000000
+./test/t1_audit-main 0300000000000000 0300000000000000
commit;
drop trigger t1_trigger;
drop table t1, t1_audit;
diff --git a/storage/tokudb/mysql-test/tokudb_bugs/r/db739_replace.result b/storage/tokudb/mysql-test/tokudb_bugs/r/db739_replace.result
index 2bf141add9a..58a4ed6708a 100644
--- a/storage/tokudb/mysql-test/tokudb_bugs/r/db739_replace.result
+++ b/storage/tokudb/mysql-test/tokudb_bugs/r/db739_replace.result
@@ -100010,5 +100010,7 @@ insert into t (id,a) values (999,98);
insert into t (id,a) values (999,99);
delete from t where id=404;
set tokudb_pk_insert_mode=2;
+Warnings:
+Warning 131 Using tokudb_pk_insert_mode is deprecated and the parameter may be removed in future releases.
replace into t values (404,0,0,0);
drop table t;
diff --git a/storage/tokudb/mysql-test/tokudb_bugs/r/db756_card_part_hash.result b/storage/tokudb/mysql-test/tokudb_bugs/r/db756_card_part_hash.result
index 3ed5758bd15..70bc86e1abc 100644
--- a/storage/tokudb/mysql-test/tokudb_bugs/r/db756_card_part_hash.result
+++ b/storage/tokudb/mysql-test/tokudb_bugs/r/db756_card_part_hash.result
@@ -3,19 +3,19 @@ drop table if exists t;
create table t (id int, x int, primary key (id), key (x)) partition by hash(id) partitions 2;
show indexes from t;
Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
-t 0 PRIMARY 1 id A 0 NULL NULL BTREE
-t 1 x 1 x A NULL NULL NULL YES BTREE
+t 0 PRIMARY 1 id A 2 NULL NULL BTREE
+t 1 x 1 x A 2 NULL NULL YES BTREE
insert into t values (1,1),(3,1),(5,1);
insert into t values (2,1),(4,1),(6,1);
show indexes from t;
Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
t 0 PRIMARY 1 id A 6 NULL NULL BTREE
-t 1 x 1 x A NULL NULL NULL YES BTREE
+t 1 x 1 x A 6 NULL NULL YES BTREE
analyze table t;
Table Op Msg_type Msg_text
test.t analyze status OK
show indexes from t;
Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
t 0 PRIMARY 1 id A 6 NULL NULL BTREE
-t 1 x 1 x A 2 NULL NULL YES BTREE
+t 1 x 1 x A 6 NULL NULL YES BTREE
drop table t;
diff --git a/storage/tokudb/mysql-test/tokudb_bugs/r/db756_card_part_hash_1.result b/storage/tokudb/mysql-test/tokudb_bugs/r/db756_card_part_hash_1.result
index 776342ce90e..b6d9fd7da85 100644
--- a/storage/tokudb/mysql-test/tokudb_bugs/r/db756_card_part_hash_1.result
+++ b/storage/tokudb/mysql-test/tokudb_bugs/r/db756_card_part_hash_1.result
@@ -3,18 +3,18 @@ drop table if exists t;
create table t (id int, x int, primary key (id), key (x)) partition by hash(id) partitions 2;
show indexes from t;
Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
-t 0 PRIMARY 1 id A 0 NULL NULL BTREE
-t 1 x 1 x A NULL NULL NULL YES BTREE
+t 0 PRIMARY 1 id A 2 NULL NULL BTREE
+t 1 x 1 x A 2 NULL NULL YES BTREE
insert into t values (1,1),(3,1),(5,1);
show indexes from t;
Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
-t 0 PRIMARY 1 id A 3 NULL NULL BTREE
-t 1 x 1 x A NULL NULL NULL YES BTREE
+t 0 PRIMARY 1 id A 4 NULL NULL BTREE
+t 1 x 1 x A 4 NULL NULL YES BTREE
analyze table t;
Table Op Msg_type Msg_text
test.t analyze status OK
show indexes from t;
Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
-t 0 PRIMARY 1 id A 3 NULL NULL BTREE
-t 1 x 1 x A 1 NULL NULL YES BTREE
+t 0 PRIMARY 1 id A 4 NULL NULL BTREE
+t 1 x 1 x A 4 NULL NULL YES BTREE
drop table t;
diff --git a/storage/tokudb/mysql-test/tokudb_bugs/r/db756_card_part_hash_1_pick.result b/storage/tokudb/mysql-test/tokudb_bugs/r/db756_card_part_hash_1_pick.result
index 480978a545b..5ba5da21789 100644
--- a/storage/tokudb/mysql-test/tokudb_bugs/r/db756_card_part_hash_1_pick.result
+++ b/storage/tokudb/mysql-test/tokudb_bugs/r/db756_card_part_hash_1_pick.result
@@ -3,19 +3,19 @@ drop table if exists t;
create table t (id int, x int, primary key (id), key (x)) partition by hash(id) partitions 2;
show indexes from t;
Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
-t 0 PRIMARY 1 id A 0 NULL NULL BTREE
-t 1 x 1 x A NULL NULL NULL YES BTREE
+t 0 PRIMARY 1 id A 2 NULL NULL BTREE
+t 1 x 1 x A 2 NULL NULL YES BTREE
insert into t values (1,1),(3,2),(5,3);
insert into t values (2,1),(4,1),(6,1),(8,1);
show indexes from t;
Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
t 0 PRIMARY 1 id A 7 NULL NULL BTREE
-t 1 x 1 x A NULL NULL NULL YES BTREE
+t 1 x 1 x A 7 NULL NULL YES BTREE
analyze table t;
Table Op Msg_type Msg_text
test.t analyze status OK
show indexes from t;
Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
t 0 PRIMARY 1 id A 7 NULL NULL BTREE
-t 1 x 1 x A 1 NULL NULL YES BTREE
+t 1 x 1 x A 3 NULL NULL YES BTREE
drop table t;
diff --git a/storage/tokudb/mysql-test/tokudb_bugs/r/db756_card_part_hash_2.result b/storage/tokudb/mysql-test/tokudb_bugs/r/db756_card_part_hash_2.result
index ec2a37b02d4..6d345d98c95 100644
--- a/storage/tokudb/mysql-test/tokudb_bugs/r/db756_card_part_hash_2.result
+++ b/storage/tokudb/mysql-test/tokudb_bugs/r/db756_card_part_hash_2.result
@@ -3,18 +3,18 @@ drop table if exists t;
create table t (id int, x int, primary key (id), key (x)) partition by hash(id) partitions 2;
show indexes from t;
Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
-t 0 PRIMARY 1 id A 0 NULL NULL BTREE
-t 1 x 1 x A NULL NULL NULL YES BTREE
+t 0 PRIMARY 1 id A 2 NULL NULL BTREE
+t 1 x 1 x A 2 NULL NULL YES BTREE
insert into t values (2,1),(4,1),(6,1);
show indexes from t;
Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
-t 0 PRIMARY 1 id A 3 NULL NULL BTREE
-t 1 x 1 x A NULL NULL NULL YES BTREE
+t 0 PRIMARY 1 id A 4 NULL NULL BTREE
+t 1 x 1 x A 4 NULL NULL YES BTREE
analyze table t;
Table Op Msg_type Msg_text
test.t analyze status OK
show indexes from t;
Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
-t 0 PRIMARY 1 id A 3 NULL NULL BTREE
-t 1 x 1 x A 1 NULL NULL YES BTREE
+t 0 PRIMARY 1 id A 4 NULL NULL BTREE
+t 1 x 1 x A 4 NULL NULL YES BTREE
drop table t;
diff --git a/storage/tokudb/mysql-test/tokudb_bugs/r/db756_card_part_hash_2_pick.result b/storage/tokudb/mysql-test/tokudb_bugs/r/db756_card_part_hash_2_pick.result
index c1160ae27a0..06639c311cf 100644
--- a/storage/tokudb/mysql-test/tokudb_bugs/r/db756_card_part_hash_2_pick.result
+++ b/storage/tokudb/mysql-test/tokudb_bugs/r/db756_card_part_hash_2_pick.result
@@ -3,14 +3,14 @@ drop table if exists t;
create table t (id int, x int, primary key (id), key (x)) partition by hash(id) partitions 2;
show indexes from t;
Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
-t 0 PRIMARY 1 id A 0 NULL NULL BTREE
-t 1 x 1 x A NULL NULL NULL YES BTREE
+t 0 PRIMARY 1 id A 2 NULL NULL BTREE
+t 1 x 1 x A 2 NULL NULL YES BTREE
insert into t values (1,1),(3,2),(5,3),(7,4);
insert into t values (2,1),(4,1),(6,1);
show indexes from t;
Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
t 0 PRIMARY 1 id A 7 NULL NULL BTREE
-t 1 x 1 x A NULL NULL NULL YES BTREE
+t 1 x 1 x A 7 NULL NULL YES BTREE
analyze table t;
Table Op Msg_type Msg_text
test.t analyze status OK
diff --git a/storage/tokudb/mysql-test/tokudb_bugs/r/db757_part_alter_analyze.result b/storage/tokudb/mysql-test/tokudb_bugs/r/db757_part_alter_analyze.result
index 21ce7704928..62337802688 100644
--- a/storage/tokudb/mysql-test/tokudb_bugs/r/db757_part_alter_analyze.result
+++ b/storage/tokudb/mysql-test/tokudb_bugs/r/db757_part_alter_analyze.result
@@ -6,30 +6,30 @@ partition by range(id)
insert into t values (1,1,1),(2,1,2),(3,1,3),(4,1,4);
show indexes from t;
Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
-t 0 PRIMARY 1 id A 4 NULL NULL BTREE
-t 1 x 1 x A NULL NULL NULL YES BTREE
-t 1 y 1 y A NULL NULL NULL YES BTREE
+t 0 PRIMARY 1 id A 5 NULL NULL BTREE
+t 1 x 1 x A 5 NULL NULL YES BTREE
+t 1 y 1 y A 5 NULL NULL YES BTREE
alter table t analyze partition p0;
Table Op Msg_type Msg_text
test.t analyze status OK
show indexes from t;
Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
-t 0 PRIMARY 1 id A 4 NULL NULL BTREE
-t 1 x 1 x A 1 NULL NULL YES BTREE
-t 1 y 1 y A 4 NULL NULL YES BTREE
+t 0 PRIMARY 1 id A 5 NULL NULL BTREE
+t 1 x 1 x A 5 NULL NULL YES BTREE
+t 1 y 1 y A 5 NULL NULL YES BTREE
alter table t analyze partition p1;
Table Op Msg_type Msg_text
test.t analyze status OK
show indexes from t;
Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
-t 0 PRIMARY 1 id A 4 NULL NULL BTREE
-t 1 x 1 x A 1 NULL NULL YES BTREE
-t 1 y 1 y A 4 NULL NULL YES BTREE
+t 0 PRIMARY 1 id A 5 NULL NULL BTREE
+t 1 x 1 x A 5 NULL NULL YES BTREE
+t 1 y 1 y A 5 NULL NULL YES BTREE
insert into t values (100,1,1),(200,2,1),(300,3,1),(400,4,1),(500,5,1);
show indexes from t;
Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
t 0 PRIMARY 1 id A 9 NULL NULL BTREE
-t 1 x 1 x A 2 NULL NULL YES BTREE
+t 1 x 1 x A 9 NULL NULL YES BTREE
t 1 y 1 y A 9 NULL NULL YES BTREE
alter table t analyze partition p0;
Table Op Msg_type Msg_text
@@ -37,8 +37,8 @@ test.t analyze status OK
show indexes from t;
Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
t 0 PRIMARY 1 id A 9 NULL NULL BTREE
-t 1 x 1 x A NULL NULL NULL YES BTREE
-t 1 y 1 y A NULL NULL NULL YES BTREE
+t 1 x 1 x A 9 NULL NULL YES BTREE
+t 1 y 1 y A 9 NULL NULL YES BTREE
alter table t analyze partition p1;
Table Op Msg_type Msg_text
test.t analyze status OK
@@ -46,5 +46,5 @@ show indexes from t;
Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
t 0 PRIMARY 1 id A 9 NULL NULL BTREE
t 1 x 1 x A 9 NULL NULL YES BTREE
-t 1 y 1 y A 1 NULL NULL YES BTREE
+t 1 y 1 y A 9 NULL NULL YES BTREE
drop table t;
diff --git a/storage/tokudb/mysql-test/tokudb_bugs/r/db917.result b/storage/tokudb/mysql-test/tokudb_bugs/r/db917.result
new file mode 100644
index 00000000000..9c29033429e
--- /dev/null
+++ b/storage/tokudb/mysql-test/tokudb_bugs/r/db917.result
@@ -0,0 +1,14 @@
+drop table if exists t1;
+set @orig_table_open_cache = @@global.table_open_cache;
+create table t1(a int) engine = tokudb partition by key(a) partitions 2 (partition p0 engine = tokudb, partition p1 engine = tokudb);
+lock tables t1 read;
+set @@global.table_open_cache = 1;
+begin;
+insert into t1 values(1),(1);
+select * from t1 where c like _ucs2 0x039C0025 collate ucs2_unicode_ci;
+ERROR 42S22: Unknown column 'c' in 'where clause'
+create table t1(c1 binary (1), c2 varbinary(1));
+ERROR 42S01: Table 't1' already exists
+unlock tables;
+drop table t1;
+set @@global.table_open_cache = @orig_table_open_cache;
diff --git a/storage/tokudb/mysql-test/tokudb_bugs/r/db938.result b/storage/tokudb/mysql-test/tokudb_bugs/r/db938.result
new file mode 100644
index 00000000000..779d458221b
--- /dev/null
+++ b/storage/tokudb/mysql-test/tokudb_bugs/r/db938.result
@@ -0,0 +1,35 @@
+set @orig_auto_analyze = @@session.tokudb_auto_analyze;
+set @orig_in_background = @@session.tokudb_analyze_in_background;
+set @orig_mode = @@session.tokudb_analyze_mode;
+set @orig_throttle = @@session.tokudb_analyze_throttle;
+set @orig_time = @@session.tokudb_analyze_time;
+set @orig_scale_percent = @@global.tokudb_cardinality_scale_percent;
+set @orig_default_storage_engine = @@session.default_storage_engine;
+set @orig_pause_background_job_manager = @@global.tokudb_debug_pause_background_job_manager;
+set session default_storage_engine = 'tokudb';
+set session tokudb_auto_analyze = 1;
+set session tokudb_analyze_in_background = 1;
+set session tokudb_analyze_mode = tokudb_analyze_standard;
+set session tokudb_analyze_throttle = 0;
+set session tokudb_analyze_time = 0;
+set global tokudb_cardinality_scale_percent = DEFAULT;
+set global tokudb_debug_pause_background_job_manager = TRUE;
+create table t1 (a int not null auto_increment, b int, c int, primary key(a), key kb(b), key kc(c), key kabc(a,b,c), key kab(a,b), key kbc(b,c));
+insert into t1(b,c) values(0,0), (1,1), (2,2), (3,3);
+select database_name, table_name, job_type, job_params, scheduler from information_schema.tokudb_background_job_status;
+database_name table_name job_type job_params scheduler
+test t1 TOKUDB_ANALYZE_MODE_STANDARD TOKUDB_ANALYZE_DELETE_FRACTION=1.000000; TOKUDB_ANALYZE_TIME=0; TOKUDB_ANALYZE_THROTTLE=0; AUTO
+set DEBUG_SYNC = 'tokudb_after_truncate_all_dictionarys SIGNAL closed WAIT_FOR done';
+TRUNCATE TABLE t1;
+set global tokudb_debug_pause_background_job_manager = FALSE;
+set DEBUG_SYNC = 'now SIGNAL done';
+drop table t1;
+set session tokudb_auto_analyze = @orig_auto_analyze;
+set session tokudb_analyze_in_background = @orig_in_background;
+set session tokudb_analyze_mode = @orig_mode;
+set session tokudb_analyze_throttle = @orig_throttle;
+set session tokudb_analyze_time = @orig_time;
+set global tokudb_cardinality_scale_percent = @orig_scale_percent;
+set session default_storage_engine = @orig_default_storage_engine;
+set global tokudb_debug_pause_background_job_manager = @orig_pause_background_job_manager;
+set DEBUG_SYNC='reset';
diff --git a/storage/tokudb/mysql-test/tokudb_bugs/r/db945.result b/storage/tokudb/mysql-test/tokudb_bugs/r/db945.result
new file mode 100644
index 00000000000..6b3c239d602
--- /dev/null
+++ b/storage/tokudb/mysql-test/tokudb_bugs/r/db945.result
@@ -0,0 +1,15 @@
+set default_storage_engine='tokudb';
+drop table if exists t1;
+set session tokudb_auto_analyze = 1;
+set session tokudb_analyze_in_background = true;
+set session tokudb_analyze_mode = TOKUDB_ANALYZE_STANDARD;
+set session tokudb_analyze_throttle = 0;
+set session tokudb_analyze_time = 0;
+create table t1(a int, b text(1), c text(1), filler text(1), primary key(a, b(1)), unique key (a, c(1)));
+lock tables t1 write, t1 as a read, t1 as b read;
+insert into t1(a) values(1);
+Warnings:
+Warning 1364 Field 'b' doesn't have a default value
+alter table t1 drop key a;
+unlock tables;
+drop table t1;
diff --git a/storage/tokudb/mysql-test/tokudb_bugs/t/5585-master.opt b/storage/tokudb/mysql-test/tokudb_bugs/t/5585-master.opt
index 017432e797d..acad193fd76 100644
--- a/storage/tokudb/mysql-test/tokudb_bugs/t/5585-master.opt
+++ b/storage/tokudb/mysql-test/tokudb_bugs/t/5585-master.opt
@@ -1 +1 @@
---tokudb-cache-size=1000000000 --innodb-buffer-pool-size=1000000000
+--loose-tokudb-cache-size=1000000000 --innodb-buffer-pool-size=1000000000
diff --git a/storage/tokudb/mysql-test/tokudb_bugs/t/5585.test b/storage/tokudb/mysql-test/tokudb_bugs/t/5585.test
index bf0df681e7a..2489748dfa1 100644
--- a/storage/tokudb/mysql-test/tokudb_bugs/t/5585.test
+++ b/storage/tokudb/mysql-test/tokudb_bugs/t/5585.test
@@ -1,5 +1,6 @@
--source include/have_innodb.inc
--source include/have_tokudb.inc
+--source include/big_test.inc
--disable_warnings
drop table if exists t1, t2;
@@ -18,12 +19,6 @@ insert into t1 select t1.file_id+40, t1.file_number+40 from t1;
insert into t1 select t1.file_id+100, t1.file_number+100 from t1;
insert into t1 select t1.file_id+200, t1.file_number+200 from t1;
insert into t1 select t1.file_id+400, t1.file_number+400 from t1;
-insert into t1 select t1.file_id+1000, t1.file_number+1000 from t1;
-insert into t1 select t1.file_id+10000, t1.file_number+10000 from t1;
-insert into t1 select t1.file_id+100000, t1.file_number+100000 from t1;
-insert into t1 select t1.file_id+1000000, t1.file_number+1000000 from t1;
-insert into t1 select t1.file_id+10000000, t1.file_number+10000000 from t1;
-insert into t1 select t1.file_id+100000000, t1.file_number+100000000 from t1;
create table t2 (
file_id bigint unsigned not null,
@@ -64,4 +59,4 @@ select t1.file_id,
(select hits from t2 where t2.file_id = t1.file_id and t2.insert_ts = date(date_sub(now(),interval 1 day))) as d
from t1;
-drop table if exists t1, t2; \ No newline at end of file
+drop table if exists t1, t2;
diff --git a/storage/tokudb/mysql-test/tokudb_bugs/t/db233.test b/storage/tokudb/mysql-test/tokudb_bugs/t/db233.test
new file mode 100644
index 00000000000..8e4c3b73c09
--- /dev/null
+++ b/storage/tokudb/mysql-test/tokudb_bugs/t/db233.test
@@ -0,0 +1,70 @@
+# This test for DB-233 verifies that ICP descending range scans stop properly
+# once the scan fails to find a key match, instead of continuing all the way
+# to the beginning of the index.
+
+-- source include/have_tokudb.inc
+-- source include/have_debug.inc
+-- source include/have_debug_sync.inc
+
+-- enable_query_log
+
+SET SESSION tokudb_auto_analyze = 0;
+SET SESSION tokudb_analyze_in_background = 0;
+
+CREATE TABLE t1(
+ `id` int(10) unsigned NOT NULL,
+ `k` int(10) unsigned NOT NULL DEFAULT '0',
+ `c` char(120) NOT NULL DEFAULT '',
+ `pad` char(60) NOT NULL DEFAULT '',
+ KEY `xid` (`id`),
+ KEY `k` (`k`)
+) ENGINE=TokuDB DEFAULT CHARSET=latin1;
+
+INSERT INTO t1 VALUES(1, 1, '1', '1'), (2, 2, '2', '2'), (3, 3, '3', '3'), (4, 4, '4', '4'),
+(5, 5, '5', '5'), (6, 6, '6', '6'), (6, 6, '6', '6'), (7, 7, '7', '7'),
+(8, 8, '8', '8'), (9, 9, '9', '9'), (10, 10, '10', '10'), (11, 11, '11', '11');
+
+ANALYZE TABLE t1;
+
+# let's flip to another connection
+connect(conn1, localhost, root);
+
+# set up the DEBUG_SYNC point
+set DEBUG_SYNC = 'tokudb_icp_desc_scan_invalidate SIGNAL hit1 WAIT_FOR done1';
+
+# send the query
+send SELECT c FROM t1 WHERE id BETWEEN 5 AND 8 ORDER BY id DESC;
+
+# back to default connection
+connection default;
+
+# wait for the ICP reverse scan to invalidate
+set DEBUG_SYNC = 'now WAIT_FOR hit1';
+
+# let's release and clean up
+set DEBUG_SYNC = 'now SIGNAL done1';
+
+connection conn1;
+reap;
+
+# set up the DEBUG_SYNC point again, this time for the out-of-range scan
+set DEBUG_SYNC = 'tokudb_icp_asc_scan_out_of_range SIGNAL hit2 WAIT_FOR done2';
+
+# send the query
+send SELECT c FROM t1 WHERE id BETWEEN 5 AND 8 ORDER BY id ASC;
+
+# back to default connection
+connection default;
+
+# wait for the ICP forward scan to run out of range
+set DEBUG_SYNC = 'now WAIT_FOR hit2';
+
+# let's release and clean up
+set DEBUG_SYNC = 'now SIGNAL done2';
+
+connection conn1;
+reap;
+
+connection default;
+disconnect conn1;
+drop table t1;
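+
+# The send/reap + DEBUG_SYNC choreography above is the general idiom for
+# freezing a statement mid-flight; a minimal sketch with a hypothetical
+# sync point name 'my_point' (debug builds only):
+#
+#   connect(worker, localhost, root);
+#   set DEBUG_SYNC = 'my_point SIGNAL hit WAIT_FOR go';
+#   send SELECT ...;                   # worker parks inside 'my_point'
+#   connection default;
+#   set DEBUG_SYNC = 'now WAIT_FOR hit';
+#   # ... observe server state while the statement is suspended ...
+#   set DEBUG_SYNC = 'now SIGNAL go';  # release the worker
+#   connection worker;
+#   reap;                              # collect the suspended statement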
diff --git a/storage/tokudb/mysql-test/tokudb_bugs/t/db397_delete_trigger.test b/storage/tokudb/mysql-test/tokudb_bugs/t/db397_delete_trigger.test
index 00751ed2346..79043664607 100644
--- a/storage/tokudb/mysql-test/tokudb_bugs/t/db397_delete_trigger.test
+++ b/storage/tokudb/mysql-test/tokudb_bugs/t/db397_delete_trigger.test
@@ -28,7 +28,7 @@ start transaction;
delete from t1 where col1 = 1;
select col1,col2 from t1;
select col1,action from t1_audit;
-select locks_dname,locks_key_left,locks_key_right from information_schema.tokudb_locks;
+select locks_dname,locks_key_left,locks_key_right from information_schema.tokudb_locks order by locks_dname,locks_key_left,locks_key_right;
# note the locks on t1 and t1_audit
commit;
drop trigger t1_trigger;
@@ -41,10 +41,10 @@ start transaction;
delete from t1 where col1 = 2;
select col1,col2 from t1;
select col1,action from t1_audit;
-select locks_dname,locks_key_left,locks_key_right from information_schema.tokudb_locks;
+select locks_dname,locks_key_left,locks_key_right from information_schema.tokudb_locks order by locks_dname,locks_key_left,locks_key_right;
# note the locks on t1 and t1_audit
commit;
drop trigger t1_trigger;
drop table t1;
-drop table t1_audit; \ No newline at end of file
+drop table t1_audit;
diff --git a/storage/tokudb/mysql-test/tokudb_bugs/t/db397_insert_trigger.test b/storage/tokudb/mysql-test/tokudb_bugs/t/db397_insert_trigger.test
index f32496e524d..ffe2face9f2 100644
--- a/storage/tokudb/mysql-test/tokudb_bugs/t/db397_insert_trigger.test
+++ b/storage/tokudb/mysql-test/tokudb_bugs/t/db397_insert_trigger.test
@@ -27,7 +27,7 @@ start transaction;
insert into t1 (col1, col2) values (1,1);
select col1,col2 from t1;
select col1,action from t1_audit;
-select locks_dname,locks_key_left,locks_key_right from information_schema.tokudb_locks;
+select locks_dname,locks_key_left,locks_key_right from information_schema.tokudb_locks order by locks_dname,locks_key_left,locks_key_right;
# note the locks on t1 and t1_audit
commit;
drop trigger t1_trigger;
@@ -39,10 +39,10 @@ start transaction;
insert into t1 (col1, col2) values (2,2);
select col1,col2 from t1;
select col1,action from t1_audit;
-select locks_dname,locks_key_left,locks_key_right from information_schema.tokudb_locks;
+select locks_dname,locks_key_left,locks_key_right from information_schema.tokudb_locks order by locks_dname,locks_key_left,locks_key_right;
# note the locks on t1 and t1_audit
commit;
drop trigger t1_trigger;
drop table t1;
-drop table t1_audit; \ No newline at end of file
+drop table t1_audit;
diff --git a/storage/tokudb/mysql-test/tokudb_bugs/t/db397_update_trigger.test b/storage/tokudb/mysql-test/tokudb_bugs/t/db397_update_trigger.test
index f1407e6488a..063a88cb4ab 100644
--- a/storage/tokudb/mysql-test/tokudb_bugs/t/db397_update_trigger.test
+++ b/storage/tokudb/mysql-test/tokudb_bugs/t/db397_update_trigger.test
@@ -29,7 +29,7 @@ start transaction;
update t1 set col2=1000 where col1 = 1;
select col1,col2 from t1;
select col1,action from t1_audit;
-select locks_dname,locks_key_left,locks_key_right from information_schema.tokudb_locks;
+select locks_dname,locks_key_left,locks_key_right from information_schema.tokudb_locks order by locks_dname,locks_key_left,locks_key_right;
# check locks on t1 and t1_audit
commit;
drop trigger t1_trigger;
@@ -43,7 +43,7 @@ start transaction;
update t1 set col2=1001 where col1 = 2;
select col1,col2 from t1;
select col1,action from t1_audit;
-select locks_dname,locks_key_left,locks_key_right from information_schema.tokudb_locks;
+select locks_dname,locks_key_left,locks_key_right from information_schema.tokudb_locks order by locks_dname,locks_key_left,locks_key_right;
# check locks on t1 and t1_audit
commit;
drop trigger t1_trigger;
diff --git a/storage/tokudb/mysql-test/tokudb_bugs/t/db917.test b/storage/tokudb/mysql-test/tokudb_bugs/t/db917.test
new file mode 100644
index 00000000000..ae94d7b30de
--- /dev/null
+++ b/storage/tokudb/mysql-test/tokudb_bugs/t/db917.test
@@ -0,0 +1,23 @@
+# test DB-917
+# test that table/share open lock timeout does not crash the server on subsequent access
+source include/have_partition.inc;
+source include/have_tokudb.inc;
+disable_warnings;
+drop table if exists t1;
+enable_warnings;
+set @orig_table_open_cache = @@global.table_open_cache;
+create table t1(a int) engine = tokudb partition by key(a) partitions 2 (partition p0 engine = tokudb, partition p1 engine = tokudb);
+lock tables t1 read;
+set @@global.table_open_cache = 1;
+begin;
+insert into t1 values(1),(1);
+# when the bug is present, this results in a lock wait timeout
+--error ER_BAD_FIELD_ERROR
+select * from t1 where c like _ucs2 0x039C0025 collate ucs2_unicode_ci;
+# when the bug exists, this results in the assertion
+# kc_info->cp_info[keynr] == NULL in tokudb/ha_tokudb.cc initialize_col_pack_info
+--error ER_TABLE_EXISTS_ERROR
+create table t1(c1 binary (1), c2 varbinary(1));
+unlock tables;
+drop table t1;
+set @@global.table_open_cache = @orig_table_open_cache;
diff --git a/storage/tokudb/mysql-test/tokudb_bugs/t/db938.test b/storage/tokudb/mysql-test/tokudb_bugs/t/db938.test
new file mode 100644
index 00000000000..f56f93d1492
--- /dev/null
+++ b/storage/tokudb/mysql-test/tokudb_bugs/t/db938.test
@@ -0,0 +1,77 @@
+# This test for DB-938 covers a race condition where a scheduled background job
+# (analyze) ends up operating on a set of DB* key_file[] in TOKUDB_SHARE that
+# were set to NULL during a TRUNCATE TABLE operation.
+
+-- source include/have_tokudb.inc
+-- source include/have_debug.inc
+-- source include/have_debug_sync.inc
+
+-- enable_query_log
+
+set @orig_auto_analyze = @@session.tokudb_auto_analyze;
+set @orig_in_background = @@session.tokudb_analyze_in_background;
+set @orig_mode = @@session.tokudb_analyze_mode;
+set @orig_throttle = @@session.tokudb_analyze_throttle;
+set @orig_time = @@session.tokudb_analyze_time;
+set @orig_scale_percent = @@global.tokudb_cardinality_scale_percent;
+set @orig_default_storage_engine = @@session.default_storage_engine;
+set @orig_pause_background_job_manager = @@global.tokudb_debug_pause_background_job_manager;
+
+# first, let's set up to auto-analyze in the background on just about any activity
+set session default_storage_engine = 'tokudb';
+set session tokudb_auto_analyze = 1;
+set session tokudb_analyze_in_background = 1;
+set session tokudb_analyze_mode = tokudb_analyze_standard;
+set session tokudb_analyze_throttle = 0;
+set session tokudb_analyze_time = 0;
+set global tokudb_cardinality_scale_percent = DEFAULT;
+
+# in a debug build, we can prevent the background job manager from running;
+# do that here to hold the job back until we get the TRUNCATE TABLE
+# in flight
+set global tokudb_debug_pause_background_job_manager = TRUE;
+
+create table t1 (a int not null auto_increment, b int, c int, primary key(a), key kb(b), key kc(c), key kabc(a,b,c), key kab(a,b), key kbc(b,c));
+
+insert into t1(b,c) values(0,0), (1,1), (2,2), (3,3);
+
+# the insert above should have triggered an analyze, but since the bjm is
+# paused, we will see it sitting in the queue
+select database_name, table_name, job_type, job_params, scheduler from information_schema.tokudb_background_job_status;
+
+# let's flip to another connection
+connect(conn1, localhost, root);
+
+# set up the DEBUG_SYNC point
+set DEBUG_SYNC = 'tokudb_after_truncate_all_dictionarys SIGNAL closed WAIT_FOR done';
+
+# send the TRUNCATE TABLE
+send TRUNCATE TABLE t1;
+
+# back to default connection
+connection default;
+
+# release the bjm
+set global tokudb_debug_pause_background_job_manager = FALSE;
+
+# if the bug is present, the bjm should crash here within a quarter of a
+# second, so a 5 second sleep is more than enough to expose it
+sleep 5;
+
+# let's release and clean up
+set DEBUG_SYNC = 'now SIGNAL done';
+
+connection conn1;
+reap;
+connection default;
+disconnect conn1;
+drop table t1;
+
+set session tokudb_auto_analyze = @orig_auto_analyze;
+set session tokudb_analyze_in_background = @orig_in_background;
+set session tokudb_analyze_mode = @orig_mode;
+set session tokudb_analyze_throttle = @orig_throttle;
+set session tokudb_analyze_time = @orig_time;
+set global tokudb_cardinality_scale_percent = @orig_scale_percent;
+set session default_storage_engine = @orig_default_storage_engine;
+set global tokudb_debug_pause_background_job_manager = @orig_pause_background_job_manager;
+set DEBUG_SYNC='reset';
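+
+# Pausing the bjm as done above is a reusable debug idiom for pinning a job
+# in the queue; a minimal sketch (debug builds only, same variables as above):
+#
+#   set global tokudb_debug_pause_background_job_manager = TRUE;   # hold jobs
+#   # ... queue work, e.g. an insert with tokudb_auto_analyze = 1 ...
+#   select table_name, job_type
+#     from information_schema.tokudb_background_job_status;        # see it queued
+#   set global tokudb_debug_pause_background_job_manager = FALSE;  # drain the queue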
diff --git a/storage/tokudb/mysql-test/tokudb_bugs/t/db945.test b/storage/tokudb/mysql-test/tokudb_bugs/t/db945.test
new file mode 100644
index 00000000000..27b0d284484
--- /dev/null
+++ b/storage/tokudb/mysql-test/tokudb_bugs/t/db945.test
@@ -0,0 +1,24 @@
+source include/have_tokudb.inc;
+set default_storage_engine='tokudb';
+disable_warnings;
+drop table if exists t1;
+enable_warnings;
+
+set session tokudb_auto_analyze = 1;
+set session tokudb_analyze_in_background = true;
+set session tokudb_analyze_mode = TOKUDB_ANALYZE_STANDARD;
+set session tokudb_analyze_throttle = 0;
+set session tokudb_analyze_time = 0;
+
+create table t1(a int, b text(1), c text(1), filler text(1), primary key(a, b(1)), unique key (a, c(1)));
+lock tables t1 write, t1 as a read, t1 as b read;
+insert into t1(a) values(1);
+alter table t1 drop key a;
+unlock tables;
+
+# wait for the bjm queue to empty
+-- disable_query_log
+let $wait_condition=select count(*)=0 from information_schema.tokudb_background_job_status;
+-- source include/wait_condition.inc
+
+drop table t1;
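+
+# include/wait_condition.inc polls $wait_condition until it evaluates to true
+# or a timeout expires, which is what lets this test outwait the background
+# job manager deterministically; the same idiom with a hypothetical condition:
+#
+#   let $wait_condition=
+#     select count(*) = 0 from information_schema.tokudb_background_job_status
+#     where table_name = 't1';
+#   -- source include/wait_condition.inc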
diff --git a/storage/tokudb/mysql-test/tokudb_bugs/t/disabled.def b/storage/tokudb/mysql-test/tokudb_bugs/t/disabled.def
new file mode 100644
index 00000000000..8c755dde8f5
--- /dev/null
+++ b/storage/tokudb/mysql-test/tokudb_bugs/t/disabled.def
@@ -0,0 +1,16 @@
+2952: tokutek not_5_5
+4472 : #4521 MDL for alter table in 5.5 prohibits this test from having any chance of working
+fileops-2: tokutek not_5_5
+fileops-3: tokutek not_5_5
+fileops-4: tokutek not_5_5
+memcache_dirty: tokutek FB port
+xa-2: tokutek not_5_5
+tokudb_drop_part_table_668: upgrade from tokudb 6.6.8
+tokudb_drop_simple_table_668: upgrade from tokudb 6.6.8
+checkpoint_lock_2: test can not work when the checkpoint_safe_lock is a fair rwlock
+6053: tokudb is not the default storage engine
+1883: tokutek's auto inc singleton patch missing
+3083: no patch to find_shortest_key to prefer PK over CK
+db768 : https://tokutek.atlassian.net/browse/DB-768
+dict_leak_3518 : https://tokutek.atlassian.net/browse/DB-635
+1872 : https://tokutek.atlassian.net/browse/DB-750
diff --git a/storage/tokudb/mysql-test/tokudb_bugs/t/suite.opt b/storage/tokudb/mysql-test/tokudb_bugs/t/suite.opt
new file mode 100644
index 00000000000..23511b05020
--- /dev/null
+++ b/storage/tokudb/mysql-test/tokudb_bugs/t/suite.opt
@@ -0,0 +1 @@
+$TOKUDB_OPT $TOKUDB_LOAD_ADD --loose-tokudb-check-jemalloc=0
diff --git a/storage/tokudb/mysql-test/tokudb_bugs/t/xa-3.test b/storage/tokudb/mysql-test/tokudb_bugs/t/xa-3.test
index 828cf03d2fd..e19291991b6 100644
--- a/storage/tokudb/mysql-test/tokudb_bugs/t/xa-3.test
+++ b/storage/tokudb/mysql-test/tokudb_bugs/t/xa-3.test
@@ -1,6 +1,12 @@
-- source include/have_innodb.inc
-- source include/have_tokudb.inc
-- source include/have_debug.inc
+# Valgrind would report memory leaks on the intentional crashes
+-- source include/not_valgrind.inc
+# Embedded server does not support crashing
+-- source include/not_embedded.inc
+# Avoid CrashReporter popup on Mac
+-- source include/not_crashrep.inc
--disable_warnings
drop table if exists t1, t2;
diff --git a/storage/tokudb/mysql-test/tokudb_bugs/t/xa-4.test b/storage/tokudb/mysql-test/tokudb_bugs/t/xa-4.test
index 1642cb7ca1b..f1f96711b89 100644
--- a/storage/tokudb/mysql-test/tokudb_bugs/t/xa-4.test
+++ b/storage/tokudb/mysql-test/tokudb_bugs/t/xa-4.test
@@ -1,6 +1,12 @@
-- source include/have_innodb.inc
-- source include/have_tokudb.inc
-- source include/have_debug.inc
+# Valgrind would report memory leaks on the intentional crashes
+-- source include/not_valgrind.inc
+# Embedded server does not support crashing
+-- source include/not_embedded.inc
+# Avoid CrashReporter popup on Mac
+-- source include/not_crashrep.inc
--disable_warnings
drop table if exists t1, t2;
diff --git a/storage/tokudb/mysql-test/tokudb_bugs/t/xa-6.test b/storage/tokudb/mysql-test/tokudb_bugs/t/xa-6.test
index 312c2d15cd2..c6c11f633e1 100644
--- a/storage/tokudb/mysql-test/tokudb_bugs/t/xa-6.test
+++ b/storage/tokudb/mysql-test/tokudb_bugs/t/xa-6.test
@@ -1,5 +1,11 @@
--source include/have_tokudb.inc
--source include/have_debug.inc
+# Valgrind would report memory leaks on the intentional crashes
+-- source include/not_valgrind.inc
+# Embedded server does not support crashing
+-- source include/not_embedded.inc
+# Avoid CrashReporter popup on Mac
+-- source include/not_crashrep.inc
--disable_warnings
drop table if exists t1;
diff --git a/storage/tokudb/mysql-test/tokudb_mariadb/r/mdev5426.result b/storage/tokudb/mysql-test/tokudb_mariadb/r/mdev5426.result
index 625bb255755..086c4f4cc18 100644
--- a/storage/tokudb/mysql-test/tokudb_mariadb/r/mdev5426.result
+++ b/storage/tokudb/mysql-test/tokudb_mariadb/r/mdev5426.result
@@ -1,6 +1,6 @@
CREATE TABLE t1 (i INT) ENGINE=TokuDB;
EXPLAIN INSERT INTO t1 SELECT * FROM t1;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 ALL NULL NULL NULL NULL 0 Using temporary
+1 SIMPLE t1 ALL NULL NULL NULL NULL 1 Using temporary
INSERT INTO t1 SELECT * FROM t1;
DROP TABLE t1;
diff --git a/storage/tokudb/mysql-test/tokudb_mariadb/r/mrr.result b/storage/tokudb/mysql-test/tokudb_mariadb/r/mrr.result
index 02ede175ce1..50e5b1940b0 100644
--- a/storage/tokudb/mysql-test/tokudb_mariadb/r/mrr.result
+++ b/storage/tokudb/mysql-test/tokudb_mariadb/r/mrr.result
@@ -34,7 +34,7 @@ SELECT t3.task_id, t3.field FROM
t3,t2 WHERE t3.task_id=t2.task_id AND t2.type NOT IN (8,11);
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t2 index PRIMARY,ymtasks_type ymtasks_type 2 NULL 2 Using where; Using index
-1 SIMPLE t3 ref ymtasksoptions_task ymtasksoptions_task 4 test.t2.task_id 7
+1 SIMPLE t3 ref ymtasksoptions_task ymtasksoptions_task 4 test.t2.task_id 1
SELECT t3.task_id, t3.field FROM
t3,t2 WHERE t3.task_id=t2.task_id AND t2.type NOT IN (8,11);
task_id field
@@ -85,7 +85,7 @@ SELECT t3.task_id, t3.field FROM
t3,t2 WHERE t3.task_id=t2.task_id AND t2.type NOT IN (8,11);
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t2 index PRIMARY,ymtasks_type ymtasks_type 2 NULL 2 Using where; Using index
-1 SIMPLE t3 ref ymtasksoptions_task ymtasksoptions_task 4 test.t2.task_id 7 Using join buffer (flat, BKA join); Key-ordered Rowid-ordered scan
+1 SIMPLE t3 ref ymtasksoptions_task ymtasksoptions_task 4 test.t2.task_id 1 Using join buffer (flat, BKA join); Key-ordered Rowid-ordered scan
SELECT t3.task_id, t3.field FROM
t3,t2 WHERE t3.task_id=t2.task_id AND t2.type NOT IN (8,11);
task_id field
diff --git a/storage/tokudb/mysql-test/tokudb_parts/disabled.def b/storage/tokudb/mysql-test/tokudb_parts/disabled.def
index 582ca590166..3252a463176 100644
--- a/storage/tokudb/mysql-test/tokudb_parts/disabled.def
+++ b/storage/tokudb/mysql-test/tokudb_parts/disabled.def
@@ -1,6 +1,3 @@
-partition_basic_symlink_tokudb : tokudb_file_per_table is not supported
-partition_reorganize_tokudb : tokudb_file_per_table is not supported
-
partition_max_parts_hash_tokudb: 5.6 test not merged yet
partition_max_parts_inv_tokudb: 5.6 test not merged yet
partition_max_parts_key_tokudb: 5.6 test not merged yet
diff --git a/storage/tokudb/mysql-test/tokudb_parts/r/partition_alter4_tokudb.result b/storage/tokudb/mysql-test/tokudb_parts/r/partition_alter4_tokudb.result
index 644c4815a36..b4e8e47b7d9 100644
--- a/storage/tokudb/mysql-test/tokudb_parts/r/partition_alter4_tokudb.result
+++ b/storage/tokudb/mysql-test/tokudb_parts/r/partition_alter4_tokudb.result
@@ -7560,7 +7560,8 @@ SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
WHERE f_int1 BETWEEN 1 AND @max_row_div2 - 1;
ALTER TABLE t1 ANALYZE PARTITION part_1,part_2,part_5,part_6,part_10;
Table Op Msg_type Msg_text
-test.t1 analyze error Error in list of partitions to test.t1
+test.t1 analyze Error Error in list of partitions to test.t1
+test.t1 analyze status Operation failed
INSERT INTO t1(f_int1,f_int2,f_char1,f_char2,f_charbig)
SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
WHERE f_int1 BETWEEN @max_row_div2 AND @max_row;
@@ -8019,7 +8020,8 @@ SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
WHERE f_int1 BETWEEN 1 AND @max_row_div2 - 1;
ALTER TABLE t1 ANALYZE PARTITION part_1,part_2,part_5,part_6,part_10;
Table Op Msg_type Msg_text
-test.t1 analyze error Error in list of partitions to test.t1
+test.t1 analyze Error Error in list of partitions to test.t1
+test.t1 analyze status Operation failed
INSERT INTO t1(f_int1,f_int2,f_char1,f_char2,f_charbig)
SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
WHERE f_int1 BETWEEN @max_row_div2 AND @max_row;
@@ -8489,7 +8491,8 @@ SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
WHERE f_int1 BETWEEN 1 AND @max_row_div2 - 1;
ALTER TABLE t1 ANALYZE PARTITION part_1,part_2,part_5,part_6,part_10;
Table Op Msg_type Msg_text
-test.t1 analyze error Error in list of partitions to test.t1
+test.t1 analyze Error Error in list of partitions to test.t1
+test.t1 analyze status Operation failed
INSERT INTO t1(f_int1,f_int2,f_char1,f_char2,f_charbig)
SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
WHERE f_int1 BETWEEN @max_row_div2 AND @max_row;
@@ -8960,7 +8963,8 @@ SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
WHERE f_int1 BETWEEN 1 AND @max_row_div2 - 1;
ALTER TABLE t1 ANALYZE PARTITION part_1,part_2,part_5,part_6,part_10;
Table Op Msg_type Msg_text
-test.t1 analyze error Error in list of partitions to test.t1
+test.t1 analyze Error Error in list of partitions to test.t1
+test.t1 analyze status Operation failed
INSERT INTO t1(f_int1,f_int2,f_char1,f_char2,f_charbig)
SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
WHERE f_int1 BETWEEN @max_row_div2 AND @max_row;
@@ -9425,7 +9429,8 @@ SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
WHERE f_int1 BETWEEN 1 AND @max_row_div2 - 1;
ALTER TABLE t1 ANALYZE PARTITION part_1,part_2,part_5,part_6,part_10;
Table Op Msg_type Msg_text
-test.t1 analyze error Error in list of partitions to test.t1
+test.t1 analyze Error Error in list of partitions to test.t1
+test.t1 analyze status Operation failed
INSERT INTO t1(f_int1,f_int2,f_char1,f_char2,f_charbig)
SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
WHERE f_int1 BETWEEN @max_row_div2 AND @max_row;
@@ -9896,7 +9901,8 @@ SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
WHERE f_int1 BETWEEN 1 AND @max_row_div2 - 1;
ALTER TABLE t1 ANALYZE PARTITION part_1,part_2,part_5,part_6,part_10;
Table Op Msg_type Msg_text
-test.t1 analyze error Error in list of partitions to test.t1
+test.t1 analyze Error Error in list of partitions to test.t1
+test.t1 analyze status Operation failed
INSERT INTO t1(f_int1,f_int2,f_char1,f_char2,f_charbig)
SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
WHERE f_int1 BETWEEN @max_row_div2 AND @max_row;
@@ -10372,7 +10378,8 @@ SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
WHERE f_int1 BETWEEN 1 AND @max_row_div2 - 1;
ALTER TABLE t1 ANALYZE PARTITION part_1,part_2,part_5,part_6,part_10;
Table Op Msg_type Msg_text
-test.t1 analyze error Error in list of partitions to test.t1
+test.t1 analyze Error Error in list of partitions to test.t1
+test.t1 analyze status Operation failed
INSERT INTO t1(f_int1,f_int2,f_char1,f_char2,f_charbig)
SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
WHERE f_int1 BETWEEN @max_row_div2 AND @max_row;
@@ -10846,7 +10853,8 @@ SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
WHERE f_int1 BETWEEN 1 AND @max_row_div2 - 1;
ALTER TABLE t1 ANALYZE PARTITION part_1,part_2,part_5,part_6,part_10;
Table Op Msg_type Msg_text
-test.t1 analyze error Error in list of partitions to test.t1
+test.t1 analyze Error Error in list of partitions to test.t1
+test.t1 analyze status Operation failed
INSERT INTO t1(f_int1,f_int2,f_char1,f_char2,f_charbig)
SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
WHERE f_int1 BETWEEN @max_row_div2 AND @max_row;
@@ -11310,7 +11318,8 @@ SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
WHERE f_int1 BETWEEN 1 AND @max_row_div2 - 1;
ALTER TABLE t1 ANALYZE PARTITION part_1,part_1,part_1;
Table Op Msg_type Msg_text
-test.t1 analyze error Error in list of partitions to test.t1
+test.t1 analyze Error Error in list of partitions to test.t1
+test.t1 analyze status Operation failed
INSERT INTO t1(f_int1,f_int2,f_char1,f_char2,f_charbig)
SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
WHERE f_int1 BETWEEN @max_row_div2 AND @max_row;
@@ -11769,7 +11778,8 @@ SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
WHERE f_int1 BETWEEN 1 AND @max_row_div2 - 1;
ALTER TABLE t1 ANALYZE PARTITION part_1,part_1,part_1;
Table Op Msg_type Msg_text
-test.t1 analyze error Error in list of partitions to test.t1
+test.t1 analyze Error Error in list of partitions to test.t1
+test.t1 analyze status Operation failed
INSERT INTO t1(f_int1,f_int2,f_char1,f_char2,f_charbig)
SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
WHERE f_int1 BETWEEN @max_row_div2 AND @max_row;
@@ -12239,7 +12249,8 @@ SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
WHERE f_int1 BETWEEN 1 AND @max_row_div2 - 1;
ALTER TABLE t1 ANALYZE PARTITION part_1,part_1,part_1;
Table Op Msg_type Msg_text
-test.t1 analyze error Error in list of partitions to test.t1
+test.t1 analyze Error Error in list of partitions to test.t1
+test.t1 analyze status Operation failed
INSERT INTO t1(f_int1,f_int2,f_char1,f_char2,f_charbig)
SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
WHERE f_int1 BETWEEN @max_row_div2 AND @max_row;
@@ -12710,7 +12721,8 @@ SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
WHERE f_int1 BETWEEN 1 AND @max_row_div2 - 1;
ALTER TABLE t1 ANALYZE PARTITION part_1,part_1,part_1;
Table Op Msg_type Msg_text
-test.t1 analyze error Error in list of partitions to test.t1
+test.t1 analyze Error Error in list of partitions to test.t1
+test.t1 analyze status Operation failed
INSERT INTO t1(f_int1,f_int2,f_char1,f_char2,f_charbig)
SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
WHERE f_int1 BETWEEN @max_row_div2 AND @max_row;
@@ -13175,7 +13187,8 @@ SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
WHERE f_int1 BETWEEN 1 AND @max_row_div2 - 1;
ALTER TABLE t1 ANALYZE PARTITION part_1,part_1,part_1;
Table Op Msg_type Msg_text
-test.t1 analyze error Error in list of partitions to test.t1
+test.t1 analyze Error Error in list of partitions to test.t1
+test.t1 analyze status Operation failed
INSERT INTO t1(f_int1,f_int2,f_char1,f_char2,f_charbig)
SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
WHERE f_int1 BETWEEN @max_row_div2 AND @max_row;
@@ -13646,7 +13659,8 @@ SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
WHERE f_int1 BETWEEN 1 AND @max_row_div2 - 1;
ALTER TABLE t1 ANALYZE PARTITION part_1,part_1,part_1;
Table Op Msg_type Msg_text
-test.t1 analyze error Error in list of partitions to test.t1
+test.t1 analyze Error Error in list of partitions to test.t1
+test.t1 analyze status Operation failed
INSERT INTO t1(f_int1,f_int2,f_char1,f_char2,f_charbig)
SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
WHERE f_int1 BETWEEN @max_row_div2 AND @max_row;
@@ -14122,7 +14136,8 @@ SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
WHERE f_int1 BETWEEN 1 AND @max_row_div2 - 1;
ALTER TABLE t1 ANALYZE PARTITION part_1,part_1,part_1;
Table Op Msg_type Msg_text
-test.t1 analyze error Error in list of partitions to test.t1
+test.t1 analyze Error Error in list of partitions to test.t1
+test.t1 analyze status Operation failed
INSERT INTO t1(f_int1,f_int2,f_char1,f_char2,f_charbig)
SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
WHERE f_int1 BETWEEN @max_row_div2 AND @max_row;
@@ -14596,7 +14611,8 @@ SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
WHERE f_int1 BETWEEN 1 AND @max_row_div2 - 1;
ALTER TABLE t1 ANALYZE PARTITION part_1,part_1,part_1;
Table Op Msg_type Msg_text
-test.t1 analyze error Error in list of partitions to test.t1
+test.t1 analyze Error Error in list of partitions to test.t1
+test.t1 analyze status Operation failed
INSERT INTO t1(f_int1,f_int2,f_char1,f_char2,f_charbig)
SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
WHERE f_int1 BETWEEN @max_row_div2 AND @max_row;
@@ -26313,7 +26329,8 @@ SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
WHERE f_int1 BETWEEN 1 AND @max_row_div2 - 1;
ALTER TABLE t1 CHECK PARTITION part_1,part_2,part_5,part_6,part_10;
Table Op Msg_type Msg_text
-test.t1 check error Error in list of partitions to test.t1
+test.t1 check Error Error in list of partitions to test.t1
+test.t1 check status Operation failed
INSERT INTO t1(f_int1,f_int2,f_char1,f_char2,f_charbig)
SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
WHERE f_int1 BETWEEN @max_row_div2 AND @max_row;
@@ -26772,7 +26789,8 @@ SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
WHERE f_int1 BETWEEN 1 AND @max_row_div2 - 1;
ALTER TABLE t1 CHECK PARTITION part_1,part_2,part_5,part_6,part_10;
Table Op Msg_type Msg_text
-test.t1 check error Error in list of partitions to test.t1
+test.t1 check Error Error in list of partitions to test.t1
+test.t1 check status Operation failed
INSERT INTO t1(f_int1,f_int2,f_char1,f_char2,f_charbig)
SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
WHERE f_int1 BETWEEN @max_row_div2 AND @max_row;
@@ -27242,7 +27260,8 @@ SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
WHERE f_int1 BETWEEN 1 AND @max_row_div2 - 1;
ALTER TABLE t1 CHECK PARTITION part_1,part_2,part_5,part_6,part_10;
Table Op Msg_type Msg_text
-test.t1 check error Error in list of partitions to test.t1
+test.t1 check Error Error in list of partitions to test.t1
+test.t1 check status Operation failed
INSERT INTO t1(f_int1,f_int2,f_char1,f_char2,f_charbig)
SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
WHERE f_int1 BETWEEN @max_row_div2 AND @max_row;
@@ -27713,7 +27732,8 @@ SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
WHERE f_int1 BETWEEN 1 AND @max_row_div2 - 1;
ALTER TABLE t1 CHECK PARTITION part_1,part_2,part_5,part_6,part_10;
Table Op Msg_type Msg_text
-test.t1 check error Error in list of partitions to test.t1
+test.t1 check Error Error in list of partitions to test.t1
+test.t1 check status Operation failed
INSERT INTO t1(f_int1,f_int2,f_char1,f_char2,f_charbig)
SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
WHERE f_int1 BETWEEN @max_row_div2 AND @max_row;
@@ -28178,7 +28198,8 @@ SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
WHERE f_int1 BETWEEN 1 AND @max_row_div2 - 1;
ALTER TABLE t1 CHECK PARTITION part_1,part_2,part_5,part_6,part_10;
Table Op Msg_type Msg_text
-test.t1 check error Error in list of partitions to test.t1
+test.t1 check Error Error in list of partitions to test.t1
+test.t1 check status Operation failed
INSERT INTO t1(f_int1,f_int2,f_char1,f_char2,f_charbig)
SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
WHERE f_int1 BETWEEN @max_row_div2 AND @max_row;
@@ -28649,7 +28670,8 @@ SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
WHERE f_int1 BETWEEN 1 AND @max_row_div2 - 1;
ALTER TABLE t1 CHECK PARTITION part_1,part_2,part_5,part_6,part_10;
Table Op Msg_type Msg_text
-test.t1 check error Error in list of partitions to test.t1
+test.t1 check Error Error in list of partitions to test.t1
+test.t1 check status Operation failed
INSERT INTO t1(f_int1,f_int2,f_char1,f_char2,f_charbig)
SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
WHERE f_int1 BETWEEN @max_row_div2 AND @max_row;
@@ -29125,7 +29147,8 @@ SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
WHERE f_int1 BETWEEN 1 AND @max_row_div2 - 1;
ALTER TABLE t1 CHECK PARTITION part_1,part_2,part_5,part_6,part_10;
Table Op Msg_type Msg_text
-test.t1 check error Error in list of partitions to test.t1
+test.t1 check Error Error in list of partitions to test.t1
+test.t1 check status Operation failed
INSERT INTO t1(f_int1,f_int2,f_char1,f_char2,f_charbig)
SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
WHERE f_int1 BETWEEN @max_row_div2 AND @max_row;
@@ -29599,7 +29622,8 @@ SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
WHERE f_int1 BETWEEN 1 AND @max_row_div2 - 1;
ALTER TABLE t1 CHECK PARTITION part_1,part_2,part_5,part_6,part_10;
Table Op Msg_type Msg_text
-test.t1 check error Error in list of partitions to test.t1
+test.t1 check Error Error in list of partitions to test.t1
+test.t1 check status Operation failed
INSERT INTO t1(f_int1,f_int2,f_char1,f_char2,f_charbig)
SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
WHERE f_int1 BETWEEN @max_row_div2 AND @max_row;
@@ -30063,7 +30087,8 @@ SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
WHERE f_int1 BETWEEN 1 AND @max_row_div2 - 1;
ALTER TABLE t1 CHECK PARTITION part_1,part_1,part_1;
Table Op Msg_type Msg_text
-test.t1 check error Error in list of partitions to test.t1
+test.t1 check Error Error in list of partitions to test.t1
+test.t1 check status Operation failed
INSERT INTO t1(f_int1,f_int2,f_char1,f_char2,f_charbig)
SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
WHERE f_int1 BETWEEN @max_row_div2 AND @max_row;
@@ -30522,7 +30547,8 @@ SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
WHERE f_int1 BETWEEN 1 AND @max_row_div2 - 1;
ALTER TABLE t1 CHECK PARTITION part_1,part_1,part_1;
Table Op Msg_type Msg_text
-test.t1 check error Error in list of partitions to test.t1
+test.t1 check Error Error in list of partitions to test.t1
+test.t1 check status Operation failed
INSERT INTO t1(f_int1,f_int2,f_char1,f_char2,f_charbig)
SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
WHERE f_int1 BETWEEN @max_row_div2 AND @max_row;
@@ -30992,7 +31018,8 @@ SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
WHERE f_int1 BETWEEN 1 AND @max_row_div2 - 1;
ALTER TABLE t1 CHECK PARTITION part_1,part_1,part_1;
Table Op Msg_type Msg_text
-test.t1 check error Error in list of partitions to test.t1
+test.t1 check Error Error in list of partitions to test.t1
+test.t1 check status Operation failed
INSERT INTO t1(f_int1,f_int2,f_char1,f_char2,f_charbig)
SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
WHERE f_int1 BETWEEN @max_row_div2 AND @max_row;
@@ -31463,7 +31490,8 @@ SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
WHERE f_int1 BETWEEN 1 AND @max_row_div2 - 1;
ALTER TABLE t1 CHECK PARTITION part_1,part_1,part_1;
Table Op Msg_type Msg_text
-test.t1 check error Error in list of partitions to test.t1
+test.t1 check Error Error in list of partitions to test.t1
+test.t1 check status Operation failed
INSERT INTO t1(f_int1,f_int2,f_char1,f_char2,f_charbig)
SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
WHERE f_int1 BETWEEN @max_row_div2 AND @max_row;
@@ -31928,7 +31956,8 @@ SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
WHERE f_int1 BETWEEN 1 AND @max_row_div2 - 1;
ALTER TABLE t1 CHECK PARTITION part_1,part_1,part_1;
Table Op Msg_type Msg_text
-test.t1 check error Error in list of partitions to test.t1
+test.t1 check Error Error in list of partitions to test.t1
+test.t1 check status Operation failed
INSERT INTO t1(f_int1,f_int2,f_char1,f_char2,f_charbig)
SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
WHERE f_int1 BETWEEN @max_row_div2 AND @max_row;
@@ -32399,7 +32428,8 @@ SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
WHERE f_int1 BETWEEN 1 AND @max_row_div2 - 1;
ALTER TABLE t1 CHECK PARTITION part_1,part_1,part_1;
Table Op Msg_type Msg_text
-test.t1 check error Error in list of partitions to test.t1
+test.t1 check Error Error in list of partitions to test.t1
+test.t1 check status Operation failed
INSERT INTO t1(f_int1,f_int2,f_char1,f_char2,f_charbig)
SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
WHERE f_int1 BETWEEN @max_row_div2 AND @max_row;
@@ -32875,7 +32905,8 @@ SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
WHERE f_int1 BETWEEN 1 AND @max_row_div2 - 1;
ALTER TABLE t1 CHECK PARTITION part_1,part_1,part_1;
Table Op Msg_type Msg_text
-test.t1 check error Error in list of partitions to test.t1
+test.t1 check Error Error in list of partitions to test.t1
+test.t1 check status Operation failed
INSERT INTO t1(f_int1,f_int2,f_char1,f_char2,f_charbig)
SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
WHERE f_int1 BETWEEN @max_row_div2 AND @max_row;
@@ -33349,7 +33380,8 @@ SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
WHERE f_int1 BETWEEN 1 AND @max_row_div2 - 1;
ALTER TABLE t1 CHECK PARTITION part_1,part_1,part_1;
Table Op Msg_type Msg_text
-test.t1 check error Error in list of partitions to test.t1
+test.t1 check Error Error in list of partitions to test.t1
+test.t1 check status Operation failed
INSERT INTO t1(f_int1,f_int2,f_char1,f_char2,f_charbig)
SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
WHERE f_int1 BETWEEN @max_row_div2 AND @max_row;
@@ -45082,7 +45114,8 @@ SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
WHERE f_int1 BETWEEN 1 AND @max_row_div2 - 1;
ALTER TABLE t1 OPTIMIZE PARTITION part_1,part_2,part_5,part_6,part_10;
Table Op Msg_type Msg_text
-test.t1 optimize error Error in list of partitions to test.t1
+test.t1 optimize Error Error in list of partitions to test.t1
+test.t1 optimize status Operation failed
INSERT INTO t1(f_int1,f_int2,f_char1,f_char2,f_charbig)
SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
WHERE f_int1 BETWEEN @max_row_div2 AND @max_row;
@@ -45541,7 +45574,8 @@ SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
WHERE f_int1 BETWEEN 1 AND @max_row_div2 - 1;
ALTER TABLE t1 OPTIMIZE PARTITION part_1,part_2,part_5,part_6,part_10;
Table Op Msg_type Msg_text
-test.t1 optimize error Error in list of partitions to test.t1
+test.t1 optimize Error Error in list of partitions to test.t1
+test.t1 optimize status Operation failed
INSERT INTO t1(f_int1,f_int2,f_char1,f_char2,f_charbig)
SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
WHERE f_int1 BETWEEN @max_row_div2 AND @max_row;
@@ -46011,7 +46045,8 @@ SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
WHERE f_int1 BETWEEN 1 AND @max_row_div2 - 1;
ALTER TABLE t1 OPTIMIZE PARTITION part_1,part_2,part_5,part_6,part_10;
Table Op Msg_type Msg_text
-test.t1 optimize error Error in list of partitions to test.t1
+test.t1 optimize Error Error in list of partitions to test.t1
+test.t1 optimize status Operation failed
INSERT INTO t1(f_int1,f_int2,f_char1,f_char2,f_charbig)
SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
WHERE f_int1 BETWEEN @max_row_div2 AND @max_row;
@@ -46482,7 +46517,8 @@ SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
WHERE f_int1 BETWEEN 1 AND @max_row_div2 - 1;
ALTER TABLE t1 OPTIMIZE PARTITION part_1,part_2,part_5,part_6,part_10;
Table Op Msg_type Msg_text
-test.t1 optimize error Error in list of partitions to test.t1
+test.t1 optimize Error Error in list of partitions to test.t1
+test.t1 optimize status Operation failed
INSERT INTO t1(f_int1,f_int2,f_char1,f_char2,f_charbig)
SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
WHERE f_int1 BETWEEN @max_row_div2 AND @max_row;
@@ -46947,7 +46983,8 @@ SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
WHERE f_int1 BETWEEN 1 AND @max_row_div2 - 1;
ALTER TABLE t1 OPTIMIZE PARTITION part_1,part_2,part_5,part_6,part_10;
Table Op Msg_type Msg_text
-test.t1 optimize error Error in list of partitions to test.t1
+test.t1 optimize Error Error in list of partitions to test.t1
+test.t1 optimize status Operation failed
INSERT INTO t1(f_int1,f_int2,f_char1,f_char2,f_charbig)
SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
WHERE f_int1 BETWEEN @max_row_div2 AND @max_row;
@@ -47418,7 +47455,8 @@ SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
WHERE f_int1 BETWEEN 1 AND @max_row_div2 - 1;
ALTER TABLE t1 OPTIMIZE PARTITION part_1,part_2,part_5,part_6,part_10;
Table Op Msg_type Msg_text
-test.t1 optimize error Error in list of partitions to test.t1
+test.t1 optimize Error Error in list of partitions to test.t1
+test.t1 optimize status Operation failed
INSERT INTO t1(f_int1,f_int2,f_char1,f_char2,f_charbig)
SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
WHERE f_int1 BETWEEN @max_row_div2 AND @max_row;
@@ -47894,7 +47932,8 @@ SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
WHERE f_int1 BETWEEN 1 AND @max_row_div2 - 1;
ALTER TABLE t1 OPTIMIZE PARTITION part_1,part_2,part_5,part_6,part_10;
Table Op Msg_type Msg_text
-test.t1 optimize error Error in list of partitions to test.t1
+test.t1 optimize Error Error in list of partitions to test.t1
+test.t1 optimize status Operation failed
INSERT INTO t1(f_int1,f_int2,f_char1,f_char2,f_charbig)
SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
WHERE f_int1 BETWEEN @max_row_div2 AND @max_row;
@@ -48368,7 +48407,8 @@ SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
WHERE f_int1 BETWEEN 1 AND @max_row_div2 - 1;
ALTER TABLE t1 OPTIMIZE PARTITION part_1,part_2,part_5,part_6,part_10;
Table Op Msg_type Msg_text
-test.t1 optimize error Error in list of partitions to test.t1
+test.t1 optimize Error Error in list of partitions to test.t1
+test.t1 optimize status Operation failed
INSERT INTO t1(f_int1,f_int2,f_char1,f_char2,f_charbig)
SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
WHERE f_int1 BETWEEN @max_row_div2 AND @max_row;
@@ -48832,7 +48872,8 @@ SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
WHERE f_int1 BETWEEN 1 AND @max_row_div2 - 1;
ALTER TABLE t1 OPTIMIZE PARTITION part_1,part_1,part_1;
Table Op Msg_type Msg_text
-test.t1 optimize error Error in list of partitions to test.t1
+test.t1 optimize Error Error in list of partitions to test.t1
+test.t1 optimize status Operation failed
INSERT INTO t1(f_int1,f_int2,f_char1,f_char2,f_charbig)
SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
WHERE f_int1 BETWEEN @max_row_div2 AND @max_row;
@@ -49291,7 +49332,8 @@ SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
WHERE f_int1 BETWEEN 1 AND @max_row_div2 - 1;
ALTER TABLE t1 OPTIMIZE PARTITION part_1,part_1,part_1;
Table Op Msg_type Msg_text
-test.t1 optimize error Error in list of partitions to test.t1
+test.t1 optimize Error Error in list of partitions to test.t1
+test.t1 optimize status Operation failed
INSERT INTO t1(f_int1,f_int2,f_char1,f_char2,f_charbig)
SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
WHERE f_int1 BETWEEN @max_row_div2 AND @max_row;
@@ -49761,7 +49803,8 @@ SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
WHERE f_int1 BETWEEN 1 AND @max_row_div2 - 1;
ALTER TABLE t1 OPTIMIZE PARTITION part_1,part_1,part_1;
Table Op Msg_type Msg_text
-test.t1 optimize error Error in list of partitions to test.t1
+test.t1 optimize Error Error in list of partitions to test.t1
+test.t1 optimize status Operation failed
INSERT INTO t1(f_int1,f_int2,f_char1,f_char2,f_charbig)
SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
WHERE f_int1 BETWEEN @max_row_div2 AND @max_row;
@@ -50232,7 +50275,8 @@ SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
WHERE f_int1 BETWEEN 1 AND @max_row_div2 - 1;
ALTER TABLE t1 OPTIMIZE PARTITION part_1,part_1,part_1;
Table Op Msg_type Msg_text
-test.t1 optimize error Error in list of partitions to test.t1
+test.t1 optimize Error Error in list of partitions to test.t1
+test.t1 optimize status Operation failed
INSERT INTO t1(f_int1,f_int2,f_char1,f_char2,f_charbig)
SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
WHERE f_int1 BETWEEN @max_row_div2 AND @max_row;
@@ -50697,7 +50741,8 @@ SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
WHERE f_int1 BETWEEN 1 AND @max_row_div2 - 1;
ALTER TABLE t1 OPTIMIZE PARTITION part_1,part_1,part_1;
Table Op Msg_type Msg_text
-test.t1 optimize error Error in list of partitions to test.t1
+test.t1 optimize Error Error in list of partitions to test.t1
+test.t1 optimize status Operation failed
INSERT INTO t1(f_int1,f_int2,f_char1,f_char2,f_charbig)
SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
WHERE f_int1 BETWEEN @max_row_div2 AND @max_row;
@@ -51168,7 +51213,8 @@ SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
WHERE f_int1 BETWEEN 1 AND @max_row_div2 - 1;
ALTER TABLE t1 OPTIMIZE PARTITION part_1,part_1,part_1;
Table Op Msg_type Msg_text
-test.t1 optimize error Error in list of partitions to test.t1
+test.t1 optimize Error Error in list of partitions to test.t1
+test.t1 optimize status Operation failed
INSERT INTO t1(f_int1,f_int2,f_char1,f_char2,f_charbig)
SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
WHERE f_int1 BETWEEN @max_row_div2 AND @max_row;
@@ -51644,7 +51690,8 @@ SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
WHERE f_int1 BETWEEN 1 AND @max_row_div2 - 1;
ALTER TABLE t1 OPTIMIZE PARTITION part_1,part_1,part_1;
Table Op Msg_type Msg_text
-test.t1 optimize error Error in list of partitions to test.t1
+test.t1 optimize Error Error in list of partitions to test.t1
+test.t1 optimize status Operation failed
INSERT INTO t1(f_int1,f_int2,f_char1,f_char2,f_charbig)
SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
WHERE f_int1 BETWEEN @max_row_div2 AND @max_row;
@@ -52118,7 +52165,8 @@ SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
WHERE f_int1 BETWEEN 1 AND @max_row_div2 - 1;
ALTER TABLE t1 OPTIMIZE PARTITION part_1,part_1,part_1;
Table Op Msg_type Msg_text
-test.t1 optimize error Error in list of partitions to test.t1
+test.t1 optimize Error Error in list of partitions to test.t1
+test.t1 optimize status Operation failed
INSERT INTO t1(f_int1,f_int2,f_char1,f_char2,f_charbig)
SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
WHERE f_int1 BETWEEN @max_row_div2 AND @max_row;
@@ -75368,7 +75416,8 @@ SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
WHERE f_int1 BETWEEN 1 AND @max_row_div2 - 1;
ALTER TABLE t1 REPAIR PARTITION part_1,part_2,part_5,part_6,part_10;
Table Op Msg_type Msg_text
-test.t1 repair error Error in list of partitions to test.t1
+test.t1 repair Error Error in list of partitions to test.t1
+test.t1 repair status Operation failed
INSERT INTO t1(f_int1,f_int2,f_char1,f_char2,f_charbig)
SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
WHERE f_int1 BETWEEN @max_row_div2 AND @max_row;
@@ -75827,7 +75876,8 @@ SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
WHERE f_int1 BETWEEN 1 AND @max_row_div2 - 1;
ALTER TABLE t1 REPAIR PARTITION part_1,part_2,part_5,part_6,part_10;
Table Op Msg_type Msg_text
-test.t1 repair error Error in list of partitions to test.t1
+test.t1 repair Error Error in list of partitions to test.t1
+test.t1 repair status Operation failed
INSERT INTO t1(f_int1,f_int2,f_char1,f_char2,f_charbig)
SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
WHERE f_int1 BETWEEN @max_row_div2 AND @max_row;
@@ -76297,7 +76347,8 @@ SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
WHERE f_int1 BETWEEN 1 AND @max_row_div2 - 1;
ALTER TABLE t1 REPAIR PARTITION part_1,part_2,part_5,part_6,part_10;
Table Op Msg_type Msg_text
-test.t1 repair error Error in list of partitions to test.t1
+test.t1 repair Error Error in list of partitions to test.t1
+test.t1 repair status Operation failed
INSERT INTO t1(f_int1,f_int2,f_char1,f_char2,f_charbig)
SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
WHERE f_int1 BETWEEN @max_row_div2 AND @max_row;
@@ -76768,7 +76819,8 @@ SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
WHERE f_int1 BETWEEN 1 AND @max_row_div2 - 1;
ALTER TABLE t1 REPAIR PARTITION part_1,part_2,part_5,part_6,part_10;
Table Op Msg_type Msg_text
-test.t1 repair error Error in list of partitions to test.t1
+test.t1 repair Error Error in list of partitions to test.t1
+test.t1 repair status Operation failed
INSERT INTO t1(f_int1,f_int2,f_char1,f_char2,f_charbig)
SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
WHERE f_int1 BETWEEN @max_row_div2 AND @max_row;
@@ -77233,7 +77285,8 @@ SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
WHERE f_int1 BETWEEN 1 AND @max_row_div2 - 1;
ALTER TABLE t1 REPAIR PARTITION part_1,part_2,part_5,part_6,part_10;
Table Op Msg_type Msg_text
-test.t1 repair error Error in list of partitions to test.t1
+test.t1 repair Error Error in list of partitions to test.t1
+test.t1 repair status Operation failed
INSERT INTO t1(f_int1,f_int2,f_char1,f_char2,f_charbig)
SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
WHERE f_int1 BETWEEN @max_row_div2 AND @max_row;
@@ -77704,7 +77757,8 @@ SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
WHERE f_int1 BETWEEN 1 AND @max_row_div2 - 1;
ALTER TABLE t1 REPAIR PARTITION part_1,part_2,part_5,part_6,part_10;
Table Op Msg_type Msg_text
-test.t1 repair error Error in list of partitions to test.t1
+test.t1 repair Error Error in list of partitions to test.t1
+test.t1 repair status Operation failed
INSERT INTO t1(f_int1,f_int2,f_char1,f_char2,f_charbig)
SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
WHERE f_int1 BETWEEN @max_row_div2 AND @max_row;
@@ -78180,7 +78234,8 @@ SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
WHERE f_int1 BETWEEN 1 AND @max_row_div2 - 1;
ALTER TABLE t1 REPAIR PARTITION part_1,part_2,part_5,part_6,part_10;
Table Op Msg_type Msg_text
-test.t1 repair error Error in list of partitions to test.t1
+test.t1 repair Error Error in list of partitions to test.t1
+test.t1 repair status Operation failed
INSERT INTO t1(f_int1,f_int2,f_char1,f_char2,f_charbig)
SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
WHERE f_int1 BETWEEN @max_row_div2 AND @max_row;
@@ -78654,7 +78709,8 @@ SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
WHERE f_int1 BETWEEN 1 AND @max_row_div2 - 1;
ALTER TABLE t1 REPAIR PARTITION part_1,part_2,part_5,part_6,part_10;
Table Op Msg_type Msg_text
-test.t1 repair error Error in list of partitions to test.t1
+test.t1 repair Error Error in list of partitions to test.t1
+test.t1 repair status Operation failed
INSERT INTO t1(f_int1,f_int2,f_char1,f_char2,f_charbig)
SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
WHERE f_int1 BETWEEN @max_row_div2 AND @max_row;
@@ -79118,7 +79174,8 @@ SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
WHERE f_int1 BETWEEN 1 AND @max_row_div2 - 1;
ALTER TABLE t1 REPAIR PARTITION part_1,part_1,part_1;
Table Op Msg_type Msg_text
-test.t1 repair error Error in list of partitions to test.t1
+test.t1 repair Error Error in list of partitions to test.t1
+test.t1 repair status Operation failed
INSERT INTO t1(f_int1,f_int2,f_char1,f_char2,f_charbig)
SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
WHERE f_int1 BETWEEN @max_row_div2 AND @max_row;
@@ -79577,7 +79634,8 @@ SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
WHERE f_int1 BETWEEN 1 AND @max_row_div2 - 1;
ALTER TABLE t1 REPAIR PARTITION part_1,part_1,part_1;
Table Op Msg_type Msg_text
-test.t1 repair error Error in list of partitions to test.t1
+test.t1 repair Error Error in list of partitions to test.t1
+test.t1 repair status Operation failed
INSERT INTO t1(f_int1,f_int2,f_char1,f_char2,f_charbig)
SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
WHERE f_int1 BETWEEN @max_row_div2 AND @max_row;
@@ -80047,7 +80105,8 @@ SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
WHERE f_int1 BETWEEN 1 AND @max_row_div2 - 1;
ALTER TABLE t1 REPAIR PARTITION part_1,part_1,part_1;
Table Op Msg_type Msg_text
-test.t1 repair error Error in list of partitions to test.t1
+test.t1 repair Error Error in list of partitions to test.t1
+test.t1 repair status Operation failed
INSERT INTO t1(f_int1,f_int2,f_char1,f_char2,f_charbig)
SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
WHERE f_int1 BETWEEN @max_row_div2 AND @max_row;
@@ -80518,7 +80577,8 @@ SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
WHERE f_int1 BETWEEN 1 AND @max_row_div2 - 1;
ALTER TABLE t1 REPAIR PARTITION part_1,part_1,part_1;
Table Op Msg_type Msg_text
-test.t1 repair error Error in list of partitions to test.t1
+test.t1 repair Error Error in list of partitions to test.t1
+test.t1 repair status Operation failed
INSERT INTO t1(f_int1,f_int2,f_char1,f_char2,f_charbig)
SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
WHERE f_int1 BETWEEN @max_row_div2 AND @max_row;
@@ -80983,7 +81043,8 @@ SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
WHERE f_int1 BETWEEN 1 AND @max_row_div2 - 1;
ALTER TABLE t1 REPAIR PARTITION part_1,part_1,part_1;
Table Op Msg_type Msg_text
-test.t1 repair error Error in list of partitions to test.t1
+test.t1 repair Error Error in list of partitions to test.t1
+test.t1 repair status Operation failed
INSERT INTO t1(f_int1,f_int2,f_char1,f_char2,f_charbig)
SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
WHERE f_int1 BETWEEN @max_row_div2 AND @max_row;
@@ -81454,7 +81515,8 @@ SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
WHERE f_int1 BETWEEN 1 AND @max_row_div2 - 1;
ALTER TABLE t1 REPAIR PARTITION part_1,part_1,part_1;
Table Op Msg_type Msg_text
-test.t1 repair error Error in list of partitions to test.t1
+test.t1 repair Error Error in list of partitions to test.t1
+test.t1 repair status Operation failed
INSERT INTO t1(f_int1,f_int2,f_char1,f_char2,f_charbig)
SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
WHERE f_int1 BETWEEN @max_row_div2 AND @max_row;
@@ -81930,7 +81992,8 @@ SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
WHERE f_int1 BETWEEN 1 AND @max_row_div2 - 1;
ALTER TABLE t1 REPAIR PARTITION part_1,part_1,part_1;
Table Op Msg_type Msg_text
-test.t1 repair error Error in list of partitions to test.t1
+test.t1 repair Error Error in list of partitions to test.t1
+test.t1 repair status Operation failed
INSERT INTO t1(f_int1,f_int2,f_char1,f_char2,f_charbig)
SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
WHERE f_int1 BETWEEN @max_row_div2 AND @max_row;
@@ -82404,7 +82467,8 @@ SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
WHERE f_int1 BETWEEN 1 AND @max_row_div2 - 1;
ALTER TABLE t1 REPAIR PARTITION part_1,part_1,part_1;
Table Op Msg_type Msg_text
-test.t1 repair error Error in list of partitions to test.t1
+test.t1 repair Error Error in list of partitions to test.t1
+test.t1 repair status Operation failed
INSERT INTO t1(f_int1,f_int2,f_char1,f_char2,f_charbig)
SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
WHERE f_int1 BETWEEN @max_row_div2 AND @max_row;
diff --git a/storage/tokudb/mysql-test/tokudb_parts/r/partition_auto_increment_tokudb.result b/storage/tokudb/mysql-test/tokudb_parts/r/partition_auto_increment_tokudb.result
index 2101feaacb3..aadfaba3b7b 100644
--- a/storage/tokudb/mysql-test/tokudb_parts/r/partition_auto_increment_tokudb.result
+++ b/storage/tokudb/mysql-test/tokudb_parts/r/partition_auto_increment_tokudb.result
@@ -1,3 +1,6 @@
+SET @tokudb_prelock_empty_saved = @@GLOBAL.tokudb_prelock_empty;
+SET GLOBAL tokudb_prelock_empty = 0;
+SET SESSION tokudb_prelock_empty = 0;
DROP TABLE IF EXISTS t1;
# test without partitioning for reference
CREATE TABLE t1 (
@@ -1107,3 +1110,4 @@ a
0
DROP TABLE t1;
##############################################################################
+SET GLOBAL tokudb_prelock_empty = @tokudb_prelock_empty_saved;
diff --git a/storage/tokudb/mysql-test/tokudb_parts/t/disabled.def b/storage/tokudb/mysql-test/tokudb_parts/t/disabled.def
new file mode 100644
index 00000000000..90e599cd035
--- /dev/null
+++ b/storage/tokudb/mysql-test/tokudb_parts/t/disabled.def
@@ -0,0 +1,2 @@
+partition_basic_symlink_tokudb : tokudb_file_per_table is not supported
+partition_reorganize_tokudb : tokudb_file_per_table is not supported
diff --git a/storage/tokudb/mysql-test/tokudb_parts/t/partition_auto_increment_tokudb-master.opt b/storage/tokudb/mysql-test/tokudb_parts/t/partition_auto_increment_tokudb-master.opt
deleted file mode 100644
index 857da664d10..00000000000
--- a/storage/tokudb/mysql-test/tokudb_parts/t/partition_auto_increment_tokudb-master.opt
+++ /dev/null
@@ -1 +0,0 @@
---tokudb-prelock-empty=0
diff --git a/storage/tokudb/mysql-test/tokudb_parts/t/partition_auto_increment_tokudb.test b/storage/tokudb/mysql-test/tokudb_parts/t/partition_auto_increment_tokudb.test
index a13ddf65d4b..ceec2fda9ee 100644
--- a/storage/tokudb/mysql-test/tokudb_parts/t/partition_auto_increment_tokudb.test
+++ b/storage/tokudb/mysql-test/tokudb_parts/t/partition_auto_increment_tokudb.test
@@ -29,7 +29,12 @@
let $engine= 'TokuDB';
--source include/have_tokudb.inc
+SET @tokudb_prelock_empty_saved = @@GLOBAL.tokudb_prelock_empty;
+SET GLOBAL tokudb_prelock_empty = 0;
+SET SESSION tokudb_prelock_empty = 0;
+
#------------------------------------------------------------------------------#
# Execute the tests to be applied to all storage engines
--source suite/parts/inc/partition_auto_increment.inc
+SET GLOBAL tokudb_prelock_empty = @tokudb_prelock_empty_saved;
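The runtime SETs above replace the partition_auto_increment_tokudb-master.opt startup option (--tokudb-prelock-empty=0, deleted above). Setting only the GLOBAL value would not affect the already-open test connection, so the SESSION value is set as well, and the saved GLOBAL value is restored after the shared include has run. A minimal sketch of the same save/set/restore pattern (values assumed, not part of the recorded test):

    SET @saved = @@GLOBAL.tokudb_prelock_empty; # remember the current default
    SET GLOBAL tokudb_prelock_empty = 0;        # new connections now see 0
    SET SESSION tokudb_prelock_empty = 0;       # this connection sees 0 as well
    # ... test body ...
    SET GLOBAL tokudb_prelock_empty = @saved;   # restore for later tests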
diff --git a/storage/tokudb/mysql-test/tokudb_parts/t/suite.opt b/storage/tokudb/mysql-test/tokudb_parts/t/suite.opt
new file mode 100644
index 00000000000..23511b05020
--- /dev/null
+++ b/storage/tokudb/mysql-test/tokudb_parts/t/suite.opt
@@ -0,0 +1 @@
+$TOKUDB_OPT $TOKUDB_LOAD_ADD --loose-tokudb-check-jemalloc=0
diff --git a/storage/tokudb/mysql-test/tokudb_rpl/r/rpl_rfr_disable_on_expl_pk_absence.result b/storage/tokudb/mysql-test/tokudb_rpl/r/rpl_rfr_disable_on_expl_pk_absence.result
new file mode 100644
index 00000000000..981a833aea5
--- /dev/null
+++ b/storage/tokudb/mysql-test/tokudb_rpl/r/rpl_rfr_disable_on_expl_pk_absence.result
@@ -0,0 +1,47 @@
+include/master-slave.inc
+Warnings:
+Note #### Sending passwords in plain text without SSL/TLS is extremely insecure.
+Note #### Storing MySQL user name or password information in the master info repository is not secure and is therefore not recommended. Please consider using the USER and PASSWORD connection options for START SLAVE; see the 'START SLAVE Syntax' in the MySQL Manual for more information.
+[connection master]
+call mtr.add_suppression("read free replication is disabled for tokudb table");
+CREATE TABLE t (a int(11), b char(20)) ENGINE = TokuDB;
+INSERT INTO t (a, b) VALUES (1, 'a'), (2, 'b'), (3, 'c'), (4, 'd'), (5, 'e');
+SELECT * FROM t;
+a b
+1 a
+2 b
+3 c
+4 d
+5 e
+UPDATE t SET a = a + 10 WHERE b = 'b';
+SELECT * FROM t;
+a b
+1 a
+12 b
+3 c
+4 d
+5 e
+SELECT * FROM t;
+a b
+1 a
+12 b
+3 c
+4 d
+5 e
+UPDATE t SET a = a + 10 WHERE b = 'b';
+SELECT * FROM t;
+a b
+1 a
+22 b
+3 c
+4 d
+5 e
+SELECT * FROM t;
+a b
+1 a
+22 b
+3 c
+4 d
+5 e
+DROP TABLE t;
+include/rpl_end.inc
diff --git a/storage/tokudb/mysql-test/tokudb_rpl/t/rpl_rfr_disable_on_expl_pk_absence-slave.opt b/storage/tokudb/mysql-test/tokudb_rpl/t/rpl_rfr_disable_on_expl_pk_absence-slave.opt
new file mode 100644
index 00000000000..fb12af6c5bd
--- /dev/null
+++ b/storage/tokudb/mysql-test/tokudb_rpl/t/rpl_rfr_disable_on_expl_pk_absence-slave.opt
@@ -0,0 +1 @@
+--read-only=true --tokudb-rpl-unique-checks=false --tokudb-rpl-lookup-rows=false
diff --git a/storage/tokudb/mysql-test/tokudb_rpl/t/rpl_rfr_disable_on_expl_pk_absence.test b/storage/tokudb/mysql-test/tokudb_rpl/t/rpl_rfr_disable_on_expl_pk_absence.test
new file mode 100644
index 00000000000..67e77c1511c
--- /dev/null
+++ b/storage/tokudb/mysql-test/tokudb_rpl/t/rpl_rfr_disable_on_expl_pk_absence.test
@@ -0,0 +1,48 @@
+# Test case for bug#1536663
+#
+# When read-free replication is enabled for TokuDB and the replicated table
+# has no explicit pk, an update operation can leave duplicated records in
+# the table on the slave.
+#
+# Consider this update operation:
+# UPDATE t SET a = a + 10 WHERE b = 'b';
+# The master looks up the rows and updates those whose values match the
+# condition. The update events are written to the binary log with the row
+# values from the master. As row lookup is disabled on the slave, new rows
+# are inserted instead of the corresponding rows being updated.
+#
+# Without the fix there will be several rows with b = 'b' in the table on
+# the slave instead of one updated row.
+#
+
+--source include/have_tokudb.inc
+--source include/have_binlog_format_row.inc
+--source include/master-slave.inc
+
+call mtr.add_suppression("read free replication is disabled for tokudb table");
+
+--connection master
+CREATE TABLE t (a int(11), b char(20)) ENGINE = TokuDB;
+INSERT INTO t (a, b) VALUES (1, 'a'), (2, 'b'), (3, 'c'), (4, 'd'), (5, 'e');
+
+--sync_slave_with_master
+--sorted_result
+SELECT * FROM t;
+
+--let $i = 2
+--while($i) {
+ --dec $i
+ --connection master
+ UPDATE t SET a = a + 10 WHERE b = 'b';
+ --sorted_result
+ SELECT * FROM t;
+ --sync_slave_with_master
+ --sorted_result
+ SELECT * FROM t;
+}
+
+--connection master
+DROP TABLE t;
+--sync_slave_with_master
+
+--source include/rpl_end.inc
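The failure mode described in the test's header comment can be condensed to a few statements. A minimal sketch (not part of the recorded test, assuming a row-based master/slave pair where the slave runs with the -slave.opt options above):

    CREATE TABLE t (a INT, b CHAR(20)) ENGINE = TokuDB; # no explicit primary key
    INSERT INTO t VALUES (1, 'a'), (2, 'b');
    UPDATE t SET a = a + 10 WHERE b = 'b';
    # master now holds (12, 'b'); an unfixed slave holds both (2, 'b') and
    # (12, 'b'): with row lookup disabled, the update event is applied as a
    # plain insert instead of replacing the matching row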
diff --git a/storage/tokudb/mysql-test/tokudb_sys_vars/r/tokudb_analyze_delete_fraction.result b/storage/tokudb/mysql-test/tokudb_sys_vars/r/tokudb_analyze_delete_fraction.result
new file mode 100644
index 00000000000..41918883dd6
--- /dev/null
+++ b/storage/tokudb/mysql-test/tokudb_sys_vars/r/tokudb_analyze_delete_fraction.result
@@ -0,0 +1,73 @@
+SET @orig_global = @@global.tokudb_analyze_delete_fraction;
+SELECT @orig_global;
+@orig_global
+1
+SET @orig_session = @@session.tokudb_analyze_delete_fraction;
+SELECT @orig_session;
+@orig_session
+1
+SET GLOBAL tokudb_analyze_delete_fraction = .5;
+SELECT @@global.tokudb_analyze_delete_fraction;
+@@global.tokudb_analyze_delete_fraction
+0.500000
+SET GLOBAL tokudb_analyze_delete_fraction = 0;
+SELECT @@global.tokudb_analyze_delete_fraction;
+@@global.tokudb_analyze_delete_fraction
+0.000000
+SET GLOBAL tokudb_analyze_delete_fraction = DEFAULT;
+SELECT @@global.tokudb_analyze_delete_fraction;
+@@global.tokudb_analyze_delete_fraction
+1.000000
+SET GLOBAL tokudb_analyze_delete_fraction = 'foobar';
+ERROR 42000: Incorrect argument type to variable 'tokudb_analyze_delete_fraction'
+SELECT @@global.tokudb_analyze_delete_fraction;
+@@global.tokudb_analyze_delete_fraction
+1.000000
+SET GLOBAL tokudb_analyze_delete_fraction = 3.75;
+Warnings:
+Warning 1292 Truncated incorrect tokudb_analyze_delete_fraction value: '3.75'
+SELECT @@global.tokudb_analyze_delete_fraction;
+@@global.tokudb_analyze_delete_fraction
+1.000000
+SET SESSION tokudb_analyze_delete_fraction = .5;
+SELECT @@session.tokudb_analyze_delete_fraction;
+@@session.tokudb_analyze_delete_fraction
+0.500000
+SET SESSION tokudb_analyze_delete_fraction = 0;
+SELECT @@session.tokudb_analyze_delete_fraction;
+@@session.tokudb_analyze_delete_fraction
+0.000000
+SET SESSION tokudb_analyze_delete_fraction = DEFAULT;
+SELECT @@session.tokudb_analyze_delete_fraction;
+@@session.tokudb_analyze_delete_fraction
+1.000000
+SET SESSION tokudb_analyze_delete_fraction = 'foobar';
+ERROR 42000: Incorrect argument type to variable 'tokudb_analyze_delete_fraction'
+SELECT @@session.tokudb_analyze_delete_fraction;
+@@session.tokudb_analyze_delete_fraction
+1.000000
+SET SESSION tokudb_analyze_delete_fraction = 3.75;
+Warnings:
+Warning 1292 Truncated incorrect tokudb_analyze_delete_fraction value: '3.75'
+SELECT @@session.tokudb_analyze_delete_fraction;
+@@session.tokudb_analyze_delete_fraction
+1.000000
+SET GLOBAL tokudb_analyze_delete_fraction = .2;
+SET SESSION tokudb_analyze_delete_fraction = .3;
+SELECT @@global.tokudb_analyze_delete_fraction;
+@@global.tokudb_analyze_delete_fraction
+0.200000
+SELECT @@session.tokudb_analyze_delete_fraction;
+@@session.tokudb_analyze_delete_fraction
+0.300000
+SHOW VARIABLES LIKE 'tokudb_analyze_delete_fraction';
+Variable_name Value
+tokudb_analyze_delete_fraction 0.300000
+SET SESSION tokudb_analyze_delete_fraction = @orig_session;
+SELECT @@session.tokudb_analyze_delete_fraction;
+@@session.tokudb_analyze_delete_fraction
+1.000000
+SET GLOBAL tokudb_analyze_delete_fraction = @orig_global;
+SELECT @@global.tokudb_analyze_delete_fraction;
+@@global.tokudb_analyze_delete_fraction
+1.000000
diff --git a/storage/tokudb/mysql-test/tokudb_sys_vars/r/tokudb_analyze_in_background_basic.result b/storage/tokudb/mysql-test/tokudb_sys_vars/r/tokudb_analyze_in_background_basic.result
new file mode 100644
index 00000000000..53e96810eda
--- /dev/null
+++ b/storage/tokudb/mysql-test/tokudb_sys_vars/r/tokudb_analyze_in_background_basic.result
@@ -0,0 +1,99 @@
+SET @orig_global = @@global.tokudb_analyze_in_background;
+SELECT @orig_global;
+@orig_global
+0
+SET @orig_session = @@session.tokudb_analyze_in_background;
+SELECT @orig_session;
+@orig_session
+0
+SET GLOBAL tokudb_analyze_in_background = 0;
+SELECT @@global.tokudb_analyze_in_background;
+@@global.tokudb_analyze_in_background
+0
+SET GLOBAL tokudb_analyze_in_background = 1;
+SELECT @@global.tokudb_analyze_in_background;
+@@global.tokudb_analyze_in_background
+1
+SET GLOBAL tokudb_analyze_in_background = DEFAULT;
+SELECT @@global.tokudb_analyze_in_background;
+@@global.tokudb_analyze_in_background
+0
+SET GLOBAL tokudb_analyze_in_background = -6;
+SELECT @@global.tokudb_analyze_in_background;
+@@global.tokudb_analyze_in_background
+1
+SET GLOBAL tokudb_analyze_in_background = 1.6;
+ERROR 42000: Incorrect argument type to variable 'tokudb_analyze_in_background'
+SELECT @@global.tokudb_analyze_in_background;
+@@global.tokudb_analyze_in_background
+1
+SET GLOBAL tokudb_analyze_in_background = "T";
+ERROR 42000: Variable 'tokudb_analyze_in_background' can't be set to the value of 'T'
+SELECT @@global.tokudb_analyze_in_background;
+@@global.tokudb_analyze_in_background
+1
+SET GLOBAL tokudb_analyze_in_background = "Y";
+ERROR 42000: Variable 'tokudb_analyze_in_background' can't be set to the value of 'Y'
+SELECT @@global.tokudb_analyze_in_background;
+@@global.tokudb_analyze_in_background
+1
+SET GLOBAL tokudb_analyze_in_background = 'foobar';
+ERROR 42000: Variable 'tokudb_analyze_in_background' can't be set to the value of 'foobar'
+SELECT @@global.tokudb_analyze_in_background;
+@@global.tokudb_analyze_in_background
+1
+SET SESSION tokudb_analyze_in_background = 0;
+SELECT @@session.tokudb_analyze_in_background;
+@@session.tokudb_analyze_in_background
+0
+SET SESSION tokudb_analyze_in_background = 1;
+SELECT @@session.tokudb_analyze_in_background;
+@@session.tokudb_analyze_in_background
+1
+SET SESSION tokudb_analyze_in_background = DEFAULT;
+SELECT @@session.tokudb_analyze_in_background;
+@@session.tokudb_analyze_in_background
+1
+SET SESSION tokudb_analyze_in_background = -6;
+SELECT @@session.tokudb_analyze_in_background;
+@@session.tokudb_analyze_in_background
+1
+SET SESSION tokudb_analyze_in_background = 1.6;
+ERROR 42000: Incorrect argument type to variable 'tokudb_analyze_in_background'
+SELECT @@session.tokudb_analyze_in_background;
+@@session.tokudb_analyze_in_background
+1
+SET SESSION tokudb_analyze_in_background = "T";
+ERROR 42000: Variable 'tokudb_analyze_in_background' can't be set to the value of 'T'
+SELECT @@session.tokudb_analyze_in_background;
+@@session.tokudb_analyze_in_background
+1
+SET SESSION tokudb_analyze_in_background = "Y";
+ERROR 42000: Variable 'tokudb_analyze_in_background' can't be set to the value of 'Y'
+SELECT @@session.tokudb_analyze_in_background;
+@@session.tokudb_analyze_in_background
+1
+SET SESSION tokudb_analyze_in_background = 'foobar';
+ERROR 42000: Variable 'tokudb_analyze_in_background' can't be set to the value of 'foobar'
+SELECT @@session.tokudb_analyze_in_background;
+@@session.tokudb_analyze_in_background
+1
+SET GLOBAL tokudb_analyze_in_background = 0;
+SET SESSION tokudb_analyze_in_background = 1;
+SELECT @@global.tokudb_analyze_in_background;
+@@global.tokudb_analyze_in_background
+0
+SELECT @@session.tokudb_analyze_in_background;
+@@session.tokudb_analyze_in_background
+1
+SHOW VARIABLES LIKE 'tokudb_analyze_in_background';
+Variable_name Value
+tokudb_analyze_in_background ON
+SET SESSION tokudb_analyze_in_background = @orig_session;
+SELECT @@session.tokudb_analyze_in_background;
+@@session.tokudb_analyze_in_background
+0
+SET GLOBAL tokudb_analyze_in_background = @orig_global;
+SELECT @@global.tokudb_analyze_in_background;
+@@global.tokudb_analyze_in_background
+0
diff --git a/storage/tokudb/mysql-test/tokudb_sys_vars/r/tokudb_analyze_mode_basic.result b/storage/tokudb/mysql-test/tokudb_sys_vars/r/tokudb_analyze_mode_basic.result
new file mode 100644
index 00000000000..e2a3059a5aa
--- /dev/null
+++ b/storage/tokudb/mysql-test/tokudb_sys_vars/r/tokudb_analyze_mode_basic.result
@@ -0,0 +1,89 @@
+SET @orig_global = @@global.tokudb_analyze_mode;
+SELECT @orig_global;
+@orig_global
+TOKUDB_ANALYZE_STANDARD
+SET @orig_session = @@session.tokudb_analyze_mode;
+SELECT @orig_session;
+@orig_session
+TOKUDB_ANALYZE_STANDARD
+SET GLOBAL tokudb_analyze_mode = 'tokudb_analyze_standard';
+SELECT @@global.tokudb_analyze_mode;
+@@global.tokudb_analyze_mode
+TOKUDB_ANALYZE_STANDARD
+SET GLOBAL tokudb_analyze_mode = 'tokudb_analyze_recount_rows';
+SELECT @@global.tokudb_analyze_mode;
+@@global.tokudb_analyze_mode
+TOKUDB_ANALYZE_RECOUNT_ROWS
+SET GLOBAL tokudb_analyze_mode = 'tokudb_analyze_cancel';
+SELECT @@global.tokudb_analyze_mode;
+@@global.tokudb_analyze_mode
+TOKUDB_ANALYZE_CANCEL
+SET GLOBAL tokudb_analyze_mode = DEFAULT;
+SELECT @@global.tokudb_analyze_mode;
+@@global.tokudb_analyze_mode
+TOKUDB_ANALYZE_STANDARD
+SET GLOBAL tokudb_analyze_mode = '';
+ERROR 42000: Variable 'tokudb_analyze_mode' can't be set to the value of ''
+SELECT @@global.tokudb_analyze_mode;
+@@global.tokudb_analyze_mode
+TOKUDB_ANALYZE_STANDARD
+SET GLOBAL tokudb_analyze_mode = 'foobar';
+ERROR 42000: Variable 'tokudb_analyze_mode' can't be set to the value of 'foobar'
+SELECT @@global.tokudb_analyze_mode;
+@@global.tokudb_analyze_mode
+TOKUDB_ANALYZE_STANDARD
+SET GLOBAL tokudb_analyze_mode = 123;
+ERROR 42000: Variable 'tokudb_analyze_mode' can't be set to the value of '123'
+SELECT @@global.tokudb_analyze_mode;
+@@global.tokudb_analyze_mode
+TOKUDB_ANALYZE_STANDARD
+SET SESSION tokudb_analyze_mode = 'tokudb_analyze_standard';
+SELECT @@session.tokudb_analyze_mode;
+@@session.tokudb_analyze_mode
+TOKUDB_ANALYZE_STANDARD
+SET SESSION tokudb_analyze_mode = 'tokudb_analyze_recount_rows';
+SELECT @@session.tokudb_analyze_mode;
+@@session.tokudb_analyze_mode
+TOKUDB_ANALYZE_RECOUNT_ROWS
+SET SESSION tokudb_analyze_mode = 'tokudb_analyze_cancel';
+SELECT @@session.tokudb_analyze_mode;
+@@session.tokudb_analyze_mode
+TOKUDB_ANALYZE_CANCEL
+SET SESSION tokudb_analyze_mode = DEFAULT;
+SELECT @@session.tokudb_analyze_mode;
+@@session.tokudb_analyze_mode
+TOKUDB_ANALYZE_STANDARD
+SET SESSION tokudb_analyze_mode = '';
+ERROR 42000: Variable 'tokudb_analyze_mode' can't be set to the value of ''
+SELECT @@session.tokudb_analyze_mode;
+@@session.tokudb_analyze_mode
+TOKUDB_ANALYZE_STANDARD
+SET SESSION tokudb_analyze_mode = 'foobar';
+ERROR 42000: Variable 'tokudb_analyze_mode' can't be set to the value of 'foobar'
+SELECT @@session.tokudb_analyze_mode;
+@@session.tokudb_analyze_mode
+TOKUDB_ANALYZE_STANDARD
+SET SESSION tokudb_analyze_mode = 123;
+ERROR 42000: Variable 'tokudb_analyze_mode' can't be set to the value of '123'
+SELECT @@session.tokudb_analyze_mode;
+@@session.tokudb_analyze_mode
+TOKUDB_ANALYZE_STANDARD
+SET GLOBAL tokudb_analyze_mode = 'tokudb_analyze_standard';
+SET SESSION tokudb_analyze_mode = 'tokudb_analyze_recount_rows';
+SELECT @@global.tokudb_analyze_mode;
+@@global.tokudb_analyze_mode
+TOKUDB_ANALYZE_STANDARD
+SELECT @@session.tokudb_analyze_mode;
+@@session.tokudb_analyze_mode
+TOKUDB_ANALYZE_RECOUNT_ROWS
+SHOW VARIABLES LIKE 'tokudb_analyze_mode';
+Variable_name Value
+tokudb_analyze_mode TOKUDB_ANALYZE_RECOUNT_ROWS
+SET SESSION tokudb_analyze_mode = @orig_session;
+SELECT @@session.tokudb_analyze_mode;
+@@session.tokudb_analyze_mode
+TOKUDB_ANALYZE_STANDARD
+SET GLOBAL tokudb_analyze_mode = @orig_global;
+SELECT @@global.tokudb_analyze_mode;
+@@global.tokudb_analyze_mode
+TOKUDB_ANALYZE_STANDARD
diff --git a/storage/tokudb/mysql-test/tokudb_sys_vars/r/tokudb_analyze_throttle_basic.result b/storage/tokudb/mysql-test/tokudb_sys_vars/r/tokudb_analyze_throttle_basic.result
new file mode 100644
index 00000000000..34317c7cb7b
--- /dev/null
+++ b/storage/tokudb/mysql-test/tokudb_sys_vars/r/tokudb_analyze_throttle_basic.result
@@ -0,0 +1,61 @@
+SET @orig_global = @@global.tokudb_analyze_throttle;
+SELECT @orig_global;
+@orig_global
+0
+SET @orig_session = @@session.tokudb_analyze_throttle;
+SELECT @orig_session;
+@orig_session
+0
+SET GLOBAL tokudb_analyze_throttle = 10;
+SELECT @@global.tokudb_analyze_throttle;
+@@global.tokudb_analyze_throttle
+10
+SET GLOBAL tokudb_analyze_throttle = 0;
+SELECT @@global.tokudb_analyze_throttle;
+@@global.tokudb_analyze_throttle
+0
+SET GLOBAL tokudb_analyze_throttle = DEFAULT;
+SELECT @@global.tokudb_analyze_throttle;
+@@global.tokudb_analyze_throttle
+0
+SET GLOBAL tokudb_analyze_throttle = 'foobar';
+ERROR 42000: Incorrect argument type to variable 'tokudb_analyze_throttle'
+SELECT @@global.tokudb_analyze_throttle;
+@@global.tokudb_analyze_throttle
+0
+SET SESSION tokudb_analyze_throttle = 10;
+SELECT @@session.tokudb_analyze_throttle;
+@@session.tokudb_analyze_throttle
+10
+SET SESSION tokudb_analyze_throttle = 0;
+SELECT @@session.tokudb_analyze_throttle;
+@@session.tokudb_analyze_throttle
+0
+SET SESSION tokudb_analyze_throttle = DEFAULT;
+SELECT @@session.tokudb_analyze_throttle;
+@@session.tokudb_analyze_throttle
+0
+SET SESSION tokudb_analyze_throttle = 'foobar';
+ERROR 42000: Incorrect argument type to variable 'tokudb_analyze_throttle'
+SELECT @@session.tokudb_analyze_throttle;
+@@session.tokudb_analyze_throttle
+0
+SET GLOBAL tokudb_analyze_throttle = 12;
+SET SESSION tokudb_analyze_throttle = 13;
+SELECT @@global.tokudb_analyze_throttle;
+@@global.tokudb_analyze_throttle
+12
+SELECT @@session.tokudb_analyze_throttle;
+@@session.tokudb_analyze_throttle
+13
+SHOW VARIABLES LIKE 'tokudb_analyze_throttle';
+Variable_name Value
+tokudb_analyze_throttle 13
+SET SESSION tokudb_analyze_throttle = @orig_session;
+SELECT @@session.tokudb_analyze_throttle;
+@@session.tokudb_analyze_throttle
+0
+SET GLOBAL tokudb_analyze_throttle = @orig_global;
+SELECT @@global.tokudb_analyze_throttle;
+@@global.tokudb_analyze_throttle
+0
diff --git a/storage/tokudb/mysql-test/tokudb_sys_vars/r/tokudb_analyze_time_basic.result b/storage/tokudb/mysql-test/tokudb_sys_vars/r/tokudb_analyze_time_basic.result
new file mode 100644
index 00000000000..2eac1fcc3a1
--- /dev/null
+++ b/storage/tokudb/mysql-test/tokudb_sys_vars/r/tokudb_analyze_time_basic.result
@@ -0,0 +1,61 @@
+SET @orig_global = @@global.tokudb_analyze_time;
+SELECT @orig_global;
+@orig_global
+5
+SET @orig_session = @@session.tokudb_analyze_time;
+SELECT @orig_session;
+@orig_session
+5
+SET GLOBAL tokudb_analyze_time = 10;
+SELECT @@global.tokudb_analyze_time;
+@@global.tokudb_analyze_time
+10
+SET GLOBAL tokudb_analyze_time = 0;
+SELECT @@global.tokudb_analyze_time;
+@@global.tokudb_analyze_time
+0
+SET GLOBAL tokudb_analyze_time = DEFAULT;
+SELECT @@global.tokudb_analyze_time;
+@@global.tokudb_analyze_time
+5
+SET GLOBAL tokudb_analyze_time = 'foobar';
+ERROR 42000: Incorrect argument type to variable 'tokudb_analyze_time'
+SELECT @@global.tokudb_analyze_time;
+@@global.tokudb_analyze_time
+5
+SET SESSION tokudb_analyze_time = 10;
+SELECT @@session.tokudb_analyze_time;
+@@session.tokudb_analyze_time
+10
+SET SESSION tokudb_analyze_time = 0;
+SELECT @@session.tokudb_analyze_time;
+@@session.tokudb_analyze_time
+0
+SET SESSION tokudb_analyze_time = DEFAULT;
+SELECT @@session.tokudb_analyze_time;
+@@session.tokudb_analyze_time
+5
+SET SESSION tokudb_analyze_time = 'foobar';
+ERROR 42000: Incorrect argument type to variable 'tokudb_analyze_time'
+SELECT @@session.tokudb_analyze_time;
+@@session.tokudb_analyze_time
+5
+SET GLOBAL tokudb_analyze_time = 12;
+SET SESSION tokudb_analyze_time = 13;
+SELECT @@global.tokudb_analyze_time;
+@@global.tokudb_analyze_time
+12
+SELECT @@session.tokudb_analyze_time;
+@@session.tokudb_analyze_time
+13
+SHOW VARIABLES LIKE 'tokudb_analyze_time';
+Variable_name Value
+tokudb_analyze_time 13
+SET SESSION tokudb_analyze_time = @orig_session;
+SELECT @@session.tokudb_analyze_time;
+@@session.tokudb_analyze_time
+5
+SET GLOBAL tokudb_analyze_time = @orig_global;
+SELECT @@global.tokudb_analyze_time;
+@@global.tokudb_analyze_time
+5
diff --git a/storage/tokudb/mysql-test/tokudb_sys_vars/r/tokudb_auto_analyze.result b/storage/tokudb/mysql-test/tokudb_sys_vars/r/tokudb_auto_analyze.result
new file mode 100644
index 00000000000..5e22bc489cb
--- /dev/null
+++ b/storage/tokudb/mysql-test/tokudb_sys_vars/r/tokudb_auto_analyze.result
@@ -0,0 +1,61 @@
+SET @orig_global = @@global.tokudb_auto_analyze;
+SELECT @orig_global;
+@orig_global
+0
+SET @orig_session = @@session.tokudb_auto_analyze;
+SELECT @orig_session;
+@orig_session
+0
+SET GLOBAL tokudb_auto_analyze = 10;
+SELECT @@global.tokudb_auto_analyze;
+@@global.tokudb_auto_analyze
+10
+SET GLOBAL tokudb_auto_analyze = 0;
+SELECT @@global.tokudb_auto_analyze;
+@@global.tokudb_auto_analyze
+0
+SET GLOBAL tokudb_auto_analyze = DEFAULT;
+SELECT @@global.tokudb_auto_analyze;
+@@global.tokudb_auto_analyze
+0
+SET GLOBAL tokudb_auto_analyze = 'foobar';
+ERROR 42000: Incorrect argument type to variable 'tokudb_auto_analyze'
+SELECT @@global.tokudb_auto_analyze;
+@@global.tokudb_auto_analyze
+0
+SET SESSION tokudb_auto_analyze = 10;
+SELECT @@session.tokudb_auto_analyze;
+@@session.tokudb_auto_analyze
+10
+SET SESSION tokudb_auto_analyze = 0;
+SELECT @@session.tokudb_auto_analyze;
+@@session.tokudb_auto_analyze
+0
+SET SESSION tokudb_auto_analyze = DEFAULT;
+SELECT @@session.tokudb_auto_analyze;
+@@session.tokudb_auto_analyze
+0
+SET SESSION tokudb_auto_analyze = 'foobar';
+ERROR 42000: Incorrect argument type to variable 'tokudb_auto_analyze'
+SELECT @@session.tokudb_auto_analyze;
+@@session.tokudb_auto_analyze
+0
+SET GLOBAL tokudb_auto_analyze = 12;
+SET SESSION tokudb_auto_analyze = 13;
+SELECT @@global.tokudb_auto_analyze;
+@@global.tokudb_auto_analyze
+12
+SELECT @@session.tokudb_auto_analyze;
+@@session.tokudb_auto_analyze
+13
+SHOW VARIABLES LIKE 'tokudb_auto_analyze';
+Variable_name Value
+tokudb_auto_analyze 13
+SET SESSION tokudb_auto_analyze = @orig_session;
+SELECT @@session.tokudb_auto_analyze;
+@@session.tokudb_auto_analyze
+0
+SET GLOBAL tokudb_auto_analyze = @orig_global;
+SELECT @@global.tokudb_auto_analyze;
+@@global.tokudb_auto_analyze
+0
diff --git a/storage/tokudb/mysql-test/tokudb_sys_vars/r/tokudb_cardinality_scale_percent_basic.result b/storage/tokudb/mysql-test/tokudb_sys_vars/r/tokudb_cardinality_scale_percent_basic.result
new file mode 100644
index 00000000000..cac5d8b0dc7
--- /dev/null
+++ b/storage/tokudb/mysql-test/tokudb_sys_vars/r/tokudb_cardinality_scale_percent_basic.result
@@ -0,0 +1,36 @@
+SET @orig_global = @@global.tokudb_cardinality_scale_percent;
+SELECT @orig_global;
+@orig_global
+50
+SET GLOBAL tokudb_cardinality_scale_percent = 10;
+SELECT @@global.tokudb_cardinality_scale_percent;
+@@global.tokudb_cardinality_scale_percent
+10
+SET GLOBAL tokudb_cardinality_scale_percent = 0;
+SELECT @@global.tokudb_cardinality_scale_percent;
+@@global.tokudb_cardinality_scale_percent
+0
+SET GLOBAL tokudb_cardinality_scale_percent = DEFAULT;
+SELECT @@global.tokudb_cardinality_scale_percent;
+@@global.tokudb_cardinality_scale_percent
+50
+SET GLOBAL tokudb_cardinality_scale_percent = 'foobar';
+ERROR 42000: Incorrect argument type to variable 'tokudb_cardinality_scale_percent'
+SELECT @@global.tokudb_cardinality_scale_percent;
+@@global.tokudb_cardinality_scale_percent
+50
+SET GLOBAL tokudb_cardinality_scale_percent = 12;
+SET SESSION tokudb_cardinality_scale_percent = 13;
+ERROR HY000: Variable 'tokudb_cardinality_scale_percent' is a GLOBAL variable and should be set with SET GLOBAL
+SELECT @@global.tokudb_cardinality_scale_percent;
+@@global.tokudb_cardinality_scale_percent
+12
+SELECT @@session.tokudb_cardinality_scale_percent;
+ERROR HY000: Variable 'tokudb_cardinality_scale_percent' is a GLOBAL variable
+SHOW VARIABLES LIKE 'tokudb_cardinality_scale_percent';
+Variable_name Value
+tokudb_cardinality_scale_percent 12
+SET GLOBAL tokudb_cardinality_scale_percent = @orig_global;
+SELECT @@global.tokudb_cardinality_scale_percent;
+@@global.tokudb_cardinality_scale_percent
+50
diff --git a/storage/tokudb/mysql-test/tokudb_sys_vars/r/tokudb_pk_insert_mode_basic.result b/storage/tokudb/mysql-test/tokudb_sys_vars/r/tokudb_pk_insert_mode_basic.result
new file mode 100644
index 00000000000..268c4032626
--- /dev/null
+++ b/storage/tokudb/mysql-test/tokudb_sys_vars/r/tokudb_pk_insert_mode_basic.result
@@ -0,0 +1,85 @@
+SET @orig_global = @@global.tokudb_pk_insert_mode;
+SELECT @orig_global;
+@orig_global
+1
+SET @orig_session = @@session.tokudb_pk_insert_mode;
+SELECT @orig_session;
+@orig_session
+1
+SET GLOBAL tokudb_pk_insert_mode = 10;
+Warnings:
+Warning 1292 Truncated incorrect tokudb_pk_insert_mode value: '10'
+Warning 131 Using tokudb_pk_insert_mode is deprecated and the parameter may be removed in future releases.
+SELECT @@global.tokudb_pk_insert_mode;
+@@global.tokudb_pk_insert_mode
+2
+SET GLOBAL tokudb_pk_insert_mode = 0;
+Warnings:
+Warning 131 Using tokudb_pk_insert_mode=0 is deprecated and the parameter may be removed in future releases. Only tokudb_pk_insert_mode=1|2 is allowed.Resettig the value to 1.
+SELECT @@global.tokudb_pk_insert_mode;
+@@global.tokudb_pk_insert_mode
+1
+SET GLOBAL tokudb_pk_insert_mode = DEFAULT;
+Warnings:
+Warning 131 Using tokudb_pk_insert_mode is deprecated and the parameter may be removed in future releases.
+SELECT @@global.tokudb_pk_insert_mode;
+@@global.tokudb_pk_insert_mode
+1
+SET GLOBAL tokudb_pk_insert_mode = 'foobar';
+ERROR 42000: Incorrect argument type to variable 'tokudb_pk_insert_mode'
+SELECT @@global.tokudb_pk_insert_mode;
+@@global.tokudb_pk_insert_mode
+1
+SET SESSION tokudb_pk_insert_mode = 10;
+Warnings:
+Warning 1292 Truncated incorrect tokudb_pk_insert_mode value: '10'
+Warning 131 Using tokudb_pk_insert_mode is deprecated and the parameter may be removed in future releases.
+SELECT @@session.tokudb_pk_insert_mode;
+@@session.tokudb_pk_insert_mode
+2
+SET SESSION tokudb_pk_insert_mode = 0;
+Warnings:
+Warning 131 Using tokudb_pk_insert_mode=0 is deprecated and the parameter may be removed in future releases. Only tokudb_pk_insert_mode=1|2 is allowed.Resettig the value to 1.
+SELECT @@session.tokudb_pk_insert_mode;
+@@session.tokudb_pk_insert_mode
+1
+SET SESSION tokudb_pk_insert_mode = DEFAULT;
+Warnings:
+Warning 131 Using tokudb_pk_insert_mode is deprecated and the parameter may be removed in future releases.
+SELECT @@session.tokudb_pk_insert_mode;
+@@session.tokudb_pk_insert_mode
+1
+SET SESSION tokudb_pk_insert_mode = 'foobar';
+ERROR 42000: Incorrect argument type to variable 'tokudb_pk_insert_mode'
+SELECT @@session.tokudb_pk_insert_mode;
+@@session.tokudb_pk_insert_mode
+1
+SET GLOBAL tokudb_pk_insert_mode = 12;
+Warnings:
+Warning 1292 Truncated incorrect tokudb_pk_insert_mode value: '12'
+Warning 131 Using tokudb_pk_insert_mode is deprecated and the parameter may be removed in future releases.
+SET SESSION tokudb_pk_insert_mode = 13;
+Warnings:
+Warning 1292 Truncated incorrect tokudb_pk_insert_mode value: '13'
+Warning 131 Using tokudb_pk_insert_mode is deprecated and the parameter may be removed in future releases.
+SELECT @@global.tokudb_pk_insert_mode;
+@@global.tokudb_pk_insert_mode
+2
+SELECT @@session.tokudb_pk_insert_mode;
+@@session.tokudb_pk_insert_mode
+2
+SHOW VARIABLES LIKE 'tokudb_pk_insert_mode';
+Variable_name Value
+tokudb_pk_insert_mode 2
+SET SESSION tokudb_pk_insert_mode = @orig_session;
+Warnings:
+Warning 131 Using tokudb_pk_insert_mode is deprecated and the parameter may be removed in future releases.
+SELECT @@session.tokudb_pk_insert_mode;
+@@session.tokudb_pk_insert_mode
+1
+SET GLOBAL tokudb_pk_insert_mode = @orig_global;
+Warnings:
+Warning 131 Using tokudb_pk_insert_mode is deprecated and the parameter may be removed in future releases.
+SELECT @@global.tokudb_pk_insert_mode;
+@@global.tokudb_pk_insert_mode
+1
diff --git a/storage/tokudb/mysql-test/tokudb_sys_vars/t/suite.opt b/storage/tokudb/mysql-test/tokudb_sys_vars/t/suite.opt
new file mode 100644
index 00000000000..23511b05020
--- /dev/null
+++ b/storage/tokudb/mysql-test/tokudb_sys_vars/t/suite.opt
@@ -0,0 +1 @@
+$TOKUDB_OPT $TOKUDB_LOAD_ADD --loose-tokudb-check-jemalloc=0
diff --git a/storage/tokudb/mysql-test/tokudb_sys_vars/t/tokudb_analyze_delete_fraction.test b/storage/tokudb/mysql-test/tokudb_sys_vars/t/tokudb_analyze_delete_fraction.test
new file mode 100644
index 00000000000..d5bd382740f
--- /dev/null
+++ b/storage/tokudb/mysql-test/tokudb_sys_vars/t/tokudb_analyze_delete_fraction.test
@@ -0,0 +1,56 @@
+--source include/have_tokudb.inc
+
+# Check the default value
+SET @orig_global = @@global.tokudb_analyze_delete_fraction;
+SELECT @orig_global;
+
+SET @orig_session = @@session.tokudb_analyze_delete_fraction;
+SELECT @orig_session;
+
+# Test global
+SET GLOBAL tokudb_analyze_delete_fraction = .5;
+SELECT @@global.tokudb_analyze_delete_fraction;
+
+SET GLOBAL tokudb_analyze_delete_fraction = 0;
+SELECT @@global.tokudb_analyze_delete_fraction;
+
+SET GLOBAL tokudb_analyze_delete_fraction = DEFAULT;
+SELECT @@global.tokudb_analyze_delete_fraction;
+
+-- error ER_WRONG_TYPE_FOR_VAR
+SET GLOBAL tokudb_analyze_delete_fraction = 'foobar';
+SELECT @@global.tokudb_analyze_delete_fraction;
+
+SET GLOBAL tokudb_analyze_delete_fraction = 3.75;
+SELECT @@global.tokudb_analyze_delete_fraction;
+
+# Test session
+SET SESSION tokudb_analyze_delete_fraction = .5;
+SELECT @@session.tokudb_analyze_delete_fraction;
+
+SET SESSION tokudb_analyze_delete_fraction = 0;
+SELECT @@session.tokudb_analyze_delete_fraction;
+
+SET SESSION tokudb_analyze_delete_fraction = DEFAULT;
+SELECT @@session.tokudb_analyze_delete_fraction;
+
+-- error ER_WRONG_TYPE_FOR_VAR
+SET SESSION tokudb_analyze_delete_fraction = 'foobar';
+SELECT @@session.tokudb_analyze_delete_fraction;
+
+SET SESSION tokudb_analyze_delete_fraction = 3.75;
+SELECT @@session.tokudb_analyze_delete_fraction;
+
+# both
+SET GLOBAL tokudb_analyze_delete_fraction = .2;
+SET SESSION tokudb_analyze_delete_fraction = .3;
+SELECT @@global.tokudb_analyze_delete_fraction;
+SELECT @@session.tokudb_analyze_delete_fraction;
+SHOW VARIABLES LIKE 'tokudb_analyze_delete_fraction';
+
+# Clean up
+SET SESSION tokudb_analyze_delete_fraction = @orig_session;
+SELECT @@session.tokudb_analyze_delete_fraction;
+
+SET GLOBAL tokudb_analyze_delete_fraction = @orig_global;
+SELECT @@global.tokudb_analyze_delete_fraction;
diff --git a/storage/tokudb/mysql-test/tokudb_sys_vars/t/tokudb_analyze_in_background_basic.test b/storage/tokudb/mysql-test/tokudb_sys_vars/t/tokudb_analyze_in_background_basic.test
new file mode 100644
index 00000000000..dfb2a0e416d
--- /dev/null
+++ b/storage/tokudb/mysql-test/tokudb_sys_vars/t/tokudb_analyze_in_background_basic.test
@@ -0,0 +1,80 @@
+--source include/have_tokudb.inc
+
+# Check the default value
+SET @orig_global = @@global.tokudb_analyze_in_background;
+SELECT @orig_global;
+
+SET @orig_session = @@session.tokudb_analyze_in_background;
+SELECT @orig_session;
+
+# Test global
+SET GLOBAL tokudb_analyze_in_background = 0;
+SELECT @@global.tokudb_analyze_in_background;
+
+SET GLOBAL tokudb_analyze_in_background = 1;
+SELECT @@global.tokudb_analyze_in_background;
+
+SET GLOBAL tokudb_analyze_in_background = DEFAULT;
+SELECT @@global.tokudb_analyze_in_background;
+
+SET GLOBAL tokudb_analyze_in_background = -6;
+SELECT @@global.tokudb_analyze_in_background;
+
+-- error ER_WRONG_TYPE_FOR_VAR
+SET GLOBAL tokudb_analyze_in_background = 1.6;
+SELECT @@global.tokudb_analyze_in_background;
+
+-- error ER_WRONG_VALUE_FOR_VAR
+SET GLOBAL tokudb_analyze_in_background = "T";
+SELECT @@global.tokudb_analyze_in_background;
+
+-- error ER_WRONG_VALUE_FOR_VAR
+SET GLOBAL tokudb_analyze_in_background = "Y";
+SELECT @@global.tokudb_analyze_in_background;
+
+-- error ER_WRONG_VALUE_FOR_VAR
+SET GLOBAL tokudb_analyze_in_background = 'foobar';
+SELECT @@global.tokudb_analyze_in_background;
+
+# Test session
+SET SESSION tokudb_analyze_in_background = 0;
+SELECT @@session.tokudb_analyze_in_background;
+
+SET SESSION tokudb_analyze_in_background = 1;
+SELECT @@session.tokudb_analyze_in_background;
+
+SET SESSION tokudb_analyze_in_background = DEFAULT;
+SELECT @@session.tokudb_analyze_in_background;
+
+SET SESSION tokudb_analyze_in_background = -6;
+SELECT @@session.tokudb_analyze_in_background;
+
+-- error ER_WRONG_TYPE_FOR_VAR
+SET SESSION tokudb_analyze_in_background = 1.6;
+SELECT @@session.tokudb_analyze_in_background;
+
+-- error ER_WRONG_VALUE_FOR_VAR
+SET SESSION tokudb_analyze_in_background = "T";
+SELECT @@session.tokudb_analyze_in_background;
+
+-- error ER_WRONG_VALUE_FOR_VAR
+SET SESSION tokudb_analyze_in_background = "Y";
+SELECT @@session.tokudb_analyze_in_background;
+
+-- error ER_WRONG_VALUE_FOR_VAR
+SET SESSION tokudb_analyze_in_background = 'foobar';
+SELECT @@session.tokudb_analyze_in_background;
+
+# both
+SET GLOBAL tokudb_analyze_in_background = 0;
+SET SESSION tokudb_analyze_in_background = 1;
+SELECT @@global.tokudb_analyze_in_background;
+SELECT @@session.tokudb_analyze_in_background;
+SHOW VARIABLES LIKE 'tokudb_analyze_in_background';
+
+# Clean up
+SET SESSION tokudb_analyze_in_background = @orig_session;
+SELECT @@session.tokudb_analyze_in_background;
+
+SET GLOBAL tokudb_analyze_in_background = @orig_global;
+SELECT @@global.tokudb_analyze_in_background;
diff --git a/storage/tokudb/mysql-test/tokudb_sys_vars/t/tokudb_analyze_mode_basic.test b/storage/tokudb/mysql-test/tokudb_sys_vars/t/tokudb_analyze_mode_basic.test
new file mode 100644
index 00000000000..69def75bd3d
--- /dev/null
+++ b/storage/tokudb/mysql-test/tokudb_sys_vars/t/tokudb_analyze_mode_basic.test
@@ -0,0 +1,72 @@
+--source include/have_tokudb.inc
+
+# Check the default value
+SET @orig_global = @@global.tokudb_analyze_mode;
+SELECT @orig_global;
+
+SET @orig_session = @@session.tokudb_analyze_mode;
+SELECT @orig_session;
+
+# Test global
+SET GLOBAL tokudb_analyze_mode = 'tokudb_analyze_standard';
+SELECT @@global.tokudb_analyze_mode;
+
+SET GLOBAL tokudb_analyze_mode = 'tokudb_analyze_recount_rows';
+SELECT @@global.tokudb_analyze_mode;
+
+SET GLOBAL tokudb_analyze_mode = 'tokudb_analyze_cancel';
+SELECT @@global.tokudb_analyze_mode;
+
+SET GLOBAL tokudb_analyze_mode = DEFAULT;
+SELECT @@global.tokudb_analyze_mode;
+
+-- error ER_WRONG_VALUE_FOR_VAR
+SET GLOBAL tokudb_analyze_mode = '';
+SELECT @@global.tokudb_analyze_mode;
+
+-- error ER_WRONG_VALUE_FOR_VAR
+SET GLOBAL tokudb_analyze_mode = 'foobar';
+SELECT @@global.tokudb_analyze_mode;
+
+-- error ER_WRONG_VALUE_FOR_VAR
+SET GLOBAL tokudb_analyze_mode = 123;
+SELECT @@global.tokudb_analyze_mode;
+
+# Test session
+SET SESSION tokudb_analyze_mode = 'tokudb_analyze_standard';
+SELECT @@session.tokudb_analyze_mode;
+
+SET SESSION tokudb_analyze_mode = 'tokudb_analyze_recount_rows';
+SELECT @@session.tokudb_analyze_mode;
+
+SET SESSION tokudb_analyze_mode = 'tokudb_analyze_cancel';
+SELECT @@session.tokudb_analyze_mode;
+
+SET SESSION tokudb_analyze_mode = DEFAULT;
+SELECT @@session.tokudb_analyze_mode;
+
+-- error ER_WRONG_VALUE_FOR_VAR
+SET SESSION tokudb_analyze_mode = '';
+SELECT @@session.tokudb_analyze_mode;
+
+-- error ER_WRONG_VALUE_FOR_VAR
+SET SESSION tokudb_analyze_mode = 'foobar';
+SELECT @@session.tokudb_analyze_mode;
+
+-- error ER_WRONG_VALUE_FOR_VAR
+SET SESSION tokudb_analyze_mode = 123;
+SELECT @@session.tokudb_analyze_mode;
+
+# both
+SET GLOBAL tokudb_analyze_mode = 'tokudb_analyze_standard';
+SET SESSION tokudb_analyze_mode = 'tokudb_analyze_recount_rows';
+SELECT @@global.tokudb_analyze_mode;
+SELECT @@session.tokudb_analyze_mode;
+SHOW VARIABLES LIKE 'tokudb_analyze_mode';
+
+# Clean up
+SET SESSION tokudb_analyze_mode = @orig_session;
+SELECT @@session.tokudb_analyze_mode;
+
+SET GLOBAL tokudb_analyze_mode = @orig_global;
+SELECT @@global.tokudb_analyze_mode;
diff --git a/storage/tokudb/mysql-test/tokudb_sys_vars/t/tokudb_analyze_throttle_basic.test b/storage/tokudb/mysql-test/tokudb_sys_vars/t/tokudb_analyze_throttle_basic.test
new file mode 100644
index 00000000000..a3660b0626a
--- /dev/null
+++ b/storage/tokudb/mysql-test/tokudb_sys_vars/t/tokudb_analyze_throttle_basic.test
@@ -0,0 +1,50 @@
+--source include/have_tokudb.inc
+
+# Check the default value
+SET @orig_global = @@global.tokudb_analyze_throttle;
+SELECT @orig_global;
+
+SET @orig_session = @@session.tokudb_analyze_throttle;
+SELECT @orig_session;
+
+# Test global
+SET GLOBAL tokudb_analyze_throttle = 10;
+SELECT @@global.tokudb_analyze_throttle;
+
+SET GLOBAL tokudb_analyze_throttle = 0;
+SELECT @@global.tokudb_analyze_throttle;
+
+SET GLOBAL tokudb_analyze_throttle = DEFAULT;
+SELECT @@global.tokudb_analyze_throttle;
+
+-- error ER_WRONG_TYPE_FOR_VAR
+SET GLOBAL tokudb_analyze_throttle = 'foobar';
+SELECT @@global.tokudb_analyze_throttle;
+
+# Test session
+SET SESSION tokudb_analyze_throttle = 10;
+SELECT @@session.tokudb_analyze_throttle;
+
+SET SESSION tokudb_analyze_throttle = 0;
+SELECT @@session.tokudb_analyze_throttle;
+
+SET SESSION tokudb_analyze_throttle = DEFAULT;
+SELECT @@session.tokudb_analyze_throttle;
+
+-- error ER_WRONG_TYPE_FOR_VAR
+SET SESSION tokudb_analyze_throttle = 'foobar';
+SELECT @@session.tokudb_analyze_throttle;
+
+# both
+SET GLOBAL tokudb_analyze_throttle = 12;
+SET SESSION tokudb_analyze_throttle = 13;
+SELECT @@global.tokudb_analyze_throttle;
+SELECT @@session.tokudb_analyze_throttle;
+SHOW VARIABLES LIKE 'tokudb_analyze_throttle';
+
+# Clean up
+SET SESSION tokudb_analyze_throttle = @orig_session;
+SELECT @@session.tokudb_analyze_throttle;
+
+SET GLOBAL tokudb_analyze_throttle = @orig_global;
+SELECT @@global.tokudb_analyze_throttle;
diff --git a/storage/tokudb/mysql-test/tokudb_sys_vars/t/tokudb_analyze_time_basic.test b/storage/tokudb/mysql-test/tokudb_sys_vars/t/tokudb_analyze_time_basic.test
new file mode 100644
index 00000000000..3098934ee8c
--- /dev/null
+++ b/storage/tokudb/mysql-test/tokudb_sys_vars/t/tokudb_analyze_time_basic.test
@@ -0,0 +1,50 @@
+--source include/have_tokudb.inc
+
+# Check the default value
+SET @orig_global = @@global.tokudb_analyze_time;
+SELECT @orig_global;
+
+SET @orig_session = @@session.tokudb_analyze_time;
+SELECT @orig_session;
+
+# Test global
+SET GLOBAL tokudb_analyze_time = 10;
+SELECT @@global.tokudb_analyze_time;
+
+SET GLOBAL tokudb_analyze_time = 0;
+SELECT @@global.tokudb_analyze_time;
+
+SET GLOBAL tokudb_analyze_time = DEFAULT;
+SELECT @@global.tokudb_analyze_time;
+
+-- error ER_WRONG_TYPE_FOR_VAR
+SET GLOBAL tokudb_analyze_time = 'foobar';
+SELECT @@global.tokudb_analyze_time;
+
+# Test session
+SET SESSION tokudb_analyze_time = 10;
+SELECT @@session.tokudb_analyze_time;
+
+SET SESSION tokudb_analyze_time = 0;
+SELECT @@session.tokudb_analyze_time;
+
+SET SESSION tokudb_analyze_time = DEFAULT;
+SELECT @@session.tokudb_analyze_time;
+
+-- error ER_WRONG_TYPE_FOR_VAR
+SET SESSION tokudb_analyze_time = 'foobar';
+SELECT @@session.tokudb_analyze_time;
+
+# both
+SET GLOBAL tokudb_analyze_time = 12;
+SET SESSION tokudb_analyze_time = 13;
+SELECT @@global.tokudb_analyze_time;
+SELECT @@session.tokudb_analyze_time;
+SHOW VARIABLES LIKE 'tokudb_analyze_time';
+
+# Clean up
+SET SESSION tokudb_analyze_time = @orig_session;
+SELECT @@session.tokudb_analyze_time;
+
+SET GLOBAL tokudb_analyze_time = @orig_global;
+SELECT @@global.tokudb_analyze_time;
diff --git a/storage/tokudb/mysql-test/tokudb_sys_vars/t/tokudb_auto_analyze.test b/storage/tokudb/mysql-test/tokudb_sys_vars/t/tokudb_auto_analyze.test
new file mode 100644
index 00000000000..d9998508ae1
--- /dev/null
+++ b/storage/tokudb/mysql-test/tokudb_sys_vars/t/tokudb_auto_analyze.test
@@ -0,0 +1,50 @@
+--source include/have_tokudb.inc
+
+# Check the default value
+SET @orig_global = @@global.tokudb_auto_analyze;
+SELECT @orig_global;
+
+SET @orig_session = @@session.tokudb_auto_analyze;
+SELECT @orig_session;
+
+# Test global
+SET GLOBAL tokudb_auto_analyze = 10;
+SELECT @@global.tokudb_auto_analyze;
+
+SET GLOBAL tokudb_auto_analyze = 0;
+SELECT @@global.tokudb_auto_analyze;
+
+SET GLOBAL tokudb_auto_analyze = DEFAULT;
+SELECT @@global.tokudb_auto_analyze;
+
+-- error ER_WRONG_TYPE_FOR_VAR
+SET GLOBAL tokudb_auto_analyze = 'foobar';
+SELECT @@global.tokudb_auto_analyze;
+
+# Test session
+SET SESSION tokudb_auto_analyze = 10;
+SELECT @@session.tokudb_auto_analyze;
+
+SET SESSION tokudb_auto_analyze = 0;
+SELECT @@session.tokudb_auto_analyze;
+
+SET SESSION tokudb_auto_analyze = DEFAULT;
+SELECT @@session.tokudb_auto_analyze;
+
+-- error ER_WRONG_TYPE_FOR_VAR
+SET SESSION tokudb_auto_analyze = 'foobar';
+SELECT @@session.tokudb_auto_analyze;
+
+# both
+SET GLOBAL tokudb_auto_analyze = 12;
+SET SESSION tokudb_auto_analyze = 13;
+SELECT @@global.tokudb_auto_analyze;
+SELECT @@session.tokudb_auto_analyze;
+SHOW VARIABLES LIKE 'tokudb_auto_analyze';
+
+# Clean up
+SET SESSION tokudb_auto_analyze = @orig_session;
+SELECT @@session.tokudb_auto_analyze;
+
+SET GLOBAL tokudb_auto_analyze = @orig_global;
+SELECT @@global.tokudb_auto_analyze;
diff --git a/storage/tokudb/mysql-test/tokudb_sys_vars/t/tokudb_cardinality_scale_percent_basic.test b/storage/tokudb/mysql-test/tokudb_sys_vars/t/tokudb_cardinality_scale_percent_basic.test
new file mode 100644
index 00000000000..83063f04248
--- /dev/null
+++ b/storage/tokudb/mysql-test/tokudb_sys_vars/t/tokudb_cardinality_scale_percent_basic.test
@@ -0,0 +1,32 @@
+--source include/have_tokudb.inc
+
+# Check the default value
+SET @orig_global = @@global.tokudb_cardinality_scale_percent;
+SELECT @orig_global;
+
+# Test global
+SET GLOBAL tokudb_cardinality_scale_percent = 10;
+SELECT @@global.tokudb_cardinality_scale_percent;
+
+SET GLOBAL tokudb_cardinality_scale_percent = 0;
+SELECT @@global.tokudb_cardinality_scale_percent;
+
+SET GLOBAL tokudb_cardinality_scale_percent = DEFAULT;
+SELECT @@global.tokudb_cardinality_scale_percent;
+
+-- error ER_WRONG_TYPE_FOR_VAR
+SET GLOBAL tokudb_cardinality_scale_percent = 'foobar';
+SELECT @@global.tokudb_cardinality_scale_percent;
+
+# both
+SET GLOBAL tokudb_cardinality_scale_percent = 12;
+-- error ER_GLOBAL_VARIABLE
+SET SESSION tokudb_cardinality_scale_percent = 13;
+SELECT @@global.tokudb_cardinality_scale_percent;
+-- error ER_INCORRECT_GLOBAL_LOCAL_VAR
+SELECT @@session.tokudb_cardinality_scale_percent;
+SHOW VARIABLES LIKE 'tokudb_cardinality_scale_percent';
+
+# Clean up
+SET GLOBAL tokudb_cardinality_scale_percent = @orig_global;
+SELECT @@global.tokudb_cardinality_scale_percent;
diff --git a/storage/tokudb/mysql-test/tokudb_sys_vars/t/tokudb_pk_insert_mode_basic.test b/storage/tokudb/mysql-test/tokudb_sys_vars/t/tokudb_pk_insert_mode_basic.test
new file mode 100644
index 00000000000..1669c7842a9
--- /dev/null
+++ b/storage/tokudb/mysql-test/tokudb_sys_vars/t/tokudb_pk_insert_mode_basic.test
@@ -0,0 +1,51 @@
+--source include/have_tokudb.inc
+--enable_warnings
+
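+# Out-of-range values are clamped: anything above 2 truncates to 2 and 0 is
+# reset to 1, each with a deprecation warning (see the .result file).
+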
+# Check the default value
+SET @orig_global = @@global.tokudb_pk_insert_mode;
+SELECT @orig_global;
+
+SET @orig_session = @@session.tokudb_pk_insert_mode;
+SELECT @orig_session;
+
+# Test global
+SET GLOBAL tokudb_pk_insert_mode = 10;
+SELECT @@global.tokudb_pk_insert_mode;
+
+SET GLOBAL tokudb_pk_insert_mode = 0;
+SELECT @@global.tokudb_pk_insert_mode;
+
+SET GLOBAL tokudb_pk_insert_mode = DEFAULT;
+SELECT @@global.tokudb_pk_insert_mode;
+
+-- error ER_WRONG_TYPE_FOR_VAR
+SET GLOBAL tokudb_pk_insert_mode = 'foobar';
+SELECT @@global.tokudb_pk_insert_mode;
+
+# Test session
+SET SESSION tokudb_pk_insert_mode = 10;
+SELECT @@session.tokudb_pk_insert_mode;
+
+SET SESSION tokudb_pk_insert_mode = 0;
+SELECT @@session.tokudb_pk_insert_mode;
+
+SET SESSION tokudb_pk_insert_mode = DEFAULT;
+SELECT @@session.tokudb_pk_insert_mode;
+
+-- error ER_WRONG_TYPE_FOR_VAR
+SET SESSION tokudb_pk_insert_mode = 'foobar';
+SELECT @@session.tokudb_pk_insert_mode;
+
+# both
+SET GLOBAL tokudb_pk_insert_mode = 12;
+SET SESSION tokudb_pk_insert_mode = 13;
+SELECT @@global.tokudb_pk_insert_mode;
+SELECT @@session.tokudb_pk_insert_mode;
+SHOW VARIABLES LIKE 'tokudb_pk_insert_mode';
+
+# Clean up
+SET SESSION tokudb_pk_insert_mode = @orig_session;
+SELECT @@session.tokudb_pk_insert_mode;
+
+SET GLOBAL tokudb_pk_insert_mode = @orig_global;
+SELECT @@global.tokudb_pk_insert_mode;
diff --git a/storage/tokudb/tokudb_background.cc b/storage/tokudb/tokudb_background.cc
new file mode 100644
index 00000000000..d8ef54a5972
--- /dev/null
+++ b/storage/tokudb/tokudb_background.cc
@@ -0,0 +1,253 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+/* -*- mode: C; c-basic-offset: 4 -*- */
+#ident "$Id$"
+/*======
+This file is part of TokuDB
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+    TokuDB is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ TokuDB is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with TokuDB. If not, see <http://www.gnu.org/licenses/>.
+
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "tokudb_background.h"
+#include "tokudb_sysvars.h"
+
+namespace tokudb {
+namespace background {
+
+
+std::atomic<uint64_t> job_manager_t::job_t::_next_id(1);
+
+job_manager_t::job_t::job_t(bool user_scheduled) :
+ _running(false),
+ _cancelled(false),
+ _id(_next_id++),
+ _user_scheduled(user_scheduled),
+ _scheduled_time(::time(0)),
+ _started_time(0) {
+}
+job_manager_t::job_t::~job_t() {
+}
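+// Allocate with MY_FAE so a failed allocation aborts the process rather
+// than returning NULL; MY_ZEROFILL hands back zeroed memory.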
+void* job_manager_t::operator new(size_t sz) {
+ return tokudb::memory::malloc(sz, MYF(MY_WME|MY_ZEROFILL|MY_FAE));
+}
+void job_manager_t::operator delete(void* p) {
+ tokudb::memory::free(p);
+}
+job_manager_t::job_manager_t() :
+ _sem(0, 65535),
+ _shutdown(false) {
+}
+job_manager_t::~job_manager_t() {
+}
+void job_manager_t::initialize() {
+ int r = _thread.start(thread_func, this);
+ assert_always(r == 0);
+}
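+// Shut down the manager: interrupt the worker thread, cancel and free all
+// queued background jobs, then join the worker.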
+void job_manager_t::destroy() {
+ assert_always(!_shutdown);
+ assert_always(_foreground_jobs.size() == 0);
+ _shutdown = true;
+ _sem.set_interrupt();
+
+ while (_background_jobs.size()) {
+ _mutex.lock();
+ job_t* job = _background_jobs.front();
+ cancel(job);
+ _background_jobs.pop_front();
+ delete job;
+ _mutex.unlock();
+ }
+
+ void* result;
+ int r = _thread.join(&result);
+ assert_always(r == 0);
+}
+bool job_manager_t::run_job(job_t* newjob, bool background) {
+ bool ret = false;
+ const char* jobkey = newjob->key();
+
+ _mutex.lock();
+ assert_always(!_shutdown);
+
+ for (jobs_t::iterator it = _background_jobs.begin();
+ it != _background_jobs.end();
+ it++) {
+ job_t* job = *it;
+ if (!job->cancelled() && strcmp(job->key(), jobkey) == 0) {
+ // if this is a foreground job being run and
+ // there is an existing background job of the same type
+ // and it is not running yet, we can cancel the background job
+ // and just run this one in the foreground, might have different
+ // params, but that is up to the user to figure out.
+ if (!background && !job->running()) {
+ job->cancel();
+ } else {
+ // can't schedule or run another job on the same key
+ goto cleanup;
+ }
+ }
+ }
+ for (jobs_t::iterator it = _foreground_jobs.begin();
+ it != _foreground_jobs.end();
+ it++) {
+ job_t* job = *it;
+ if (strcmp(job->key(), jobkey) == 0) {
+ // can't schedule or run another job on the same key
+ // as an existing foreground job
+ goto cleanup;
+ }
+ }
+
+ if (background) {
+ _background_jobs.push_back(newjob);
+ _sem.signal();
+ ret = true;
+ } else {
+ _foreground_jobs.push_back(newjob);
+
+ run(newjob);
+
+ for (jobs_t::iterator it = _foreground_jobs.begin();
+ it != _foreground_jobs.end();
+ it++) {
+ job_t* job = *it;
+ if (job == newjob) {
+ _foreground_jobs.erase(it);
+ delete job;
+ break;
+ }
+ }
+ ret = true;
+ }
+
+cleanup:
+ _mutex.unlock();
+ return ret;
+}
+bool job_manager_t::cancel_job(const char* key) {
+ bool ret = false;
+ _mutex.lock();
+
+ for (jobs_t::iterator it = _background_jobs.begin();
+ it != _background_jobs.end(); it++) {
+ job_t* job = *it;
+
+ if (!job->cancelled() &&
+ strcmp(job->key(), key) == 0) {
+
+ cancel(job);
+
+ ret = true;
+ }
+ }
+
+ _mutex.unlock();
+ return ret;
+}
+void job_manager_t::iterate_jobs(pfn_iterate_t callback, void* extra) const {
+
+ char database[256], table[256], type[256], params[256], status[256];
+
+ _mutex.lock();
+
+ for (jobs_t::const_iterator it = _background_jobs.begin();
+ it != _background_jobs.end();
+ it++) {
+ job_t* job = *it;
+ if (!job->cancelled()) {
+ database[0] = table[0] = type[0] = params[0] = status[0] = '\0';
+ job->status(database, table, type, params, status);
+ callback(
+ job->id(),
+ database,
+ table,
+ type,
+ params,
+ status,
+ job->user_scheduled(),
+ job->scheduled_time(),
+ job->started_time(),
+ extra);
+ }
+ }
+
+ _mutex.unlock();
+}
+void* job_manager_t::thread_func(void* v) {
+ return ((tokudb::background::job_manager_t*)v)->real_thread_func();
+}
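+// Worker loop: each semaphore signal corresponds to one queued background
+// job; an interrupt (or the shutdown flag) ends the loop.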
+void* job_manager_t::real_thread_func() {
+ while (_shutdown == false) {
+ tokudb::thread::semaphore_t::E_WAIT res = _sem.wait();
+ if (res == tokudb::thread::semaphore_t::E_INTERRUPTED || _shutdown) {
+ break;
+ } else if (res == tokudb::thread::semaphore_t::E_SIGNALLED) {
+#if TOKUDB_DEBUG
+ if (TOKUDB_UNLIKELY(
+ tokudb::sysvars::debug_pause_background_job_manager)) {
+ _sem.signal();
+ tokudb::time::sleep_microsec(250000);
+ continue;
+ }
+#endif // TOKUDB_DEBUG
+
+ _mutex.lock();
+ assert_debug(_background_jobs.size() > 0);
+ job_t* job = _background_jobs.front();
+ run(job);
+ _background_jobs.pop_front();
+ _mutex.unlock();
+ delete job;
+ }
+ }
+ return NULL;
+}
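+// Run a job with _mutex released so new jobs can be scheduled or cancelled
+// while it executes; the lock is reacquired before touching shared state.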
+void job_manager_t::run(job_t* job) {
+ assert_debug(_mutex.is_owned_by_me());
+ if (!job->cancelled()) {
+ _mutex.unlock();
+ // do job
+ job->run();
+ // done job
+ _mutex.lock();
+ }
+ if (!job->cancelled()) {
+ job->destroy();
+ }
+}
+void job_manager_t::cancel(job_t* job) {
+ assert_debug(_mutex.is_owned_by_me());
+ job->cancel();
+}
+job_manager_t* _job_manager = NULL;
+
+bool initialize() {
+ assert_always(_job_manager == NULL);
+ _job_manager = new job_manager_t;
+ _job_manager->initialize();
+ return true;
+}
+bool destroy() {
+ _job_manager->destroy();
+ delete _job_manager;
+ _job_manager = NULL;
+ return true;
+}
+} // namespace background
+} // namespace tokudb
diff --git a/storage/tokudb/tokudb_background.h b/storage/tokudb/tokudb_background.h
new file mode 100644
index 00000000000..3786701fd0f
--- /dev/null
+++ b/storage/tokudb/tokudb_background.h
@@ -0,0 +1,212 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of TokuDB
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+    TokuDB is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ TokuDB is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with TokuDB. If not, see <http://www.gnu.org/licenses/>.
+
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#ifndef _TOKUDB_BACKGROUND_H
+#define _TOKUDB_BACKGROUND_H
+
+#include "hatoku_hton.h"
+#include <atomic>
+#include <list>
+
+namespace tokudb {
+namespace background {
+
+class job_manager_t {
+public:
+ class job_t {
+ public:
+ // NO default constructor
+ job_t() = delete;
+
+ job_t(bool user_scheduled);
+
+ virtual ~job_t();
+
+ // method that runs the job
+ inline void run();
+
+ // method that tells the job to cancel ASAP
+ inline void cancel();
+
+ // method that tells the job to clean up/free resources on cancel
+ // or completion
+ inline void destroy();
+
+ // method that returns a 'key' string for finding a specific job
+ // (or jobs) usually used to find jobs to cancel
+ virtual const char* key() = 0;
+
+ // method to get info for information schema, 255 chars per buffer
+ virtual void status(
+ char* database,
+ char* table,
+ char* type,
+ char* params,
+ char* status) = 0;
+
+ inline bool running() const;
+
+ inline bool cancelled() const;
+
+ inline uint64_t id() const;
+
+ inline bool user_scheduled() const;
+
+ inline time_t scheduled_time() const;
+
+ inline time_t started_time() const;
+
+ protected:
+        // derived classes implement this to actually run their job
+ virtual void on_run() {};
+
+ // derived classes implement this to cancel their job
+ virtual void on_cancel() {};
+
+ // derived classes implement this to clean up/free resources
+ virtual void on_destroy() {};
+
+ private:
+ static std::atomic<uint64_t> _next_id;
+ std::atomic<bool> _running;
+ std::atomic<bool> _cancelled;
+ uint64_t _id;
+ bool _user_scheduled;
+ time_t _scheduled_time;
+ time_t _started_time;
+ };
+
+ // pfn for iterate callback
+ typedef void (*pfn_iterate_t)(
+ uint64_t,
+ const char*,
+ const char*,
+ const char*,
+ const char*,
+ const char*,
+ bool,
+ time_t,
+ time_t,
+ void*);
+
+public:
+ void* operator new(size_t sz);
+ void operator delete(void* p);
+
+ job_manager_t();
+
+ ~job_manager_t();
+
+    // creates/initializes the singleton background job manager (bjm)
+ void initialize();
+
+    // destroys the bjm singleton
+    // cancels all jobs and frees all resources
+ void destroy();
+
+    // schedules or runs a job depending on the 'background' value
+    // job specifics all depend on the implementation of 'job'
+    // background jobs will be executed in a FIFO fashion
+    // two jobs with the same key can not run concurrently
+    // a foreground job will cancel a scheduled (not yet running)
+    // background job with the same key and run in its place; if a job
+    // with the same key is already running, the new job is rejected
+ bool run_job(job_t* newjob, bool background);
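+    //
+    // minimal usage sketch (my_job_t is a hypothetical subclass of job_t
+    // implementing key(), status() and the on_* hooks):
+    //   my_job_t* j = new my_job_t(/*user_scheduled=*/true);
+    //   if (!_job_manager->run_job(j, /*background=*/true))
+    //       delete j;  // rejected: a job with the same key already exists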
+
+ // cancels any background job with a matching key
+ bool cancel_job(const char* key);
+
+    // iterates currently pending and running background jobs, calling
+    // 'callback' once per job with the job's status fields and the
+    // 'extra' data provided by the caller
+ void iterate_jobs(pfn_iterate_t callback, void* extra) const;
+
+private:
+ static void* thread_func(void* v);
+
+ void* real_thread_func();
+
+    // _mutex MUST be held on entry, will release and reacquire on exit
+ void run(job_t* job);
+
+ // _mutex MUST be held on entry
+ void cancel(job_t* job);
+private:
+ typedef std::list<job_t*> jobs_t;
+
+ mutable tokudb::thread::mutex_t _mutex;
+ mutable tokudb::thread::semaphore_t _sem;
+ mutable tokudb::thread::thread_t _thread;
+ jobs_t _background_jobs;
+ jobs_t _foreground_jobs;
+ std::atomic<bool> _shutdown;
+};
+
+extern job_manager_t* _job_manager;
+
+bool initialize();
+bool destroy();
+
+inline void job_manager_t::job_t::run() {
+ if (!_cancelled) {
+ _running = true;
+ _started_time = ::time(0);
+ on_run();
+ _running = false;
+ }
+}
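+// Cancellation is cooperative: set the flag, give on_cancel() a chance to
+// interrupt the job, then poll until the running job observes the flag.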
+inline void job_manager_t::job_t::cancel() {
+ _cancelled = true;
+ if (_running)
+ on_cancel();
+ while (_running) tokudb::time::sleep_microsec(500000);
+ destroy();
+}
+inline void job_manager_t::job_t::destroy() {
+ on_destroy();
+}
+inline bool job_manager_t::job_t::running() const {
+ return _running;
+}
+inline bool job_manager_t::job_t::cancelled() const {
+ return _cancelled;
+}
+inline uint64_t job_manager_t::job_t::id() const {
+ return _id;
+}
+inline bool job_manager_t::job_t::user_scheduled() const {
+ return _user_scheduled;
+}
+inline time_t job_manager_t::job_t::scheduled_time() const {
+ return _scheduled_time;
+}
+inline time_t job_manager_t::job_t::started_time() const {
+ return _started_time;
+}
+} // namespace background
+} // namespace tokudb
+
+#endif // _TOKUDB_BACKGROUND_H
diff --git a/storage/tokudb/tokudb_buffer.h b/storage/tokudb/tokudb_buffer.h
index 1604ea61e2d..022f1b49643 100644
--- a/storage/tokudb/tokudb_buffer.h
+++ b/storage/tokudb/tokudb_buffer.h
@@ -23,38 +23,54 @@ Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
-#if !defined(_TOKUDB_BUFFER_H)
+#ifndef _TOKUDB_BUFFER_H
#define _TOKUDB_BUFFER_H
+#include "hatoku_defines.h"
+#include "tokudb_debug.h"
+#include "tokudb_thread.h"
#include "tokudb_vlq.h"
namespace tokudb {
-// A Buffer manages a contiguous chunk of memory and supports appending new data to the end of the buffer, and
-// consuming chunks from the beginning of the buffer. The buffer will reallocate memory when appending
-// new data to a full buffer.
+// A Buffer manages a contiguous chunk of memory and supports appending new
+// data to the end of the buffer, and consuming chunks from the beginning of
+// the buffer. The buffer will reallocate memory when appending new data to
+// a full buffer.
class buffer {
public:
- buffer(void *the_data, size_t s, size_t l) : m_data(the_data), m_size(s), m_limit(l), m_is_static(true) {
+ inline buffer(
+ void* the_data,
+ size_t s,
+ size_t l) :
+ m_data(the_data),
+ m_size(s),
+ m_limit(l),
+ m_is_static(true) {
}
- buffer() : m_data(NULL), m_size(0), m_limit(0), m_is_static(false) {
+ inline buffer() :
+ m_data(NULL),
+ m_size(0),
+ m_limit(0),
+ m_is_static(false) {
}
virtual ~buffer() {
if (!m_is_static)
free(m_data);
}
- // Return a pointer to the end of the buffer suitable for appending a fixed number of bytes.
- void *append_ptr(size_t s) {
+ // Return a pointer to the end of the buffer suitable for appending a
+ // fixed number of bytes.
+ void* append_ptr(size_t s) {
maybe_realloc(s);
- void *p = (char *) m_data + m_size;
+ void* p = (char*)m_data + m_size;
m_size += s;
return p;
}
// Append bytes to the buffer
- void append(void *p, size_t s) {
+ void append(void* p, size_t s) {
memcpy(append_ptr(s), p, s);
}
@@ -68,63 +84,70 @@ public:
return s;
}
- // Return a pointer to the next location in the buffer where bytes are consumed from.
- void *consume_ptr(size_t s) {
+ // Return a pointer to the next location in the buffer where bytes are
+ // consumed from.
+ void* consume_ptr(size_t s) {
if (m_size + s > m_limit)
return NULL;
- void *p = (char *) m_data + m_size;
+ void* p = (char*)m_data + m_size;
m_size += s;
return p;
}
// Consume bytes from the buffer.
- void consume(void *p, size_t s) {
+ void consume(void* p, size_t s) {
memcpy(p, consume_ptr(s), s);
}
// Consume an unsigned int from the buffer.
- // Returns 0 if the unsigned int could not be decoded, probably because the buffer is too short.
- // Otherwise return the number of bytes consumed, and stuffs the decoded number in *p.
- template<class T> size_t consume_ui(T *p) {
- size_t s = tokudb::vlq_decode_ui<T>(p, (char *) m_data + m_size, m_limit - m_size);
+ // Returns 0 if the unsigned int could not be decoded, probably because
+ // the buffer is too short.
+ // Otherwise return the number of bytes consumed, and stuffs the decoded
+ // number in *p.
+ template<class T> size_t consume_ui(T* p) {
+ size_t s = tokudb::vlq_decode_ui<T>(
+ p,
+ (char*)m_data + m_size,
+ m_limit - m_size);
m_size += s;
return s;
}
// Write p_length bytes at an offset in the buffer
- void write(void *p, size_t p_length, size_t offset) {
- assert(offset + p_length <= m_size);
- memcpy((char *)m_data + offset, p, p_length);
+ void write(void* p, size_t p_length, size_t offset) {
+ assert_always(offset + p_length <= m_size);
+ memcpy((char*)m_data + offset, p, p_length);
}
// Read p_length bytes at an offset in the buffer
- void read(void *p, size_t p_length, size_t offset) {
- assert(offset + p_length <= m_size);
- memcpy(p, (char *)m_data + offset, p_length);
+ void read(void* p, size_t p_length, size_t offset) {
+ assert_always(offset + p_length <= m_size);
+ memcpy(p, (char*)m_data + offset, p_length);
}
- // Replace a field in the buffer with new data. If the new data size is different, then readjust the
- // size of the buffer and move things around.
- void replace(size_t offset, size_t old_s, void *new_p, size_t new_s) {
- assert(offset + old_s <= m_size);
+ // Replace a field in the buffer with new data. If the new data size is
+ // different, then readjust the size of the buffer and move things around.
+ void replace(size_t offset, size_t old_s, void* new_p, size_t new_s) {
+ assert_always(offset + old_s <= m_size);
if (new_s > old_s)
maybe_realloc(new_s - old_s);
- char *data_offset = (char *) m_data + offset;
+ char* data_offset = (char*)m_data + offset;
if (new_s != old_s) {
size_t n = m_size - (offset + old_s);
- assert(offset + new_s + n <= m_limit && offset + old_s + n <= m_limit);
+ assert_always(
+ offset + new_s + n <= m_limit && offset + old_s + n <= m_limit);
memmove(data_offset + new_s, data_offset + old_s, n);
if (new_s > old_s)
m_size += new_s - old_s;
else
m_size -= old_s - new_s;
- assert(m_size <= m_limit);
+ assert_always(m_size <= m_limit);
}
memcpy(data_offset, new_p, new_s);
}
// Return a pointer to the data in the buffer
- void *data() const {
+ void* data() const {
return m_data;
}
@@ -145,15 +168,15 @@ private:
size_t new_limit = m_limit * 2;
if (new_limit < m_size + s)
new_limit = m_size + s;
- assert(!m_is_static);
+ assert_always(!m_is_static);
void *new_data = realloc(m_data, new_limit);
- assert(new_data != NULL);
+ assert_always(new_data != NULL);
m_data = new_data;
m_limit = new_limit;
}
}
private:
- void *m_data;
+ void* m_data;
size_t m_size;
size_t m_limit;
bool m_is_static;
diff --git a/storage/tokudb/tokudb_card.h b/storage/tokudb/tokudb_card.h
index 04eac731aeb..f649c2d887f 100644
--- a/storage/tokudb/tokudb_card.h
+++ b/storage/tokudb/tokudb_card.h
@@ -27,48 +27,54 @@ namespace tokudb {
uint compute_total_key_parts(TABLE_SHARE *table_share) {
uint total_key_parts = 0;
for (uint i = 0; i < table_share->keys; i++) {
- total_key_parts += get_key_parts(&table_share->key_info[i]);
+ total_key_parts += table_share->key_info[i].user_defined_key_parts;
}
return total_key_parts;
}
- // Set the key_info cardinality counters for the table.
- void set_card_in_key_info(TABLE *table, uint rec_per_keys, uint64_t rec_per_key[]) {
- uint next_key_part = 0;
- for (uint i = 0; i < table->s->keys; i++) {
- bool is_unique_key = (i == table->s->primary_key) || (table->key_info[i].flags & HA_NOSAME);
- uint num_key_parts = get_key_parts(&table->key_info[i]);
- for (uint j = 0; j < num_key_parts; j++) {
- assert(next_key_part < rec_per_keys);
- ulong val = rec_per_key[next_key_part++];
- if (is_unique_key && j == num_key_parts-1)
- val = 1;
- table->key_info[i].rec_per_key[j] = val;
- }
- }
- }
-
// Put the cardinality counters into the status dictionary.
- int set_card_in_status(DB *status_db, DB_TXN *txn, uint rec_per_keys, uint64_t rec_per_key[]) {
+ int set_card_in_status(
+ DB* status_db,
+ DB_TXN* txn,
+ uint rec_per_keys,
+ const uint64_t rec_per_key[]) {
+
// encode cardinality into the buffer
tokudb::buffer b;
size_t s;
s = b.append_ui<uint32_t>(rec_per_keys);
- assert(s > 0);
+ assert_always(s > 0);
for (uint i = 0; i < rec_per_keys; i++) {
s = b.append_ui<uint64_t>(rec_per_key[i]);
- assert(s > 0);
+ assert_always(s > 0);
}
// write cardinality to status
- int error = write_to_status(status_db, hatoku_cardinality, b.data(), b.size(), txn);
+ int error =
+ tokudb::metadata::write(
+ status_db,
+ hatoku_cardinality,
+ b.data(),
+ b.size(),
+ txn);
return error;
}
// Get the cardinality counters from the status dictionary.
- int get_card_from_status(DB *status_db, DB_TXN *txn, uint rec_per_keys, uint64_t rec_per_key[]) {
+ int get_card_from_status(
+ DB* status_db,
+ DB_TXN* txn,
+ uint rec_per_keys,
+ uint64_t rec_per_key[]) {
+
// read cardinality from status
- void *buf = 0; size_t buf_size = 0;
- int error = get_status_realloc(status_db, txn, hatoku_cardinality, &buf, &buf_size);
+ void* buf = 0; size_t buf_size = 0;
+ int error =
+ tokudb::metadata::read_realloc(
+ status_db,
+ txn,
+ hatoku_cardinality,
+ &buf,
+ &buf_size);
if (error == 0) {
// decode cardinality from the buffer
tokudb::buffer b(buf, 0, buf_size);
@@ -93,12 +99,17 @@ namespace tokudb {
}
// Delete the cardinality counters from the status dictionary.
- int delete_card_from_status(DB *status_db, DB_TXN *txn) {
- int error = remove_from_status(status_db, hatoku_cardinality, txn);
+ int delete_card_from_status(DB* status_db, DB_TXN* txn) {
+ int error =
+ tokudb::metadata::remove(status_db, hatoku_cardinality, txn);
return error;
}
- bool find_index_of_key(const char *key_name, TABLE_SHARE *table_share, uint *index_offset_ptr) {
+ bool find_index_of_key(
+ const char* key_name,
+ TABLE_SHARE* table_share,
+ uint* index_offset_ptr) {
+
for (uint i = 0; i < table_share->keys; i++) {
if (strcmp(key_name, table_share->key_info[i].name) == 0) {
*index_offset_ptr = i;
@@ -113,16 +124,30 @@ namespace tokudb {
dest[i] = src[i];
}
- // Altered table cardinality = select cardinality data from current table cardinality for keys that exist
+ // Altered table cardinality = select cardinality data from current table
+ // cardinality for keys that exist
// in the altered table and the current table.
- int alter_card(DB *status_db, DB_TXN *txn, TABLE_SHARE *table_share, TABLE_SHARE *altered_table_share) {
+ int alter_card(
+ DB* status_db,
+ DB_TXN *txn,
+ TABLE_SHARE* table_share,
+ TABLE_SHARE* altered_table_share) {
+
int error;
// read existing cardinality data from status
- uint table_total_key_parts = tokudb::compute_total_key_parts(table_share);
+ uint table_total_key_parts =
+ tokudb::compute_total_key_parts(table_share);
+
uint64_t rec_per_key[table_total_key_parts];
- error = get_card_from_status(status_db, txn, table_total_key_parts, rec_per_key);
+ error =
+ get_card_from_status(
+ status_db,
+ txn,
+ table_total_key_parts,
+ rec_per_key);
// set altered records per key to unknown
- uint altered_table_total_key_parts = tokudb::compute_total_key_parts(altered_table_share);
+ uint altered_table_total_key_parts =
+ tokudb::compute_total_key_parts(altered_table_share);
uint64_t altered_rec_per_key[altered_table_total_key_parts];
for (uint i = 0; i < altered_table_total_key_parts; i++)
altered_rec_per_key[i] = 0;
@@ -131,121 +156,37 @@ namespace tokudb {
uint orig_key_parts = 0;
for (uint i = 0; i < table_share->keys; i++) {
orig_key_offset[i] = orig_key_parts;
- orig_key_parts += get_key_parts(&table_share->key_info[i]);
+ orig_key_parts += table_share->key_info[i].user_defined_key_parts;
}
// if orig card data exists, then use it to compute new card data
if (error == 0) {
uint next_key_parts = 0;
for (uint i = 0; error == 0 && i < altered_table_share->keys; i++) {
- uint ith_key_parts = get_key_parts(&altered_table_share->key_info[i]);
+ uint ith_key_parts =
+ altered_table_share->key_info[i].user_defined_key_parts;
uint orig_key_index;
- if (find_index_of_key(altered_table_share->key_info[i].name, table_share, &orig_key_index)) {
- copy_card(&altered_rec_per_key[next_key_parts], &rec_per_key[orig_key_offset[orig_key_index]], ith_key_parts);
+ if (find_index_of_key(
+ altered_table_share->key_info[i].name,
+ table_share,
+ &orig_key_index)) {
+ copy_card(
+ &altered_rec_per_key[next_key_parts],
+ &rec_per_key[orig_key_offset[orig_key_index]],
+ ith_key_parts);
}
next_key_parts += ith_key_parts;
}
}
- if (error == 0)
- error = set_card_in_status(status_db, txn, altered_table_total_key_parts, altered_rec_per_key);
- else
- error = delete_card_from_status(status_db, txn);
- return error;
- }
-
- struct analyze_card_cursor_callback_extra {
- int (*analyze_progress)(void *extra, uint64_t rows);
- void *analyze_extra;
- uint64_t *rows;
- uint64_t *deleted_rows;
- };
-
- bool analyze_card_cursor_callback(void *extra, uint64_t deleted_rows) {
- analyze_card_cursor_callback_extra *a_extra = static_cast<analyze_card_cursor_callback_extra *>(extra);
- *a_extra->deleted_rows += deleted_rows;
- int r = a_extra->analyze_progress(a_extra->analyze_extra, *a_extra->rows);
- sql_print_information("tokudb analyze_card_cursor_callback %u %" PRIu64 " %" PRIu64, r, *a_extra->deleted_rows, deleted_rows);
- return r != 0;
- }
-
- // Compute records per key for all key parts of the ith key of the table.
- // For each key part, put records per key part in *rec_per_key_part[key_part_index].
- // Returns 0 if success, otherwise an error number.
- // TODO statistical dives into the FT
- int analyze_card(DB *db, DB_TXN *txn, bool is_unique, uint64_t num_key_parts, uint64_t *rec_per_key_part,
- int (*key_compare)(DB *, const DBT *, const DBT *, uint),
- int (*analyze_progress)(void *extra, uint64_t rows), void *progress_extra,
- uint64_t *return_rows, uint64_t *return_deleted_rows) {
- int error = 0;
- uint64_t rows = 0;
- uint64_t deleted_rows = 0;
- uint64_t unique_rows[num_key_parts];
- if (is_unique && num_key_parts == 1) {
- // dont compute for unique keys with a single part. we already know the answer.
- rows = unique_rows[0] = 1;
+ if (error == 0) {
+ error =
+ set_card_in_status(
+ status_db,
+ txn,
+ altered_table_total_key_parts,
+ altered_rec_per_key);
} else {
- DBC *cursor = NULL;
- error = db->cursor(db, txn, &cursor, 0);
- if (error == 0) {
- analyze_card_cursor_callback_extra e = { analyze_progress, progress_extra, &rows, &deleted_rows };
- cursor->c_set_check_interrupt_callback(cursor, analyze_card_cursor_callback, &e);
- for (uint64_t i = 0; i < num_key_parts; i++)
- unique_rows[i] = 1;
- // stop looking when the entire dictionary was analyzed, or a cap on execution time was reached, or the analyze was killed.
- DBT key = {}; key.flags = DB_DBT_REALLOC;
- DBT prev_key = {}; prev_key.flags = DB_DBT_REALLOC;
- while (1) {
- error = cursor->c_get(cursor, &key, 0, DB_NEXT);
- if (error != 0) {
- if (error == DB_NOTFOUND || error == TOKUDB_INTERRUPTED)
- error = 0; // not an error
- break;
- }
- rows++;
- // first row is a unique row, otherwise compare with the previous key
- bool copy_key = false;
- if (rows == 1) {
- copy_key = true;
- } else {
- // compare this key with the previous key. ignore appended PK for SK's.
- // TODO if a prefix is different, then all larger keys that include the prefix are also different.
- // TODO if we are comparing the entire primary key or the entire unique secondary key, then the cardinality must be 1,
- // so we can avoid computing it.
- for (uint64_t i = 0; i < num_key_parts; i++) {
- int cmp = key_compare(db, &prev_key, &key, i+1);
- if (cmp != 0) {
- unique_rows[i]++;
- copy_key = true;
- }
- }
- }
- // prev_key = key
- if (copy_key) {
- prev_key.data = realloc(prev_key.data, key.size);
- assert(prev_key.data);
- prev_key.size = key.size;
- memcpy(prev_key.data, key.data, prev_key.size);
- }
- // check for limit
- if (analyze_progress && (rows % 1000) == 0) {
- error = analyze_progress(progress_extra, rows);
- if (error)
- break;
- }
- }
- // cleanup
- free(key.data);
- free(prev_key.data);
- int close_error = cursor->c_close(cursor);
- assert(close_error == 0);
- }
+ error = delete_card_from_status(status_db, txn);
}
- // return cardinality
- if (return_rows)
- *return_rows = rows;
- if (return_deleted_rows)
- *return_deleted_rows = deleted_rows;
- for (uint64_t i = 0; i < num_key_parts; i++)
- rec_per_key_part[i] = rows / unique_rows[i];
return error;
}
}
diff --git a/storage/tokudb/tokudb_debug.h b/storage/tokudb/tokudb_debug.h
new file mode 100644
index 00000000000..46bd65c605a
--- /dev/null
+++ b/storage/tokudb/tokudb_debug.h
@@ -0,0 +1,227 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+/* -*- mode: C; c-basic-offset: 4 -*- */
+#ident "$Id$"
+/*======
+This file is part of TokuDB
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+    TokuDB is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ TokuDB is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with TokuDB. If not, see <http://www.gnu.org/licenses/>.
+
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#ifndef _TOKUDB_DEBUG_H
+#define _TOKUDB_DEBUG_H
+
+#include "hatoku_defines.h"
+
+#define TOKU_INCLUDE_BACKTRACE 0
+#if TOKU_INCLUDE_BACKTRACE
+static void tokudb_backtrace(void);
+#endif
+
+// tokudb debug tracing for tokudb_debug declared in tokudb_sysvars.h/.cc
+#define TOKUDB_DEBUG_INIT (1<<0)
+#define TOKUDB_DEBUG_OPEN (1<<1)
+#define TOKUDB_DEBUG_ENTER (1<<2)
+#define TOKUDB_DEBUG_RETURN (1<<3)
+#define TOKUDB_DEBUG_ERROR (1<<4)
+#define TOKUDB_DEBUG_TXN (1<<5)
+#define TOKUDB_DEBUG_AUTO_INCREMENT (1<<6)
+#define TOKUDB_DEBUG_INDEX_KEY (1<<7)
+#define TOKUDB_DEBUG_LOCK (1<<8)
+#define TOKUDB_DEBUG_CHECK_KEY (1<<9)
+#define TOKUDB_DEBUG_HIDE_DDL_LOCK_ERRORS (1<<10)
+#define TOKUDB_DEBUG_ALTER_TABLE (1<<11)
+#define TOKUDB_DEBUG_UPSERT (1<<12)
+#define TOKUDB_DEBUG_CHECK (1<<13)
+#define TOKUDB_DEBUG_ANALYZE (1<<14)
+#define TOKUDB_DEBUG_XA (1<<15)
+#define TOKUDB_DEBUG_SHARE (1<<16)
+
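+// Unconditional trace to stderr, prefixed with the thread id, source
+// location and function name.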
+#define TOKUDB_TRACE(_fmt, ...) { \
+ fprintf(stderr, "%u %s:%u %s " _fmt "\n", tokudb::thread::my_tid(), \
+ __FILE__, __LINE__, __FUNCTION__, ##__VA_ARGS__); \
+}
+
+#define TOKUDB_DEBUG_FLAGS(_flags) \
+ (tokudb::sysvars::debug & _flags)
+
+#define TOKUDB_TRACE_FOR_FLAGS(_flags, _fmt, ...) { \
+ if (TOKUDB_UNLIKELY(TOKUDB_DEBUG_FLAGS(_flags))) { \
+ TOKUDB_TRACE(_fmt, ##__VA_ARGS__); \
+ } \
+}
+
+#define TOKUDB_DBUG_ENTER(_fmt, ...) { \
+ if (TOKUDB_UNLIKELY(tokudb::sysvars::debug & TOKUDB_DEBUG_ENTER)) { \
+ TOKUDB_TRACE(_fmt, ##__VA_ARGS__); \
+ } \
+} \
+ DBUG_ENTER(__FUNCTION__);
+
+#define TOKUDB_DBUG_RETURN(r) { \
+ int rr = (r); \
+ if (TOKUDB_UNLIKELY((tokudb::sysvars::debug & TOKUDB_DEBUG_RETURN) || \
+ (rr != 0 && (tokudb::sysvars::debug & TOKUDB_DEBUG_ERROR)))) { \
+ TOKUDB_TRACE("return %d", rr); \
+ } \
+ DBUG_RETURN(rr); \
+}
+
+#define TOKUDB_HANDLER_TRACE(_fmt, ...) \
+ fprintf(stderr, "%u %p %s:%u ha_tokudb::%s " _fmt "\n", \
+ tokudb::thread::my_tid(), this, __FILE__, __LINE__, \
+ __FUNCTION__, ##__VA_ARGS__);
+
+#define TOKUDB_HANDLER_TRACE_FOR_FLAGS(_flags, _fmt, ...) { \
+ if (TOKUDB_UNLIKELY(TOKUDB_DEBUG_FLAGS(_flags))) { \
+ TOKUDB_HANDLER_TRACE(_fmt, ##__VA_ARGS__); \
+ } \
+}
+
+
+#define TOKUDB_HANDLER_DBUG_ENTER(_fmt, ...) { \
+ if (TOKUDB_UNLIKELY(tokudb::sysvars::debug & TOKUDB_DEBUG_ENTER)) { \
+ TOKUDB_HANDLER_TRACE(_fmt, ##__VA_ARGS__); \
+ } \
+} \
+ DBUG_ENTER(__FUNCTION__);
+
+#define TOKUDB_HANDLER_DBUG_RETURN(r) { \
+ int rr = (r); \
+ if (TOKUDB_UNLIKELY((tokudb::sysvars::debug & TOKUDB_DEBUG_RETURN) || \
+ (rr != 0 && (tokudb::sysvars::debug & TOKUDB_DEBUG_ERROR)))) { \
+ TOKUDB_HANDLER_TRACE("return %d", rr); \
+ } \
+ DBUG_RETURN(rr); \
+}
+
+#define TOKUDB_HANDLER_DBUG_RETURN_DOUBLE(r) { \
+ double rr = (r); \
+ if (TOKUDB_UNLIKELY(tokudb::sysvars::debug & TOKUDB_DEBUG_RETURN)) { \
+ TOKUDB_HANDLER_TRACE("return %f", rr); \
+ } \
+ DBUG_RETURN(rr); \
+}
+
+#define TOKUDB_HANDLER_DBUG_RETURN_PTR(r) { \
+ if (TOKUDB_UNLIKELY(tokudb::sysvars::debug & TOKUDB_DEBUG_RETURN)) { \
+ TOKUDB_HANDLER_TRACE("return 0x%p", r); \
+ } \
+ DBUG_RETURN(r); \
+}
+
+#define TOKUDB_HANDLER_DBUG_VOID_RETURN { \
+ if (TOKUDB_UNLIKELY(tokudb::sysvars::debug & TOKUDB_DEBUG_RETURN)) { \
+ TOKUDB_HANDLER_TRACE("return"); \
+ } \
+ DBUG_VOID_RETURN; \
+}
+
+#define TOKUDB_SHARE_TRACE(_fmt, ...) \
+ fprintf(stderr, "%u %p %s:%u TOUDB_SHARE::%s " _fmt "\n", \
+ tokudb::thread::my_tid(), this, __FILE__, __LINE__, \
+ __FUNCTION__, ##__VA_ARGS__);
+
+#define TOKUDB_SHARE_TRACE_FOR_FLAGS(_flags, _fmt, ...) { \
+ if (TOKUDB_UNLIKELY(TOKUDB_DEBUG_FLAGS(_flags))) { \
+ TOKUDB_SHARE_TRACE(_fmt, ##__VA_ARGS__); \
+ } \
+}
+
+#define TOKUDB_SHARE_DBUG_ENTER(_fmt, ...) { \
+ if (TOKUDB_UNLIKELY((tokudb::sysvars::debug & TOKUDB_DEBUG_ENTER) || \
+ (tokudb::sysvars::debug & TOKUDB_DEBUG_SHARE))) { \
+ TOKUDB_SHARE_TRACE(_fmt, ##__VA_ARGS__); \
+ } \
+} \
+ DBUG_ENTER(__FUNCTION__);
+
+#define TOKUDB_SHARE_DBUG_RETURN(r) { \
+ int rr = (r); \
+ if (TOKUDB_UNLIKELY((tokudb::sysvars::debug & TOKUDB_DEBUG_RETURN) || \
+ (tokudb::sysvars::debug & TOKUDB_DEBUG_SHARE) || \
+ (rr != 0 && (tokudb::sysvars::debug & TOKUDB_DEBUG_ERROR)))) { \
+ TOKUDB_SHARE_TRACE("return %d", rr); \
+ } \
+ DBUG_RETURN(rr); \
+}
+
+#define TOKUDB_SHARE_DBUG_RETURN_DOUBLE(r) { \
+ double rr = (r); \
+ if (TOKUDB_UNLIKELY((tokudb::sysvars::debug & TOKUDB_DEBUG_RETURN) || \
+ (tokudb::sysvars::debug & TOKUDB_DEBUG_SHARE))) { \
+ TOKUDB_SHARE_TRACE("return %f", rr); \
+ } \
+ DBUG_RETURN(rr); \
+}
+
+#define TOKUDB_SHARE_DBUG_RETURN_PTR(r) { \
+ if (TOKUDB_UNLIKELY((tokudb::sysvars::debug & TOKUDB_DEBUG_RETURN) || \
+ (tokudb::sysvars::debug & TOKUDB_DEBUG_SHARE))) { \
+ TOKUDB_SHARE_TRACE("return 0x%p", r); \
+ } \
+ DBUG_RETURN(r); \
+}
+
+#define TOKUDB_SHARE_DBUG_VOID_RETURN() { \
+ if (TOKUDB_UNLIKELY((tokudb::sysvars::debug & TOKUDB_DEBUG_RETURN) || \
+ (tokudb::sysvars::debug & TOKUDB_DEBUG_SHARE))) { \
+ TOKUDB_SHARE_TRACE("return"); \
+ } \
+ DBUG_VOID_RETURN; \
+}
+
+
+#define TOKUDB_DBUG_DUMP(s, p, len) \
+{ \
+ TOKUDB_TRACE("%s", s); \
+ uint i; \
+ for (i=0; i<len; i++) { \
+ fprintf(stderr, "%2.2x", ((uchar*)p)[i]); \
+ } \
+ fprintf(stderr, "\n"); \
+}
+
+// The intention is for a failed handlerton assert to invoke a failed assert
+// in the fractal tree layer, which dumps engine status to the error log.
+void toku_hton_assert_fail(
+ const char* /*expr_as_string*/,
+ const char* /*fun*/,
+ const char* /*file*/,
+ int /*line*/,
+ int /*errno*/)
+ __attribute__((__visibility__("default")))
+ __attribute__((__noreturn__));
+
+#define assert_always(expr) ((expr) ? (void)0 : \
+ toku_hton_assert_fail(#expr, __FUNCTION__, __FILE__, __LINE__, errno))
+
+#undef assert
+#define assert(expr) assert_always(expr)
+
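+// Note: the #undef/#define above replaces the standard assert() with
+// assert_always() for every file that includes this header, so these
+// checks stay active even in release builds.
+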
+#ifdef TOKUDB_DEBUG
+ #define assert_debug(expr) ((expr) ? (void)0 : \
+ toku_hton_assert_fail(#expr, __FUNCTION__, __FILE__, __LINE__, errno))
+#else
+ #define assert_debug(expr) (void)0
+#endif // TOKUDB_DEBUG
+
+#define assert_unreachable __builtin_unreachable
+
+#endif // _TOKUDB_DEBUG_H
diff --git a/storage/tokudb/tokudb_information_schema.cc b/storage/tokudb/tokudb_information_schema.cc
new file mode 100644
index 00000000000..e69a7899b45
--- /dev/null
+++ b/storage/tokudb/tokudb_information_schema.cc
@@ -0,0 +1,1210 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+/* -*- mode: C; c-basic-offset: 4 -*- */
+#ident "$Id$"
+/*======
+This file is part of TokuDB
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+    TokuDB is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ TokuDB is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with TokuDB. If not, see <http://www.gnu.org/licenses/>.
+
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "hatoku_hton.h"
+#include "tokudb_information_schema.h"
+#include "sql_time.h"
+#include "tokudb_background.h"
+
+
+namespace tokudb {
+namespace information_schema {
+
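+// Store a time_t into an information-schema DATETIME field; a zero
+// timestamp is stored as SQL NULL.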
+static void field_store_time_t(Field* field, time_t time) {
+ MYSQL_TIME my_time;
+ struct tm tm_time;
+
+ if (time) {
+ localtime_r(&time, &tm_time);
+ localtime_to_TIME(&my_time, &tm_time);
+ my_time.time_type = MYSQL_TIMESTAMP_DATETIME;
+#ifdef MARIA_PLUGIN_INTERFACE_VERSION
+ field->store_time(&my_time);
+#else
+ field->store_time(&my_time, MYSQL_TIMESTAMP_DATETIME);
+#endif
+ field->set_notnull();
+ } else {
+ field->set_null();
+ }
+}
+
+st_mysql_information_schema trx_information_schema = {
+ MYSQL_INFORMATION_SCHEMA_INTERFACE_VERSION
+};
+
+ST_FIELD_INFO trx_field_info[] = {
+ {"trx_id", 0, MYSQL_TYPE_LONGLONG, 0, 0, NULL, SKIP_OPEN_TABLE },
+ {"trx_mysql_thread_id", 0, MYSQL_TYPE_LONGLONG, 0, 0, NULL, SKIP_OPEN_TABLE },
+ {"trx_time", 0, MYSQL_TYPE_LONGLONG, 0, 0, NULL, SKIP_OPEN_TABLE },
+ {NULL, 0, MYSQL_TYPE_NULL, 0, 0, NULL, SKIP_OPEN_TABLE}
+};
+
+struct trx_extra_t {
+ THD *thd;
+ TABLE *table;
+};
+
+int trx_callback(
+ DB_TXN* txn,
+ iterate_row_locks_callback iterate_locks,
+ void* locks_extra,
+ void *extra) {
+
+ uint64_t txn_id = txn->id64(txn);
+ uint64_t client_id = txn->get_client_id(txn);
+ uint64_t start_time = txn->get_start_time(txn);
+ trx_extra_t* e = reinterpret_cast<struct trx_extra_t*>(extra);
+ THD* thd = e->thd;
+ TABLE* table = e->table;
+ table->field[0]->store(txn_id, false);
+ table->field[1]->store(client_id, false);
+ uint64_t tnow = (uint64_t) ::time(NULL);
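+    // trx_time is the transaction's age in seconds, clamped at 0 in case
+    // the start time is in the future (e.g. clock adjustment)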
+ table->field[2]->store(tnow >= start_time ? tnow - start_time : 0, false);
+ int error = schema_table_store_record(thd, table);
+ if (!error && thd_killed(thd))
+ error = ER_QUERY_INTERRUPTED;
+ return error;
+}
+
+#if MYSQL_VERSION_ID >= 50600
+int trx_fill_table(THD* thd, TABLE_LIST* tables, Item* cond) {
+#else
+int trx_fill_table(THD* thd, TABLE_LIST* tables, COND* cond) {
+#endif
+ TOKUDB_DBUG_ENTER("");
+ int error;
+
+ tokudb_hton_initialized_lock.lock_read();
+
+ if (!tokudb_hton_initialized) {
+ error = ER_PLUGIN_IS_NOT_LOADED;
+ my_error(error, MYF(0), tokudb_hton_name);
+ } else {
+ trx_extra_t e = { thd, tables->table };
+ error = db_env->iterate_live_transactions(db_env, trx_callback, &e);
+ if (error)
+ my_error(ER_GET_ERRNO, MYF(0), error, tokudb_hton_name);
+ }
+
+ tokudb_hton_initialized_lock.unlock();
+ TOKUDB_DBUG_RETURN(error);
+}
+
+int trx_init(void* p) {
+ ST_SCHEMA_TABLE *schema = (ST_SCHEMA_TABLE*) p;
+ schema->fields_info = trx_field_info;
+ schema->fill_table = trx_fill_table;
+ return 0;
+}
+
+int trx_done(void* p) {
+ return 0;
+}
+
+st_mysql_plugin trx = {
+ MYSQL_INFORMATION_SCHEMA_PLUGIN,
+ &trx_information_schema,
+ "TokuDB_trx",
+ "Percona",
+ "Percona TokuDB Storage Engine with Fractal Tree(tm) Technology",
+ PLUGIN_LICENSE_GPL,
+ trx_init, /* plugin init */
+ trx_done, /* plugin deinit */
+ TOKUDB_PLUGIN_VERSION,
+ NULL, /* status variables */
+ NULL, /* system variables */
+#ifdef MARIA_PLUGIN_INTERFACE_VERSION
+ tokudb::sysvars::version,
+ MariaDB_PLUGIN_MATURITY_STABLE /* maturity */
+#else
+ NULL, /* config options */
+ 0, /* flags */
+#endif
+};
+
+
+
+st_mysql_information_schema lock_waits_information_schema = {
+ MYSQL_INFORMATION_SCHEMA_INTERFACE_VERSION
+};
+
+ST_FIELD_INFO lock_waits_field_info[] = {
+ {"requesting_trx_id", 0, MYSQL_TYPE_LONGLONG, 0, 0, NULL, SKIP_OPEN_TABLE },
+ {"blocking_trx_id", 0, MYSQL_TYPE_LONGLONG, 0, 0, NULL, SKIP_OPEN_TABLE },
+ {"lock_waits_dname", 256, MYSQL_TYPE_STRING, 0, 0, NULL, SKIP_OPEN_TABLE },
+ {"lock_waits_key_left", 256, MYSQL_TYPE_STRING, 0, 0, NULL, SKIP_OPEN_TABLE },
+ {"lock_waits_key_right", 256, MYSQL_TYPE_STRING, 0, 0, NULL, SKIP_OPEN_TABLE },
+ {"lock_waits_start_time", 0, MYSQL_TYPE_LONGLONG, 0, 0, NULL, SKIP_OPEN_TABLE },
+ {"lock_waits_table_schema", 256, MYSQL_TYPE_STRING, 0, 0, NULL, SKIP_OPEN_TABLE },
+ {"lock_waits_table_name", 256, MYSQL_TYPE_STRING, 0, 0, NULL, SKIP_OPEN_TABLE },
+ {"lock_waits_table_dictionary_name", 256, MYSQL_TYPE_STRING, 0, 0, NULL, SKIP_OPEN_TABLE },
+ {NULL, 0, MYSQL_TYPE_NULL, 0, 0, NULL, SKIP_OPEN_TABLE}
+};
+
+struct lock_waits_extra_t {
+ THD* thd;
+ TABLE* table;
+};
+
+int lock_waits_callback(
+ DB* db,
+ uint64_t requesting_txnid,
+ const DBT* left_key,
+ const DBT* right_key,
+ uint64_t blocking_txnid,
+ uint64_t start_time,
+ void *extra) {
+
+ lock_waits_extra_t* e =
+ reinterpret_cast<struct lock_waits_extra_t*>(extra);
+ THD* thd = e->thd;
+ TABLE* table = e->table;
+ table->field[0]->store(requesting_txnid, false);
+ table->field[1]->store(blocking_txnid, false);
+ const char* dname = tokudb_get_index_name(db);
+ size_t dname_length = strlen(dname);
+ table->field[2]->store(dname, dname_length, system_charset_info);
+ String left_str;
+ tokudb_pretty_left_key(db, left_key, &left_str);
+ table->field[3]->store(
+ left_str.ptr(),
+ left_str.length(),
+ system_charset_info);
+ String right_str;
+ tokudb_pretty_right_key(db, right_key, &right_str);
+ table->field[4]->store(
+ right_str.ptr(),
+ right_str.length(),
+ system_charset_info);
+ table->field[5]->store(start_time, false);
+
+ String database_name, table_name, dictionary_name;
+ tokudb_split_dname(dname, database_name, table_name, dictionary_name);
+ table->field[6]->store(
+ database_name.c_ptr(),
+ database_name.length(),
+ system_charset_info);
+ table->field[7]->store(
+ table_name.c_ptr(),
+ table_name.length(),
+ system_charset_info);
+ table->field[8]->store(
+ dictionary_name.c_ptr(),
+ dictionary_name.length(),
+ system_charset_info);
+
+ int error = schema_table_store_record(thd, table);
+
+ if (!error && thd_killed(thd))
+ error = ER_QUERY_INTERRUPTED;
+
+ return error;
+}
+
+#if MYSQL_VERSION_ID >= 50600
+int lock_waits_fill_table(THD* thd, TABLE_LIST* tables, Item* cond) {
+#else
+int lock_waits_fill_table(THD* thd, TABLE_LIST* tables, COND* cond) {
+#endif
+ TOKUDB_DBUG_ENTER("");
+ int error;
+
+ tokudb_hton_initialized_lock.lock_read();
+
+ if (!tokudb_hton_initialized) {
+ error = ER_PLUGIN_IS_NOT_LOADED;
+ my_error(error, MYF(0), tokudb_hton_name);
+ } else {
+ lock_waits_extra_t e = { thd, tables->table };
+ error = db_env->iterate_pending_lock_requests(
+ db_env,
+ lock_waits_callback,
+ &e);
+ if (error)
+ my_error(ER_GET_ERRNO, MYF(0), error, tokudb_hton_name);
+ }
+
+ tokudb_hton_initialized_lock.unlock();
+ TOKUDB_DBUG_RETURN(error);
+}
+
+int lock_waits_init(void* p) {
+ ST_SCHEMA_TABLE* schema = (ST_SCHEMA_TABLE*)p;
+ schema->fields_info = lock_waits_field_info;
+ schema->fill_table = lock_waits_fill_table;
+ return 0;
+}
+
+int lock_waits_done(void *p) {
+ return 0;
+}
+
+st_mysql_plugin lock_waits = {
+ MYSQL_INFORMATION_SCHEMA_PLUGIN,
+ &lock_waits_information_schema,
+ "TokuDB_lock_waits",
+ "Percona",
+ "Percona TokuDB Storage Engine with Fractal Tree(tm) Technology",
+ PLUGIN_LICENSE_GPL,
+ lock_waits_init, /* plugin init */
+ lock_waits_done, /* plugin deinit */
+ TOKUDB_PLUGIN_VERSION,
+ NULL, /* status variables */
+ NULL, /* system variables */
+#ifdef MARIA_PLUGIN_INTERFACE_VERSION
+ tokudb::sysvars::version,
+ MariaDB_PLUGIN_MATURITY_STABLE /* maturity */
+#else
+ NULL, /* config options */
+ 0, /* flags */
+#endif
+};
+
+
+
+st_mysql_information_schema locks_information_schema = {
+ MYSQL_INFORMATION_SCHEMA_INTERFACE_VERSION
+};
+
+ST_FIELD_INFO locks_field_info[] = {
+ {"locks_trx_id", 0, MYSQL_TYPE_LONGLONG, 0, 0, NULL, SKIP_OPEN_TABLE },
+ {"locks_mysql_thread_id", 0, MYSQL_TYPE_LONGLONG, 0, 0, NULL, SKIP_OPEN_TABLE },
+ {"locks_dname", 256, MYSQL_TYPE_STRING, 0, 0, NULL, SKIP_OPEN_TABLE },
+ {"locks_key_left", 256, MYSQL_TYPE_STRING, 0, 0, NULL, SKIP_OPEN_TABLE },
+ {"locks_key_right", 256, MYSQL_TYPE_STRING, 0, 0, NULL, SKIP_OPEN_TABLE },
+ {"locks_table_schema", 256, MYSQL_TYPE_STRING, 0, 0, NULL, SKIP_OPEN_TABLE },
+ {"locks_table_name", 256, MYSQL_TYPE_STRING, 0, 0, NULL, SKIP_OPEN_TABLE },
+ {"locks_table_dictionary_name", 256, MYSQL_TYPE_STRING, 0, 0, NULL, SKIP_OPEN_TABLE },
+ {NULL, 0, MYSQL_TYPE_NULL, 0, 0, NULL, SKIP_OPEN_TABLE}
+};
+
+struct locks_extra_t {
+ THD* thd;
+ TABLE* table;
+};
+
+int locks_callback(
+ DB_TXN* txn,
+ iterate_row_locks_callback iterate_locks,
+ void* locks_extra,
+ void* extra) {
+
+ uint64_t txn_id = txn->id64(txn);
+ uint64_t client_id = txn->get_client_id(txn);
+ locks_extra_t* e = reinterpret_cast<struct locks_extra_t*>(extra);
+ THD* thd = e->thd;
+ TABLE* table = e->table;
+ int error = 0;
+ DB* db;
+ DBT left_key, right_key;
+ while (error == 0 &&
+ iterate_locks(&db, &left_key, &right_key, locks_extra) == 0) {
+ table->field[0]->store(txn_id, false);
+ table->field[1]->store(client_id, false);
+
+ const char* dname = tokudb_get_index_name(db);
+ size_t dname_length = strlen(dname);
+ table->field[2]->store(dname, dname_length, system_charset_info);
+
+ String left_str;
+ tokudb_pretty_left_key(db, &left_key, &left_str);
+ table->field[3]->store(
+ left_str.ptr(),
+ left_str.length(),
+ system_charset_info);
+
+ String right_str;
+ tokudb_pretty_right_key(db, &right_key, &right_str);
+ table->field[4]->store(
+ right_str.ptr(),
+ right_str.length(),
+ system_charset_info);
+
+ String database_name, table_name, dictionary_name;
+ tokudb_split_dname(dname, database_name, table_name, dictionary_name);
+ table->field[5]->store(
+ database_name.c_ptr(),
+ database_name.length(),
+ system_charset_info);
+ table->field[6]->store(
+ table_name.c_ptr(),
+ table_name.length(),
+ system_charset_info);
+ table->field[7]->store(
+ dictionary_name.c_ptr(),
+ dictionary_name.length(),
+ system_charset_info);
+
+ error = schema_table_store_record(thd, table);
+
+ if (!error && thd_killed(thd))
+ error = ER_QUERY_INTERRUPTED;
+ }
+ return error;
+}
+
+#if MYSQL_VERSION_ID >= 50600
+int locks_fill_table(THD* thd, TABLE_LIST* tables, Item* cond) {
+#else
+int locks_fill_table(THD* thd, TABLE_LIST* tables, COND* cond) {
+#endif
+ TOKUDB_DBUG_ENTER("");
+ int error;
+
+ tokudb_hton_initialized_lock.lock_read();
+
+ if (!tokudb_hton_initialized) {
+ error = ER_PLUGIN_IS_NOT_LOADED;
+ my_error(error, MYF(0), tokudb_hton_name);
+ } else {
+ locks_extra_t e = { thd, tables->table };
+ error = db_env->iterate_live_transactions(db_env, locks_callback, &e);
+ if (error)
+ my_error(ER_GET_ERRNO, MYF(0), error, tokudb_hton_name);
+ }
+
+ tokudb_hton_initialized_lock.unlock();
+ TOKUDB_DBUG_RETURN(error);
+}
+
+int locks_init(void* p) {
+ ST_SCHEMA_TABLE* schema = (ST_SCHEMA_TABLE*)p;
+ schema->fields_info = locks_field_info;
+ schema->fill_table = locks_fill_table;
+ return 0;
+}
+
+int locks_done(void* p) {
+ return 0;
+}
+
+st_mysql_plugin locks = {
+ MYSQL_INFORMATION_SCHEMA_PLUGIN,
+ &locks_information_schema,
+ "TokuDB_locks",
+ "Percona",
+ "Percona TokuDB Storage Engine with Fractal Tree(tm) Technology",
+ PLUGIN_LICENSE_GPL,
+ locks_init, /* plugin init */
+ locks_done, /* plugin deinit */
+ TOKUDB_PLUGIN_VERSION,
+ NULL, /* status variables */
+ NULL, /* system variables */
+#ifdef MARIA_PLUGIN_INTERFACE_VERSION
+ tokudb::sysvars::version,
+ MariaDB_PLUGIN_MATURITY_STABLE /* maturity */
+#else
+ NULL, /* config options */
+ 0, /* flags */
+#endif
+};
+
+
+
+st_mysql_information_schema file_map_information_schema = {
+ MYSQL_INFORMATION_SCHEMA_INTERFACE_VERSION
+};
+
+ST_FIELD_INFO file_map_field_info[] = {
+ {"dictionary_name", 256, MYSQL_TYPE_STRING, 0, 0, NULL, SKIP_OPEN_TABLE },
+ {"internal_file_name", 256, MYSQL_TYPE_STRING, 0, 0, NULL, SKIP_OPEN_TABLE },
+ {"table_schema", 256, MYSQL_TYPE_STRING, 0, 0, NULL, SKIP_OPEN_TABLE },
+ {"table_name", 256, MYSQL_TYPE_STRING, 0, 0, NULL, SKIP_OPEN_TABLE },
+ {"table_dictionary_name", 256, MYSQL_TYPE_STRING, 0, 0, NULL, SKIP_OPEN_TABLE },
+ {NULL, 0, MYSQL_TYPE_NULL, 0, 0, NULL, SKIP_OPEN_TABLE}
+};
+
+int report_file_map(TABLE* table, THD* thd) {
+ int error;
+ DB_TXN* txn = NULL;
+ DBC* tmp_cursor = NULL;
+ DBT curr_key;
+ DBT curr_val;
+ memset(&curr_key, 0, sizeof curr_key);
+ memset(&curr_val, 0, sizeof curr_val);
+ error = txn_begin(db_env, 0, &txn, DB_READ_UNCOMMITTED, thd);
+ if (error) {
+ goto cleanup;
+ }
+ error = db_env->get_cursor_for_directory(db_env, txn, &tmp_cursor);
+ if (error) {
+ goto cleanup;
+ }
+ while (error == 0) {
+ error = tmp_cursor->c_get(tmp_cursor, &curr_key, &curr_val, DB_NEXT);
+ if (!error) {
+ // We store the NULL terminator in the directory so it's included
+ // in the size.
+ // See #5789
+ // Recalculate and check just to be safe.
+ const char *dname = (const char *) curr_key.data;
+ size_t dname_len = strlen(dname);
+ assert(dname_len == curr_key.size - 1);
+ table->field[0]->store(dname, dname_len, system_charset_info);
+
+ const char *iname = (const char *) curr_val.data;
+ size_t iname_len = strlen(iname);
+ assert(iname_len == curr_val.size - 1);
+ table->field[1]->store(iname, iname_len, system_charset_info);
+
+ // split the dname
+ String database_name, table_name, dictionary_name;
+ tokudb_split_dname(
+ dname,
+ database_name,
+ table_name,
+ dictionary_name);
+ table->field[2]->store(
+ database_name.c_ptr(),
+ database_name.length(),
+ system_charset_info);
+ table->field[3]->store(
+ table_name.c_ptr(),
+ table_name.length(),
+ system_charset_info);
+ table->field[4]->store(
+ dictionary_name.c_ptr(),
+ dictionary_name.length(),
+ system_charset_info);
+
+ error = schema_table_store_record(thd, table);
+ }
+ if (!error && thd_killed(thd))
+ error = ER_QUERY_INTERRUPTED;
+ }
+ if (error == DB_NOTFOUND) {
+ error = 0;
+ }
+cleanup:
+ if (tmp_cursor) {
+ int r = tmp_cursor->c_close(tmp_cursor);
+ assert(r == 0);
+ }
+ if (txn) {
+ commit_txn(txn, 0);
+ }
+ return error;
+}
+
+#if MYSQL_VERSION_ID >= 50600
+int file_map_fill_table(THD* thd, TABLE_LIST* tables, Item* cond) {
+#else
+int file_map_fill_table(THD* thd, TABLE_LIST* tables, COND* cond) {
+#endif
+ TOKUDB_DBUG_ENTER("");
+ int error;
+ TABLE* table = tables->table;
+
+ tokudb_hton_initialized_lock.lock_read();
+
+ if (!tokudb_hton_initialized) {
+ error = ER_PLUGIN_IS_NOT_LOADED;
+ my_error(error, MYF(0), tokudb_hton_name);
+ } else {
+ error = report_file_map(table, thd);
+ if (error)
+ my_error(ER_GET_ERRNO, MYF(0), error, tokudb_hton_name);
+ }
+
+ tokudb_hton_initialized_lock.unlock();
+ TOKUDB_DBUG_RETURN(error);
+}
+
+int file_map_init(void* p) {
+ ST_SCHEMA_TABLE* schema = (ST_SCHEMA_TABLE*)p;
+ schema->fields_info = file_map_field_info;
+ schema->fill_table = file_map_fill_table;
+ return 0;
+}
+
+int file_map_done(void* p) {
+ return 0;
+}
+
+st_mysql_plugin file_map = {
+ MYSQL_INFORMATION_SCHEMA_PLUGIN,
+ &file_map_information_schema,
+ "TokuDB_file_map",
+ "Percona",
+ "Percona TokuDB Storage Engine with Fractal Tree(tm) Technology",
+ PLUGIN_LICENSE_GPL,
+ file_map_init, /* plugin init */
+ file_map_done, /* plugin deinit */
+ TOKUDB_PLUGIN_VERSION,
+ NULL, /* status variables */
+ NULL, /* system variables */
+#ifdef MARIA_PLUGIN_INTERFACE_VERSION
+ tokudb::sysvars::version,
+ MariaDB_PLUGIN_MATURITY_STABLE /* maturity */
+#else
+ NULL, /* config options */
+ 0, /* flags */
+#endif
+};
+
+
+
+st_mysql_information_schema fractal_tree_info_information_schema = {
+ MYSQL_INFORMATION_SCHEMA_INTERFACE_VERSION
+};
+
+ST_FIELD_INFO fractal_tree_info_field_info[] = {
+ {"dictionary_name", 256, MYSQL_TYPE_STRING, 0, 0, NULL, SKIP_OPEN_TABLE },
+ {"internal_file_name", 256, MYSQL_TYPE_STRING, 0, 0, NULL, SKIP_OPEN_TABLE },
+ {"bt_num_blocks_allocated", 0, MYSQL_TYPE_LONGLONG, 0, 0, NULL, SKIP_OPEN_TABLE },
+ {"bt_num_blocks_in_use", 0, MYSQL_TYPE_LONGLONG, 0, 0, NULL, SKIP_OPEN_TABLE },
+ {"bt_size_allocated", 0, MYSQL_TYPE_LONGLONG, 0, 0, NULL, SKIP_OPEN_TABLE },
+ {"bt_size_in_use", 0, MYSQL_TYPE_LONGLONG, 0, 0, NULL, SKIP_OPEN_TABLE },
+ {"table_schema", 256, MYSQL_TYPE_STRING, 0, 0, NULL, SKIP_OPEN_TABLE },
+ {"table_name", 256, MYSQL_TYPE_STRING, 0, 0, NULL, SKIP_OPEN_TABLE },
+ {"table_dictionary_name", 256, MYSQL_TYPE_STRING, 0, 0, NULL, SKIP_OPEN_TABLE },
+ {NULL, 0, MYSQL_TYPE_NULL, 0, 0, NULL, SKIP_OPEN_TABLE}
+};
+
+int report_fractal_tree_info_for_db(
+ const DBT* dname,
+ const DBT* iname,
+ TABLE* table,
+ THD* thd) {
+
+ int error;
+ uint64_t bt_num_blocks_allocated;
+ uint64_t bt_num_blocks_in_use;
+ uint64_t bt_size_allocated;
+ uint64_t bt_size_in_use;
+
+ DB *db = NULL;
+ error = db_create(&db, db_env, 0);
+ if (error) {
+ goto exit;
+ }
+ error = db->open(db, NULL, (char *)dname->data, NULL, DB_BTREE, 0, 0666);
+ if (error) {
+ goto exit;
+ }
+ error = db->get_fractal_tree_info64(
+ db,
+ &bt_num_blocks_allocated,
+ &bt_num_blocks_in_use,
+ &bt_size_allocated,
+ &bt_size_in_use);
+ if (error) {
+ goto exit;
+ }
+
+ // We store the NULL terminator in the directory so it's included in the
+ // size.
+ // See #5789
+ // Recalculate and check just to be safe.
+ {
+ size_t dname_len = strlen((const char*)dname->data);
+ assert(dname_len == dname->size - 1);
+ table->field[0]->store(
+ (char*)dname->data,
+ dname_len,
+ system_charset_info);
+ size_t iname_len = strlen((const char*)iname->data);
+ assert(iname_len == iname->size - 1);
+ table->field[1]->store(
+ (char*)iname->data,
+ iname_len,
+ system_charset_info);
+ }
+ table->field[2]->store(bt_num_blocks_allocated, false);
+ table->field[3]->store(bt_num_blocks_in_use, false);
+ table->field[4]->store(bt_size_allocated, false);
+ table->field[5]->store(bt_size_in_use, false);
+
+ // split the dname
+ {
+ String database_name, table_name, dictionary_name;
+ tokudb_split_dname(
+ (const char*)dname->data,
+ database_name,
+ table_name,
+ dictionary_name);
+ table->field[6]->store(
+ database_name.c_ptr(),
+ database_name.length(),
+ system_charset_info);
+ table->field[7]->store(
+ table_name.c_ptr(),
+ table_name.length(),
+ system_charset_info);
+ table->field[8]->store(
+ dictionary_name.c_ptr(),
+ dictionary_name.length(),
+ system_charset_info);
+ }
+ error = schema_table_store_record(thd, table);
+
+exit:
+ if (db) {
+ int close_error = db->close(db, 0);
+ if (error == 0)
+ error = close_error;
+ }
+ return error;
+}
+
+int report_fractal_tree_info(TABLE* table, THD* thd) {
+ int error;
+ DB_TXN* txn = NULL;
+ DBC* tmp_cursor = NULL;
+ DBT curr_key;
+ DBT curr_val;
+ memset(&curr_key, 0, sizeof curr_key);
+ memset(&curr_val, 0, sizeof curr_val);
+ error = txn_begin(db_env, 0, &txn, DB_READ_UNCOMMITTED, thd);
+ if (error) {
+ goto cleanup;
+ }
+ error = db_env->get_cursor_for_directory(db_env, txn, &tmp_cursor);
+ if (error) {
+ goto cleanup;
+ }
+ while (error == 0) {
+ error = tmp_cursor->c_get(tmp_cursor, &curr_key, &curr_val, DB_NEXT);
+ if (!error) {
+ error = report_fractal_tree_info_for_db(
+ &curr_key,
+ &curr_val,
+ table,
+ thd);
+ if (error)
+ error = 0; // ignore read uncommitted errors
+ }
+ if (!error && thd_killed(thd))
+ error = ER_QUERY_INTERRUPTED;
+ }
+ if (error == DB_NOTFOUND) {
+ error = 0;
+ }
+cleanup:
+ if (tmp_cursor) {
+ int r = tmp_cursor->c_close(tmp_cursor);
+ assert(r == 0);
+ }
+ if (txn) {
+ commit_txn(txn, 0);
+ }
+ return error;
+}
+
+#if MYSQL_VERSION_ID >= 50600
+int fractal_tree_info_fill_table(THD* thd, TABLE_LIST* tables, Item* cond) {
+#else
+int fractal_tree_info_fill_table(THD* thd, TABLE_LIST* tables, COND* cond) {
+#endif
+ TOKUDB_DBUG_ENTER("");
+ int error;
+ TABLE* table = tables->table;
+
+ // 3938: Get a read lock on the status flag, since we must
+ // read it before safely proceeding
+ tokudb_hton_initialized_lock.lock_read();
+
+ if (!tokudb_hton_initialized) {
+ error = ER_PLUGIN_IS_NOT_LOADED;
+ my_error(error, MYF(0), tokudb_hton_name);
+ } else {
+ error = report_fractal_tree_info(table, thd);
+ if (error)
+ my_error(ER_GET_ERRNO, MYF(0), error, tokudb_hton_name);
+ }
+
+ //3938: unlock the status flag lock
+ tokudb_hton_initialized_lock.unlock();
+ TOKUDB_DBUG_RETURN(error);
+}
+
+int fractal_tree_info_init(void* p) {
+ ST_SCHEMA_TABLE* schema = (ST_SCHEMA_TABLE*)p;
+ schema->fields_info = fractal_tree_info_field_info;
+ schema->fill_table = fractal_tree_info_fill_table;
+ return 0;
+}
+
+int fractal_tree_info_done(void* p) {
+ return 0;
+}
+
+st_mysql_plugin fractal_tree_info = {
+ MYSQL_INFORMATION_SCHEMA_PLUGIN,
+ &fractal_tree_info_information_schema,
+ "TokuDB_fractal_tree_info",
+ "Percona",
+ "Percona TokuDB Storage Engine with Fractal Tree(tm) Technology",
+ PLUGIN_LICENSE_GPL,
+ fractal_tree_info_init, /* plugin init */
+ fractal_tree_info_done, /* plugin deinit */
+ TOKUDB_PLUGIN_VERSION,
+ NULL, /* status variables */
+ NULL, /* system variables */
+#ifdef MARIA_PLUGIN_INTERFACE_VERSION
+ tokudb::sysvars::version,
+ MariaDB_PLUGIN_MATURITY_STABLE /* maturity */
+#else
+ NULL, /* config options */
+ 0, /* flags */
+#endif
+};
+
+
+
+st_mysql_information_schema fractal_tree_block_map_information_schema = {
+ MYSQL_INFORMATION_SCHEMA_INTERFACE_VERSION
+};
+
+ST_FIELD_INFO fractal_tree_block_map_field_info[] = {
+ {"dictionary_name", 256, MYSQL_TYPE_STRING, 0, 0, NULL, SKIP_OPEN_TABLE },
+ {"internal_file_name", 256, MYSQL_TYPE_STRING, 0, 0, NULL, SKIP_OPEN_TABLE },
+ {"checkpoint_count", 0, MYSQL_TYPE_LONGLONG, 0, 0, NULL, SKIP_OPEN_TABLE },
+ {"blocknum", 0, MYSQL_TYPE_LONGLONG, 0, 0, NULL, SKIP_OPEN_TABLE },
+ {"offset", 0, MYSQL_TYPE_LONGLONG, 0, MY_I_S_MAYBE_NULL, NULL, SKIP_OPEN_TABLE },
+ {"size", 0, MYSQL_TYPE_LONGLONG, 0, MY_I_S_MAYBE_NULL, NULL, SKIP_OPEN_TABLE },
+ {"table_schema", 256, MYSQL_TYPE_STRING, 0, 0, NULL, SKIP_OPEN_TABLE },
+ {"table_name", 256, MYSQL_TYPE_STRING, 0, 0, NULL, SKIP_OPEN_TABLE },
+ {"table_dictionary_name", 256, MYSQL_TYPE_STRING, 0, 0, NULL, SKIP_OPEN_TABLE },
+ {NULL, 0, MYSQL_TYPE_NULL, 0, 0, NULL, SKIP_OPEN_TABLE}
+};
+
+struct report_fractal_tree_block_map_iterator_extra_t {
+ int64_t num_rows;
+ int64_t i;
+ uint64_t* checkpoint_counts;
+ int64_t* blocknums;
+ int64_t* diskoffs;
+ int64_t* sizes;
+};
+
+// This iterator is called while holding the blocktable lock.
+// We should be as quick as possible.
+// We don't want to do one call to get the number of rows, release the
+// blocktable lock, and then do another call to get all the rows because
+// the number of rows may change if we don't hold the lock.
+// As a compromise, we'll do some mallocs inside the lock on the first call,
+// but everything else should be fast.
+int report_fractal_tree_block_map_iterator(
+ uint64_t checkpoint_count,
+ int64_t num_rows,
+ int64_t blocknum,
+ int64_t diskoff,
+ int64_t size,
+ void* iter_extra) {
+
+ struct report_fractal_tree_block_map_iterator_extra_t* e =
+ static_cast<struct report_fractal_tree_block_map_iterator_extra_t*>(iter_extra);
+
+ assert(num_rows > 0);
+ if (e->num_rows == 0) {
+ e->checkpoint_counts =
+ (uint64_t*)tokudb::memory::malloc(
+ num_rows * (sizeof *e->checkpoint_counts),
+ MYF(MY_WME|MY_ZEROFILL|MY_FAE));
+ e->blocknums =
+ (int64_t*)tokudb::memory::malloc(
+ num_rows * (sizeof *e->blocknums),
+ MYF(MY_WME|MY_ZEROFILL|MY_FAE));
+ e->diskoffs =
+ (int64_t*)tokudb::memory::malloc(
+ num_rows * (sizeof *e->diskoffs),
+ MYF(MY_WME|MY_ZEROFILL|MY_FAE));
+ e->sizes =
+ (int64_t*)tokudb::memory::malloc(
+ num_rows * (sizeof *e->sizes),
+ MYF(MY_WME|MY_ZEROFILL|MY_FAE));
+ e->num_rows = num_rows;
+ }
+
+ e->checkpoint_counts[e->i] = checkpoint_count;
+ e->blocknums[e->i] = blocknum;
+ e->diskoffs[e->i] = diskoff;
+ e->sizes[e->i] = size;
+ ++(e->i);
+
+ return 0;
+}
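+
+// Callback contract sketch (illustrative): for a block map with
+// num_rows == 3 the iterator above runs three times under the blocktable
+// lock; the first call sees e->num_rows == 0 and allocates the four arrays,
+// then each call appends one row at index e->i:
+//
+//   call 1: malloc arrays of 3, store row 0
+//   call 2: store row 1
+//   call 3: store row 2
+//
+// The caller below copies the rows into the schema table after the
+// iteration completes and then frees the arrays.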
+
+int report_fractal_tree_block_map_for_db(
+ const DBT* dname,
+ const DBT* iname,
+ TABLE* table,
+ THD* thd) {
+
+ int error;
+ DB* db;
+ // avoid struct initializers so that we can compile with older gcc versions
+ report_fractal_tree_block_map_iterator_extra_t e = {};
+
+ error = db_create(&db, db_env, 0);
+ if (error) {
+ goto exit;
+ }
+ error = db->open(db, NULL, (char *)dname->data, NULL, DB_BTREE, 0, 0666);
+ if (error) {
+ goto exit;
+ }
+ error = db->iterate_fractal_tree_block_map(
+ db,
+ report_fractal_tree_block_map_iterator,
+ &e);
+ {
+ int close_error = db->close(db, 0);
+ if (!error) {
+ error = close_error;
+ }
+ }
+ if (error) {
+ goto exit;
+ }
+
+ // e.i should equal e.num_rows here; otherwise the iteration returned an
+ // error and we skipped this section of code
+ assert(e.i == e.num_rows);
+ for (int64_t i = 0; error == 0 && i < e.num_rows; ++i) {
+ // We store the NULL terminator in the directory so it's included in the size.
+ // See #5789
+ // Recalculate and check just to be safe.
+ size_t dname_len = strlen((const char*)dname->data);
+ assert(dname_len == dname->size - 1);
+ table->field[0]->store(
+ (char*)dname->data,
+ dname_len,
+ system_charset_info);
+
+ size_t iname_len = strlen((const char*)iname->data);
+ assert(iname_len == iname->size - 1);
+ table->field[1]->store(
+ (char*)iname->data,
+ iname_len,
+ system_charset_info);
+
+ table->field[2]->store(e.checkpoint_counts[i], false);
+ table->field[3]->store(e.blocknums[i], false);
+ static const int64_t freelist_null = -1;
+ static const int64_t diskoff_unused = -2;
+ if (e.diskoffs[i] == diskoff_unused || e.diskoffs[i] == freelist_null) {
+ table->field[4]->set_null();
+ } else {
+ table->field[4]->set_notnull();
+ table->field[4]->store(e.diskoffs[i], false);
+ }
+ static const int64_t size_is_free = -1;
+ if (e.sizes[i] == size_is_free) {
+ table->field[5]->set_null();
+ } else {
+ table->field[5]->set_notnull();
+ table->field[5]->store(e.sizes[i], false);
+ }
+
+ // split the dname
+ String database_name, table_name, dictionary_name;
+ tokudb_split_dname(
+ (const char*)dname->data,
+ database_name,
+ table_name,
+ dictionary_name);
+ table->field[6]->store(
+ database_name.c_ptr(),
+ database_name.length(),
+ system_charset_info);
+ table->field[7]->store(
+ table_name.c_ptr(),
+ table_name.length(),
+ system_charset_info);
+ table->field[8]->store(
+ dictionary_name.c_ptr(),
+ dictionary_name.length(),
+ system_charset_info);
+
+ error = schema_table_store_record(thd, table);
+ }
+
+exit:
+ if (e.checkpoint_counts != NULL) {
+ tokudb::memory::free(e.checkpoint_counts);
+ e.checkpoint_counts = NULL;
+ }
+ if (e.blocknums != NULL) {
+ tokudb::memory::free(e.blocknums);
+ e.blocknums = NULL;
+ }
+ if (e.diskoffs != NULL) {
+ tokudb::memory::free(e.diskoffs);
+ e.diskoffs = NULL;
+ }
+ if (e.sizes != NULL) {
+ tokudb::memory::free(e.sizes);
+ e.sizes = NULL;
+ }
+ return error;
+}
+
+int report_fractal_tree_block_map(TABLE* table, THD* thd) {
+ int error;
+ DB_TXN* txn = NULL;
+ DBC* tmp_cursor = NULL;
+ DBT curr_key;
+ DBT curr_val;
+ memset(&curr_key, 0, sizeof curr_key);
+ memset(&curr_val, 0, sizeof curr_val);
+ error = txn_begin(db_env, 0, &txn, DB_READ_UNCOMMITTED, thd);
+ if (error) {
+ goto cleanup;
+ }
+ error = db_env->get_cursor_for_directory(db_env, txn, &tmp_cursor);
+ if (error) {
+ goto cleanup;
+ }
+ while (error == 0) {
+ error = tmp_cursor->c_get(tmp_cursor, &curr_key, &curr_val, DB_NEXT);
+ if (!error) {
+ error = report_fractal_tree_block_map_for_db(
+ &curr_key,
+ &curr_val,
+ table,
+ thd);
+ }
+ if (!error && thd_killed(thd))
+ error = ER_QUERY_INTERRUPTED;
+ }
+ if (error == DB_NOTFOUND) {
+ error = 0;
+ }
+cleanup:
+ if (tmp_cursor) {
+ int r = tmp_cursor->c_close(tmp_cursor);
+ assert(r == 0);
+ }
+ if (txn) {
+ commit_txn(txn, 0);
+ }
+ return error;
+}
+
+#if MYSQL_VERSION_ID >= 50600
+int fractal_tree_block_map_fill_table(
+ THD* thd,
+ TABLE_LIST* tables,
+ Item* cond) {
+#else
+int fractal_tree_block_map_fill_table(
+ THD* thd,
+ TABLE_LIST* tables,
+ COND* cond) {
+#endif
+ TOKUDB_DBUG_ENTER("");
+ int error;
+ TABLE* table = tables->table;
+
+ // 3938: Get a read lock on the status flag, since we must
+ // read it before safely proceeding
+ tokudb_hton_initialized_lock.lock_read();
+
+ if (!tokudb_hton_initialized) {
+ error = ER_PLUGIN_IS_NOT_LOADED;
+ my_error(error, MYF(0), tokudb_hton_name);
+ } else {
+ error = report_fractal_tree_block_map(table, thd);
+ if (error)
+ my_error(ER_GET_ERRNO, MYF(0), error, tokudb_hton_name);
+ }
+
+ //3938: unlock the status flag lock
+ tokudb_hton_initialized_lock.unlock();
+ TOKUDB_DBUG_RETURN(error);
+}
+
+int fractal_tree_block_map_init(void* p) {
+ ST_SCHEMA_TABLE* schema = (ST_SCHEMA_TABLE*)p;
+ schema->fields_info = fractal_tree_block_map_field_info;
+ schema->fill_table = fractal_tree_block_map_fill_table;
+ return 0;
+}
+
+int fractal_tree_block_map_done(void *p) {
+ return 0;
+}
+
+st_mysql_plugin fractal_tree_block_map = {
+ MYSQL_INFORMATION_SCHEMA_PLUGIN,
+ &fractal_tree_block_map_information_schema,
+ "TokuDB_fractal_tree_block_map",
+ "Percona",
+ "Percona TokuDB Storage Engine with Fractal Tree(tm) Technology",
+ PLUGIN_LICENSE_GPL,
+ fractal_tree_block_map_init, /* plugin init */
+ fractal_tree_block_map_done, /* plugin deinit */
+ TOKUDB_PLUGIN_VERSION,
+ NULL, /* status variables */
+ NULL, /* system variables */
+#ifdef MARIA_PLUGIN_INTERFACE_VERSION
+ tokudb::sysvars::version,
+ MariaDB_PLUGIN_MATURITY_STABLE /* maturity */
+#else
+ NULL, /* config options */
+ 0, /* flags */
+#endif
+};
+
+
+st_mysql_information_schema background_job_status_information_schema = {
+ MYSQL_INFORMATION_SCHEMA_INTERFACE_VERSION
+};
+
+ST_FIELD_INFO background_job_status_field_info[] = {
+ {"id", 0, MYSQL_TYPE_LONGLONG, 0, 0, NULL, SKIP_OPEN_TABLE },
+ {"database_name", 256, MYSQL_TYPE_STRING, 0, 0, NULL, SKIP_OPEN_TABLE },
+ {"table_name", 256, MYSQL_TYPE_STRING, 0, 0, NULL, SKIP_OPEN_TABLE },
+ {"job_type", 256, MYSQL_TYPE_STRING, 0, 0, NULL, SKIP_OPEN_TABLE },
+ {"job_params", 256, MYSQL_TYPE_STRING, 0, 0, NULL, SKIP_OPEN_TABLE },
+ {"scheduler", 32, MYSQL_TYPE_STRING, 0, 0, NULL, SKIP_OPEN_TABLE },
+ {"scheduled_time", 0, MYSQL_TYPE_DATETIME, 0, 0, NULL, SKIP_OPEN_TABLE },
+ {"started_time", 0, MYSQL_TYPE_DATETIME, 0, MY_I_S_MAYBE_NULL, NULL, SKIP_OPEN_TABLE },
+ {"status", 256, MYSQL_TYPE_STRING, 0, MY_I_S_MAYBE_NULL, SKIP_OPEN_TABLE },
+ {NULL, 0, MYSQL_TYPE_NULL, 0, 0, NULL, SKIP_OPEN_TABLE}
+};
+
+struct background_job_status_extra {
+ THD* thd;
+ TABLE* table;
+};
+
+void background_job_status_callback(
+ uint64_t id,
+ const char* database_name,
+ const char* table_name,
+ const char* type,
+ const char* params,
+ const char* status,
+ bool user_scheduled,
+ time_t scheduled_time,
+ time_t started_time,
+ void* extra) {
+
+ background_job_status_extra* e =
+ reinterpret_cast<background_job_status_extra*>(extra);
+
+ THD* thd = e->thd;
+ TABLE* table = e->table;
+
+ table->field[0]->store(id, false);
+ table->field[1]->store(
+ database_name,
+ strlen(database_name),
+ system_charset_info);
+ table->field[2]->store(table_name, strlen(table_name), system_charset_info);
+ table->field[3]->store(type, strlen(type), system_charset_info);
+ table->field[4]->store(params, strlen(params), system_charset_info);
+ if (user_scheduled)
+ table->field[5]->store("USER", strlen("USER"), system_charset_info);
+ else
+ table->field[5]->store("AUTO", strlen("AUTO"), system_charset_info);
+
+ field_store_time_t(table->field[6], scheduled_time);
+ field_store_time_t(table->field[7], started_time);
+ if (status[0] != '\0') {
+ table->field[8]->store(status, strlen(status), system_charset_info);
+ table->field[8]->set_notnull();
+ } else {
+ table->field[8]->store(NULL, 0, system_charset_info);
+ table->field[8]->set_null();
+ }
+
+ schema_table_store_record(thd, table);
+}
+
+int report_background_job_status(TABLE *table, THD *thd) {
+ int error = 0;
+ background_job_status_extra extra = {
+ thd,
+ table
+ };
+ tokudb::background::_job_manager->iterate_jobs(
+ background_job_status_callback,
+ &extra);
+ return error;
+}
+
+#if MYSQL_VERSION_ID >= 50600
+int background_job_status_fill_table(THD *thd, TABLE_LIST *tables, Item *cond) {
+#else
+int background_job_status_fill_table(THD *thd, TABLE_LIST *tables, COND *cond) {
+#endif
+ TOKUDB_DBUG_ENTER("");
+ int error;
+ TABLE* table = tables->table;
+
+ tokudb_hton_initialized_lock.lock_read();
+
+ if (!tokudb_hton_initialized) {
+ error = ER_PLUGIN_IS_NOT_LOADED;
+ my_error(error, MYF(0), tokudb_hton_name);
+ } else {
+ error = report_background_job_status(table, thd);
+ if (error)
+ my_error(ER_GET_ERRNO, MYF(0), error, tokudb_hton_name);
+ }
+
+ tokudb_hton_initialized_lock.unlock();
+ TOKUDB_DBUG_RETURN(error);
+}
+
+int background_job_status_init(void* p) {
+ ST_SCHEMA_TABLE* schema = (ST_SCHEMA_TABLE*)p;
+ schema->fields_info = background_job_status_field_info;
+ schema->fill_table = background_job_status_fill_table;
+ return 0;
+}
+
+int background_job_status_done(void* p) {
+ return 0;
+}
+
+st_mysql_plugin background_job_status = {
+ MYSQL_INFORMATION_SCHEMA_PLUGIN,
+ &background_job_status_information_schema,
+ "TokuDB_background_job_status",
+ "Percona",
+ "Percona TokuDB Storage Engine with Fractal Tree(tm) Technology",
+ PLUGIN_LICENSE_GPL,
+ background_job_status_init, /* plugin init */
+ background_job_status_done, /* plugin deinit */
+ TOKUDB_PLUGIN_VERSION,
+ NULL, /* status variables */
+ NULL, /* system variables */
+#ifdef MARIA_PLUGIN_INTERFACE_VERSION
+ tokudb::sysvars::version,
+ MariaDB_PLUGIN_MATURITY_STABLE /* maturity */
+#else
+ NULL, /* config options */
+ 0, /* flags */
+#endif
+};
+
+} // namespace information_schema
+} // namespace tokudb
diff --git a/storage/tokudb/tokudb_information_schema.h b/storage/tokudb/tokudb_information_schema.h
new file mode 100644
index 00000000000..b80e3f074ec
--- /dev/null
+++ b/storage/tokudb/tokudb_information_schema.h
@@ -0,0 +1,50 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of TokuDB
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ TokuDB is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ TokuDB is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with TokuDB. If not, see <http://www.gnu.org/licenses/>.
+
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#ifndef _TOKUDB_INFORMATION_SCHEMA_H
+#define _TOKUDB_INFORMATION_SCHEMA_H
+
+#include "hatoku_defines.h"
+
+namespace tokudb {
+namespace information_schema {
+
+#ifdef MARIA_PLUGIN_INTERFACE_VERSION
+#define st_mysql_plugin st_maria_plugin
+#endif
+
+extern st_mysql_plugin trx;
+extern st_mysql_plugin lock_waits;
+extern st_mysql_plugin locks;
+extern st_mysql_plugin file_map;
+extern st_mysql_plugin fractal_tree_info;
+extern st_mysql_plugin fractal_tree_block_map;
+extern st_mysql_plugin background_job_status;
+
+} // namespace information_schema
+} // namespace tokudb
+
+#endif // _TOKUDB_INFORMATION_SCHEMA_H
diff --git a/storage/tokudb/tokudb_math.h b/storage/tokudb/tokudb_math.h
index c49af2a6a21..0338bf3871e 100644
--- a/storage/tokudb/tokudb_math.h
+++ b/storage/tokudb/tokudb_math.h
@@ -32,30 +32,34 @@ namespace tokudb {
// Overflow detection adapted from "Hackers Delight", Henry S. Warren
// Return a bit mask for bits 0 .. length_bits-1
-static uint64_t uint_mask(uint length_bits) __attribute__((unused));
+TOKUDB_UNUSED(static uint64_t uint_mask(uint length_bits));
static uint64_t uint_mask(uint length_bits) {
return length_bits == 64 ? ~0ULL : (1ULL<<length_bits)-1;
}
// Return the highest unsigned int with a given number of bits
-static uint64_t uint_high_endpoint(uint length_bits) __attribute__((unused));
+TOKUDB_UNUSED(static uint64_t uint_high_endpoint(uint length_bits));
static uint64_t uint_high_endpoint(uint length_bits) {
return uint_mask(length_bits);
}
// Return the lowest unsigned int with a given number of bits
-static uint64_t uint_low_endpoint(uint length_bits) __attribute__((unused));
-static uint64_t uint_low_endpoint(uint length_bits __attribute__((unused))) {
+TOKUDB_UNUSED(static uint64_t uint_low_endpoint(uint length_bits));
+static uint64_t uint_low_endpoint(TOKUDB_UNUSED(uint length_bits)) {
return 0;
}
// Add two unsigned integers with max maximum value.
// If there is an overflow then set the sum to the max.
// Return the sum and the overflow.
-static uint64_t uint_add(uint64_t x, uint64_t y, uint length_bits, bool *over) __attribute__((unused));
+TOKUDB_UNUSED(static uint64_t uint_add(
+ uint64_t x,
+ uint64_t y,
+ uint length_bits,
+ bool* over));
static uint64_t uint_add(uint64_t x, uint64_t y, uint length_bits, bool *over) {
uint64_t mask = uint_mask(length_bits);
- assert((x & ~mask) == 0 && (y & ~mask) == 0);
+ assert_always((x & ~mask) == 0 && (y & ~mask) == 0);
uint64_t s = (x + y) & mask;
*over = s < x; // check for overflow
return s;
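+// Worked example (illustrative): with length_bits == 8, x == 200 and
+// y == 100: mask == 0xFF, so s == (300 & 0xFF) == 44 and s < x sets *over;
+// the wrapped sum is returned and clamping to uint_high_endpoint(8) == 255
+// is left to the caller.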
@@ -64,30 +68,34 @@ static uint64_t uint_add(uint64_t x, uint64_t y, uint length_bits, bool *over) {
// Subtract two unsigned ints with max maximum value.
// If there is an over then set the difference to 0.
// Return the difference and the overflow.
-static uint64_t uint_sub(uint64_t x, uint64_t y, uint length_bits, bool *over) __attribute__((unused));
+TOKUDB_UNUSED(static uint64_t uint_sub(
+ uint64_t x,
+ uint64_t y,
+ uint length_bits,
+ bool* over));
static uint64_t uint_sub(uint64_t x, uint64_t y, uint length_bits, bool *over) {
uint64_t mask = uint_mask(length_bits);
- assert((x & ~mask) == 0 && (y & ~mask) == 0);
+ assert_always((x & ~mask) == 0 && (y & ~mask) == 0);
uint64_t s = (x - y) & mask;
*over = s > x; // check for overflow
return s;
}
// Return the highest int with a given number of bits
-static int64_t int_high_endpoint(uint length_bits) __attribute__((unused));
+TOKUDB_UNUSED(static int64_t int_high_endpoint(uint length_bits));
static int64_t int_high_endpoint(uint length_bits) {
return (1ULL<<(length_bits-1))-1;
}
// Return the lowest int with a given number of bits
-static int64_t int_low_endpoint(uint length_bits) __attribute__((unused));
+TOKUDB_UNUSED(static int64_t int_low_endpoint(uint length_bits));
static int64_t int_low_endpoint(uint length_bits) {
int64_t mask = uint_mask(length_bits);
return (1ULL<<(length_bits-1)) | ~mask;
}
// Sign extend to 64 bits an int with a given number of bits
-static int64_t int_sign_extend(int64_t n, uint length_bits) __attribute__((unused));
+TOKUDB_UNUSED(static int64_t int_sign_extend(int64_t n, uint length_bits));
static int64_t int_sign_extend(int64_t n, uint length_bits) {
if (n & (1ULL<<(length_bits-1)))
n |= ~uint_mask(length_bits);
@@ -99,7 +107,11 @@ static int64_t int_sign_extend(int64_t n, uint length_bits) {
// depending on the sign bit.
// Sign extend to 64 bits.
// Return the sum and the overflow.
-static int64_t int_add(int64_t x, int64_t y, uint length_bits, bool *over) __attribute__((unused));
+TOKUDB_UNUSED(static int64_t int_add(
+ int64_t x,
+ int64_t y,
+ uint length_bits,
+ bool* over));
static int64_t int_add(int64_t x, int64_t y, uint length_bits, bool *over) {
int64_t mask = uint_mask(length_bits);
int64_t n = (x + y) & mask;
@@ -114,7 +126,11 @@ static int64_t int_add(int64_t x, int64_t y, uint length_bits, bool *over) {
// depending on the sign bit.
// Sign extend to 64 bits.
// Return the sum and the overflow.
-static int64_t int_sub(int64_t x, int64_t y, uint length_bits, bool *over) __attribute__((unused));
+TOKUDB_UNUSED(static int64_t int_sub(
+ int64_t x,
+ int64_t y,
+ uint length_bits,
+ bool* over));
static int64_t int_sub(int64_t x, int64_t y, uint length_bits, bool *over) {
int64_t mask = uint_mask(length_bits);
int64_t n = (x - y) & mask;
diff --git a/storage/tokudb/tokudb_memory.h b/storage/tokudb/tokudb_memory.h
new file mode 100644
index 00000000000..2687c1cda8e
--- /dev/null
+++ b/storage/tokudb/tokudb_memory.h
@@ -0,0 +1,102 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of TokuDB
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ TokuDB is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ TokuDB is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with TokuDB. If not, see <http://www.gnu.org/licenses/>.
+
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#ifndef _TOKUDB_MEMORY_H
+#define _TOKUDB_MEMORY_H
+
+#include "hatoku_defines.h"
+
+namespace tokudb {
+namespace memory {
+
+void* malloc(size_t s, myf flags);
+void* realloc(void* p, size_t s, myf flags);
+void free(void* ptr);
+char* strdup(const char* p, myf flags);
+void* multi_malloc(myf myFlags, ...);
+
+
+inline void* malloc(size_t s, myf flags) {
+#if 50700 <= MYSQL_VERSION_ID && MYSQL_VERSION_ID <= 50799
+ return ::my_malloc(0, s, flags);
+#else
+ return ::my_malloc(s, flags);
+#endif
+}
+inline void* realloc(void* p, size_t s, myf flags) {
+ if (s == 0)
+ return p;
+#if 50700 <= MYSQL_VERSION_ID && MYSQL_VERSION_ID <= 50799
+ return ::my_realloc(0, p, s, flags);
+#else
+ return ::my_realloc(p, s, flags | MY_ALLOW_ZERO_PTR);
+#endif
+}
+inline void free(void* ptr) {
+ if (ptr)
+ ::my_free(ptr);
+}
+inline char* strdup(const char* p, myf flags) {
+#if 50700 <= MYSQL_VERSION_ID && MYSQL_VERSION_ID <= 50799
+ return ::my_strdup(0, p, flags);
+#else
+ return ::my_strdup(p, flags);
+#endif
+}
+inline void* multi_malloc(myf myFlags, ...) {
+ va_list args;
+ char** ptr;
+ char* start;
+ char* res;
+ size_t tot_length, length;
+
+ va_start(args, myFlags);
+ tot_length = 0;
+ while ((ptr = va_arg(args, char**))) {
+ length = va_arg(args, uint);
+ tot_length += ALIGN_SIZE(length);
+ }
+ va_end(args);
+
+ if (!(start = (char*)malloc(tot_length, myFlags))) {
+ return 0;
+ }
+
+ va_start(args, myFlags);
+ res = start;
+ while ((ptr = va_arg(args, char**))) {
+ *ptr = res;
+ length = va_arg(args, uint);
+ res += ALIGN_SIZE(length);
+ }
+ va_end(args);
+ return start;
+}
+
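+// Usage sketch (illustrative, not called anywhere): multi_malloc packs
+// several buffers into a single allocation. The vararg list is pairs of
+// (char** out, uint size) terminated by a null pointer, and one free()
+// releases every buffer at once:
+//
+//   char* key_buf;
+//   char* val_buf;
+//   void* chunk = tokudb::memory::multi_malloc(
+//       MYF(MY_WME),
+//       &key_buf, (uint)16,
+//       &val_buf, (uint)32,
+//       (char**)NULL);
+//   if (chunk != NULL) {
+//       // ... use key_buf and val_buf ...
+//       tokudb::memory::free(chunk);
+//   }
+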
+} // namespace memory
+} // namespace tokudb
+
+#endif // _TOKUDB_MEMORY_H
diff --git a/storage/tokudb/tokudb_status.h b/storage/tokudb/tokudb_status.h
index 4600e04596f..5cca54e52c9 100644
--- a/storage/tokudb/tokudb_status.h
+++ b/storage/tokudb/tokudb_status.h
@@ -43,118 +43,374 @@ typedef ulonglong HA_METADATA_KEY;
#define status_dict_pagesize 1024
namespace tokudb {
+namespace metadata {
- // get the value for a given key in the status dictionary. copy the value to the supplied buffer.
- // returns 0 if successful.
- int get_status(DB *status_db, DB_TXN *txn, HA_METADATA_KEY k, void *p, size_t s, size_t *sp) {
- DBT key = {}; key.data = &k; key.size = sizeof k;
- DBT val = {}; val.data = p; val.ulen = (uint32_t) s; val.flags = DB_DBT_USERMEM;
- int error = status_db->get(status_db, txn, &key, &val, 0);
- if (error == 0) {
- *sp = val.size;
- }
- return error;
- }
+// get the value for a given key in the status dictionary.
+// copy the value to the supplied buffer.
+// returns 0 if successful.
+int read(
+ DB* status_db,
+ DB_TXN* txn,
+ HA_METADATA_KEY k,
+ void* p,
+ size_t s,
+ size_t* sp) {
- // get the value for a given key in the status dictionary. put the value in a realloced buffer.
- // returns 0 if successful.
- int get_status_realloc(DB *status_db, DB_TXN *txn, HA_METADATA_KEY k, void **pp, size_t *sp) {
- DBT key = {}; key.data = &k; key.size = sizeof k;
- DBT val = {}; val.data = *pp; val.size = (uint32_t) *sp; val.flags = DB_DBT_REALLOC;
- int error = status_db->get(status_db, txn, &key, &val, 0);
- if (error == 0) {
- *pp = val.data;
- *sp = val.size;
- }
- return error;
+ DBT key = {};
+ key.data = &k;
+ key.size = sizeof(k);
+ DBT val = {};
+ val.data = p;
+ val.ulen = (uint32_t)s;
+ val.flags = DB_DBT_USERMEM;
+ int error = status_db->get(status_db, txn, &key, &val, 0);
+ if (error == 0) {
+ *sp = val.size;
}
+ return error;
+}
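+
+// Usage sketch (illustrative): read a fixed-size value for a known key into
+// a caller-supplied buffer, e.g. the version stored under hatoku_new_version:
+//
+//   uint version;
+//   size_t actual;
+//   int r = tokudb::metadata::read(
+//       status_db, txn, hatoku_new_version,
+//       &version, sizeof(version), &actual);
+//   // on success, r == 0 and actual == sizeof(version)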
- // write a key value pair into the status dictionary, overwriting the previous value if any.
- // auto create a txn if necessary.
- // returns 0 if successful.
- int write_metadata(DB *status_db, void *key_data, uint key_size, void* val_data, uint val_size, DB_TXN *txn) {
- DBT key = {}; key.data = key_data; key.size = key_size;
- DBT value = {}; value.data = val_data; value.size = val_size;
- int error = status_db->put(status_db, txn, &key, &value, 0);
- return error;
- }
+// get the value for a given key in the status dictionary.
+// put the value in a realloced buffer.
+// returns 0 if successful.
+int read_realloc(
+ DB* status_db,
+ DB_TXN* txn,
+ HA_METADATA_KEY k,
+ void** pp,
+ size_t* sp) {
- // write a key value pair into the status dictionary, overwriting the previous value if any.
- // the key must be a HA_METADATA_KEY.
- // returns 0 if successful.
- int write_to_status(DB *status_db, HA_METADATA_KEY curr_key_data, void *val, size_t val_size, DB_TXN *txn) {
- return write_metadata(status_db, &curr_key_data, sizeof curr_key_data, val, val_size, txn);
+ DBT key = {};
+ key.data = &k;
+ key.size = sizeof(k);
+ DBT val = {};
+ val.data = *pp;
+ val.size = (uint32_t)*sp;
+ val.flags = DB_DBT_REALLOC;
+ int error = status_db->get(status_db, txn, &key, &val, 0);
+ if (error == 0) {
+ *pp = val.data;
+ *sp = val.size;
}
+ return error;
+}
- // remove a key from the status dictionary.
- // auto create a txn if necessary.
- // returns 0 if successful.
- int remove_metadata(DB *status_db, void *key_data, uint key_size, DB_TXN *txn) {
- DBT key = {}; key.data = key_data; key.size = key_size;
- int error = status_db->del(status_db, txn, &key, DB_DELETE_ANY);
- return error;
- }
+// write a key value pair into the status dictionary,
+// overwriting the previous value if any.
+// auto create a txn if necessary.
+// returns 0 if successful.
+int write_low(
+ DB* status_db,
+ void* key_data,
+ uint key_size,
+ void* val_data,
+ uint val_size,
+ DB_TXN *txn) {
- // remove a key from the status dictionary.
- // the key must be a HA_METADATA_KEY
- // returns 0 if successful.
- int remove_from_status(DB *status_db, HA_METADATA_KEY curr_key_data, DB_TXN *txn) {
- return remove_metadata(status_db, &curr_key_data, sizeof curr_key_data, txn);
+ DBT key = {};
+ key.data = key_data;
+ key.size = key_size;
+ DBT value = {};
+ value.data = val_data;
+ value.size = val_size;
+ int error = status_db->put(status_db, txn, &key, &value, 0);
+ return error;
+}
+
+// write a key value pair into the status dictionary,
+// overwriting the previous value if any.
+// the key must be a HA_METADATA_KEY.
+// returns 0 if successful.
+int write(
+ DB* status_db,
+ HA_METADATA_KEY curr_key_data,
+ void* val,
+ size_t val_size,
+ DB_TXN* txn) {
+
+ return
+ tokudb::metadata::write_low(
+ status_db,
+ &curr_key_data,
+ sizeof(curr_key_data),
+ val,
+ val_size,
+ txn);
+}
+
+// remove a key from the status dictionary.
+// auto create a txn if necessary.
+// returns 0 if successful.
+int remove_low(
+ DB* status_db,
+ void* key_data,
+ uint key_size,
+ DB_TXN* txn) {
+
+ DBT key = {};
+ key.data = key_data;
+ key.size = key_size;
+ int error = status_db->del(status_db, txn, &key, DB_DELETE_ANY);
+ return error;
+}
+
+// remove a key from the status dictionary.
+// the key must be a HA_METADATA_KEY
+// returns 0 if successful.
+int remove(
+ DB* status_db,
+ HA_METADATA_KEY curr_key_data,
+ DB_TXN* txn) {
+ return
+ tokudb::metadata::remove_low(
+ status_db,
+ &curr_key_data,
+ sizeof(curr_key_data),
+ txn);
+}
+
+int close(DB** status_db_ptr) {
+ int error = 0;
+ DB* status_db = *status_db_ptr;
+ if (status_db) {
+ error = status_db->close(status_db, 0);
+ if (error == 0)
+ *status_db_ptr = NULL;
}
+ return error;
+}
- int close_status(DB **status_db_ptr) {
- int error = 0;
- DB *status_db = *status_db_ptr;
- if (status_db) {
- error = status_db->close(status_db, 0);
- if (error == 0)
- *status_db_ptr = NULL;
- }
- return error;
+int create(
+ DB_ENV* env,
+ DB** status_db_ptr,
+ const char* name,
+ DB_TXN* txn) {
+
+ int error;
+ DB *status_db = NULL;
+
+ error = db_create(&status_db, env, 0);
+ if (error == 0) {
+ error = status_db->set_pagesize(status_db, status_dict_pagesize);
}
+ if (error == 0) {
+ error =
+ status_db->open(
+ status_db,
+ txn,
+ name,
+ NULL,
+ DB_BTREE,
+ DB_CREATE | DB_EXCL,
+ 0);
+ }
+ if (error == 0) {
+ *status_db_ptr = status_db;
+ } else {
+ int r = tokudb::metadata::close(&status_db);
+ assert_always(r == 0);
+ }
+ return error;
+}
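+
+// Lifecycle sketch (illustrative; key names as used later in this file):
+//
+//   DB* status_db = NULL;
+//   int r = tokudb::metadata::create(env, &status_db, dname, txn);
+//   uint version = HA_TOKU_VERSION;
+//   if (r == 0)
+//       r = tokudb::metadata::write(
+//           status_db, hatoku_new_version, &version, sizeof(version), txn);
+//   if (status_db) {
+//       int rc = tokudb::metadata::close(&status_db);
+//       assert_always(rc == 0);
+//   }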
- int create_status(DB_ENV *env, DB **status_db_ptr, const char *name, DB_TXN *txn) {
- int error;
- DB *status_db = NULL;
+int open(
+ DB_ENV* env,
+ DB** status_db_ptr,
+ const char* name,
+ DB_TXN* txn) {
- error = db_create(&status_db, env, 0);
- if (error == 0) {
- error = status_db->set_pagesize(status_db, status_dict_pagesize);
- }
- if (error == 0) {
- error = status_db->open(status_db, txn, name, NULL, DB_BTREE, DB_CREATE | DB_EXCL, 0);
- }
- if (error == 0) {
- *status_db_ptr = status_db;
- } else {
- int r = close_status(&status_db);
- assert(r == 0);
+ int error = 0;
+ DB* status_db = NULL;
+ error = db_create(&status_db, env, 0);
+ if (error == 0) {
+ error =
+ status_db->open(
+ status_db,
+ txn,
+ name,
+ NULL,
+ DB_BTREE,
+ DB_THREAD,
+ 0);
+ }
+ if (error == 0) {
+ uint32_t pagesize = 0;
+ error = status_db->get_pagesize(status_db, &pagesize);
+ if (error == 0 && pagesize > status_dict_pagesize) {
+ error =
+ status_db->change_pagesize(status_db, status_dict_pagesize);
}
- return error;
}
+ if (error == 0) {
+ *status_db_ptr = status_db;
+ } else {
+ int r = tokudb::metadata::close(&status_db);
+ assert_always(r == 0);
+ }
+ return error;
+}
- int open_status(DB_ENV *env, DB **status_db_ptr, const char *name, DB_TXN *txn) {
- int error = 0;
- DB *status_db = NULL;
- error = db_create(&status_db, env, 0);
- if (error == 0) {
- error = status_db->open(status_db, txn, name, NULL, DB_BTREE, DB_THREAD, 0);
- }
- if (error == 0) {
- uint32_t pagesize = 0;
- error = status_db->get_pagesize(status_db, &pagesize);
- if (error == 0 && pagesize > status_dict_pagesize) {
- error = status_db->change_pagesize(status_db, status_dict_pagesize);
+int strip_frm_data(DB_ENV* env) {
+ int r;
+ DB_TXN* txn = NULL;
+
+ fprintf(stderr, "TokuDB strip_frm_data : Beginning stripping process.\n");
+
+ r = env->txn_begin(env, NULL, &txn, 0);
+ assert_always(r == 0);
+
+ DBC* c = NULL;
+ r = env->get_cursor_for_directory(env, txn, &c);
+ assert_always(r == 0);
+
+ DBT key = { };
+ key.flags = DB_DBT_REALLOC;
+
+ DBT val = { };
+ val.flags = DB_DBT_REALLOC;
+ while (1) {
+ r = c->c_get(c, &key, &val, DB_NEXT);
+ if (r == DB_NOTFOUND)
+ break;
+ assert_always(r == 0);
+ const char* dname = (const char*) key.data;
+ const char* iname = (const char*) val.data;
+
+ if (strstr(iname, "_status_")) {
+ fprintf(
+ stderr,
+ "TokuDB strip_frm_data : stripping from dname=%s iname=%s\n",
+ dname,
+ iname);
+
+ DB* status_db;
+ r = tokudb::metadata::open(env, &status_db, dname, txn);
+ if (r != 0) {
+ fprintf(
+ stderr,
+ "TokuDB strip_frm_data : unable to open status file %s, "
+ "error = %d\n",
+ dname,
+ r);
+ continue;
}
+
+ // GOL : this is a godawful hack. The inventors of this did not
+ // think it would be a good idea to use some kind of magic
+ // identifier k/v pair so that you can in fact tell a proper status
+ // file from any other file that might have the string _status_ in
+ // it. Out in ha_tokudb::create, when the status file is initially
+ // created, it is immediately populated with:
+ // uint hatoku_new_version=HA_TOKU_VERSION=4 and
+ // uint hatoku_capabilities=HA_TOKU_CAP=0
+ // Since I can't count on the fact that these values are/were
+ // _always_ 4 and 0, I can count on the fact that they _must_ be
+ // there and they _must_ be sizeof(uint). That will at least give us
+ // a much better idea that these are in fact status files.
+ void* p = NULL;
+ size_t sz;
+ r =
+ tokudb::metadata::read_realloc(
+ status_db,
+ txn,
+ hatoku_new_version,
+ &p,
+ &sz);
+ if (r != 0) {
+ fprintf(
+ stderr,
+ "TokuDB strip_frm_data : does not look like a real TokuDB "
+ "status file, new_verion is missing, leaving alone %s \n",
+ dname);
+
+ r = tokudb::metadata::close(&status_db);
+ assert_always(r == 0);
+ continue;
+ } else if (sz != sizeof(uint)) {
+ fprintf(
+ stderr,
+ "TokuDB strip_frm_data : does not look like a real TokuDB "
+ "status file, new_verion is the wrong size, "
+ "leaving alone %s \n",
+ dname);
+
+ tokudb::memory::free(p);
+ r = tokudb::metadata::close(&status_db);
+ assert_always(r == 0);
+ continue;
+ }
+ tokudb::memory::free(p);
+ p = NULL;
+
+ r =
+ tokudb::metadata::read_realloc(
+ status_db,
+ txn,
+ hatoku_capabilities,
+ &p,
+ &sz);
+ if (r != 0) {
+ fprintf(
+ stderr,
+ "TokuDB strip_frm_data : does not look like a real TokuDB "
+ "status file, capabilities is missing, leaving alone %s \n",
+ dname);
+
+ r = tokudb::metadata::close(&status_db);
+ assert_always(r == 0);
+ continue;
+ } else if (sz != sizeof(uint)) {
+ fprintf(
+ stderr,
+ "TokuDB strip_frm_data : does not look like a real TokuDB "
+ "status file, capabilities is the wrong size, "
+ "leaving alone %s \n",
+ dname);
+
+ tokudb::memory::free(p);
+ r = tokudb::metadata::close(&status_db);
+ assert_always(r == 0);
+ continue;
+ }
+ tokudb::memory::free(p);
+
+ // OK, at this point, it is probably a status file; not 100%, but
+ // it looks like it :(
+ r = tokudb::metadata::remove(status_db, hatoku_frm_data, txn);
+ if (r != 0) {
+ fprintf(
+ stderr,
+ "TokuDB strip_frm_data : unable to find/strip frm data "
+ "from status file %s, error = %d\n",
+ dname,
+ r);
+ }
+
+ r = tokudb::metadata::close(&status_db);
+ assert_always(r == 0);
}
- if (error == 0) {
- *status_db_ptr = status_db;
- } else {
- int r = close_status(&status_db);
- assert(r == 0);
- }
- return error;
}
+ tokudb::memory::free(key.data);
+ tokudb::memory::free(val.data);
+
+ fprintf(
+ stderr,
+ "TokuDB strip_frm_data : Stripping process complete, beginning "
+ "commit, this may take some time.\n");
+
+ r = c->c_close(c);
+ assert_always(r == 0);
+
+ r = txn->commit(txn, 0);
+ assert_always(r == 0);
+
+ fprintf(
+ stderr,
+ "TokuDB strip_frm_data : Commit complete, resuming server init "
+ "process.");
+
+ return 0;
}
+} // namespace metadata
+} // namespace tokudb
#endif
diff --git a/storage/tokudb/tokudb_sysvars.cc b/storage/tokudb/tokudb_sysvars.cc
new file mode 100644
index 00000000000..3ec7a0e1f05
--- /dev/null
+++ b/storage/tokudb/tokudb_sysvars.cc
@@ -0,0 +1,1149 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of TokuDB
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ TokuDB is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ TokuDB is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with TokuDB. If not, see <http://www.gnu.org/licenses/>.
+
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "hatoku_hton.h"
+
+namespace tokudb {
+namespace sysvars {
+
+//******************************************************************************
+// global variables
+//******************************************************************************
+#ifdef TOKUDB_VERSION
+#define tokudb_stringify_2(x) #x
+#define tokudb_stringify(x) tokudb_stringify_2(x)
+#define TOKUDB_VERSION_STR tokudb_stringify(TOKUDB_VERSION)
+#else
+#define TOKUDB_VERSION_STR NULL
+#endif
+
+ulonglong cache_size = 0;
+uint cachetable_pool_threads = 0;
+int cardinality_scale_percent = 0;
+my_bool checkpoint_on_flush_logs = FALSE;
+uint checkpoint_pool_threads = 0;
+uint checkpointing_period = 0;
+ulong cleaner_iterations = 0;
+ulong cleaner_period = 0;
+uint client_pool_threads = 0;
+my_bool compress_buffers_before_eviction = TRUE;
+char* data_dir = NULL;
+ulong debug = 0;
+#if TOKUDB_DEBUG
+// used to control background job manager
+my_bool debug_pause_background_job_manager = FALSE;
+#endif
+my_bool directio = FALSE;
+my_bool enable_partial_eviction = TRUE;
+int fs_reserve_percent = 0;
+uint fsync_log_period = 0;
+char* log_dir = NULL;
+ulonglong max_lock_memory = 0;
+uint read_status_frequency = 0;
+my_bool strip_frm_data = FALSE;
+char* tmp_dir = NULL;
+uint write_status_frequency = 0;
+char* version = (char*) TOKUDB_VERSION_STR;
+
+// file system reserve as a percentage of total disk space
+#if TOKU_INCLUDE_HANDLERTON_HANDLE_FATAL_SIGNAL
+char* gdb_path = NULL;
+my_bool gdb_on_fatal = FALSE;
+#endif
+
+#if TOKUDB_CHECK_JEMALLOC
+uint check_jemalloc = 0;
+#endif
+
+static MYSQL_SYSVAR_ULONGLONG(
+ cache_size,
+ cache_size,
+ PLUGIN_VAR_READONLY,
+ "cache table size",
+ NULL,
+ NULL,
+ 0,
+ 0,
+ ~0ULL,
+ 0);
+
+static MYSQL_SYSVAR_UINT(
+ cachetable_pool_threads,
+ cachetable_pool_threads,
+ PLUGIN_VAR_READONLY,
+ "cachetable ops thread pool size",
+ NULL,
+ NULL,
+ 0,
+ 0,
+ 1024,
+ 0);
+
+static MYSQL_SYSVAR_INT(
+ cardinality_scale_percent,
+ cardinality_scale_percent,
+ 0,
+ "index cardinality scale percentage",
+ NULL,
+ NULL,
+ 50,
+ 0,
+ 100,
+ 0);
+
+static MYSQL_SYSVAR_BOOL(
+ checkpoint_on_flush_logs,
+ checkpoint_on_flush_logs,
+ 0,
+ "checkpoint on flush logs",
+ NULL,
+ NULL,
+ FALSE);
+
+static MYSQL_SYSVAR_UINT(
+ checkpoint_pool_threads,
+ checkpoint_pool_threads,
+ PLUGIN_VAR_READONLY,
+ "checkpoint ops thread pool size",
+ NULL,
+ NULL,
+ 0,
+ 0,
+ 1024,
+ 0);
+
+static void checkpointing_period_update(
+ THD* thd,
+ st_mysql_sys_var* sys_var,
+ void* var,
+ const void* save) {
+
+ uint* cp = (uint*)var;
+ *cp = *(const uint*)save;
+ int r = db_env->checkpointing_set_period(db_env, *cp);
+ assert(r == 0);
+}
+
+static MYSQL_SYSVAR_UINT(
+ checkpointing_period,
+ checkpointing_period,
+ 0,
+ "checkpointing period",
+ NULL,
+ checkpointing_period_update,
+ 60,
+ 0,
+ ~0U,
+ 0);
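+
+// Illustrative (assumed server-side usage): because an update callback is
+// supplied, changing the global takes effect immediately, e.g.
+//
+//   SET GLOBAL tokudb_checkpointing_period = 120;
+//
+// invokes checkpointing_period_update(), which pushes the new value into
+// db_env via checkpointing_set_period().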
+
+static void cleaner_iterations_update(
+ THD* thd,
+ st_mysql_sys_var* sys_var,
+ void* var,
+ const void* save) {
+
+ ulong* ci = (ulong*)var;
+ *ci = *(const ulong*)save;
+ int r = db_env->cleaner_set_iterations(db_env, *ci);
+ assert(r == 0);
+}
+
+static MYSQL_SYSVAR_ULONG(
+ cleaner_iterations,
+ cleaner_iterations,
+ 0,
+ "cleaner_iterations",
+ NULL,
+ cleaner_iterations_update,
+ DEFAULT_TOKUDB_CLEANER_ITERATIONS,
+ 0,
+ ~0UL,
+ 0);
+
+static void cleaner_period_update(
+ THD* thd,
+ st_mysql_sys_var* sys_var,
+ void* var,
+ const void* save) {
+
+ ulong* cp = (ulong*)var;
+ *cp = *(const ulong*)save;
+ int r = db_env->cleaner_set_period(db_env, *cp);
+ assert(r == 0);
+}
+
+static MYSQL_SYSVAR_ULONG(
+ cleaner_period,
+ cleaner_period,
+ 0,
+ "cleaner_period",
+ NULL,
+ cleaner_period_update,
+ DEFAULT_TOKUDB_CLEANER_PERIOD,
+ 0,
+ ~0UL,
+ 0);
+
+static MYSQL_SYSVAR_UINT(
+ client_pool_threads,
+ client_pool_threads,
+ PLUGIN_VAR_READONLY,
+ "client ops thread pool size",
+ NULL,
+ NULL,
+ 0,
+ 0,
+ 1024,
+ 0);
+
+static MYSQL_SYSVAR_BOOL(
+ compress_buffers_before_eviction,
+ compress_buffers_before_eviction,
+ PLUGIN_VAR_READONLY,
+ "enable in-memory buffer compression before partial eviction",
+ NULL,
+ NULL,
+ TRUE);
+
+static MYSQL_SYSVAR_STR(
+ data_dir,
+ data_dir,
+ PLUGIN_VAR_READONLY,
+ "data directory",
+ NULL,
+ NULL,
+ NULL);
+
+static MYSQL_SYSVAR_ULONG(
+ debug,
+ debug,
+ 0,
+ "plugin debug mask",
+ NULL,
+ NULL,
+ 0,
+ 0,
+ ~0UL,
+ 0);
+
+#if TOKUDB_DEBUG
+static MYSQL_SYSVAR_BOOL(
+ debug_pause_background_job_manager,
+ debug_pause_background_job_manager,
+ 0,
+ "debug : pause the background job manager",
+ NULL,
+ NULL,
+ FALSE);
+#endif // TOKUDB_DEBUG
+
+static MYSQL_SYSVAR_BOOL(
+ directio,
+ directio,
+ PLUGIN_VAR_READONLY, "enable direct i/o ",
+ NULL,
+ NULL,
+ FALSE);
+
+static void enable_partial_eviction_update(
+ THD* thd,
+ st_mysql_sys_var* sys_var,
+ void* var,
+ const void* save) {
+
+ my_bool* epe = (my_bool*)var;
+ *epe = *(const my_bool*)save;
+ int r = db_env->evictor_set_enable_partial_eviction(db_env, *epe);
+ assert(r == 0);
+}
+
+static MYSQL_SYSVAR_BOOL(
+ enable_partial_eviction,
+ enable_partial_eviction,
+ 0,
+ "enable partial node eviction",
+ NULL,
+ enable_partial_eviction_update,
+ TRUE);
+
+static MYSQL_SYSVAR_INT(
+ fs_reserve_percent,
+ fs_reserve_percent,
+ PLUGIN_VAR_READONLY,
+ "file system space reserve (percent free required)",
+ NULL,
+ NULL,
+ 5,
+ 0,
+ 100,
+ 0);
+
+static void fsync_log_period_update(
+ THD* thd,
+ st_mysql_sys_var* sys_var,
+ void* var,
+ const void* save) {
+
+ uint* flp = (uint*)var;
+ *flp = *(const uint*)save;
+ db_env->change_fsync_log_period(db_env, *flp);
+}
+
+static MYSQL_SYSVAR_UINT(
+ fsync_log_period,
+ fsync_log_period,
+ 0,
+ "fsync log period",
+ NULL,
+ fsync_log_period_update,
+ 0,
+ 0,
+ ~0U,
+ 0);
+
+static MYSQL_SYSVAR_STR(
+ log_dir,
+ log_dir,
+ PLUGIN_VAR_READONLY,
+ "log directory",
+ NULL,
+ NULL,
+ NULL);
+
+static MYSQL_SYSVAR_ULONGLONG(
+ max_lock_memory,
+ max_lock_memory,
+ PLUGIN_VAR_READONLY,
+ "max memory for locks",
+ NULL,
+ NULL,
+ 0,
+ 0,
+ ~0ULL,
+ 0);
+
+static MYSQL_SYSVAR_UINT(
+ read_status_frequency,
+ read_status_frequency,
+ 0,
+ "frequency that show processlist updates status of reads",
+ NULL,
+ NULL,
+ 10000,
+ 0,
+ ~0U,
+ 0);
+
+static MYSQL_SYSVAR_BOOL(
+ strip_frm_data,
+ strip_frm_data,
+ PLUGIN_VAR_READONLY,
+ "strip .frm data from metadata file(s)",
+ NULL,
+ NULL,
+ FALSE);
+
+static MYSQL_SYSVAR_STR(
+ tmp_dir,
+ tmp_dir,
+ PLUGIN_VAR_READONLY,
+ "directory to use for temporary files",
+ NULL,
+ NULL,
+ NULL);
+
+static MYSQL_SYSVAR_STR(
+ version,
+ version,
+ PLUGIN_VAR_READONLY,
+ "plugin version",
+ NULL,
+ NULL,
+ NULL);
+
+static MYSQL_SYSVAR_UINT(
+ write_status_frequency,
+ write_status_frequency,
+ 0,
+ "frequency that show processlist updates status of writes",
+ NULL,
+ NULL,
+ 1000,
+ 0,
+ ~0U,
+ 0);
+
+#if TOKU_INCLUDE_HANDLERTON_HANDLE_FATAL_SIGNAL
+static MYSQL_SYSVAR_STR(
+ gdb_path,
+ gdb_path,
+ PLUGIN_VAR_READONLY | PLUGIN_VAR_RQCMDARG,
+ "path to gdb for extra debug info on fatal signal",
+ NULL,
+ NULL,
+ "/usr/bin/gdb");
+
+static MYSQL_SYSVAR_BOOL(
+ gdb_on_fatal,
+ gdb_on_fatal,
+ 0,
+ "enable gdb debug info on fatal signal",
+ NULL,
+ NULL,
+ TRUE);
+#endif
+
+#if TOKUDB_CHECK_JEMALLOC
+static MYSQL_SYSVAR_UINT(
+ check_jemalloc,
+ check_jemalloc,
+ 0,
+ "check if jemalloc is linked",
+ NULL,
+ NULL,
+ 1,
+ 0,
+ 1,
+ 0);
+#endif
+
+
+//******************************************************************************
+// session variables
+//******************************************************************************
+static MYSQL_THDVAR_BOOL(
+ alter_print_error,
+ 0,
+ "print errors for alter table operations",
+ NULL,
+ NULL,
+ false);
+
+static MYSQL_THDVAR_DOUBLE(
+ analyze_delete_fraction,
+ 0,
+ "fraction of rows allowed to be deleted",
+ NULL,
+ NULL,
+ 1.0,
+ 0,
+ 1.0,
+ 1);
+
+static MYSQL_THDVAR_BOOL(
+ analyze_in_background,
+ 0,
+ "dispatch ANALYZE TABLE to background job.",
+ NULL,
+ NULL,
+ false);
+
+const char* srv_analyze_mode_names[] = {
+ "TOKUDB_ANALYZE_STANDARD",
+ "TOKUDB_ANALYZE_RECOUNT_ROWS",
+ "TOKUDB_ANALYZE_CANCEL",
+ NullS
+};
+
+static TYPELIB tokudb_analyze_mode_typelib = {
+ array_elements(srv_analyze_mode_names) - 1,
+ "tokudb_analyze_mode_typelib",
+ srv_analyze_mode_names,
+ NULL
+};
+
+static MYSQL_THDVAR_ENUM(analyze_mode,
+ PLUGIN_VAR_RQCMDARG,
+ "Controls the function of ANALYZE TABLE. Possible values are: "
+ "TOKUDB_ANALYZE_STANDARD perform standard table analysis (default); "
+ "TOKUDB_ANALYZE_RECOUNT_ROWS perform logical recount of table rows;"
+ "TOKUDB_ANALYZE_CANCEL terminate and cancel all scheduled background jobs "
+ "for a table",
+ NULL,
+ NULL,
+ TOKUDB_ANALYZE_STANDARD,
+ &tokudb_analyze_mode_typelib);
+
+static MYSQL_THDVAR_ULONGLONG(
+ analyze_throttle,
+ 0,
+ "analyze throttle (keys)",
+ NULL,
+ NULL,
+ 0,
+ 0,
+ ~0U,
+ 1);
+
+static MYSQL_THDVAR_UINT(
+ analyze_time,
+ 0,
+ "analyze time (seconds)",
+ NULL,
+ NULL,
+ 5,
+ 0,
+ ~0U,
+ 1);
+
+static MYSQL_THDVAR_ULONGLONG(
+ auto_analyze,
+ 0,
+ "auto analyze threshold (percent)",
+ NULL,
+ NULL,
+ 0,
+ 0,
+ ~0U,
+ 1);
+
+static MYSQL_THDVAR_UINT(
+ block_size,
+ 0,
+ "fractal tree block size",
+ NULL,
+ NULL,
+ 4<<20,
+ 4096,
+ ~0U,
+ 1);
+
+static MYSQL_THDVAR_BOOL(
+ bulk_fetch,
+ PLUGIN_VAR_THDLOCAL,
+ "enable bulk fetch",
+ NULL,
+ NULL,
+ true);
+
+static void checkpoint_lock_update(
+ THD* thd,
+ st_mysql_sys_var* var,
+ void* var_ptr,
+ const void* save) {
+
+ my_bool* val = (my_bool*)var_ptr;
+    *val = *(const my_bool*)save ? true : false;
+ if (*val) {
+ tokudb_checkpoint_lock(thd);
+ } else {
+ tokudb_checkpoint_unlock(thd);
+ }
+}
+
+static MYSQL_THDVAR_BOOL(
+ checkpoint_lock,
+ 0,
+ "checkpoint lock",
+ NULL,
+ checkpoint_lock_update,
+ false);
+
+static MYSQL_THDVAR_BOOL(
+ commit_sync,
+ PLUGIN_VAR_THDLOCAL,
+ "sync on txn commit",
+ NULL,
+ NULL,
+ true);
+
+static MYSQL_THDVAR_BOOL(
+ create_index_online,
+ 0,
+ "if on, create index done online",
+ NULL,
+ NULL,
+ true);
+
+static MYSQL_THDVAR_BOOL(
+ disable_hot_alter,
+ 0,
+ "if on, hot alter table is disabled",
+ NULL,
+ NULL,
+ false);
+
+static MYSQL_THDVAR_BOOL(
+ disable_prefetching,
+ 0,
+ "if on, prefetching disabled",
+ NULL,
+ NULL,
+ false);
+
+static MYSQL_THDVAR_BOOL(
+ disable_slow_alter,
+ 0,
+ "if on, alter tables that require copy are disabled",
+ NULL,
+ NULL,
+ false);
+
+static const char *tokudb_empty_scan_names[] = {
+ "disabled",
+ "lr",
+ "rl",
+ NullS
+};
+
+static TYPELIB tokudb_empty_scan_typelib = {
+ array_elements(tokudb_empty_scan_names) - 1,
+ "tokudb_empty_scan_typelib",
+ tokudb_empty_scan_names,
+ NULL
+};
+
+static MYSQL_THDVAR_ENUM(
+ empty_scan,
+ PLUGIN_VAR_OPCMDARG,
+ "algorithm to check if the table is empty when opened",
+ NULL,
+ NULL,
+ TOKUDB_EMPTY_SCAN_RL,
+ &tokudb_empty_scan_typelib);
+
+static MYSQL_THDVAR_UINT(
+ fanout,
+ 0,
+ "fractal tree fanout",
+ NULL,
+ NULL,
+ 16,
+ 2,
+ 16*1024,
+ 1);
+
+static MYSQL_THDVAR_BOOL(
+ hide_default_row_format,
+ 0,
+ "hide the default row format",
+ NULL,
+ NULL,
+ true);
+
+static MYSQL_THDVAR_ULONGLONG(
+ killed_time,
+ 0,
+ "killed time",
+ NULL,
+ NULL,
+ DEFAULT_TOKUDB_KILLED_TIME,
+ 0,
+ ~0ULL,
+ 1);
+
+static MYSQL_THDVAR_STR(
+ last_lock_timeout,
+ PLUGIN_VAR_MEMALLOC,
+ "last lock timeout",
+ NULL,
+ NULL,
+ NULL);
+
+static MYSQL_THDVAR_BOOL(
+ load_save_space,
+ 0,
+ "compress intermediate bulk loader files to save space",
+ NULL,
+ NULL,
+ true);
+
+static MYSQL_THDVAR_ULONGLONG(
+ loader_memory_size,
+ 0,
+ "loader memory size",
+ NULL,
+ NULL,
+ 100*1000*1000,
+ 0,
+ ~0ULL,
+ 1);
+
+static MYSQL_THDVAR_ULONGLONG(
+ lock_timeout,
+ 0,
+ "lock timeout",
+ NULL,
+ NULL,
+ DEFAULT_TOKUDB_LOCK_TIMEOUT,
+ 0,
+ ~0ULL,
+ 1);
+
+static MYSQL_THDVAR_UINT(
+ lock_timeout_debug,
+ 0,
+ "lock timeout debug",
+ NULL,
+ NULL,
+ 1,
+ 0,
+ ~0U,
+ 1);
+
+static MYSQL_THDVAR_DOUBLE(
+ optimize_index_fraction,
+ 0,
+ "optimize index fraction (default 1.0 all)",
+ NULL,
+ NULL,
+ 1.0,
+ 0,
+ 1.0,
+ 1);
+
+static MYSQL_THDVAR_STR(
+ optimize_index_name,
+    PLUGIN_VAR_THDLOCAL | PLUGIN_VAR_MEMALLOC,
+ "optimize index name (default all indexes)",
+ NULL,
+ NULL,
+ NULL);
+
+static MYSQL_THDVAR_ULONGLONG(
+ optimize_throttle,
+ 0,
+ "optimize throttle (default no throttle)",
+ NULL,
+ NULL,
+ 0,
+ 0,
+ ~0ULL,
+ 1);
+
+static const char* deprecated_tokudb_pk_insert_mode =
+ "Using tokudb_pk_insert_mode is deprecated and the "
+ "parameter may be removed in future releases.";
+static const char* deprecated_tokudb_pk_insert_mode_zero =
+ "Using tokudb_pk_insert_mode=0 is deprecated and the "
+ "parameter may be removed in future releases. "
+ "Only tokudb_pk_insert_mode=1|2 is allowed."
+ "Resettig the value to 1.";
+
+static void pk_insert_mode_update(
+ THD* thd,
+ st_mysql_sys_var* var,
+ void* var_ptr,
+ const void* save) {
+ const uint* new_pk_insert_mode = static_cast<const uint*>(save);
+ uint* pk_insert_mode = static_cast<uint*>(var_ptr);
+ if (*new_pk_insert_mode == 0) {
+ push_warning(
+ thd,
+ Sql_condition::WARN_LEVEL_WARN,
+ HA_ERR_WRONG_COMMAND,
+ deprecated_tokudb_pk_insert_mode_zero);
+ *pk_insert_mode = 1;
+ } else {
+ push_warning(
+ thd,
+ Sql_condition::WARN_LEVEL_WARN,
+ HA_ERR_WRONG_COMMAND,
+ deprecated_tokudb_pk_insert_mode);
+ *pk_insert_mode = *new_pk_insert_mode;
+ }
+}
+
+static MYSQL_THDVAR_UINT(
+ pk_insert_mode,
+ 0,
+ "set the primary key insert mode",
+ NULL,
+ pk_insert_mode_update,
+ 1,
+ 0,
+ 2,
+ 1);
+
+static MYSQL_THDVAR_BOOL(
+ prelock_empty,
+ 0,
+ "prelock empty table",
+ NULL,
+ NULL,
+ true);
+
+static MYSQL_THDVAR_UINT(
+ read_block_size,
+ 0,
+ "fractal tree read block size",
+ NULL,
+ NULL,
+ 64*1024,
+ 4096,
+ ~0U,
+ 1);
+
+static MYSQL_THDVAR_UINT(
+ read_buf_size,
+ 0,
+ "range query read buffer size",
+ NULL,
+ NULL,
+ 128*1024,
+ 0,
+ 1*1024*1024,
+ 1);
+
+static const char *tokudb_row_format_names[] = {
+ "tokudb_uncompressed",
+ "tokudb_zlib",
+ "tokudb_snappy",
+ "tokudb_quicklz",
+ "tokudb_lzma",
+ "tokudb_fast",
+ "tokudb_small",
+ "tokudb_default",
+ NullS
+};
+
+static TYPELIB tokudb_row_format_typelib = {
+ array_elements(tokudb_row_format_names) - 1,
+ "tokudb_row_format_typelib",
+ tokudb_row_format_names,
+ NULL
+};
+
+static MYSQL_THDVAR_ENUM(
+ row_format,
+ PLUGIN_VAR_OPCMDARG,
+ "Specifies the compression method for a table created during this session. "
+ "Possible values are TOKUDB_UNCOMPRESSED, TOKUDB_ZLIB, TOKUDB_SNAPPY, "
+ "TOKUDB_QUICKLZ, TOKUDB_LZMA, TOKUDB_FAST, TOKUDB_SMALL and TOKUDB_DEFAULT",
+ NULL,
+ NULL,
+ SRV_ROW_FORMAT_ZLIB,
+ &tokudb_row_format_typelib);
+
+static MYSQL_THDVAR_BOOL(
+ rpl_check_readonly,
+ PLUGIN_VAR_THDLOCAL,
+ "check if the slave is read only",
+ NULL,
+ NULL,
+ true);
+
+static MYSQL_THDVAR_BOOL(
+ rpl_lookup_rows,
+ PLUGIN_VAR_THDLOCAL,
+ "lookup a row on rpl slave",
+ NULL,
+ NULL,
+ true);
+
+static MYSQL_THDVAR_ULONGLONG(
+ rpl_lookup_rows_delay,
+ PLUGIN_VAR_THDLOCAL,
+ "time in milliseconds to add to lookups on replication slave",
+ NULL,
+ NULL,
+ 0,
+ 0,
+ ~0ULL,
+ 1);
+
+static MYSQL_THDVAR_BOOL(
+ rpl_unique_checks,
+ PLUGIN_VAR_THDLOCAL,
+ "enable unique checks on replication slave",
+ NULL,
+ NULL,
+ true);
+
+static MYSQL_THDVAR_ULONGLONG(
+ rpl_unique_checks_delay,
+ PLUGIN_VAR_THDLOCAL,
+ "time in milliseconds to add to unique checks test on replication slave",
+ NULL,
+ NULL,
+ 0,
+ 0,
+ ~0ULL,
+ 1);
+
+#if TOKU_INCLUDE_UPSERT
+static MYSQL_THDVAR_BOOL(
+ disable_slow_update,
+ PLUGIN_VAR_THDLOCAL,
+ "disable slow update",
+ NULL,
+ NULL,
+ false);
+
+static MYSQL_THDVAR_BOOL(
+ disable_slow_upsert,
+ PLUGIN_VAR_THDLOCAL,
+ "disable slow upsert",
+ NULL,
+ NULL,
+ false);
+#endif
+
+#if TOKU_INCLUDE_XA
+static MYSQL_THDVAR_BOOL(
+ support_xa,
+ PLUGIN_VAR_OPCMDARG,
+ "Enable TokuDB support for the XA two-phase commit",
+ NULL,
+ NULL,
+ true);
+#endif
+
+
+
+//******************************************************************************
+// all system variables
+//******************************************************************************
+st_mysql_sys_var* system_variables[] = {
+ // global vars
+ MYSQL_SYSVAR(cache_size),
+ MYSQL_SYSVAR(checkpoint_on_flush_logs),
+ MYSQL_SYSVAR(cachetable_pool_threads),
+ MYSQL_SYSVAR(cardinality_scale_percent),
+ MYSQL_SYSVAR(checkpoint_pool_threads),
+ MYSQL_SYSVAR(checkpointing_period),
+ MYSQL_SYSVAR(cleaner_iterations),
+ MYSQL_SYSVAR(cleaner_period),
+ MYSQL_SYSVAR(client_pool_threads),
+ MYSQL_SYSVAR(compress_buffers_before_eviction),
+ MYSQL_SYSVAR(data_dir),
+ MYSQL_SYSVAR(debug),
+ MYSQL_SYSVAR(directio),
+ MYSQL_SYSVAR(enable_partial_eviction),
+ MYSQL_SYSVAR(fs_reserve_percent),
+ MYSQL_SYSVAR(fsync_log_period),
+ MYSQL_SYSVAR(log_dir),
+ MYSQL_SYSVAR(max_lock_memory),
+ MYSQL_SYSVAR(read_status_frequency),
+ MYSQL_SYSVAR(strip_frm_data),
+ MYSQL_SYSVAR(tmp_dir),
+ MYSQL_SYSVAR(version),
+ MYSQL_SYSVAR(write_status_frequency),
+
+#if TOKU_INCLUDE_HANDLERTON_HANDLE_FATAL_SIGNAL
+ MYSQL_SYSVAR(gdb_path),
+ MYSQL_SYSVAR(gdb_on_fatal),
+#endif
+
+#if TOKUDB_CHECK_JEMALLOC
+ MYSQL_SYSVAR(check_jemalloc),
+#endif
+
+ // session vars
+ MYSQL_SYSVAR(alter_print_error),
+ MYSQL_SYSVAR(analyze_delete_fraction),
+ MYSQL_SYSVAR(analyze_in_background),
+ MYSQL_SYSVAR(analyze_mode),
+ MYSQL_SYSVAR(analyze_throttle),
+ MYSQL_SYSVAR(analyze_time),
+ MYSQL_SYSVAR(auto_analyze),
+ MYSQL_SYSVAR(block_size),
+ MYSQL_SYSVAR(bulk_fetch),
+ MYSQL_SYSVAR(checkpoint_lock),
+ MYSQL_SYSVAR(commit_sync),
+ MYSQL_SYSVAR(create_index_online),
+ MYSQL_SYSVAR(disable_hot_alter),
+ MYSQL_SYSVAR(disable_prefetching),
+ MYSQL_SYSVAR(disable_slow_alter),
+ MYSQL_SYSVAR(empty_scan),
+ MYSQL_SYSVAR(fanout),
+ MYSQL_SYSVAR(hide_default_row_format),
+ MYSQL_SYSVAR(killed_time),
+ MYSQL_SYSVAR(last_lock_timeout),
+ MYSQL_SYSVAR(load_save_space),
+ MYSQL_SYSVAR(loader_memory_size),
+ MYSQL_SYSVAR(lock_timeout),
+ MYSQL_SYSVAR(lock_timeout_debug),
+ MYSQL_SYSVAR(optimize_index_fraction),
+ MYSQL_SYSVAR(optimize_index_name),
+ MYSQL_SYSVAR(optimize_throttle),
+ MYSQL_SYSVAR(pk_insert_mode),
+ MYSQL_SYSVAR(prelock_empty),
+ MYSQL_SYSVAR(read_block_size),
+ MYSQL_SYSVAR(read_buf_size),
+ MYSQL_SYSVAR(row_format),
+ MYSQL_SYSVAR(rpl_check_readonly),
+ MYSQL_SYSVAR(rpl_lookup_rows),
+ MYSQL_SYSVAR(rpl_lookup_rows_delay),
+ MYSQL_SYSVAR(rpl_unique_checks),
+ MYSQL_SYSVAR(rpl_unique_checks_delay),
+
+#if TOKU_INCLUDE_UPSERT
+ MYSQL_SYSVAR(disable_slow_update),
+ MYSQL_SYSVAR(disable_slow_upsert),
+#endif
+
+#if TOKU_INCLUDE_XA
+ MYSQL_SYSVAR(support_xa),
+#endif
+
+#if TOKUDB_DEBUG
+ MYSQL_SYSVAR(debug_pause_background_job_manager),
+#endif // TOKUDB_DEBUG
+
+ NULL
+};
+
+my_bool alter_print_error(THD* thd) {
+ return (THDVAR(thd, alter_print_error) != 0);
+}
+double analyze_delete_fraction(THD* thd) {
+ return THDVAR(thd, analyze_delete_fraction);
+}
+my_bool analyze_in_background(THD* thd) {
+ return (THDVAR(thd, analyze_in_background) != 0);
+}
+analyze_mode_t analyze_mode(THD* thd) {
+ return (analyze_mode_t ) THDVAR(thd, analyze_mode);
+}
+ulonglong analyze_throttle(THD* thd) {
+ return THDVAR(thd, analyze_throttle);
+}
+ulonglong analyze_time(THD* thd) {
+ return THDVAR(thd, analyze_time);
+}
+ulonglong auto_analyze(THD* thd) {
+ return THDVAR(thd, auto_analyze);
+}
+my_bool bulk_fetch(THD* thd) {
+ return (THDVAR(thd, bulk_fetch) != 0);
+}
+uint block_size(THD* thd) {
+ return THDVAR(thd, block_size);
+}
+my_bool commit_sync(THD* thd) {
+ return (THDVAR(thd, commit_sync) != 0);
+}
+my_bool create_index_online(THD* thd) {
+ return (THDVAR(thd, create_index_online) != 0);
+}
+my_bool disable_hot_alter(THD* thd) {
+ return (THDVAR(thd, disable_hot_alter) != 0);
+}
+my_bool disable_prefetching(THD* thd) {
+ return (THDVAR(thd, disable_prefetching) != 0);
+}
+my_bool disable_slow_alter(THD* thd) {
+ return (THDVAR(thd, disable_slow_alter) != 0);
+}
+#if TOKU_INCLUDE_UPSERT
+my_bool disable_slow_update(THD* thd) {
+ return (THDVAR(thd, disable_slow_update) != 0);
+}
+my_bool disable_slow_upsert(THD* thd) {
+ return (THDVAR(thd, disable_slow_upsert) != 0);
+}
+#endif
+empty_scan_mode_t empty_scan(THD* thd) {
+ return (empty_scan_mode_t)THDVAR(thd, empty_scan);
+}
+uint fanout(THD* thd) {
+ return THDVAR(thd, fanout);
+}
+my_bool hide_default_row_format(THD* thd) {
+ return (THDVAR(thd, hide_default_row_format) != 0);
+}
+ulonglong killed_time(THD* thd) {
+ return THDVAR(thd, killed_time);
+}
+char* last_lock_timeout(THD* thd) {
+ return THDVAR(thd, last_lock_timeout);
+}
+void set_last_lock_timeout(THD* thd, char* last) {
+ THDVAR(thd, last_lock_timeout) = last;
+}
+my_bool load_save_space(THD* thd) {
+ return (THDVAR(thd, load_save_space) != 0);
+}
+ulonglong loader_memory_size(THD* thd) {
+ return THDVAR(thd, loader_memory_size);
+}
+ulonglong lock_timeout(THD* thd) {
+ return THDVAR(thd, lock_timeout);
+}
+uint lock_timeout_debug(THD* thd) {
+ return THDVAR(thd, lock_timeout_debug);
+}
+double optimize_index_fraction(THD* thd) {
+ return THDVAR(thd, optimize_index_fraction);
+}
+const char* optimize_index_name(THD* thd) {
+ return THDVAR(thd, optimize_index_name);
+}
+ulonglong optimize_throttle(THD* thd) {
+ return THDVAR(thd, optimize_throttle);
+}
+uint pk_insert_mode(THD* thd) {
+ return THDVAR(thd, pk_insert_mode);
+}
+void set_pk_insert_mode(THD* thd, uint mode) {
+ THDVAR(thd, pk_insert_mode) = mode;
+}
+my_bool prelock_empty(THD* thd) {
+ return (THDVAR(thd, prelock_empty) != 0);
+}
+uint read_block_size(THD* thd) {
+ return THDVAR(thd, read_block_size);
+}
+uint read_buf_size(THD* thd) {
+ return THDVAR(thd, read_buf_size);
+}
+row_format_t row_format(THD *thd) {
+ return (row_format_t) THDVAR(thd, row_format);
+}
+my_bool rpl_check_readonly(THD* thd) {
+ return (THDVAR(thd, rpl_check_readonly) != 0);
+}
+my_bool rpl_lookup_rows(THD* thd) {
+ return (THDVAR(thd, rpl_lookup_rows) != 0);
+}
+ulonglong rpl_lookup_rows_delay(THD* thd) {
+ return THDVAR(thd, rpl_lookup_rows_delay);
+}
+my_bool rpl_unique_checks(THD* thd) {
+ return (THDVAR(thd, rpl_unique_checks) != 0);
+}
+ulonglong rpl_unique_checks_delay(THD* thd) {
+ return THDVAR(thd, rpl_unique_checks_delay);
+}
+my_bool support_xa(THD* thd) {
+ return (THDVAR(thd, support_xa) != 0);
+}
+
+#if TOKU_INCLUDE_OPTION_STRUCTS
+ha_create_table_option tokudb_table_options[] = {
+ HA_TOPTION_SYSVAR("compression", row_format, row_format),
+ HA_TOPTION_END
+};
+
+ha_create_table_option tokudb_index_options[] = {
+ HA_IOPTION_BOOL("clustering", clustering, 0),
+ HA_IOPTION_END
+};
+#endif
+
+} // namespace sysvars
+} // namespace tokudb
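A minimal sketch of how handler code is expected to consume this accessor layer (the apply_session_settings function is hypothetical; only the tokudb::sysvars accessors come from the patch):

    // Handler-side call site: read per-session settings through the
    // accessor wrappers instead of touching THDVAR directly, which
    // keeps the THDVAR plumbing private to tokudb_sysvars.cc.
    static void apply_session_settings(THD* thd) {
        ulonglong timeout_ms = tokudb::sysvars::lock_timeout(thd);
        tokudb::sysvars::row_format_t fmt = tokudb::sysvars::row_format(thd);
        if (tokudb::sysvars::analyze_in_background(thd)) {
            // dispatch ANALYZE TABLE work to the background job manager
        }
        (void)timeout_ms;
        (void)fmt;
    }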
diff --git a/storage/tokudb/tokudb_sysvars.h b/storage/tokudb/tokudb_sysvars.h
new file mode 100644
index 00000000000..b67cf8aa0e2
--- /dev/null
+++ b/storage/tokudb/tokudb_sysvars.h
@@ -0,0 +1,173 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of TokuDB
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+    TokuDB is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ TokuDB is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with TokuDB. If not, see <http://www.gnu.org/licenses/>.
+
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#ifndef _TOKUDB_SYSVARS_H
+#define _TOKUDB_SYSVARS_H
+
+#if TOKU_INCLUDE_OPTION_STRUCTS
+struct ha_table_option_struct {
+ uint row_format;
+};
+
+struct ha_index_option_struct {
+ bool clustering;
+};
+
+static inline bool key_is_clustering(const KEY *key) {
+    return (key->flags & HA_CLUSTERING) ||
+        (key->option_struct && key->option_struct->clustering);
+}
+
+#else
+
+static inline bool key_is_clustering(const KEY *key) {
+ return key->flags & HA_CLUSTERING;
+}
+#endif
+
+namespace tokudb {
+namespace sysvars {
+
+enum analyze_mode_t {
+ TOKUDB_ANALYZE_STANDARD = 0,
+ TOKUDB_ANALYZE_RECOUNT_ROWS = 1,
+ TOKUDB_ANALYZE_CANCEL = 2
+};
+
+enum empty_scan_mode_t {
+ TOKUDB_EMPTY_SCAN_DISABLED = 0,
+ TOKUDB_EMPTY_SCAN_LR = 1,
+ TOKUDB_EMPTY_SCAN_RL = 2,
+};
+
+enum row_format_t {
+ SRV_ROW_FORMAT_UNCOMPRESSED = 0,
+ SRV_ROW_FORMAT_ZLIB = 1,
+ SRV_ROW_FORMAT_SNAPPY = 2,
+ SRV_ROW_FORMAT_QUICKLZ = 3,
+ SRV_ROW_FORMAT_LZMA = 4,
+ SRV_ROW_FORMAT_FAST = 5,
+ SRV_ROW_FORMAT_SMALL = 6,
+ SRV_ROW_FORMAT_DEFAULT = 7
+};
+
+#define DEFAULT_TOKUDB_CLEANER_ITERATIONS 5
+#define DEFAULT_TOKUDB_CLEANER_PERIOD 1
+#define DEFAULT_TOKUDB_KILLED_TIME 4000 // milliseconds
+#define DEFAULT_TOKUDB_LOCK_TIMEOUT 4000 // milliseconds
+
+
+// globals
+extern ulonglong cache_size;
+extern uint cachetable_pool_threads;
+extern int cardinality_scale_percent;
+extern my_bool checkpoint_on_flush_logs;
+extern uint checkpoint_pool_threads;
+extern uint checkpointing_period;
+extern ulong cleaner_iterations;
+extern ulong cleaner_period;
+extern uint client_pool_threads;
+extern my_bool compress_buffers_before_eviction;
+extern char* data_dir;
+extern ulong debug;
+extern my_bool directio;
+extern my_bool enable_partial_eviction;
+extern int fs_reserve_percent;
+extern uint fsync_log_period;
+extern char* log_dir;
+extern ulonglong max_lock_memory;
+extern uint read_status_frequency;
+extern my_bool strip_frm_data;
+extern char* tmp_dir;
+extern uint write_status_frequency;
+extern char* version;
+
+#if TOKU_INCLUDE_HANDLERTON_HANDLE_FATAL_SIGNAL
+extern char* gdb_path;
+extern my_bool gdb_on_fatal;
+#endif
+
+#if TOKUDB_CHECK_JEMALLOC
+extern uint check_jemalloc;
+#endif
+
+#if TOKUDB_DEBUG
+// used to control background job manager
+extern my_bool debug_pause_background_job_manager;
+#endif // TOKUDB_DEBUG
+
+// session/thread
+my_bool alter_print_error(THD* thd);
+double analyze_delete_fraction(THD* thd);
+my_bool analyze_in_background(THD* thd);
+analyze_mode_t analyze_mode(THD* thd);
+ulonglong analyze_throttle(THD* thd);
+ulonglong analyze_time(THD* thd);
+ulonglong auto_analyze(THD* thd);
+uint block_size(THD* thd);
+my_bool bulk_fetch(THD* thd);
+my_bool commit_sync(THD* thd);
+my_bool create_index_online(THD* thd);
+my_bool disable_hot_alter(THD* thd);
+my_bool disable_prefetching(THD* thd);
+my_bool disable_slow_alter(THD* thd);
+my_bool disable_slow_update(THD* thd);
+my_bool disable_slow_upsert(THD* thd);
+empty_scan_mode_t empty_scan(THD* thd);
+uint fanout(THD* thd);
+my_bool hide_default_row_format(THD* thd);
+ulonglong killed_time(THD* thd);
+my_bool load_save_space(THD* thd);
+char* last_lock_timeout(THD* thd);
+void set_last_lock_timeout(THD* thd, char* last);
+ulonglong loader_memory_size(THD* thd);
+ulonglong lock_timeout(THD* thd);
+uint lock_timeout_debug(THD* thd);
+double optimize_index_fraction(THD* thd);
+const char* optimize_index_name(THD* thd);
+ulonglong optimize_throttle(THD* thd);
+uint pk_insert_mode(THD* thd);
+void set_pk_insert_mode(THD* thd, uint mode);
+my_bool prelock_empty(THD* thd);
+uint read_block_size(THD* thd);
+uint read_buf_size(THD* thd);
+row_format_t row_format(THD *thd);
+my_bool rpl_check_readonly(THD* thd);
+my_bool rpl_lookup_rows(THD* thd);
+ulonglong rpl_lookup_rows_delay(THD* thd);
+my_bool rpl_unique_checks(THD* thd);
+ulonglong rpl_unique_checks_delay(THD* thd);
+my_bool support_xa(THD* thd);
+
+extern st_mysql_sys_var* system_variables[];
+
+#if TOKU_INCLUDE_OPTION_STRUCTS
+extern ha_create_table_option tokudb_table_options[];
+extern ha_create_table_option tokudb_index_options[];
+#endif
+
+} // namespace sysvars
+} // namespace tokudb
+
+#endif // _TOKUDB_SYSVARS_H
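A short sketch of how the option structs above are meant to be consumed (the first_key_is_clustering helper and the TABLE pointer are illustrative, not part of the patch):

    // key_is_clustering folds the legacy HA_CLUSTERING flag and, when
    // TOKU_INCLUDE_OPTION_STRUCTS is set, the per-index "clustering"
    // option into a single test on a KEY descriptor.
    static bool first_key_is_clustering(const TABLE* table) {
        if (table->s->keys == 0)
            return false;
        return key_is_clustering(&table->key_info[0]);
    }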
diff --git a/storage/tokudb/tokudb_thread.cc b/storage/tokudb/tokudb_thread.cc
new file mode 100644
index 00000000000..f27e803a065
--- /dev/null
+++ b/storage/tokudb/tokudb_thread.cc
@@ -0,0 +1,35 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+/* -*- mode: C; c-basic-offset: 4 -*- */
+#ident "$Id$"
+/*======
+This file is part of TokuDB
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+    TokuDB is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ TokuDB is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with TokuDB. If not, see <http://www.gnu.org/licenses/>.
+
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "tokudb_thread.h"
+
+namespace tokudb {
+namespace thread {
+
+pthread_t mutex_t::_null_owner = 0;
+
+} // namespace thread
+} // namespace tokudb
diff --git a/storage/tokudb/tokudb_thread.h b/storage/tokudb/tokudb_thread.h
new file mode 100644
index 00000000000..dcb1fd6ec63
--- /dev/null
+++ b/storage/tokudb/tokudb_thread.h
@@ -0,0 +1,597 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+/* -*- mode: C; c-basic-offset: 4 -*- */
+#ident "$Id$"
+/*======
+This file is part of TokuDB
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+    TokuDB is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ TokuDB is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with TokuDB. If not, see <http://www.gnu.org/licenses/>.
+
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#ifndef _TOKUDB_SYNC_H
+#define _TOKUDB_SYNC_H
+
+#include "hatoku_defines.h"
+#include "tokudb_debug.h"
+#include "tokudb_time.h"
+
+namespace tokudb {
+namespace thread {
+
+#if (defined(__MACH__) || defined(__APPLE__)) && _POSIX_TIMERS <= 0
+
+#define _x_min(a, b) ((a) < (b) ? (a) : (b))
+
+#define timed_lock_define(timed_func_name, lock_type_name, lock_func_name) \
+inline int timed_func_name(lock_type_name *mutex, \
+ const struct timespec *abs_timeout) { \
+ int pthread_rc; \
+ struct timespec remaining, slept, ts; \
+ static const int sleep_step = 1000000; \
+ \
+ remaining = *abs_timeout; \
+ while ((pthread_rc = lock_func_name(mutex)) == EBUSY) { \
+ ts.tv_sec = 0; \
+ ts.tv_nsec = (remaining.tv_sec > 0 ? \
+ sleep_step : \
+ _x_min(remaining.tv_nsec,sleep_step)); \
+ nanosleep(&ts, &slept); \
+ ts.tv_nsec -= slept.tv_nsec; \
+ if (ts.tv_nsec <= remaining.tv_nsec) { \
+ remaining.tv_nsec -= ts.tv_nsec; \
+ } else { \
+ remaining.tv_sec--; \
+ remaining.tv_nsec = \
+ (sleep_step - (ts.tv_nsec - remaining.tv_nsec)); \
+ } \
+ if (remaining.tv_sec < 0 || \
+ (!remaining.tv_sec && remaining.tv_nsec <= 0)) { \
+ return ETIMEDOUT; \
+ } \
+ } \
+ \
+ return pthread_rc; \
+}
+
+timed_lock_define(pthread_mutex_timedlock,
+ pthread_mutex_t,
+ pthread_mutex_trylock);
+
+timed_lock_define(pthread_rwlock_timedrdlock,
+ pthread_rwlock_t,
+ pthread_rwlock_tryrdlock);
+
+timed_lock_define(pthread_rwlock_timedwrlock,
+ pthread_rwlock_t,
+ pthread_rwlock_trywrlock);
+
+#endif //(defined(__MACH__) || defined(__APPLE__)) && _POSIX_TIMERS <= 0
+
+uint my_tid(void);
+
+// Your basic mutex
+class mutex_t {
+public:
+ mutex_t(void);
+ ~mutex_t(void);
+
+ void lock(void);
+ int lock(ulonglong microseconds);
+ void unlock(void);
+#ifdef TOKUDB_DEBUG
+ bool is_owned_by_me(void) const;
+#endif
+private:
+ static pthread_t _null_owner;
+ pthread_mutex_t _mutex;
+#ifdef TOKUDB_DEBUG
+ uint _owners;
+ pthread_t _owner;
+#endif
+};
+
+// Simple read write lock
+class rwlock_t {
+public:
+ rwlock_t(void);
+ ~rwlock_t(void);
+
+ void lock_read(void);
+ int lock_read(ulonglong microseconds);
+ void lock_write(void);
+ int lock_write(ulonglong microseconds);
+ void unlock(void);
+
+private:
+ rwlock_t(const rwlock_t&);
+ rwlock_t& operator=(const rwlock_t&);
+
+ pthread_rwlock_t _rwlock;
+};
+
+// Simple event signal/wait class
+class event_t {
+public:
+ // create_signalled - create the event in a signalled state
+ // manual_reset - create an event that must be manually reset
+ // after signaling
+ event_t(
+ bool create_signalled = false,
+ bool manual_reset = false);
+ ~event_t(void);
+
+ // wait for the event to become signalled
+ void wait(void);
+ int wait(ulonglong microseconds);
+
+ // signal the event
+ void signal(void);
+
+ // pulse the event (signal and free exactly one waiter)
+ void pulse(void);
+
+ // is the event currently signalled
+ bool signalled(void);
+
+ // unsignal/clear the event
+ void reset(void);
+
+private:
+ event_t(const event_t&);
+ event_t& operator=(const event_t&);
+
+ pthread_mutex_t _mutex;
+ pthread_cond_t _cond;
+ bool _signalled;
+ bool _pulsed;
+ bool _manual_reset;
+};
+
+// Semaphore signal/wait class
+class semaphore_t {
+public:
+ // initial_count - the initial signal count of the semaphore
+ // max_count - the maximum signal count for the semaphore.
+ semaphore_t(int initial_count, int max_count);
+ ~semaphore_t(void);
+
+ enum E_WAIT {
+ E_SIGNALLED = 0,
+ E_INTERRUPTED = 1,
+ E_TIMEDOUT = 2
+ };
+
+ // wait for the semaphore to become signalled
+ E_WAIT wait(void);
+ E_WAIT wait(ulonglong microseconds);
+
+ // signal the semaphore to increase the count
+ // return true if signalled, false if ignored due to count
+ bool signal(void);
+
+ // what is the semaphore signal count
+ int signalled(void);
+
+ // unsignal a signalled semaphore
+ void reset(void);
+
+    // set to interrupt any waiters; as long as it is set,
+ // waiters will return immediately with E_INTERRUPTED.
+ // the semaphore signal count and tracking will continue
+ // accepting signals and leave the signalled state intact
+ void set_interrupt(void);
+ void clear_interrupt(void);
+
+private:
+ semaphore_t(const semaphore_t&);
+ semaphore_t& operator=(const semaphore_t&);
+
+ pthread_mutex_t _mutex;
+ pthread_cond_t _cond;
+ bool _interrupted;
+ int _signalled;
+ int _initial_count;
+ int _max_count;
+};
+
+// Thread class
+class thread_t {
+public:
+ thread_t(void);
+ ~thread_t(void);
+
+ int start(void* (*pfn)(void*), void* arg);
+ int join(void** value_ptr);
+ int detach(void);
+
+private:
+ pthread_t _thread;
+};
+
+
+inline uint my_tid(void) {
+ return (uint)toku_os_gettid();
+}
+
+
+inline mutex_t::mutex_t(void) {
+ #ifdef TOKUDB_DEBUG
+ _owners = 0;
+ _owner = _null_owner;
+ #endif
+ int r = pthread_mutex_init(&_mutex, MY_MUTEX_INIT_FAST);
+ assert_debug(r == 0);
+}
+inline mutex_t::~mutex_t(void) {
+ #ifdef TOKUDB_DEBUG
+ assert_debug(_owners == 0);
+ #endif
+ int r = pthread_mutex_destroy(&_mutex);
+ assert_debug(r == 0);
+}
+inline void mutex_t::lock(void) {
+ assert_debug(is_owned_by_me() == false);
+ int r = pthread_mutex_lock(&_mutex);
+ assert_debug(r == 0);
+ #ifdef TOKUDB_DEBUG
+ _owners++;
+ _owner = pthread_self();
+ #endif
+}
+inline int mutex_t::lock(ulonglong microseconds) {
+ assert_debug(is_owned_by_me() == false);
+ timespec waittime = time::offset_timespec(microseconds);
+ int r = pthread_mutex_timedlock(&_mutex, &waittime);
+ #ifdef TOKUDB_DEBUG
+ if (r == 0) {
+ _owners++;
+ _owner = pthread_self();
+ }
+ #endif
+ assert_debug(r == 0 || r == ETIMEDOUT);
+ return r;
+}
+inline void mutex_t::unlock(void) {
+ #ifdef TOKUDB_DEBUG
+ assert_debug(_owners > 0);
+ assert_debug(is_owned_by_me());
+ _owners--;
+ _owner = _null_owner;
+ #endif
+ int r = pthread_mutex_unlock(&_mutex);
+ assert_debug(r == 0);
+}
+#ifdef TOKUDB_DEBUG
+inline bool mutex_t::is_owned_by_me(void) const {
+ return pthread_equal(pthread_self(), _owner) != 0 ? true : false;
+}
+#endif
+
+
+inline rwlock_t::rwlock_t(void) {
+ int r = pthread_rwlock_init(&_rwlock, NULL);
+ assert_debug(r == 0);
+}
+inline rwlock_t::~rwlock_t(void) {
+ int r = pthread_rwlock_destroy(&_rwlock);
+ assert_debug(r == 0);
+}
+inline void rwlock_t::lock_read(void) {
+ int r;
+ while ((r = pthread_rwlock_rdlock(&_rwlock)) != 0) {
+ if (r == EBUSY || r == EAGAIN) {
+ time::sleep_microsec(1000);
+ continue;
+ }
+ break;
+ }
+ assert_debug(r == 0);
+}
+inline int rwlock_t::lock_read(ulonglong microseconds) {
+ int r;
+ timespec waittime = time::offset_timespec(microseconds);
+ while ((r = pthread_rwlock_timedrdlock(&_rwlock, &waittime)) != 0) {
+ if (r == EBUSY || r == EAGAIN) {
+ time::sleep_microsec(1000);
+ continue;
+ } else if (r == ETIMEDOUT) {
+ return ETIMEDOUT;
+ }
+ break;
+ }
+ assert_debug(r == 0);
+ return r;
+}
+inline void rwlock_t::lock_write(void) {
+ int r;
+ while ((r = pthread_rwlock_wrlock(&_rwlock)) != 0) {
+ if (r == EBUSY || r == EAGAIN) {
+ time::sleep_microsec(1000);
+ continue;
+ }
+ break;
+ }
+ assert_debug(r == 0);
+}
+inline int rwlock_t::lock_write(ulonglong microseconds) {
+ int r;
+ timespec waittime = time::offset_timespec(microseconds);
+ while ((r = pthread_rwlock_timedwrlock(&_rwlock, &waittime)) != 0) {
+ if (r == EBUSY || r == EAGAIN) {
+ time::sleep_microsec(1000);
+ continue;
+ } else if (r == ETIMEDOUT) {
+ return ETIMEDOUT;
+ }
+ break;
+ }
+ assert_debug(r == 0);
+ return r;
+}
+inline void rwlock_t::unlock(void) {
+ int r = pthread_rwlock_unlock(&_rwlock);
+ assert_debug(r == 0);
+}
+inline rwlock_t::rwlock_t(const rwlock_t&) {
+}
+inline rwlock_t& rwlock_t::operator=(const rwlock_t&) {
+ return *this;
+}
+
+
+inline event_t::event_t(bool create_signalled, bool manual_reset) :
+ _manual_reset(manual_reset) {
+
+ int r = pthread_mutex_init(&_mutex, NULL);
+ assert_debug(r == 0);
+ r = pthread_cond_init(&_cond, NULL);
+ assert_debug(r == 0);
+ if (create_signalled) {
+ _signalled = true;
+ } else {
+ _signalled = false;
+ }
+ _pulsed = false;
+}
+inline event_t::~event_t(void) {
+ int r = pthread_mutex_destroy(&_mutex);
+ assert_debug(r == 0);
+ r = pthread_cond_destroy(&_cond);
+ assert_debug(r == 0);
+}
+inline void event_t::wait(void) {
+ int r = pthread_mutex_lock(&_mutex);
+ assert_debug(r == 0);
+ while (_signalled == false && _pulsed == false) {
+ r = pthread_cond_wait(&_cond, &_mutex);
+ assert_debug(r == 0);
+ }
+ if (_manual_reset == false)
+ _signalled = false;
+ if (_pulsed)
+ _pulsed = false;
+ r = pthread_mutex_unlock(&_mutex);
+ assert_debug(r == 0);
+ return;
+}
+inline int event_t::wait(ulonglong microseconds) {
+ timespec waittime = time::offset_timespec(microseconds);
+ int r = pthread_mutex_timedlock(&_mutex, &waittime);
+ if (r == ETIMEDOUT) return ETIMEDOUT;
+ assert_debug(r == 0);
+ while (_signalled == false && _pulsed == false) {
+ r = pthread_cond_timedwait(&_cond, &_mutex, &waittime);
+ if (r == ETIMEDOUT) {
+ r = pthread_mutex_unlock(&_mutex);
+ assert_debug(r == 0);
+ return ETIMEDOUT;
+ }
+ assert_debug(r == 0);
+ }
+ if (_manual_reset == false)
+ _signalled = false;
+ if (_pulsed)
+ _pulsed = false;
+ r = pthread_mutex_unlock(&_mutex);
+ assert_debug(r == 0);
+ return 0;
+}
+inline void event_t::signal(void) {
+ int r = pthread_mutex_lock(&_mutex);
+ assert_debug(r == 0);
+ _signalled = true;
+ if (_manual_reset) {
+ r = pthread_cond_broadcast(&_cond);
+ assert_debug(r == 0);
+ } else {
+ r = pthread_cond_signal(&_cond);
+ assert_debug(r == 0);
+ }
+ r = pthread_mutex_unlock(&_mutex);
+ assert_debug(r == 0);
+}
+inline void event_t::pulse(void) {
+ int r = pthread_mutex_lock(&_mutex);
+ assert_debug(r == 0);
+ _pulsed = true;
+ r = pthread_cond_signal(&_cond);
+ assert_debug(r == 0);
+ r = pthread_mutex_unlock(&_mutex);
+ assert_debug(r == 0);
+}
+inline bool event_t::signalled(void) {
+ bool ret = false;
+ int r = pthread_mutex_lock(&_mutex);
+ assert_debug(r == 0);
+ ret = _signalled;
+ r = pthread_mutex_unlock(&_mutex);
+ assert_debug(r == 0);
+ return ret;
+}
+inline void event_t::reset(void) {
+ int r = pthread_mutex_lock(&_mutex);
+ assert_debug(r == 0);
+ _signalled = false;
+ _pulsed = false;
+ r = pthread_mutex_unlock(&_mutex);
+ assert_debug(r == 0);
+ return;
+}
+inline event_t::event_t(const event_t&) {
+}
+inline event_t& event_t::operator=(const event_t&) {
+ return *this;
+}
+
+
+inline semaphore_t::semaphore_t(
+ int initial_count,
+ int max_count) :
+ _interrupted(false),
+ _initial_count(initial_count),
+ _max_count(max_count) {
+
+ int r = pthread_mutex_init(&_mutex, NULL);
+ assert_debug(r == 0);
+ r = pthread_cond_init(&_cond, NULL);
+ assert_debug(r == 0);
+ _signalled = _initial_count;
+}
+inline semaphore_t::~semaphore_t(void) {
+ int r = pthread_mutex_destroy(&_mutex);
+ assert_debug(r == 0);
+ r = pthread_cond_destroy(&_cond);
+ assert_debug(r == 0);
+}
+inline semaphore_t::E_WAIT semaphore_t::wait(void) {
+ E_WAIT ret;
+ int r = pthread_mutex_lock(&_mutex);
+ assert_debug(r == 0);
+ while (_signalled == 0 && _interrupted == false) {
+ r = pthread_cond_wait(&_cond, &_mutex);
+ assert_debug(r == 0);
+ }
+ if (_interrupted) {
+ ret = E_INTERRUPTED;
+ } else {
+ _signalled--;
+ ret = E_SIGNALLED;
+ }
+ r = pthread_mutex_unlock(&_mutex);
+ assert_debug(r == 0);
+ return ret;
+}
+inline semaphore_t::E_WAIT semaphore_t::wait(ulonglong microseconds) {
+ E_WAIT ret;
+ timespec waittime = time::offset_timespec(microseconds);
+ int r = pthread_mutex_timedlock(&_mutex, &waittime);
+ if (r == ETIMEDOUT) return E_TIMEDOUT;
+ assert_debug(r == 0);
+ while (_signalled == 0 && _interrupted == false) {
+ r = pthread_cond_timedwait(&_cond, &_mutex, &waittime);
+ if (r == ETIMEDOUT) {
+ r = pthread_mutex_unlock(&_mutex);
+ assert_debug(r == 0);
+ return E_TIMEDOUT;
+ }
+ assert_debug(r == 0);
+ }
+ if (_interrupted) {
+ ret = E_INTERRUPTED;
+ } else {
+ _signalled--;
+ ret = E_SIGNALLED;
+ }
+ r = pthread_mutex_unlock(&_mutex);
+ assert_debug(r == 0);
+ return ret;
+}
+inline bool semaphore_t::signal(void) {
+ bool ret = false;
+ int r = pthread_mutex_lock(&_mutex);
+ assert_debug(r == 0);
+ if (_signalled < _max_count) {
+ _signalled++;
+ ret = true;
+ }
+ r = pthread_cond_signal(&_cond);
+ assert_debug(r == 0);
+ r = pthread_mutex_unlock(&_mutex);
+ assert_debug(r == 0);
+ return ret;
+}
+inline int semaphore_t::signalled(void) {
+ int ret = 0;
+ int r = pthread_mutex_lock(&_mutex);
+ assert_debug(r == 0);
+ ret = _signalled;
+ r = pthread_mutex_unlock(&_mutex);
+ assert_debug(r == 0);
+ return ret;
+}
+inline void semaphore_t::reset(void) {
+ int r = pthread_mutex_lock(&_mutex);
+ assert_debug(r == 0);
+ _signalled = 0;
+ r = pthread_mutex_unlock(&_mutex);
+ assert_debug(r == 0);
+ return;
+}
+inline void semaphore_t::set_interrupt(void) {
+ int r = pthread_mutex_lock(&_mutex);
+ assert_debug(r == 0);
+ _interrupted = true;
+ r = pthread_cond_broadcast(&_cond);
+ assert_debug(r == 0);
+ r = pthread_mutex_unlock(&_mutex);
+ assert_debug(r == 0);
+}
+inline void semaphore_t::clear_interrupt(void) {
+ int r = pthread_mutex_lock(&_mutex);
+ assert_debug(r == 0);
+ _interrupted = false;
+ r = pthread_mutex_unlock(&_mutex);
+ assert_debug(r == 0);
+}
+inline semaphore_t::semaphore_t(const semaphore_t&) {
+}
+inline semaphore_t& semaphore_t::operator=(const semaphore_t&) {
+ return *this;
+}
+
+
+inline thread_t::thread_t(void) : _thread(0) {
+}
+inline thread_t::~thread_t(void) {
+}
+inline int thread_t::start(void*(*pfn)(void*), void* arg) {
+ return pthread_create(&_thread, NULL, pfn, arg);
+}
+inline int thread_t::join(void** value_ptr) {
+ return pthread_join(_thread, value_ptr);
+}
+inline int thread_t::detach(void) {
+ return pthread_detach(_thread);
+}
+
+} // namespace thread
+} // namespace tokudb
+
+
+#endif // _TOKUDB_SYNC_H
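A hedged usage sketch for the primitives above (the worker function, the shared flag, and the timeout value are illustrative only):

    // One thread signals an auto-reset event while another waits on it
    // with a timeout; mutex_t guards the shared flag. wait() returns 0
    // when signalled and ETIMEDOUT when the deadline passes first.
    static tokudb::thread::mutex_t g_lock;
    static tokudb::thread::event_t g_ready; // unsignalled, auto-reset
    static bool g_done = false;

    static void* worker(void*) {
        g_lock.lock();
        g_done = true;
        g_lock.unlock();
        g_ready.signal();
        return NULL;
    }

    static void wait_for_worker(void) {
        tokudb::thread::thread_t t;
        if (t.start(worker, NULL) != 0)
            return;
        int r = g_ready.wait(500000); // 500ms, expressed in microseconds
        t.join(NULL);
        (void)r;
    }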
diff --git a/storage/tokudb/tokudb_time.h b/storage/tokudb/tokudb_time.h
new file mode 100644
index 00000000000..12baa0de24d
--- /dev/null
+++ b/storage/tokudb/tokudb_time.h
@@ -0,0 +1,73 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+/* -*- mode: C; c-basic-offset: 4 -*- */
+#ident "$Id$"
+/*======
+This file is part of TokuDB
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+    TokuDB is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ TokuDB is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with TokuDB. If not, see <http://www.gnu.org/licenses/>.
+
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#ifndef _TOKUDB_TIME_H
+#define _TOKUDB_TIME_H
+
+#include "hatoku_defines.h"
+
+namespace tokudb {
+namespace time {
+
+static const ulonglong MILLISECONDS = 1000;
+static const ulonglong MICROSECONDS = 1000000;
+static const ulonglong NANOSECONDS = 1000000000;
+
+// gets current time of day in microseconds
+ulonglong microsec(void);
+
+// gets a timespec in the future based on the current time and an offset forward
+timespec offset_timespec(ulonglong offset);
+
+// sleep microseconds
+void sleep_microsec(ulong tm);
+
+
+
+inline ulonglong microsec(void) {
+ timeval t;
+ gettimeofday(&t, NULL);
+    // multiply through the ulonglong constant to avoid 32-bit overflow
+    return t.tv_sec * MICROSECONDS + t.tv_usec;
+}
+inline timespec offset_timespec(ulonglong offset) {
+ timespec ret;
+ ulonglong tm = offset + microsec();
+ ret.tv_sec = tm / MICROSECONDS;
+ ret.tv_nsec = (tm % MICROSECONDS) * 1000;
+ return ret;
+}
+inline void sleep_microsec(ulong tm) {
+ timeval t;
+ t.tv_sec = tm / MICROSECONDS;
+ t.tv_usec = tm % MICROSECONDS;
+
+ select(0, NULL, NULL, NULL, &t);
+}
+
+} // namespace time
+} // namespace tokudb
+
+#endif // _TOKUDB_TIME_H
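A small worked example of the time helpers (the lock_with_deadline wrapper is illustrative): offset_timespec turns "now + offset" in microseconds into the absolute sec/nsec deadline the pthread timed-lock calls in tokudb_thread.h expect, so a 250ms lock deadline looks like:

    // 250 * MILLISECONDS == 250000 microseconds; mutex_t::lock converts
    // this to an absolute timespec via offset_timespec internally and
    // returns 0 on success or ETIMEDOUT if the deadline passes.
    static int lock_with_deadline(tokudb::thread::mutex_t* m) {
        return m->lock(250 * tokudb::time::MILLISECONDS);
    }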
diff --git a/storage/tokudb/tokudb_txn.h b/storage/tokudb/tokudb_txn.h
new file mode 100644
index 00000000000..67bf591d088
--- /dev/null
+++ b/storage/tokudb/tokudb_txn.h
@@ -0,0 +1,155 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+/* -*- mode: C; c-basic-offset: 4 -*- */
+#ident "$Id$"
+/*======
+This file is part of TokuDB
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+    TokuDB is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ TokuDB is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with TokuDB. If not, see <http://www.gnu.org/licenses/>.
+
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#ifndef _TOKUDB_TXN_H
+#define _TOKUDB_TXN_H
+
+#include "hatoku_defines.h"
+#include "tokudb_debug.h"
+#include "tokudb_sysvars.h"
+
+typedef enum {
+ hatoku_iso_not_set = 0,
+ hatoku_iso_read_uncommitted,
+ hatoku_iso_read_committed,
+ hatoku_iso_repeatable_read,
+ hatoku_iso_serializable
+} HA_TOKU_ISO_LEVEL;
+
+typedef struct st_tokudb_stmt_progress {
+ ulonglong inserted;
+ ulonglong updated;
+ ulonglong deleted;
+ ulonglong queried;
+ bool using_loader;
+} tokudb_stmt_progress;
+
+typedef struct st_tokudb_trx_data {
+ DB_TXN* all;
+ DB_TXN* stmt;
+ DB_TXN* sp_level;
+ DB_TXN* sub_sp_level;
+ uint tokudb_lock_count;
+ uint create_lock_count;
+ tokudb_stmt_progress stmt_progress;
+ bool checkpoint_lock_taken;
+ LIST* handlers;
+} tokudb_trx_data;
+
+extern char* tokudb_data_dir;
+extern const char* ha_tokudb_ext;
+
+inline void reset_stmt_progress(tokudb_stmt_progress* val) {
+ val->deleted = 0;
+ val->inserted = 0;
+ val->updated = 0;
+ val->queried = 0;
+}
+
+inline int get_name_length(const char* name) {
+    int n = 0;
+    n += strlen(name);
+    n += strlen(ha_tokudb_ext);
+    return n;
+}
+
+//
+// returns maximum length of path to a dictionary
+//
+inline int get_max_dict_name_path_length(const char* tablename) {
+ int n = 0;
+ n += get_name_length(tablename);
+ n += 1; //for the '-'
+ n += MAX_DICT_NAME_LEN;
+ return n;
+}
+
+inline void make_name(
+ char* newname,
+ size_t newname_len,
+ const char* tablename,
+ const char* dictname) {
+
+ assert_always(tablename);
+ assert_always(dictname);
+ size_t real_size = snprintf(
+ newname,
+ newname_len,
+ "%s-%s",
+ tablename,
+ dictname);
+ assert_always(real_size < newname_len);
+}
+
+inline int txn_begin(
+ DB_ENV* env,
+ DB_TXN* parent,
+ DB_TXN** txn,
+ uint32_t flags,
+ THD* thd) {
+
+ *txn = NULL;
+ int r = env->txn_begin(env, parent, txn, flags);
+ if (r == 0 && thd) {
+ DB_TXN* this_txn = *txn;
+ this_txn->set_client_id(this_txn, thd_get_thread_id(thd));
+ }
+ TOKUDB_TRACE_FOR_FLAGS(
+ TOKUDB_DEBUG_TXN,
+ "begin txn %p %p %u r=%d",
+ parent,
+ *txn,
+ flags,
+ r);
+ return r;
+}
+
+inline void commit_txn(DB_TXN* txn, uint32_t flags) {
+ TOKUDB_TRACE_FOR_FLAGS(TOKUDB_DEBUG_TXN, "commit txn %p", txn);
+ int r = txn->commit(txn, flags);
+ if (r != 0) {
+ sql_print_error(
+ "tried committing transaction %p and got error code %d",
+ txn,
+ r);
+ }
+ assert_always(r == 0);
+}
+
+inline void abort_txn(DB_TXN* txn) {
+ TOKUDB_TRACE_FOR_FLAGS(TOKUDB_DEBUG_TXN, "abort txn %p", txn);
+ int r = txn->abort(txn);
+ if (r != 0) {
+ sql_print_error(
+ "tried aborting transaction %p and got error code %d",
+ txn,
+ r);
+ }
+ assert_always(r == 0);
+}
+
+#endif // _TOKUDB_TXN_H
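A hedged sketch of the intended transaction call pattern (the do_work_in_txn function and its zero flags are placeholders; txn_begin, commit_txn, and abort_txn come from the header above):

    // Begin a transaction under the trx's savepoint level, do work,
    // then commit on success or abort on failure. txn_begin also tags
    // the new transaction with the THD's thread id for tracing.
    static int do_work_in_txn(DB_ENV* env, tokudb_trx_data* trx, THD* thd) {
        DB_TXN* txn = NULL;
        int r = txn_begin(env, trx->sp_level, &txn, 0, thd);
        if (r != 0)
            return r;
        int work_error = 0; // ... dictionary operations would go here ...
        if (work_error == 0)
            commit_txn(txn, 0);
        else
            abort_txn(txn);
        return work_error;
    }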
diff --git a/storage/tokudb/tokudb_update_fun.cc b/storage/tokudb/tokudb_update_fun.cc
index 4647c91880b..05149d17317 100644
--- a/storage/tokudb/tokudb_update_fun.cc
+++ b/storage/tokudb/tokudb_update_fun.cc
@@ -105,7 +105,8 @@ enum {
// at most, 4 0's
// So, upperbound is num_blobs(1+4+1+4) = num_columns*10
-// The expand varchar offsets message is used to expand the size of an offset from 1 to 2 bytes. Not VLQ coded.
+// The expand varchar offsets message is used to expand the size of an offset
+// from 1 to 2 bytes. Not VLQ coded.
// uint8 operation = UPDATE_OP_EXPAND_VARIABLE_OFFSETS
// uint32 number of offsets
// uint32 starting offset of the variable length field offsets
@@ -132,8 +133,8 @@ enum {
// uint8 old lengths[N]
// uint8 new lengths[N]
-// Update and Upsert version 1 messages. Not VLQ coded. Not used anymore, but may be in the
-// fractal tree from a previous build.
+// Update and Upsert version 1 messages. Not VLQ coded. Not used anymore, but
+// may be in the fractal tree from a previous build.
//
// Field descriptor:
// Operations:
@@ -143,8 +144,10 @@ enum {
// x = x - k
// field type 4 see field types above
// unused 4 unused
-// field null num 4 bit 31 is 1 if the field is nullible and the remaining bits contain the null bit number
-// field offset 4 for fixed fields, this is the offset from begining of the row of the field
+// field null num 4 bit 31 is 1 if the field is nullable and the
+// remaining bits contain the null bit number
+// field offset 4 for fixed fields, this is the offset from
+// beginning of the row of the field
// value:
// value length 4 == N, length of the value
// value N value to add or subtract
@@ -234,7 +237,11 @@ static inline bool is_overall_null_position_set(uchar* data, uint32_t pos) {
//
// sets the bit at index pos in data to 1 if is_null, 0 otherwise
//
-static inline void set_overall_null_position(uchar* data, uint32_t pos, bool is_null) {
+static inline void set_overall_null_position(
+ uchar* data,
+ uint32_t pos,
+ bool is_null) {
+
uint32_t offset = pos/8;
uchar remainder = pos%8;
uchar null_bit = 1<<remainder;
@@ -251,9 +258,7 @@ static inline void copy_null_bits(
uint32_t start_new_pos,
uint32_t num_bits,
uchar* old_null_bytes,
- uchar* new_null_bytes
- )
-{
+ uchar* new_null_bytes) {
for (uint32_t i = 0; i < num_bits; i++) {
uint32_t curr_old_pos = i + start_old_pos;
uint32_t curr_new_pos = i + start_new_pos;
@@ -268,19 +273,27 @@ static inline void copy_null_bits(
}
static inline void copy_var_fields(
- uint32_t start_old_num_var_field, //index of var fields that we should start writing
- uint32_t num_var_fields, // number of var fields to copy
- uchar* old_var_field_offset_ptr, //static ptr to where offset bytes begin in old row
- uchar old_num_offset_bytes, //number of offset bytes used in old row
- uchar* start_new_var_field_data_ptr, // where the new var data should be written
- uchar* start_new_var_field_offset_ptr, // where the new var offsets should be written
- uchar* new_var_field_data_ptr, // pointer to beginning of var fields in new row
- uchar* old_var_field_data_ptr, // pointer to beginning of var fields in old row
- uint32_t new_num_offset_bytes, // number of offset bytes used in new row
+ //index of var fields that we should start writing
+ uint32_t start_old_num_var_field,
+ // number of var fields to copy
+ uint32_t num_var_fields,
+ //static ptr to where offset bytes begin in old row
+ uchar* old_var_field_offset_ptr,
+ //number of offset bytes used in old row
+ uchar old_num_offset_bytes,
+ // where the new var data should be written
+ uchar* start_new_var_field_data_ptr,
+ // where the new var offsets should be written
+ uchar* start_new_var_field_offset_ptr,
+ // pointer to beginning of var fields in new row
+ uchar* new_var_field_data_ptr,
+ // pointer to beginning of var fields in old row
+ uchar* old_var_field_data_ptr,
+ // number of offset bytes used in new row
+ uint32_t new_num_offset_bytes,
uint32_t* num_data_bytes_written,
- uint32_t* num_offset_bytes_written
- )
-{
+ uint32_t* num_offset_bytes_written) {
+
uchar* curr_new_var_field_data_ptr = start_new_var_field_data_ptr;
uchar* curr_new_var_field_offset_ptr = start_new_var_field_offset_ptr;
for (uint32_t i = 0; i < num_var_fields; i++) {
@@ -290,12 +303,11 @@ static inline void copy_var_fields(
uchar* data_to_copy = NULL;
// get the length and pointer to data that needs to be copied
get_var_field_info(
- &field_len,
- &start_read_offset,
- curr_old,
- old_var_field_offset_ptr,
- old_num_offset_bytes
- );
+ &field_len,
+ &start_read_offset,
+ curr_old,
+ old_var_field_offset_ptr,
+ old_num_offset_bytes);
data_to_copy = old_var_field_data_ptr + start_read_offset;
// now need to copy field_len bytes starting from data_to_copy
curr_new_var_field_data_ptr = write_var_field(
@@ -304,15 +316,22 @@ static inline void copy_var_fields(
new_var_field_data_ptr,
data_to_copy,
field_len,
- new_num_offset_bytes
- );
+ new_num_offset_bytes);
curr_new_var_field_offset_ptr += new_num_offset_bytes;
}
- *num_data_bytes_written = (uint32_t)(curr_new_var_field_data_ptr - start_new_var_field_data_ptr);
- *num_offset_bytes_written = (uint32_t)(curr_new_var_field_offset_ptr - start_new_var_field_offset_ptr);
+ *num_data_bytes_written =
+ (uint32_t)(curr_new_var_field_data_ptr - start_new_var_field_data_ptr);
+ *num_offset_bytes_written =
+ (uint32_t)(curr_new_var_field_offset_ptr -
+ start_new_var_field_offset_ptr);
}
-static inline uint32_t copy_toku_blob(uchar* to_ptr, uchar* from_ptr, uint32_t len_bytes, bool skip) {
+static inline uint32_t copy_toku_blob(
+ uchar* to_ptr,
+ uchar* from_ptr,
+ uint32_t len_bytes,
+ bool skip) {
+
uint32_t length = 0;
if (!skip) {
memcpy(to_ptr, from_ptr, len_bytes);
@@ -326,13 +345,12 @@ static inline uint32_t copy_toku_blob(uchar* to_ptr, uchar* from_ptr, uint32_t l
static int tokudb_hcad_update_fun(
DB* db,
- const DBT *key,
- const DBT *old_val,
- const DBT *extra,
- void (*set_val)(const DBT *new_val, void *set_extra),
- void *set_extra
- )
-{
+ const DBT* key,
+ const DBT* old_val,
+ const DBT* extra,
+ void (*set_val)(const DBT* new_val, void* set_extra),
+ void* set_extra) {
+
uint32_t max_num_bytes;
uint32_t num_columns;
DBT new_val;
@@ -397,7 +415,7 @@ static int tokudb_hcad_update_fun(
operation = extra_pos[0];
extra_pos++;
- assert(operation == UP_COL_ADD_OR_DROP);
+ assert_always(operation == UP_COL_ADD_OR_DROP);
memcpy(&old_num_null_bytes, extra_pos, sizeof(uint32_t));
extra_pos += sizeof(uint32_t);
@@ -419,12 +437,15 @@ static int tokudb_hcad_update_fun(
memcpy(&new_len_of_offsets, extra_pos, sizeof(uint32_t));
extra_pos += sizeof(uint32_t);
- max_num_bytes = old_val->size + extra->size + new_len_of_offsets + new_fixed_field_size;
- new_val_data = (uchar *)tokudb_my_malloc(
- max_num_bytes,
- MYF(MY_FAE)
- );
- if (new_val_data == NULL) { goto cleanup; }
+ max_num_bytes =
+ old_val->size + extra->size + new_len_of_offsets + new_fixed_field_size;
+ new_val_data = (uchar *)tokudb::memory::malloc(
+ max_num_bytes,
+ MYF(MY_FAE));
+ if (new_val_data == NULL) {
+ error = ENOMEM;
+ goto cleanup;
+ }
old_fixed_field_ptr = (uchar *) old_val->data;
old_fixed_field_ptr += old_num_null_bytes;
@@ -465,7 +486,7 @@ static int tokudb_hcad_update_fun(
bool is_null_default = false;
extra_pos++;
- assert(op_type == COL_DROP || op_type == COL_ADD);
+ assert_always(op_type == COL_DROP || op_type == COL_ADD);
bool nullable = (extra_pos[0] != 0);
extra_pos++;
if (nullable) {
@@ -474,11 +495,10 @@ static int tokudb_hcad_update_fun(
extra_pos += sizeof(uint32_t);
uint32_t num_bits;
if (op_type == COL_DROP) {
- assert(curr_old_null_pos <= null_bit_position);
+ assert_always(curr_old_null_pos <= null_bit_position);
num_bits = null_bit_position - curr_old_null_pos;
- }
- else {
- assert(curr_new_null_pos <= null_bit_position);
+ } else {
+ assert_always(curr_new_null_pos <= null_bit_position);
num_bits = null_bit_position - curr_new_null_pos;
}
copy_null_bits(
@@ -486,22 +506,19 @@ static int tokudb_hcad_update_fun(
curr_new_null_pos,
num_bits,
old_null_bytes,
- new_null_bytes
- );
+ new_null_bytes);
// update the positions
curr_new_null_pos += num_bits;
curr_old_null_pos += num_bits;
if (op_type == COL_DROP) {
curr_old_null_pos++; // account for dropped column
- }
- else {
+ } else {
is_null_default = (extra_pos[0] != 0);
extra_pos++;
set_overall_null_position(
new_null_bytes,
null_bit_position,
- is_null_default
- );
+ is_null_default);
curr_new_null_pos++; //account for added column
}
}
@@ -518,47 +535,44 @@ static int tokudb_hcad_update_fun(
if (op_type == COL_DROP) {
num_bytes_to_copy = col_offset - curr_old_fixed_offset;
- }
- else {
+ } else {
num_bytes_to_copy = col_offset - curr_new_fixed_offset;
}
memcpy(
new_fixed_field_ptr + curr_new_fixed_offset,
- old_fixed_field_ptr + curr_old_fixed_offset,
- num_bytes_to_copy
- );
+ old_fixed_field_ptr + curr_old_fixed_offset,
+ num_bytes_to_copy);
curr_old_fixed_offset += num_bytes_to_copy;
curr_new_fixed_offset += num_bytes_to_copy;
if (op_type == COL_DROP) {
- // move old_fixed_offset val to skip OVER column that is being dropped
+ // move old_fixed_offset val to skip OVER column that is
+ // being dropped
curr_old_fixed_offset += col_size;
- }
- else {
+ } else {
if (is_null_default) {
// copy zeroes
- memset(new_fixed_field_ptr + curr_new_fixed_offset, 0, col_size);
- }
- else {
+ memset(
+ new_fixed_field_ptr + curr_new_fixed_offset,
+ 0,
+ col_size);
+ } else {
// copy data from extra_pos into new row
memcpy(
new_fixed_field_ptr + curr_new_fixed_offset,
extra_pos,
- col_size
- );
+ col_size);
extra_pos += col_size;
}
curr_new_fixed_offset += col_size;
}
- }
- else if (col_type == COL_VAR) {
+ } else if (col_type == COL_VAR) {
uint32_t var_col_index;
memcpy(&var_col_index, extra_pos, sizeof(uint32_t));
extra_pos += sizeof(uint32_t);
if (op_type == COL_DROP) {
num_var_fields_to_copy = var_col_index - curr_old_num_var_field;
- }
- else {
+ } else {
num_var_fields_to_copy = var_col_index - curr_new_num_var_field;
}
copy_var_fields(
@@ -568,20 +582,21 @@ static int tokudb_hcad_update_fun(
old_num_offset_bytes,
curr_new_var_field_data_ptr,
curr_new_var_field_offset_ptr,
- new_var_field_data_ptr, // pointer to beginning of var fields in new row
- old_var_field_data_ptr, // pointer to beginning of var fields in old row
- new_num_offset_bytes, // number of offset bytes used in new row
+ // pointer to beginning of var fields in new row
+ new_var_field_data_ptr,
+ // pointer to beginning of var fields in old row
+ old_var_field_data_ptr,
+ // number of offset bytes used in new row
+ new_num_offset_bytes,
&num_data_bytes_written,
- &num_offset_bytes_written
- );
+ &num_offset_bytes_written);
curr_new_var_field_data_ptr += num_data_bytes_written;
curr_new_var_field_offset_ptr += num_offset_bytes_written;
curr_new_num_var_field += num_var_fields_to_copy;
curr_old_num_var_field += num_var_fields_to_copy;
if (op_type == COL_DROP) {
curr_old_num_var_field++; // skip over dropped field
- }
- else {
+ } else {
if (is_null_default) {
curr_new_var_field_data_ptr = write_var_field(
curr_new_var_field_offset_ptr,
@@ -589,11 +604,9 @@ static int tokudb_hcad_update_fun(
new_var_field_data_ptr,
NULL, //copying no data
0, //copying 0 bytes
- new_num_offset_bytes
- );
+ new_num_offset_bytes);
curr_new_var_field_offset_ptr += new_num_offset_bytes;
- }
- else {
+ } else {
uint32_t data_length;
memcpy(&data_length, extra_pos, sizeof(data_length));
extra_pos += sizeof(data_length);
@@ -603,20 +616,17 @@ static int tokudb_hcad_update_fun(
new_var_field_data_ptr,
extra_pos, //copying data from mutator
data_length, //copying data_length bytes
- new_num_offset_bytes
- );
+ new_num_offset_bytes);
extra_pos += data_length;
curr_new_var_field_offset_ptr += new_num_offset_bytes;
}
curr_new_num_var_field++; //account for added column
}
- }
- else if (col_type == COL_BLOB) {
+ } else if (col_type == COL_BLOB) {
// handle blob data later
continue;
- }
- else {
- assert(false);
+ } else {
+ assert_unreachable();
}
}
// finish copying the null stuff
@@ -629,19 +639,17 @@ static int tokudb_hcad_update_fun(
curr_new_null_pos,
overall_null_bits_left,
old_null_bytes,
- new_null_bytes
- );
+ new_null_bytes);
// finish copying fixed field stuff
num_bytes_left = old_fixed_field_size - curr_old_fixed_offset;
memcpy(
new_fixed_field_ptr + curr_new_fixed_offset,
old_fixed_field_ptr + curr_old_fixed_offset,
- num_bytes_left
- );
+ num_bytes_left);
curr_old_fixed_offset += num_bytes_left;
curr_new_fixed_offset += num_bytes_left;
// sanity check
- assert(curr_new_fixed_offset == new_fixed_field_size);
+ assert_always(curr_new_fixed_offset == new_fixed_field_size);
// finish copying var field stuff
num_var_fields_to_copy = old_num_var_fields - curr_old_num_var_field;
@@ -652,33 +660,34 @@ static int tokudb_hcad_update_fun(
old_num_offset_bytes,
curr_new_var_field_data_ptr,
curr_new_var_field_offset_ptr,
- new_var_field_data_ptr, // pointer to beginning of var fields in new row
- old_var_field_data_ptr, // pointer to beginning of var fields in old row
- new_num_offset_bytes, // number of offset bytes used in new row
+ // pointer to beginning of var fields in new row
+ new_var_field_data_ptr,
+ // pointer to beginning of var fields in old row
+ old_var_field_data_ptr,
+ // number of offset bytes used in new row
+ new_num_offset_bytes,
&num_data_bytes_written,
- &num_offset_bytes_written
- );
+ &num_offset_bytes_written);
curr_new_var_field_offset_ptr += num_offset_bytes_written;
curr_new_var_field_data_ptr += num_data_bytes_written;
// sanity check
- assert(curr_new_var_field_offset_ptr == new_var_field_data_ptr);
+ assert_always(curr_new_var_field_offset_ptr == new_var_field_data_ptr);
// start handling blobs
get_blob_field_info(
&start_blob_offset,
old_len_of_offsets,
old_var_field_data_ptr,
- old_num_offset_bytes
- );
+ old_num_offset_bytes);
start_blob_ptr = old_var_field_data_ptr + start_blob_offset;
- // if nothing else in extra, then there are no blobs to add or drop, so can copy blobs straight
+ // if nothing else in extra, then there are no blobs to add or drop, so
+ // can copy blobs straight
if ((extra_pos - extra_pos_start) == extra->size) {
num_blob_bytes = old_val->size - (start_blob_ptr - old_null_bytes);
memcpy(curr_new_var_field_data_ptr, start_blob_ptr, num_blob_bytes);
curr_new_var_field_data_ptr += num_blob_bytes;
- }
- // else, there is blob information to process
- else {
+ } else {
+        // there is blob information to process
uchar* len_bytes = NULL;
uint32_t curr_old_blob = 0;
uint32_t curr_new_blob = 0;
@@ -696,11 +705,10 @@ static int tokudb_hcad_update_fun(
uint32_t blob_index;
memcpy(&blob_index, extra_pos, sizeof(blob_index));
extra_pos += sizeof(blob_index);
- assert (op_type == COL_DROP || op_type == COL_ADD);
+            assert_always(op_type == COL_DROP || op_type == COL_ADD);
if (op_type == COL_DROP) {
num_blobs_to_copy = blob_index - curr_old_blob;
- }
- else {
+ } else {
num_blobs_to_copy = blob_index - curr_new_blob;
}
for (uint32_t i = 0; i < num_blobs_to_copy; i++) {
@@ -708,8 +716,7 @@ static int tokudb_hcad_update_fun(
curr_new_var_field_data_ptr,
curr_old_blob_ptr,
len_bytes[curr_old_blob + i],
- false
- );
+ false);
curr_old_blob_ptr += num_bytes_written;
curr_new_var_field_data_ptr += num_bytes_written;
}
@@ -721,12 +728,10 @@ static int tokudb_hcad_update_fun(
NULL,
curr_old_blob_ptr,
len_bytes[curr_old_blob],
- true
- );
+ true);
curr_old_blob++;
curr_old_blob_ptr += num_bytes;
- }
- else {
+ } else {
// copy new data
uint32_t new_len_bytes = extra_pos[0];
extra_pos++;
@@ -734,8 +739,7 @@ static int tokudb_hcad_update_fun(
curr_new_var_field_data_ptr,
extra_pos,
new_len_bytes,
- false
- );
+ false);
curr_new_blob++;
curr_new_var_field_data_ptr += num_bytes;
extra_pos += num_bytes;
@@ -751,27 +755,27 @@ static int tokudb_hcad_update_fun(
error = 0;
cleanup:
- tokudb_my_free(new_val_data);
+ tokudb::memory::free(new_val_data);
return error;
}
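All of the handlers in this file share one calling convention: the engine passes the old row and an opaque `extra` message, and the handler publishes its result through the `set_val` callback rather than returning a buffer. A minimal sketch of a conforming handler, assuming only the DB/DBT types and `set_val` signature visible above (the handler name is hypothetical):

    #include <string.h>
    #include <db.h>  // assumed: the header that defines DB and DBT here

    // Identity update: shows the calling convention only. Decode extra,
    // build a new value, and hand it to set_val(); a NULL old_val means
    // the key does not exist, so an update is a no-op.
    static int example_identity_update(
        DB* db,
        const DBT* key,
        const DBT* old_val,
        const DBT* extra,
        void (*set_val)(const DBT* new_val, void* set_extra),
        void* set_extra) {

        if (old_val != NULL) {
            DBT new_val;
            memset(&new_val, 0, sizeof new_val);
            new_val.data = old_val->data;  // reuse the old bytes unchanged
            new_val.size = old_val->size;
            set_val(&new_val, set_extra);  // publish the result
        }
        return 0;
    }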
-// Expand the variable offset array in the old row given the update mesage in the extra.
+// Expand the variable offset array in the old row given the update message
+// in the extra.
static int tokudb_expand_variable_offsets(
DB* db,
- const DBT *key,
- const DBT *old_val,
- const DBT *extra,
- void (*set_val)(const DBT *new_val, void *set_extra),
- void *set_extra
- )
-{
+ const DBT* key,
+ const DBT* old_val,
+ const DBT* extra,
+ void (*set_val)(const DBT* new_val, void* set_extra),
+ void* set_extra) {
+
int error = 0;
tokudb::buffer extra_val(extra->data, 0, extra->size);
// decode the operation
uint8_t operation;
extra_val.consume(&operation, sizeof operation);
- assert(operation == UPDATE_OP_EXPAND_VARIABLE_OFFSETS);
+ assert_always(operation == UPDATE_OP_EXPAND_VARIABLE_OFFSETS);
// decode number of offsets
uint32_t number_of_offsets;
@@ -781,18 +785,20 @@ static int tokudb_expand_variable_offsets(
uint32_t offset_start;
extra_val.consume(&offset_start, sizeof offset_start);
- assert(extra_val.size() == extra_val.limit());
+ assert_always(extra_val.size() == extra_val.limit());
DBT new_val; memset(&new_val, 0, sizeof new_val);
if (old_val != NULL) {
- assert(offset_start + number_of_offsets <= old_val->size);
+ assert_always(offset_start + number_of_offsets <= old_val->size);
// compute the new val from the old val
- uchar *old_val_ptr = (uchar *)old_val->data;
+ uchar* old_val_ptr = (uchar*)old_val->data;
// allocate space for the new val's data
- uchar *new_val_ptr = (uchar *)tokudb_my_malloc(number_of_offsets + old_val->size, MYF(MY_FAE));
+ uchar* new_val_ptr = (uchar*)tokudb::memory::malloc(
+ number_of_offsets + old_val->size,
+ MYF(MY_FAE));
if (!new_val_ptr) {
error = ENOMEM;
goto cleanup;
@@ -819,8 +825,8 @@ static int tokudb_expand_variable_offsets(
old_val_ptr += n;
new_val.size = new_val_ptr - (uchar *)new_val.data;
- assert(new_val_ptr == (uchar *)new_val.data + new_val.size);
- assert(old_val_ptr == (uchar *)old_val->data + old_val->size);
+ assert_always(new_val_ptr == (uchar *)new_val.data + new_val.size);
+ assert_always(old_val_ptr == (uchar *)old_val->data + old_val->size);
// set the new val
set_val(&new_val, set_extra);
@@ -829,46 +835,50 @@ static int tokudb_expand_variable_offsets(
error = 0;
cleanup:
- tokudb_my_free(new_val.data);
+ tokudb::memory::free(new_val.data);
return error;
}
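The size computed for the new value, `number_of_offsets + old_val->size`, implies that each variable-length offset grows by exactly one byte, i.e. a 1-byte offset array widened to 2-byte entries. Under that assumption, the core of the copy (elided from this hunk) amounts to:

    #include <stdint.h>

    // Sketch only: zero-extend an array of 1-byte row offsets to 2-byte
    // offsets. The helper name is hypothetical; the real loop also copies
    // the surrounding fixed-length and variable-length data.
    static void widen_offsets_1_to_2(
        const uint8_t* old_offsets,
        uint16_t* new_offsets,
        uint32_t number_of_offsets) {
        for (uint32_t i = 0; i < number_of_offsets; i++) {
            new_offsets[i] = old_offsets[i];  // values unchanged, width doubled
        }
    }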
// Expand an int field in an old row given the expand message in the extra.
static int tokudb_expand_int_field(
DB* db,
- const DBT *key,
- const DBT *old_val,
- const DBT *extra,
- void (*set_val)(const DBT *new_val, void *set_extra),
- void *set_extra
- )
-{
+ const DBT* key,
+ const DBT* old_val,
+ const DBT* extra,
+ void (*set_val)(const DBT* new_val, void* set_extra),
+ void* set_extra) {
+
int error = 0;
tokudb::buffer extra_val(extra->data, 0, extra->size);
uint8_t operation;
extra_val.consume(&operation, sizeof operation);
- assert(operation == UPDATE_OP_EXPAND_INT || operation == UPDATE_OP_EXPAND_UINT);
+ assert_always(
+ operation == UPDATE_OP_EXPAND_INT ||
+ operation == UPDATE_OP_EXPAND_UINT);
uint32_t the_offset;
extra_val.consume(&the_offset, sizeof the_offset);
uint32_t old_length;
extra_val.consume(&old_length, sizeof old_length);
uint32_t new_length;
extra_val.consume(&new_length, sizeof new_length);
- assert(extra_val.size() == extra_val.limit());
+ assert_always(extra_val.size() == extra_val.limit());
- assert(new_length >= old_length); // expand only
+ assert_always(new_length >= old_length); // expand only
DBT new_val; memset(&new_val, 0, sizeof new_val);
if (old_val != NULL) {
- assert(the_offset + old_length <= old_val->size); // old field within the old val
+ // old field within the old val
+ assert_always(the_offset + old_length <= old_val->size);
// compute the new val from the old val
- uchar *old_val_ptr = (uchar *)old_val->data;
+ uchar* old_val_ptr = (uchar*)old_val->data;
// allocate space for the new val's data
- uchar *new_val_ptr = (uchar *)tokudb_my_malloc(old_val->size + (new_length - old_length), MYF(MY_FAE));
+ uchar* new_val_ptr = (uchar*)tokudb::memory::malloc(
+ old_val->size + (new_length - old_length),
+ MYF(MY_FAE));
if (!new_val_ptr) {
error = ENOMEM;
goto cleanup;
@@ -882,21 +892,27 @@ static int tokudb_expand_int_field(
switch (operation) {
case UPDATE_OP_EXPAND_INT:
- // fill the entire new value with ones or zeros depending on the sign bit
- // the encoding is little endian
- memset(new_val_ptr, (old_val_ptr[old_length-1] & 0x80) ? 0xff : 0x00, new_length);
- memcpy(new_val_ptr, old_val_ptr, old_length); // overlay the low bytes of the new value with the old value
+ // fill the entire new value with ones or zeros depending on the
+            // sign bit; the encoding is little endian
+ memset(
+ new_val_ptr,
+ (old_val_ptr[old_length-1] & 0x80) ? 0xff : 0x00,
+ new_length);
+ // overlay the low bytes of the new value with the old value
+ memcpy(new_val_ptr, old_val_ptr, old_length);
new_val_ptr += new_length;
old_val_ptr += old_length;
break;
case UPDATE_OP_EXPAND_UINT:
- memset(new_val_ptr, 0, new_length); // fill the entire new value with zeros
- memcpy(new_val_ptr, old_val_ptr, old_length); // overlay the low bytes of the new value with the old value
+ // fill the entire new value with zeros
+ memset(new_val_ptr, 0, new_length);
+ // overlay the low bytes of the new value with the old value
+ memcpy(new_val_ptr, old_val_ptr, old_length);
new_val_ptr += new_length;
old_val_ptr += old_length;
break;
default:
- assert(0);
+ assert_unreachable();
}
// copy the rest
@@ -906,8 +922,8 @@ static int tokudb_expand_int_field(
old_val_ptr += n;
new_val.size = new_val_ptr - (uchar *)new_val.data;
- assert(new_val_ptr == (uchar *)new_val.data + new_val.size);
- assert(old_val_ptr == (uchar *)old_val->data + old_val->size);
+ assert_always(new_val_ptr == (uchar *)new_val.data + new_val.size);
+ assert_always(old_val_ptr == (uchar *)old_val->data + old_val->size);
// set the new val
set_val(&new_val, set_extra);
@@ -916,26 +932,27 @@ static int tokudb_expand_int_field(
error = 0;
cleanup:
- tokudb_my_free(new_val.data);
+ tokudb::memory::free(new_val.data);
return error;
}
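The EXPAND_INT case above is little-endian sign extension: the destination is pre-filled from the sign bit of the old value's most significant byte, and the old bytes then overlay the low end. Isolated as a standalone sketch:

    #include <stdint.h>
    #include <string.h>

    // Widen a little-endian signed integer from old_length to new_length
    // bytes, mirroring the EXPAND_INT branch above.
    static void expand_int_le(
        uint8_t* dst, uint32_t new_length,
        const uint8_t* src, uint32_t old_length) {
        // the sign bit lives in the last (most significant) byte
        memset(dst, (src[old_length - 1] & 0x80) ? 0xff : 0x00, new_length);
        memcpy(dst, src, old_length);  // low bytes keep their old value
    }

For example, widening the 2-byte value -2 (bytes fe ff) to 4 bytes yields fe ff ff ff, which is still -2.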
// Expand a char field in an old row given the expand message in the extra.
static int tokudb_expand_char_field(
DB* db,
- const DBT *key,
- const DBT *old_val,
- const DBT *extra,
- void (*set_val)(const DBT *new_val, void *set_extra),
- void *set_extra
- )
-{
+ const DBT* key,
+ const DBT* old_val,
+ const DBT* extra,
+ void (*set_val)(const DBT* new_val, void* set_extra),
+ void* set_extra) {
+
int error = 0;
tokudb::buffer extra_val(extra->data, 0, extra->size);
uint8_t operation;
extra_val.consume(&operation, sizeof operation);
- assert(operation == UPDATE_OP_EXPAND_CHAR || operation == UPDATE_OP_EXPAND_BINARY);
+ assert_always(
+ operation == UPDATE_OP_EXPAND_CHAR ||
+ operation == UPDATE_OP_EXPAND_BINARY);
uint32_t the_offset;
extra_val.consume(&the_offset, sizeof the_offset);
uint32_t old_length;
@@ -944,20 +961,23 @@ static int tokudb_expand_char_field(
extra_val.consume(&new_length, sizeof new_length);
uchar pad_char;
extra_val.consume(&pad_char, sizeof pad_char);
- assert(extra_val.size() == extra_val.limit());
+ assert_always(extra_val.size() == extra_val.limit());
- assert(new_length >= old_length); // expand only
+ assert_always(new_length >= old_length); // expand only
DBT new_val; memset(&new_val, 0, sizeof new_val);
if (old_val != NULL) {
- assert(the_offset + old_length <= old_val->size); // old field within the old val
+ // old field within the old val
+ assert_always(the_offset + old_length <= old_val->size);
// compute the new val from the old val
- uchar *old_val_ptr = (uchar *)old_val->data;
+ uchar* old_val_ptr = (uchar*)old_val->data;
// allocate space for the new val's data
- uchar *new_val_ptr = (uchar *)tokudb_my_malloc(old_val->size + (new_length - old_length), MYF(MY_FAE));
+ uchar* new_val_ptr = (uchar*)tokudb::memory::malloc(
+ old_val->size + (new_length - old_length),
+ MYF(MY_FAE));
if (!new_val_ptr) {
error = ENOMEM;
goto cleanup;
@@ -972,13 +992,15 @@ static int tokudb_expand_char_field(
switch (operation) {
case UPDATE_OP_EXPAND_CHAR:
case UPDATE_OP_EXPAND_BINARY:
- memset(new_val_ptr, pad_char, new_length); // fill the entire new value with the pad char
- memcpy(new_val_ptr, old_val_ptr, old_length); // overlay the low bytes of the new value with the old value
+ // fill the entire new value with the pad char
+ memset(new_val_ptr, pad_char, new_length);
+ // overlay the low bytes of the new value with the old value
+ memcpy(new_val_ptr, old_val_ptr, old_length);
new_val_ptr += new_length;
old_val_ptr += old_length;
break;
default:
- assert(0);
+ assert_unreachable();
}
// copy the rest
@@ -988,8 +1010,8 @@ static int tokudb_expand_char_field(
old_val_ptr += n;
new_val.size = new_val_ptr - (uchar *)new_val.data;
- assert(new_val_ptr == (uchar *)new_val.data + new_val.size);
- assert(old_val_ptr == (uchar *)old_val->data + old_val->size);
+ assert_always(new_val_ptr == (uchar *)new_val.data + new_val.size);
+ assert_always(old_val_ptr == (uchar *)old_val->data + old_val->size);
// set the new val
set_val(&new_val, set_extra);
@@ -998,7 +1020,7 @@ static int tokudb_expand_char_field(
error = 0;
cleanup:
- tokudb_my_free(new_val.data);
+ tokudb::memory::free(new_val.data);
return error;
}
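The CHAR/BINARY expansion above is the same overlay idea with a pad byte in place of the sign byte; as a sketch:

    #include <stdint.h>
    #include <string.h>

    // Widen a fixed CHAR/BINARY field, padding the tail with pad_char,
    // mirroring the EXPAND_CHAR/EXPAND_BINARY branch above.
    static void expand_char_field(
        uint8_t* dst, uint32_t new_length,
        const uint8_t* src, uint32_t old_length,
        uint8_t pad_char) {
        memset(dst, pad_char, new_length);  // fill the whole field with pad
        memcpy(dst, src, old_length);       // keep the original prefix
    }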
@@ -1006,17 +1028,25 @@ namespace tokudb {
class var_fields {
public:
- var_fields() {
+ inline var_fields() {
}
- void init_var_fields(uint32_t var_offset, uint32_t offset_bytes, uint32_t bytes_per_offset, tokudb::buffer *val_buffer) {
- assert(bytes_per_offset == 0 || bytes_per_offset == 1 || bytes_per_offset == 2);
+ inline void init_var_fields(
+ uint32_t var_offset,
+ uint32_t offset_bytes,
+ uint32_t bytes_per_offset,
+ tokudb::buffer* val_buffer) {
+
+ assert_always(
+ bytes_per_offset == 0 ||
+ bytes_per_offset == 1 ||
+ bytes_per_offset == 2);
m_var_offset = var_offset;
m_val_offset = m_var_offset + offset_bytes;
m_bytes_per_offset = bytes_per_offset;
if (bytes_per_offset > 0) {
m_num_fields = offset_bytes / bytes_per_offset;
} else {
- assert(offset_bytes == 0);
+ assert_always(offset_bytes == 0);
m_num_fields = 0;
}
m_val_buffer = val_buffer;
@@ -1025,7 +1055,10 @@ public:
uint32_t value_length(uint32_t var_index);
void update_offsets(uint32_t var_index, uint32_t old_s, uint32_t new_s);
uint32_t end_offset();
- void replace(uint32_t var_index, void *new_val_ptr, uint32_t new_val_length);
+ void replace(
+ uint32_t var_index,
+ void* new_val_ptr,
+ uint32_t new_val_length);
private:
uint32_t read_offset(uint32_t var_index);
void write_offset(uint32_t var_index, uint32_t v);
@@ -1034,24 +1067,28 @@ private:
uint32_t m_val_offset;
uint32_t m_bytes_per_offset;
uint32_t m_num_fields;
- tokudb::buffer *m_val_buffer;
+ tokudb::buffer* m_val_buffer;
};
// Return the ith variable length offset
uint32_t var_fields::read_offset(uint32_t var_index) {
uint32_t offset = 0;
- m_val_buffer->read(&offset, m_bytes_per_offset, m_var_offset + var_index * m_bytes_per_offset);
+ m_val_buffer->read(
+ &offset, m_bytes_per_offset, m_var_offset + var_index * m_bytes_per_offset);
return offset;
}
// Write the ith variable length offset with a new offset.
void var_fields::write_offset(uint32_t var_index, uint32_t new_offset) {
- m_val_buffer->write(&new_offset, m_bytes_per_offset, m_var_offset + var_index * m_bytes_per_offset);
+ m_val_buffer->write(
+ &new_offset,
+ m_bytes_per_offset,
+ m_var_offset + var_index * m_bytes_per_offset);
}
// Return the offset of the ith variable length field
uint32_t var_fields::value_offset(uint32_t var_index) {
- assert(var_index < m_num_fields);
+ assert_always(var_index < m_num_fields);
if (var_index == 0)
return m_val_offset;
else
@@ -1060,16 +1097,21 @@ uint32_t var_fields::value_offset(uint32_t var_index) {
// Return the length of the ith variable length field
uint32_t var_fields::value_length(uint32_t var_index) {
- assert(var_index < m_num_fields);
+ assert_always(var_index < m_num_fields);
if (var_index == 0)
return read_offset(0);
else
return read_offset(var_index) - read_offset(var_index-1);
}
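value_offset() and value_length() together pin down the encoding: each stored offset is the cumulative end of that field's data, so field lengths fall out as differences of adjacent offsets. A tiny self-contained check of that arithmetic:

    #include <assert.h>
    #include <stdint.h>

    int main() {
        // three var fields of lengths 3, 0 and 5 encode as end offsets:
        uint32_t end_offset[3] = {3, 3, 8};
        assert(end_offset[0] == 3);                  // field 0 is 3 bytes
        assert(end_offset[1] - end_offset[0] == 0);  // field 1 is empty
        assert(end_offset[2] - end_offset[1] == 5);  // field 2 is 5 bytes
        return 0;
    }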
-// The length of the ith variable length fields changed. Update all of the subsequent offsets.
-void var_fields::update_offsets(uint32_t var_index, uint32_t old_s, uint32_t new_s) {
- assert(var_index < m_num_fields);
+// The length of the ith variable length field changed.
+// Update all of the subsequent offsets.
+void var_fields::update_offsets(
+ uint32_t var_index,
+ uint32_t old_s,
+ uint32_t new_s) {
+
+ assert_always(var_index < m_num_fields);
if (old_s == new_s)
return;
for (uint i = var_index; i < m_num_fields; i++) {
@@ -1088,7 +1130,11 @@ uint32_t var_fields::end_offset() {
return m_val_offset + read_offset(m_num_fields-1);
}
-void var_fields::replace(uint32_t var_index, void *new_val_ptr, uint32_t new_val_length) {
+void var_fields::replace(
+ uint32_t var_index,
+ void* new_val_ptr,
+ uint32_t new_val_length) {
+
// replace the new val with the extra val
uint32_t the_offset = value_offset(var_index);
uint32_t old_s = value_length(var_index);
@@ -1103,15 +1149,23 @@ class blob_fields {
public:
blob_fields() {
}
- void init_blob_fields(uint32_t num_blobs, const uint8_t *blob_lengths, tokudb::buffer *val_buffer) {
- m_num_blobs = num_blobs; m_blob_lengths = blob_lengths; m_val_buffer = val_buffer;
+ void init_blob_fields(
+ uint32_t num_blobs,
+ const uint8_t* blob_lengths,
+ tokudb::buffer* val_buffer) {
+ m_num_blobs = num_blobs;
+ m_blob_lengths = blob_lengths;
+ m_val_buffer = val_buffer;
}
void start_blobs(uint32_t offset) {
m_blob_offset = offset;
}
void replace(uint32_t blob_index, uint32_t length, void *p);
- void expand_length(uint32_t blob_index, uint8_t old_length_length, uint8_t new_length_length);
+ void expand_length(
+ uint32_t blob_index,
+ uint8_t old_length_length,
+ uint8_t new_length_length);
private:
uint32_t read_length(uint32_t offset, size_t size);
void write_length(uint32_t offset, size_t size, uint32_t new_length);
@@ -1129,12 +1183,15 @@ uint32_t blob_fields::read_length(uint32_t offset, size_t blob_length) {
return length;
}
-void blob_fields::write_length(uint32_t offset, size_t size, uint32_t new_length) {
+void blob_fields::write_length(
+ uint32_t offset,
+ size_t size,
+ uint32_t new_length) {
m_val_buffer->write(&new_length, size, offset);
}
uint32_t blob_fields::blob_offset(uint32_t blob_index) {
- assert(blob_index < m_num_blobs);
+ assert_always(blob_index < m_num_blobs);
uint32_t offset = m_blob_offset;
for (uint i = 0; i < blob_index; i++) {
uint32_t blob_length = m_blob_lengths[i];
@@ -1144,8 +1201,12 @@ uint32_t blob_fields::blob_offset(uint32_t blob_index) {
return offset;
}
-void blob_fields::replace(uint32_t blob_index, uint32_t new_length, void *new_value) {
- assert(blob_index < m_num_blobs);
+void blob_fields::replace(
+ uint32_t blob_index,
+ uint32_t new_length,
+ void* new_value) {
+
+ assert_always(blob_index < m_num_blobs);
// compute the ith blob offset
uint32_t offset = blob_offset(blob_index);
@@ -1155,15 +1216,23 @@ void blob_fields::replace(uint32_t blob_index, uint32_t new_length, void *new_va
uint32_t old_length = read_length(offset, blob_length);
// replace the data
- m_val_buffer->replace(offset + blob_length, old_length, new_value, new_length);
+ m_val_buffer->replace(
+ offset + blob_length,
+ old_length,
+ new_value,
+ new_length);
// write the new length
write_length(offset, blob_length, new_length);
}
-void blob_fields::expand_length(uint32_t blob_index, uint8_t old_length_length, uint8_t new_length_length) {
- assert(blob_index < m_num_blobs);
- assert(old_length_length == m_blob_lengths[blob_index]);
+void blob_fields::expand_length(
+ uint32_t blob_index,
+ uint8_t old_length_length,
+ uint8_t new_length_length) {
+
+ assert_always(blob_index < m_num_blobs);
+ assert_always(old_length_length == m_blob_lengths[blob_index]);
// compute the ith blob offset
uint32_t offset = blob_offset(blob_index);
@@ -1172,7 +1241,11 @@ void blob_fields::expand_length(uint32_t blob_index, uint8_t old_length_length,
uint32_t blob_length = read_length(offset, old_length_length);
// expand the length
- m_val_buffer->replace(offset, old_length_length, &blob_length, new_length_length);
+ m_val_buffer->replace(
+ offset,
+ old_length_length,
+ &blob_length,
+ new_length_length);
}
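blob_offset() above implies the layout of the blob region: blobs sit back to back, each as a little-endian length prefix of m_blob_lengths[i] bytes followed by the data. The same walk as a standalone sketch (little-endian host assumed, as in the original):

    #include <stdint.h>
    #include <string.h>

    // Offset of blob `blob_index` in a region laid out as
    // [len bytes][data][len bytes][data]...; len_width[i] is the width of
    // blob i's length prefix (what m_blob_lengths holds).
    static uint32_t blob_offset_of(
        const uint8_t* region,
        const uint8_t* len_width,
        uint32_t blob_index) {
        uint32_t offset = 0;
        for (uint32_t i = 0; i < blob_index; i++) {
            uint32_t data_len = 0;
            memcpy(&data_len, region + offset, len_width[i]);  // LE read
            offset += len_width[i] + data_len;  // skip prefix + payload
        }
        return offset;
    }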
class value_map {
@@ -1180,8 +1253,16 @@ public:
value_map(tokudb::buffer *val_buffer) : m_val_buffer(val_buffer) {
}
- void init_var_fields(uint32_t var_offset, uint32_t offset_bytes, uint32_t bytes_per_offset) {
- m_var_fields.init_var_fields(var_offset, offset_bytes, bytes_per_offset, m_val_buffer);
+ void init_var_fields(
+ uint32_t var_offset,
+ uint32_t offset_bytes,
+ uint32_t bytes_per_offset) {
+
+ m_var_fields.init_var_fields(
+ var_offset,
+ offset_bytes,
+ bytes_per_offset,
+ m_val_buffer);
}
void init_blob_fields(uint32_t num_blobs, const uint8_t *blob_lengths) {
@@ -1189,31 +1270,63 @@ public:
}
// Replace the value of a fixed length field
- void replace_fixed(uint32_t the_offset, uint32_t field_null_num, void *new_val_ptr, uint32_t new_val_length) {
- m_val_buffer->replace(the_offset, new_val_length, new_val_ptr, new_val_length);
+ void replace_fixed(
+ uint32_t the_offset,
+ uint32_t field_null_num,
+ void* new_val_ptr,
+ uint32_t new_val_length) {
+
+ m_val_buffer->replace(
+ the_offset,
+ new_val_length,
+ new_val_ptr,
+ new_val_length);
maybe_clear_null(field_null_num);
}
// Replace the value of a variable length field
- void replace_varchar(uint32_t var_index, uint32_t field_null_num, void *new_val_ptr, uint32_t new_val_length) {
+ void replace_varchar(
+ uint32_t var_index,
+ uint32_t field_null_num,
+ void* new_val_ptr,
+ uint32_t new_val_length) {
+
m_var_fields.replace(var_index, new_val_ptr, new_val_length);
maybe_clear_null(field_null_num);
}
// Replace the value of a blob field
- void replace_blob(uint32_t blob_index, uint32_t field_null_num, void *new_val_ptr, uint32_t new_val_length) {
+ void replace_blob(
+ uint32_t blob_index,
+ uint32_t field_null_num,
+ void* new_val_ptr,
+ uint32_t new_val_length) {
+
m_blob_fields.start_blobs(m_var_fields.end_offset());
m_blob_fields.replace(blob_index, new_val_length, new_val_ptr);
maybe_clear_null(field_null_num);
}
- void expand_blob_lengths(uint32_t num_blob, const uint8_t *old_length, const uint8_t *new_length);
-
- void int_op(uint32_t operation, uint32_t the_offset, uint32_t length, uint32_t field_null_num,
- tokudb::buffer &old_val, void *extra_val);
-
- void uint_op(uint32_t operation, uint32_t the_offset, uint32_t length, uint32_t field_null_num,
- tokudb::buffer &old_val, void *extra_val);
+ void expand_blob_lengths(
+ uint32_t num_blob,
+ const uint8_t* old_length,
+ const uint8_t* new_length);
+
+ void int_op(
+ uint32_t operation,
+ uint32_t the_offset,
+ uint32_t length,
+ uint32_t field_null_num,
+ tokudb::buffer& old_val,
+ void* extra_val);
+
+ void uint_op(
+ uint32_t operation,
+ uint32_t the_offset,
+ uint32_t length,
+ uint32_t field_null_num,
+ tokudb::buffer& old_val,
+ void* extra_val);
private:
bool is_null(uint32_t null_num, uchar *null_bytes) {
@@ -1234,7 +1347,10 @@ private:
null_num &= ~(1<<31);
else
null_num -= 1;
- set_overall_null_position((uchar *) m_val_buffer->data(), null_num, false);
+ set_overall_null_position(
+ (uchar*)m_val_buffer->data(),
+ null_num,
+ false);
}
}
@@ -1245,11 +1361,19 @@ private:
};
// Update an int field: signed newval@offset = old_val@offset OP extra_val
-void value_map::int_op(uint32_t operation, uint32_t the_offset, uint32_t length, uint32_t field_null_num,
- tokudb::buffer &old_val, void *extra_val) {
- assert(the_offset + length <= m_val_buffer->size());
- assert(the_offset + length <= old_val.size());
- assert(length == 1 || length == 2 || length == 3 || length == 4 || length == 8);
+void value_map::int_op(
+ uint32_t operation,
+ uint32_t the_offset,
+ uint32_t length,
+ uint32_t field_null_num,
+ tokudb::buffer &old_val,
+ void* extra_val) {
+
+ assert_always(the_offset + length <= m_val_buffer->size());
+ assert_always(the_offset + length <= old_val.size());
+ assert_always(
+ length == 1 || length == 2 || length == 3 ||
+ length == 4 || length == 8);
uchar *old_val_ptr = (uchar *) old_val.data();
bool field_is_null = is_null(field_null_num, old_val_ptr);
@@ -1287,16 +1411,24 @@ void value_map::int_op(uint32_t operation, uint32_t the_offset, uint32_t length,
}
break;
default:
- assert(0);
+ assert_unreachable();
}
}
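The arithmetic bodies of int_op() are elided from this hunk, so the following is only a plausible sketch of what a '+' case on an 8-byte signed field could look like; whether the real code clamps or wraps on overflow is not shown here:

    #include <stdint.h>

    // Hypothetical saturating add for a signed 64-bit field; illustrative
    // only, not the actual int_op implementation.
    static int64_t signed_add_clamped(int64_t old_v, int64_t delta) {
        if (delta > 0 && old_v > INT64_MAX - delta)
            return INT64_MAX;  // clamp on positive overflow
        if (delta < 0 && old_v < INT64_MIN - delta)
            return INT64_MIN;  // clamp on negative overflow
        return old_v + delta;
    }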
// Update an unsigned field: unsigned newval@offset = old_val@offset OP extra_val
-void value_map::uint_op(uint32_t operation, uint32_t the_offset, uint32_t length, uint32_t field_null_num,
- tokudb::buffer &old_val, void *extra_val) {
- assert(the_offset + length <= m_val_buffer->size());
- assert(the_offset + length <= old_val.size());
- assert(length == 1 || length == 2 || length == 3 || length == 4 || length == 8);
+void value_map::uint_op(
+ uint32_t operation,
+ uint32_t the_offset,
+ uint32_t length,
+ uint32_t field_null_num,
+ tokudb::buffer& old_val,
+ void* extra_val) {
+
+ assert_always(the_offset + length <= m_val_buffer->size());
+ assert_always(the_offset + length <= old_val.size());
+ assert_always(
+ length == 1 || length == 2 || length == 3 ||
+ length == 4 || length == 8);
uchar *old_val_ptr = (uchar *) old_val.data();
bool field_is_null = is_null(field_null_num, old_val_ptr);
@@ -1326,16 +1458,23 @@ void value_map::uint_op(uint32_t operation, uint32_t the_offset, uint32_t length
}
break;
default:
- assert(0);
+ assert_unreachable();
}
}
-void value_map::expand_blob_lengths(uint32_t num_blob, const uint8_t *old_length, const uint8_t *new_length) {
+void value_map::expand_blob_lengths(
+ uint32_t num_blob,
+ const uint8_t* old_length,
+ const uint8_t* new_length) {
+
uint8_t current_length[num_blob];
memcpy(current_length, old_length, num_blob);
for (uint32_t i = 0; i < num_blob; i++) {
if (new_length[i] > current_length[i]) {
- m_blob_fields.init_blob_fields(num_blob, current_length, m_val_buffer);
+ m_blob_fields.init_blob_fields(
+ num_blob,
+ current_length,
+ m_val_buffer);
m_blob_fields.start_blobs(m_var_fields.end_offset());
m_blob_fields.expand_length(i, current_length[i], new_length[i]);
current_length[i] = new_length[i];
@@ -1348,30 +1487,29 @@ void value_map::expand_blob_lengths(uint32_t num_blob, const uint8_t *old_length
static uint32_t consume_uint32(tokudb::buffer &b) {
uint32_t n;
size_t s = b.consume_ui<uint32_t>(&n);
- assert(s > 0);
+ assert_always(s > 0);
return n;
}
static uint8_t *consume_uint8_array(tokudb::buffer &b, uint32_t array_size) {
uint8_t *p = (uint8_t *) b.consume_ptr(array_size);
- assert(p);
+ assert_always(p);
return p;
}
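These two helpers wrap the tokudb::buffer cursor so that a malformed message trips an assertion instead of reading past the end of the extra. The same idea in self-contained form (a sketch, not the real buffer class):

    #include <assert.h>
    #include <stdint.h>
    #include <string.h>

    // Bounded message cursor: the moral equivalent of consume_uint32 above.
    struct msg_cursor {
        const uint8_t* pos;
        const uint8_t* end;
    };

    static uint32_t cursor_consume_uint32(msg_cursor* c) {
        uint32_t n;
        assert(c->pos + sizeof n <= c->end);  // refuse to run off the end
        memcpy(&n, c->pos, sizeof n);
        c->pos += sizeof n;
        return n;
    }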
static int tokudb_expand_blobs(
DB* db,
- const DBT *key_dbt,
- const DBT *old_val_dbt,
- const DBT *extra,
- void (*set_val)(const DBT *new_val_dbt, void *set_extra),
- void *set_extra
- )
-{
+ const DBT* key_dbt,
+ const DBT* old_val_dbt,
+ const DBT* extra,
+ void (*set_val)(const DBT* new_val_dbt, void* set_extra),
+ void* set_extra) {
+
tokudb::buffer extra_val(extra->data, 0, extra->size);
uint8_t operation;
extra_val.consume(&operation, sizeof operation);
- assert(operation == UPDATE_OP_EXPAND_BLOB);
+ assert_always(operation == UPDATE_OP_EXPAND_BLOB);
if (old_val_dbt != NULL) {
// new val = old val
@@ -1384,13 +1522,18 @@ static int tokudb_expand_blobs(
uint32_t var_field_offset = consume_uint32(extra_val);
uint32_t var_offset_bytes = consume_uint32(extra_val);
uint32_t bytes_per_offset = consume_uint32(extra_val);
- vd.init_var_fields(var_field_offset, var_offset_bytes, bytes_per_offset);
+ vd.init_var_fields(
+ var_field_offset,
+ var_offset_bytes,
+ bytes_per_offset);
// decode blob info
uint32_t num_blob = consume_uint32(extra_val);
- const uint8_t *old_blob_length = consume_uint8_array(extra_val, num_blob);
- const uint8_t *new_blob_length = consume_uint8_array(extra_val, num_blob);
- assert(extra_val.size() == extra_val.limit());
+ const uint8_t* old_blob_length =
+ consume_uint8_array(extra_val, num_blob);
+ const uint8_t* new_blob_length =
+ consume_uint8_array(extra_val, num_blob);
+ assert_always(extra_val.size() == extra_val.limit());
// expand blob lengths
vd.expand_blob_lengths(num_blob, old_blob_length, new_blob_length);
@@ -1404,8 +1547,14 @@ static int tokudb_expand_blobs(
return 0;
}
-// Decode and apply a sequence of update operations defined in the extra to the old value and put the result in the new value.
-static void apply_1_updates(tokudb::value_map &vd, tokudb::buffer &new_val, tokudb::buffer &old_val, tokudb::buffer &extra_val) {
+// Decode and apply a sequence of update operations defined in the extra to
+// the old value and put the result in the new value.
+static void apply_1_updates(
+ tokudb::value_map& vd,
+ tokudb::buffer& new_val,
+ tokudb::buffer& old_val,
+ tokudb::buffer& extra_val) {
+
uint32_t num_updates;
extra_val.consume(&num_updates, sizeof num_updates);
for ( ; num_updates > 0; num_updates--) {
@@ -1428,46 +1577,70 @@ static void apply_1_updates(tokudb::value_map &vd, tokudb::buffer &new_val, toku
switch (field_type) {
case UPDATE_TYPE_INT:
if (update_operation == '=')
- vd.replace_fixed(the_offset, field_null_num, extra_val_ptr, extra_val_length);
+ vd.replace_fixed(
+ the_offset,
+ field_null_num,
+ extra_val_ptr,
+ extra_val_length);
else
- vd.int_op(update_operation, the_offset, extra_val_length, field_null_num, old_val, extra_val_ptr);
+ vd.int_op(
+ update_operation,
+ the_offset,
+ extra_val_length,
+ field_null_num,
+ old_val,
+ extra_val_ptr);
break;
case UPDATE_TYPE_UINT:
if (update_operation == '=')
- vd.replace_fixed(the_offset, field_null_num, extra_val_ptr, extra_val_length);
+ vd.replace_fixed(
+ the_offset,
+ field_null_num,
+ extra_val_ptr,
+ extra_val_length);
else
- vd.uint_op(update_operation, the_offset, extra_val_length, field_null_num, old_val, extra_val_ptr);
+ vd.uint_op(
+ update_operation,
+ the_offset,
+ extra_val_length,
+ field_null_num,
+ old_val,
+ extra_val_ptr);
break;
case UPDATE_TYPE_CHAR:
case UPDATE_TYPE_BINARY:
if (update_operation == '=')
- vd.replace_fixed(the_offset, field_null_num, extra_val_ptr, extra_val_length);
+ vd.replace_fixed(
+ the_offset,
+ field_null_num,
+ extra_val_ptr,
+ extra_val_length);
else
- assert(0);
+ assert_unreachable();
break;
default:
- assert(0);
+ assert_unreachable();
break;
}
}
- assert(extra_val.size() == extra_val.limit());
+ assert_always(extra_val.size() == extra_val.limit());
}
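Read together with tokudb_update_1_fun() below, an UPDATE_OP_UPDATE_1 message starts with the operation byte, a three-word variable-field descriptor, and an update count, followed by one record per update. The exact per-update field order is partly elided above, so the encoder below is illustrative only; the opcode and type values are hypothetical placeholders:

    #include <stddef.h>
    #include <stdint.h>
    #include <string.h>

    static const uint8_t  kUpdateOp1     = 0x10;  // hypothetical value
    static const uint32_t kUpdateTypeInt = 1;     // hypothetical value

    static size_t put_u32(uint8_t* p, uint32_t v) {
        memcpy(p, &v, sizeof v);
        return sizeof v;
    }

    // Pack a one-update message that assigns a 4-byte int at `the_offset`.
    static size_t pack_update_1(uint8_t* out, uint32_t the_offset,
                                uint32_t field_null_num, int32_t new_value) {
        size_t n = 0;
        out[n++] = kUpdateOp1;                    // operation byte
        n += put_u32(out + n, 0);                 // var field offset (none)
        n += put_u32(out + n, 0);                 // var offset bytes
        n += put_u32(out + n, 0);                 // bytes per offset
        n += put_u32(out + n, 1);                 // num_updates
        n += put_u32(out + n, (uint32_t)'=');     // update_operation: assign
        n += put_u32(out + n, kUpdateTypeInt);    // field_type
        n += put_u32(out + n, field_null_num);    // null bookkeeping
        n += put_u32(out + n, the_offset);        // where in the row
        n += put_u32(out + n, sizeof new_value);  // extra_val_length
        memcpy(out + n, &new_value, sizeof new_value);
        return n + sizeof new_value;
    }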
-// Simple update handler. Decode the update message, apply the update operations to the old value, and set the new value.
+// Simple update handler. Decode the update message, apply the update operations
+// to the old value, and set the new value.
static int tokudb_update_1_fun(
DB* db,
- const DBT *key_dbt,
- const DBT *old_val_dbt,
- const DBT *extra,
- void (*set_val)(const DBT *new_val_dbt, void *set_extra),
- void *set_extra
- )
-{
+ const DBT* key_dbt,
+ const DBT* old_val_dbt,
+ const DBT* extra,
+ void (*set_val)(const DBT* new_val_dbt, void* set_extra),
+ void* set_extra) {
+
tokudb::buffer extra_val(extra->data, 0, extra->size);
uint8_t operation;
extra_val.consume(&operation, sizeof operation);
- assert(operation == UPDATE_OP_UPDATE_1);
+ assert_always(operation == UPDATE_OP_UPDATE_1);
if (old_val_dbt != NULL) {
// get the simple descriptor
@@ -1480,14 +1653,20 @@ static int tokudb_update_1_fun(
uint32_t m_bytes_per_offset;
extra_val.consume(&m_bytes_per_offset, sizeof m_bytes_per_offset);
- tokudb::buffer old_val(old_val_dbt->data, old_val_dbt->size, old_val_dbt->size);
+ tokudb::buffer old_val(
+ old_val_dbt->data,
+ old_val_dbt->size,
+ old_val_dbt->size);
// new val = old val
tokudb::buffer new_val;
new_val.append(old_val_dbt->data, old_val_dbt->size);
tokudb::value_map vd(&new_val);
- vd.init_var_fields(m_var_field_offset, m_var_offset_bytes, m_bytes_per_offset);
+ vd.init_var_fields(
+ m_var_field_offset,
+ m_var_offset_bytes,
+ m_bytes_per_offset);
// apply updates to new val
apply_1_updates(vd, new_val, old_val, extra_val);
@@ -1502,22 +1681,23 @@ static int tokudb_update_1_fun(
return 0;
}
-// Simple upsert handler. Decode the upsert message. If the key does not exist, then insert a new value from the extra.
-// Otherwise, apply the update operations to the old value, and then set the new value.
+// Simple upsert handler. Decode the upsert message. If the key does not exist,
+// then insert a new value from the extra.
+// Otherwise, apply the update operations to the old value, and then set the
+// new value.
static int tokudb_upsert_1_fun(
DB* db,
- const DBT *key_dbt,
- const DBT *old_val_dbt,
- const DBT *extra,
- void (*set_val)(const DBT *new_val_dbt, void *set_extra),
- void *set_extra
- )
-{
+ const DBT* key_dbt,
+ const DBT* old_val_dbt,
+ const DBT* extra,
+ void (*set_val)(const DBT* new_val_dbt, void* set_extra),
+ void* set_extra) {
+
tokudb::buffer extra_val(extra->data, 0, extra->size);
uint8_t operation;
extra_val.consume(&operation, sizeof operation);
- assert(operation == UPDATE_OP_UPSERT_1);
+ assert_always(operation == UPDATE_OP_UPSERT_1);
uint32_t insert_length;
extra_val.consume(&insert_length, sizeof insert_length);
@@ -1540,14 +1720,20 @@ static int tokudb_upsert_1_fun(
uint32_t m_bytes_per_offset;
extra_val.consume(&m_bytes_per_offset, sizeof m_bytes_per_offset);
- tokudb::buffer old_val(old_val_dbt->data, old_val_dbt->size, old_val_dbt->size);
+ tokudb::buffer old_val(
+ old_val_dbt->data,
+ old_val_dbt->size,
+ old_val_dbt->size);
// new val = old val
tokudb::buffer new_val;
new_val.append(old_val_dbt->data, old_val_dbt->size);
tokudb::value_map vd(&new_val);
- vd.init_var_fields(m_var_field_offset, m_var_offset_bytes, m_bytes_per_offset);
+ vd.init_var_fields(
+ m_var_field_offset,
+ m_var_offset_bytes,
+ m_bytes_per_offset);
// apply updates to new val
apply_1_updates(vd, new_val, old_val, extra_val);
@@ -1562,8 +1748,14 @@ static int tokudb_upsert_1_fun(
return 0;
}
-// Decode and apply a sequence of update operations defined in the extra to the old value and put the result in the new value.
-static void apply_2_updates(tokudb::value_map &vd, tokudb::buffer &new_val, tokudb::buffer &old_val, tokudb::buffer &extra_val) {
+// Decode and apply a sequence of update operations defined in the extra to the
+// old value and put the result in the new value.
+static void apply_2_updates(
+ tokudb::value_map& vd,
+ tokudb::buffer& new_val,
+ tokudb::buffer& old_val,
+ tokudb::buffer& extra_val) {
+
uint32_t num_updates = consume_uint32(extra_val);
for (uint32_t i = 0; i < num_updates; i++) {
uint32_t update_operation = consume_uint32(extra_val);
@@ -1571,79 +1763,118 @@ static void apply_2_updates(tokudb::value_map &vd, tokudb::buffer &new_val, toku
uint32_t var_field_offset = consume_uint32(extra_val);
uint32_t var_offset_bytes = consume_uint32(extra_val);
uint32_t bytes_per_offset = consume_uint32(extra_val);
- vd.init_var_fields(var_field_offset, var_offset_bytes, bytes_per_offset);
+ vd.init_var_fields(
+ var_field_offset,
+ var_offset_bytes,
+ bytes_per_offset);
} else if (update_operation == 'b') {
uint32_t num_blobs = consume_uint32(extra_val);
- const uint8_t *blob_lengths = consume_uint8_array(extra_val, num_blobs);
+ const uint8_t* blob_lengths =
+ consume_uint8_array(extra_val, num_blobs);
vd.init_blob_fields(num_blobs, blob_lengths);
} else {
uint32_t field_type = consume_uint32(extra_val);
uint32_t field_null_num = consume_uint32(extra_val);
uint32_t the_offset = consume_uint32(extra_val);
uint32_t extra_val_length = consume_uint32(extra_val);
- void *extra_val_ptr = extra_val.consume_ptr(extra_val_length); assert(extra_val_ptr);
+ void* extra_val_ptr = extra_val.consume_ptr(extra_val_length);
+ assert_always(extra_val_ptr);
switch (field_type) {
case UPDATE_TYPE_INT:
if (update_operation == '=')
- vd.replace_fixed(the_offset, field_null_num, extra_val_ptr, extra_val_length);
+ vd.replace_fixed(
+ the_offset,
+ field_null_num,
+ extra_val_ptr,
+ extra_val_length);
else
- vd.int_op(update_operation, the_offset, extra_val_length, field_null_num, old_val, extra_val_ptr);
+ vd.int_op(
+ update_operation,
+ the_offset,
+ extra_val_length,
+ field_null_num,
+ old_val,
+ extra_val_ptr);
break;
case UPDATE_TYPE_UINT:
if (update_operation == '=')
- vd.replace_fixed(the_offset, field_null_num, extra_val_ptr, extra_val_length);
+ vd.replace_fixed(
+ the_offset,
+ field_null_num,
+ extra_val_ptr,
+ extra_val_length);
else
- vd.uint_op(update_operation, the_offset, extra_val_length, field_null_num, old_val, extra_val_ptr);
+ vd.uint_op(
+ update_operation,
+ the_offset,
+ extra_val_length,
+ field_null_num,
+ old_val,
+ extra_val_ptr);
break;
case UPDATE_TYPE_CHAR:
case UPDATE_TYPE_BINARY:
if (update_operation == '=')
- vd.replace_fixed(the_offset, field_null_num, extra_val_ptr, extra_val_length);
+ vd.replace_fixed(
+ the_offset,
+ field_null_num,
+ extra_val_ptr,
+ extra_val_length);
else
- assert(0);
+ assert_unreachable();
break;
case UPDATE_TYPE_VARBINARY:
case UPDATE_TYPE_VARCHAR:
if (update_operation == '=')
- vd.replace_varchar(the_offset, field_null_num, extra_val_ptr, extra_val_length);
+ vd.replace_varchar(
+ the_offset,
+ field_null_num,
+ extra_val_ptr,
+ extra_val_length);
else
- assert(0);
+ assert_unreachable();
break;
case UPDATE_TYPE_TEXT:
case UPDATE_TYPE_BLOB:
if (update_operation == '=')
- vd.replace_blob(the_offset, field_null_num, extra_val_ptr, extra_val_length);
+ vd.replace_blob(
+ the_offset,
+ field_null_num,
+ extra_val_ptr,
+ extra_val_length);
else
- assert(0);
+ assert_unreachable();
break;
default:
- assert(0);
- break;
+ assert_unreachable();
}
}
}
- assert(extra_val.size() == extra_val.limit());
+ assert_always(extra_val.size() == extra_val.limit());
}
-// Simple update handler. Decode the update message, apply the update operations to the old value, and set the new value.
+// Simple update handler. Decode the update message, apply the update
+// operations to the old value, and set the new value.
static int tokudb_update_2_fun(
DB* db,
- const DBT *key_dbt,
- const DBT *old_val_dbt,
- const DBT *extra,
- void (*set_val)(const DBT *new_val_dbt, void *set_extra),
- void *set_extra
- )
-{
+ const DBT* key_dbt,
+ const DBT* old_val_dbt,
+ const DBT* extra,
+ void (*set_val)(const DBT* new_val_dbt, void* set_extra),
+ void* set_extra) {
+
tokudb::buffer extra_val(extra->data, 0, extra->size);
uint8_t op;
extra_val.consume(&op, sizeof op);
- assert(op == UPDATE_OP_UPDATE_2);
+ assert_always(op == UPDATE_OP_UPDATE_2);
if (old_val_dbt != NULL) {
- tokudb::buffer old_val(old_val_dbt->data, old_val_dbt->size, old_val_dbt->size);
+ tokudb::buffer old_val(
+ old_val_dbt->data,
+ old_val_dbt->size,
+ old_val_dbt->size);
// new val = old val
tokudb::buffer new_val;
@@ -1664,26 +1895,28 @@ static int tokudb_update_2_fun(
return 0;
}
-// Simple upsert handler. Decode the upsert message. If the key does not exist, then insert a new value from the extra.
-// Otherwise, apply the update operations to the old value, and then set the new value.
+// Simple upsert handler. Decode the upsert message. If the key does not exist,
+// then insert a new value from the extra.
+// Otherwise, apply the update operations to the old value, and then set the
+// new value.
static int tokudb_upsert_2_fun(
DB* db,
- const DBT *key_dbt,
- const DBT *old_val_dbt,
- const DBT *extra,
- void (*set_val)(const DBT *new_val_dbt, void *set_extra),
- void *set_extra
- )
-{
+ const DBT* key_dbt,
+ const DBT* old_val_dbt,
+ const DBT* extra,
+ void (*set_val)(const DBT* new_val_dbt, void* set_extra),
+ void* set_extra) {
+
tokudb::buffer extra_val(extra->data, 0, extra->size);
uint8_t op;
extra_val.consume(&op, sizeof op);
- assert(op == UPDATE_OP_UPSERT_2);
+ assert_always(op == UPDATE_OP_UPSERT_2);
uint32_t insert_length = consume_uint32(extra_val);
- assert(insert_length < extra_val.limit());
- void *insert_row = extra_val.consume_ptr(insert_length); assert(insert_row);
+ assert_always(insert_length < extra_val.limit());
+ void* insert_row = extra_val.consume_ptr(insert_length);
+ assert_always(insert_row);
if (old_val_dbt == NULL) {
// insert a new row
@@ -1692,7 +1925,10 @@ static int tokudb_upsert_2_fun(
new_val_dbt.data = insert_row;
set_val(&new_val_dbt, set_extra);
} else {
- tokudb::buffer old_val(old_val_dbt->data, old_val_dbt->size, old_val_dbt->size);
+ tokudb::buffer old_val(
+ old_val_dbt->data,
+ old_val_dbt->size,
+ old_val_dbt->size);
// new val = old val
tokudb::buffer new_val;
@@ -1713,56 +1949,107 @@ static int tokudb_upsert_2_fun(
return 0;
}
-// This function is the update callback function that is registered with the YDB environment.
-// It uses the first byte in the update message to identify the update message type and call
-// the handler for that message.
+// This function is the update callback function that is registered with the
+// YDB environment. It uses the first byte in the update message to identify
+// the update message type and call the handler for that message.
int tokudb_update_fun(
DB* db,
- const DBT *key,
- const DBT *old_val,
- const DBT *extra,
- void (*set_val)(const DBT *new_val, void *set_extra),
- void *set_extra
- )
-{
- assert(extra->size > 0);
- uint8_t *extra_pos = (uchar *)extra->data;
+ const DBT* key,
+ const DBT* old_val,
+ const DBT* extra,
+ void (*set_val)(const DBT* new_val, void* set_extra),
+ void* set_extra) {
+
+ assert_always(extra->size > 0);
+ uint8_t* extra_pos = (uchar*)extra->data;
uint8_t operation = extra_pos[0];
int error;
switch (operation) {
case UPDATE_OP_COL_ADD_OR_DROP:
- error = tokudb_hcad_update_fun(db, key, old_val, extra, set_val, set_extra);
+ error = tokudb_hcad_update_fun(
+ db,
+ key,
+ old_val,
+ extra,
+ set_val,
+ set_extra);
break;
case UPDATE_OP_EXPAND_VARIABLE_OFFSETS:
- error = tokudb_expand_variable_offsets(db, key, old_val, extra, set_val, set_extra);
+ error = tokudb_expand_variable_offsets(
+ db,
+ key,
+ old_val,
+ extra,
+ set_val,
+ set_extra);
break;
case UPDATE_OP_EXPAND_INT:
case UPDATE_OP_EXPAND_UINT:
- error = tokudb_expand_int_field(db, key, old_val, extra, set_val, set_extra);
+ error = tokudb_expand_int_field(
+ db,
+ key,
+ old_val,
+ extra,
+ set_val,
+ set_extra);
break;
case UPDATE_OP_EXPAND_CHAR:
case UPDATE_OP_EXPAND_BINARY:
- error = tokudb_expand_char_field(db, key, old_val, extra, set_val, set_extra);
+ error = tokudb_expand_char_field(
+ db,
+ key,
+ old_val,
+ extra,
+ set_val,
+ set_extra);
break;
case UPDATE_OP_EXPAND_BLOB:
- error = tokudb_expand_blobs(db, key, old_val, extra, set_val, set_extra);
+ error = tokudb_expand_blobs(
+ db,
+ key,
+ old_val,
+ extra,
+ set_val,
+ set_extra);
break;
case UPDATE_OP_UPDATE_1:
- error = tokudb_update_1_fun(db, key, old_val, extra, set_val, set_extra);
+ error = tokudb_update_1_fun(
+ db,
+ key,
+ old_val,
+ extra,
+ set_val,
+ set_extra);
break;
case UPDATE_OP_UPSERT_1:
- error = tokudb_upsert_1_fun(db, key, old_val, extra, set_val, set_extra);
+ error = tokudb_upsert_1_fun(
+ db,
+ key,
+ old_val,
+ extra,
+ set_val,
+ set_extra);
break;
case UPDATE_OP_UPDATE_2:
- error = tokudb_update_2_fun(db, key, old_val, extra, set_val, set_extra);
+ error = tokudb_update_2_fun(
+ db,
+ key,
+ old_val,
+ extra,
+ set_val,
+ set_extra);
break;
case UPDATE_OP_UPSERT_2:
- error = tokudb_upsert_2_fun(db, key, old_val, extra, set_val, set_extra);
+ error = tokudb_upsert_2_fun(
+ db,
+ key,
+ old_val,
+ extra,
+ set_val,
+ set_extra);
break;
default:
- assert(0);
- error = EINVAL;
- break;
+ assert_unreachable();
}
return error;
}
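For context, this dispatcher is what the handlerton installs on the shared environment at startup; a sketch of that hookup, assuming the TokuFT-specific set_update() environment extension this engine is built against:

    #include <db.h>  // assumed: TokuFT-patched header declaring set_update

    // Install the dispatcher once on the engine-wide DB_ENV; every
    // update/upsert message sent to any dictionary then lands in
    // tokudb_update_fun above.
    static void example_install_update_callback(DB_ENV* db_env) {
        db_env->set_update(db_env, tokudb_update_fun);
    }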
diff --git a/storage/xtradb/buf/buf0buf.cc b/storage/xtradb/buf/buf0buf.cc
index 556096ca7e4..19dd375f6e6 100644
--- a/storage/xtradb/buf/buf0buf.cc
+++ b/storage/xtradb/buf/buf0buf.cc
@@ -1544,6 +1544,7 @@ buf_pool_free_instance(
buf_chunk_t* chunk;
buf_chunk_t* chunks;
buf_page_t* bpage;
+ ulint i;
bpage = UT_LIST_GET_LAST(buf_pool->LRU);
while (bpage != NULL) {
@@ -1567,10 +1568,29 @@ buf_pool_free_instance(
mem_free(buf_pool->watch);
buf_pool->watch = NULL;
+ for (i = BUF_FLUSH_LRU; i < BUF_FLUSH_N_TYPES; i++) {
+ os_event_free(buf_pool->no_flush[i]);
+ }
+ mutex_free(&buf_pool->LRU_list_mutex);
+ mutex_free(&buf_pool->free_list_mutex);
+ mutex_free(&buf_pool->zip_free_mutex);
+ mutex_free(&buf_pool->zip_hash_mutex);
+ mutex_free(&buf_pool->zip_mutex);
+ mutex_free(&buf_pool->flush_state_mutex);
+ mutex_free(&buf_pool->flush_list_mutex);
+
chunks = buf_pool->chunks;
chunk = chunks + buf_pool->n_chunks;
while (--chunk >= chunks) {
+ buf_block_t* block = chunk->blocks;
+ for (i = 0; i < chunk->size; i++, block++) {
+ mutex_free(&block->mutex);
+ rw_lock_free(&block->lock);
+#ifdef UNIV_SYNC_DEBUG
+ rw_lock_free(&block->debug_latch);
+#endif
+ }
os_mem_free_large(chunk->mem, chunk->mem_size);
}
diff --git a/storage/xtradb/buf/buf0lru.cc b/storage/xtradb/buf/buf0lru.cc
index b36136e4079..fdec0e48198 100644
--- a/storage/xtradb/buf/buf0lru.cc
+++ b/storage/xtradb/buf/buf0lru.cc
@@ -607,6 +607,7 @@ rescan:
bpage != NULL;
bpage = prev) {
+ ut_ad(!must_restart);
ut_a(buf_page_in_file(bpage));
/* Save the previous link because once we free the
@@ -624,9 +625,6 @@ rescan:
/* Remove was unsuccessful, we have to try again
by scanning the entire list from the end.
- This also means that we never released the
- flush list mutex. Therefore we can trust the prev
- pointer.
buf_flush_or_remove_page() released the
flush list mutex but not the LRU list mutex.
Therefore it is possible that a new page was
@@ -643,6 +641,11 @@ rescan:
iteration. */
all_freed = false;
+ if (UNIV_UNLIKELY(must_restart)) {
+
+ /* Cannot trust the prev pointer */
+ break;
+ }
} else if (flush) {
/* The processing was successful. And during the
@@ -650,12 +653,9 @@ rescan:
when calling buf_page_flush(). We cannot trust
prev pointer. */
goto rescan;
- } else if (UNIV_UNLIKELY(must_restart)) {
-
- ut_ad(!all_freed);
- break;
}
+ ut_ad(!must_restart);
++processed;
/* Yield if we have hogged the CPU and mutexes for too long. */
@@ -666,6 +666,11 @@ rescan:
/* Reset the batch size counter if we had to yield. */
processed = 0;
+ } else if (UNIV_UNLIKELY(must_restart)) {
+
+ /* Cannot trust the prev pointer */
+ all_freed = false;
+ break;
}
#ifdef DBUG_OFF
diff --git a/storage/xtradb/dict/dict0crea.cc b/storage/xtradb/dict/dict0crea.cc
index 68cbde8c342..e2c7355b2e4 100644
--- a/storage/xtradb/dict/dict0crea.cc
+++ b/storage/xtradb/dict/dict0crea.cc
@@ -1369,7 +1369,7 @@ dict_create_or_check_foreign_constraint_tables(void)
ib_logf(IB_LOG_LEVEL_WARN,
"Dropping incompletely created "
"SYS_FOREIGN table.");
- row_drop_table_for_mysql("SYS_FOREIGN", trx, TRUE);
+ row_drop_table_for_mysql("SYS_FOREIGN", trx, TRUE, TRUE);
}
if (sys_foreign_cols_err == DB_CORRUPTION) {
@@ -1377,7 +1377,7 @@ dict_create_or_check_foreign_constraint_tables(void)
"Dropping incompletely created "
"SYS_FOREIGN_COLS table.");
- row_drop_table_for_mysql("SYS_FOREIGN_COLS", trx, TRUE);
+ row_drop_table_for_mysql("SYS_FOREIGN_COLS", trx, TRUE, TRUE);
}
ib_logf(IB_LOG_LEVEL_WARN,
@@ -1431,8 +1431,8 @@ dict_create_or_check_foreign_constraint_tables(void)
ut_ad(err == DB_OUT_OF_FILE_SPACE
|| err == DB_TOO_MANY_CONCURRENT_TRXS);
- row_drop_table_for_mysql("SYS_FOREIGN", trx, TRUE);
- row_drop_table_for_mysql("SYS_FOREIGN_COLS", trx, TRUE);
+ row_drop_table_for_mysql("SYS_FOREIGN", trx, TRUE, TRUE);
+ row_drop_table_for_mysql("SYS_FOREIGN_COLS", trx, TRUE, TRUE);
if (err == DB_OUT_OF_FILE_SPACE) {
err = DB_MUST_GET_MORE_FILE_SPACE;
@@ -1856,7 +1856,7 @@ dict_create_or_check_sys_tablespace(void)
ib_logf(IB_LOG_LEVEL_WARN,
"Dropping incompletely created "
"SYS_TABLESPACES table.");
- row_drop_table_for_mysql("SYS_TABLESPACES", trx, TRUE);
+ row_drop_table_for_mysql("SYS_TABLESPACES", trx, TRUE, TRUE);
}
if (sys_datafiles_err == DB_CORRUPTION) {
@@ -1864,7 +1864,7 @@ dict_create_or_check_sys_tablespace(void)
"Dropping incompletely created "
"SYS_DATAFILES table.");
- row_drop_table_for_mysql("SYS_DATAFILES", trx, TRUE);
+ row_drop_table_for_mysql("SYS_DATAFILES", trx, TRUE, TRUE);
}
ib_logf(IB_LOG_LEVEL_INFO,
@@ -1900,8 +1900,8 @@ dict_create_or_check_sys_tablespace(void)
ut_a(err == DB_OUT_OF_FILE_SPACE
|| err == DB_TOO_MANY_CONCURRENT_TRXS);
- row_drop_table_for_mysql("SYS_TABLESPACES", trx, TRUE);
- row_drop_table_for_mysql("SYS_DATAFILES", trx, TRUE);
+ row_drop_table_for_mysql("SYS_TABLESPACES", trx, TRUE, TRUE);
+ row_drop_table_for_mysql("SYS_DATAFILES", trx, TRUE, TRUE);
if (err == DB_OUT_OF_FILE_SPACE) {
err = DB_MUST_GET_MORE_FILE_SPACE;
diff --git a/storage/xtradb/fil/fil0fil.cc b/storage/xtradb/fil/fil0fil.cc
index 9fad34257b8..b0695e2f260 100644
--- a/storage/xtradb/fil/fil0fil.cc
+++ b/storage/xtradb/fil/fil0fil.cc
@@ -1,6 +1,6 @@
/*****************************************************************************
-Copyright (c) 1995, 2015, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 1995, 2016, Oracle and/or its affiliates. All Rights Reserved.
Copyright (c) 2013, 2016, MariaDB Corporation. All Rights Reserved.
This program is free software; you can redistribute it and/or modify it under
@@ -5292,7 +5292,7 @@ retry:
if (posix_fallocate(node->handle, start_offset, len) == -1) {
ib_logf(IB_LOG_LEVEL_ERROR, "preallocating file "
"space for file \'%s\' failed. Current size "
- INT64PF ", desired size " INT64PF "\n",
+ INT64PF ", desired size " INT64PF,
node->name, start_offset, len+start_offset);
os_file_handle_error_no_exit(node->name, "posix_fallocate", FALSE, __FILE__, __LINE__);
success = FALSE;
@@ -6454,10 +6454,7 @@ fil_close(void)
{
fil_space_crypt_cleanup();
-#ifndef UNIV_HOTBACKUP
- /* The mutex should already have been freed. */
- ut_ad(fil_system->mutex.magic_n == 0);
-#endif /* !UNIV_HOTBACKUP */
+ mutex_free(&fil_system->mutex);
hash_table_free(fil_system->spaces);
@@ -7106,27 +7103,6 @@ fil_mtr_rename_log(
/*************************************************************************
functions to access is_corrupt flag of fil_space_t*/
-ibool
-fil_space_is_corrupt(
-/*=================*/
- ulint space_id)
-{
- fil_space_t* space;
- ibool ret = FALSE;
-
- mutex_enter(&fil_system->mutex);
-
- space = fil_space_get_by_id(space_id);
-
- if (UNIV_UNLIKELY(space && space->is_corrupt)) {
- ret = TRUE;
- }
-
- mutex_exit(&fil_system->mutex);
-
- return(ret);
-}
-
void
fil_space_set_corrupt(
/*==================*/
diff --git a/storage/xtradb/fts/fts0fts.cc b/storage/xtradb/fts/fts0fts.cc
index 712dfa264d4..a2e8a3d90ac 100644
--- a/storage/xtradb/fts/fts0fts.cc
+++ b/storage/xtradb/fts/fts0fts.cc
@@ -1,6 +1,6 @@
/*****************************************************************************
-Copyright (c) 2011, 2015, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 2011, 2016, Oracle and/or its affiliates. All Rights Reserved.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -260,16 +260,18 @@ static const char* fts_config_table_insert_values_sql =
"INSERT INTO \"%s\" VALUES ('"
FTS_TABLE_STATE "', '0');\n";
-/****************************************************************//**
-Run SYNC on the table, i.e., write out data from the cache to the
+/** Run SYNC on the table, i.e., write out data from the cache to the
FTS auxiliary INDEX table and clear the cache at the end.
-@return DB_SUCCESS if all OK */
+@param[in,out] sync sync state
+@param[in]	unlock_cache	whether to unlock the cache lock when writing a node
+@param[in]	wait		whether to wait if a sync is already in progress
+@return DB_SUCCESS if all OK */
static
dberr_t
fts_sync(
-/*=====*/
- fts_sync_t* sync) /*!< in: sync state */
- __attribute__((nonnull));
+ fts_sync_t* sync,
+ bool unlock_cache,
+ bool wait);
/****************************************************************//**
Release all resources held by the words rb tree, e.g., the node ilist. */
@@ -653,6 +655,7 @@ fts_cache_create(
mem_heap_zalloc(heap, sizeof(fts_sync_t)));
cache->sync->table = table;
+ cache->sync->event = os_event_create();
/* Create the index cache vector that will hold the inverted indexes. */
cache->indexes = ib_vector_create(
@@ -1207,6 +1210,7 @@ fts_cache_destroy(
mutex_free(&cache->optimize_lock);
mutex_free(&cache->deleted_lock);
mutex_free(&cache->doc_id_lock);
+ os_event_free(cache->sync->event);
if (cache->stopword_info.cached_stopword) {
rbt_free(cache->stopword_info.cached_stopword);
@@ -1435,7 +1439,7 @@ fts_cache_add_doc(
ib_vector_last(word->nodes));
}
- if (fts_node == NULL
+ if (fts_node == NULL || fts_node->synced
|| fts_node->ilist_size > FTS_ILIST_MAX_SIZE
|| doc_id < fts_node->last_doc_id) {
@@ -1915,7 +1919,7 @@ func_exit:
trx_rollback_to_savepoint(trx, NULL);
- row_drop_table_for_mysql(table->name, trx, FALSE);
+ row_drop_table_for_mysql(table->name, trx, FALSE, TRUE);
trx->error_state = DB_SUCCESS;
}
@@ -2067,7 +2071,7 @@ fts_create_index_tables_low(
trx_rollback_to_savepoint(trx, NULL);
- row_drop_table_for_mysql(table_name, trx, FALSE);
+ row_drop_table_for_mysql(table_name, trx, FALSE, TRUE);
trx->error_state = DB_SUCCESS;
}
@@ -2886,35 +2890,28 @@ fts_doc_ids_free(
}
/*********************************************************************//**
-Do commit-phase steps necessary for the insertion of a new row.
-@return DB_SUCCESS or error code */
-static __attribute__((nonnull, warn_unused_result))
-dberr_t
+Do commit-phase steps necessary for the insertion of a new row. */
+void
fts_add(
/*====*/
fts_trx_table_t*ftt, /*!< in: FTS trx table */
fts_trx_row_t* row) /*!< in: row */
{
dict_table_t* table = ftt->table;
- dberr_t error = DB_SUCCESS;
doc_id_t doc_id = row->doc_id;
ut_a(row->state == FTS_INSERT || row->state == FTS_MODIFY);
fts_add_doc_by_id(ftt, doc_id, row->fts_indexes);
- if (error == DB_SUCCESS) {
- mutex_enter(&table->fts->cache->deleted_lock);
- ++table->fts->cache->added;
- mutex_exit(&table->fts->cache->deleted_lock);
+ mutex_enter(&table->fts->cache->deleted_lock);
+ ++table->fts->cache->added;
+ mutex_exit(&table->fts->cache->deleted_lock);
- if (!DICT_TF2_FLAG_IS_SET(table, DICT_TF2_FTS_HAS_DOC_ID)
- && doc_id >= table->fts->cache->next_doc_id) {
- table->fts->cache->next_doc_id = doc_id + 1;
- }
+ if (!DICT_TF2_FLAG_IS_SET(table, DICT_TF2_FTS_HAS_DOC_ID)
+ && doc_id >= table->fts->cache->next_doc_id) {
+ table->fts->cache->next_doc_id = doc_id + 1;
}
-
- return(error);
}
/*********************************************************************//**
@@ -3025,7 +3022,7 @@ fts_modify(
error = fts_delete(ftt, row);
if (error == DB_SUCCESS) {
- error = fts_add(ftt, row);
+ fts_add(ftt, row);
}
return(error);
@@ -3114,7 +3111,7 @@ fts_commit_table(
switch (row->state) {
case FTS_INSERT:
- error = fts_add(ftt, row);
+ fts_add(ftt, row);
break;
case FTS_MODIFY:
@@ -3554,16 +3551,34 @@ fts_add_doc_by_id(
get_doc->index_cache,
doc_id, doc.tokens);
+ bool need_sync = false;
+ if ((cache->total_size > fts_max_cache_size / 10
+ || fts_need_sync)
+ && !cache->sync->in_progress) {
+ need_sync = true;
+ }
+
rw_lock_x_unlock(&table->fts->cache->lock);
DBUG_EXECUTE_IF(
"fts_instrument_sync",
- fts_sync(cache->sync);
+ fts_optimize_request_sync_table(table);
+ os_event_wait(cache->sync->event);
+ );
+
+ DBUG_EXECUTE_IF(
+ "fts_instrument_sync_debug",
+ fts_sync(cache->sync, true, true);
);
- if (cache->total_size > fts_max_cache_size
- || fts_need_sync) {
- fts_sync(cache->sync);
+ DEBUG_SYNC_C("fts_instrument_sync_request");
+ DBUG_EXECUTE_IF(
+ "fts_instrument_sync_request",
+ fts_optimize_request_sync_table(table);
+ );
+
+ if (need_sync) {
+ fts_optimize_request_sync_table(table);
}
mtr_start(&mtr);
@@ -3934,16 +3949,17 @@ fts_sync_add_deleted_cache(
return(error);
}
-/*********************************************************************//**
-Write the words and ilist to disk.
+/** Write the words and ilist to disk.
+@param[in,out] trx transaction
+@param[in] index_cache index cache
+@param[in]	unlock_cache	whether to unlock the cache when writing a node
@return DB_SUCCESS if all went well else error code */
static __attribute__((nonnull, warn_unused_result))
dberr_t
fts_sync_write_words(
-/*=================*/
- trx_t* trx, /*!< in: transaction */
- fts_index_cache_t*
- index_cache) /*!< in: index cache */
+ trx_t* trx,
+ fts_index_cache_t* index_cache,
+ bool unlock_cache)
{
fts_table_t fts_table;
ulint n_nodes = 0;
@@ -3951,8 +3967,8 @@ fts_sync_write_words(
const ib_rbt_node_t* rbt_node;
dberr_t error = DB_SUCCESS;
ibool print_error = FALSE;
-#ifdef FTS_DOC_STATS_DEBUG
dict_table_t* table = index_cache->index->table;
+#ifdef FTS_DOC_STATS_DEBUG
ulint n_new_words = 0;
#endif /* FTS_DOC_STATS_DEBUG */
@@ -3965,7 +3981,7 @@ fts_sync_write_words(
since we want to free the memory used during caching. */
for (rbt_node = rbt_first(index_cache->words);
rbt_node;
- rbt_node = rbt_first(index_cache->words)) {
+ rbt_node = rbt_next(index_cache->words, rbt_node)) {
ulint i;
ulint selected;
@@ -3998,27 +4014,47 @@ fts_sync_write_words(
}
#endif /* FTS_DOC_STATS_DEBUG */
- n_nodes += ib_vector_size(word->nodes);
-
- /* We iterate over all the nodes even if there was an error,
- this is to free the memory of the fts_node_t elements. */
+ /* We iterate over all the nodes even if there was an error */
for (i = 0; i < ib_vector_size(word->nodes); ++i) {
fts_node_t* fts_node = static_cast<fts_node_t*>(
ib_vector_get(word->nodes, i));
+ if (fts_node->synced) {
+ continue;
+ } else {
+ fts_node->synced = true;
+ }
+
+			/* FIXME: we need to handle the error properly. */
if (error == DB_SUCCESS) {
+ if (unlock_cache) {
+ rw_lock_x_unlock(
+ &table->fts->cache->lock);
+ }
error = fts_write_node(
trx,
&index_cache->ins_graph[selected],
&fts_table, &word->text, fts_node);
- }
- ut_free(fts_node->ilist);
- fts_node->ilist = NULL;
+ DEBUG_SYNC_C("fts_write_node");
+ DBUG_EXECUTE_IF("fts_write_node_crash",
+ DBUG_SUICIDE(););
+
+ DBUG_EXECUTE_IF("fts_instrument_sync_sleep",
+ os_thread_sleep(1000000);
+ );
+
+ if (unlock_cache) {
+ rw_lock_x_lock(
+ &table->fts->cache->lock);
+ }
+ }
}
+ n_nodes += ib_vector_size(word->nodes);
+
if (error != DB_SUCCESS && !print_error) {
ut_print_timestamp(stderr);
fprintf(stderr, " InnoDB: Error (%s) writing "
@@ -4027,9 +4063,6 @@ fts_sync_write_words(
print_error = TRUE;
}
-
- /* NOTE: We are responsible for free'ing the node */
- ut_free(rbt_remove_node(index_cache->words, rbt_node));
}
#ifdef FTS_DOC_STATS_DEBUG
@@ -4330,7 +4363,7 @@ fts_sync_index(
ut_ad(rbt_validate(index_cache->words));
- error = fts_sync_write_words(trx, index_cache);
+ error = fts_sync_write_words(sync->trx, index_cache, sync->unlock_cache);
#ifdef FTS_DOC_STATS_DEBUG
/* FTS_RESOLVE: the word counter info in auxiliary table "DOC_ID"
@@ -4346,6 +4379,36 @@ fts_sync_index(
return(error);
}
+/** Check if index cache has been synced completely
+@param[in,out] sync sync state
+@param[in,out] index_cache index cache
+@return true if index is synced, otherwise false. */
+static
+bool
+fts_sync_index_check(
+ fts_sync_t* sync,
+ fts_index_cache_t* index_cache)
+{
+ const ib_rbt_node_t* rbt_node;
+
+ for (rbt_node = rbt_first(index_cache->words);
+ rbt_node != NULL;
+ rbt_node = rbt_next(index_cache->words, rbt_node)) {
+
+ fts_tokenizer_word_t* word;
+ word = rbt_value(fts_tokenizer_word_t, rbt_node);
+
+ fts_node_t* fts_node;
+ fts_node = static_cast<fts_node_t*>(ib_vector_last(word->nodes));
+
+ if (!fts_node->synced) {
+ return(false);
+ }
+ }
+
+ return(true);
+}
+
/*********************************************************************//**
Commit the SYNC, change state of processed doc ids etc.
@return DB_SUCCESS if all OK */
@@ -4422,21 +4485,53 @@ fts_sync_rollback(
trx_t* trx = sync->trx;
fts_cache_t* cache = sync->table->fts->cache;
+ for (ulint i = 0; i < ib_vector_size(cache->indexes); ++i) {
+ ulint j;
+ fts_index_cache_t* index_cache;
+
+ index_cache = static_cast<fts_index_cache_t*>(
+ ib_vector_get(cache->indexes, i));
+
+ for (j = 0; fts_index_selector[j].value; ++j) {
+
+ if (index_cache->ins_graph[j] != NULL) {
+
+ fts_que_graph_free_check_lock(
+ NULL, index_cache,
+ index_cache->ins_graph[j]);
+
+ index_cache->ins_graph[j] = NULL;
+ }
+
+ if (index_cache->sel_graph[j] != NULL) {
+
+ fts_que_graph_free_check_lock(
+ NULL, index_cache,
+ index_cache->sel_graph[j]);
+
+ index_cache->sel_graph[j] = NULL;
+ }
+ }
+ }
+
rw_lock_x_unlock(&cache->lock);
fts_sql_rollback(trx);
trx_free_for_background(trx);
}
-/****************************************************************//**
-Run SYNC on the table, i.e., write out data from the cache to the
+/** Run SYNC on the table, i.e., write out data from the cache to the
FTS auxiliary INDEX table and clear the cache at the end.
+@param[in,out] sync sync state
+@param[in]	unlock_cache	whether to unlock the cache lock when writing a node
+@param[in]	wait		whether to wait if a sync is already in progress
@return DB_SUCCESS if all OK */
static
dberr_t
fts_sync(
-/*=====*/
- fts_sync_t* sync) /*!< in: sync state */
+ fts_sync_t* sync,
+ bool unlock_cache,
+ bool wait)
{
ulint i;
dberr_t error = DB_SUCCESS;
@@ -4444,8 +4539,35 @@ fts_sync(
rw_lock_x_lock(&cache->lock);
+	/* Check if the cache is being synced.
+	Note: we release the cache lock in fts_sync_write_words() to
+	avoid a long wait for the lock by other threads. */
+ while (sync->in_progress) {
+ rw_lock_x_unlock(&cache->lock);
+
+ if (wait) {
+ os_event_wait(sync->event);
+ } else {
+ return(DB_SUCCESS);
+ }
+
+ rw_lock_x_lock(&cache->lock);
+ }
+
+ sync->unlock_cache = unlock_cache;
+ sync->in_progress = true;
+
+ DEBUG_SYNC_C("fts_sync_begin");
fts_sync_begin(sync);
+begin_sync:
+ if (cache->total_size > fts_max_cache_size) {
+		/* Avoid the case where the sync never finishes
+		when inserts/updates keep coming. */
+ ut_ad(sync->unlock_cache);
+ sync->unlock_cache = false;
+ }
+
for (i = 0; i < ib_vector_size(cache->indexes); ++i) {
fts_index_cache_t* index_cache;
@@ -4460,21 +4582,43 @@ fts_sync(
if (error != DB_SUCCESS && !sync->interrupted) {
- break;
+ goto end_sync;
}
}
DBUG_EXECUTE_IF("fts_instrument_sync_interrupted",
sync->interrupted = true;
error = DB_INTERRUPTED;
+ goto end_sync;
);
+ /* Make sure all the caches are synced. */
+ for (i = 0; i < ib_vector_size(cache->indexes); ++i) {
+ fts_index_cache_t* index_cache;
+
+ index_cache = static_cast<fts_index_cache_t*>(
+ ib_vector_get(cache->indexes, i));
+
+ if (index_cache->index->to_be_dropped
+ || fts_sync_index_check(sync, index_cache)) {
+ continue;
+ }
+
+ goto begin_sync;
+ }
+
+end_sync:
if (error == DB_SUCCESS && !sync->interrupted) {
error = fts_sync_commit(sync);
} else {
fts_sync_rollback(sync);
}
+ rw_lock_x_lock(&cache->lock);
+ sync->in_progress = false;
+ os_event_set(sync->event);
+ rw_lock_x_unlock(&cache->lock);
+
/* We need to check whether an optimize is required, for that
we make copies of the two variables that control the trigger. These
variables can change behind our back and we don't want to hold the
@@ -4489,21 +4633,25 @@ fts_sync(
return(error);
}
-/****************************************************************//**
-Run SYNC on the table, i.e., write out data from the cache to the
-FTS auxiliary INDEX table and clear the cache at the end. */
+/** Run SYNC on the table, i.e., write out data from the cache to the
+FTS auxiliary INDEX table and clear the cache at the end.
+@param[in,out] table fts table
+@param[in]	unlock_cache	whether to unlock the cache when writing a node
+@param[in]	wait		whether to wait for an existing sync to finish
+@return DB_SUCCESS on success, error code on failure. */
UNIV_INTERN
dberr_t
fts_sync_table(
-/*===========*/
- dict_table_t* table) /*!< in: table */
+ dict_table_t* table,
+ bool unlock_cache,
+ bool wait)
{
dberr_t err = DB_SUCCESS;
ut_ad(table->fts);
if (!dict_table_is_discarded(table) && table->fts->cache) {
- err = fts_sync(table->fts->cache->sync);
+ err = fts_sync(table->fts->cache->sync, unlock_cache, wait);
}
return(err);
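
[Editor's note] Taken together, fts_sync() now implements a classic single-flusher
protocol: at most one thread runs the SYNC, late arrivals either wait on sync->event or
return immediately, and the flusher loops back to begin_sync until every index cache
reports fully synced. A condition-variable sketch of the same protocol, under the
assumption that std::condition_variable stands in for the os_event_t used by InnoDB
(all names hypothetical):

#include <condition_variable>
#include <mutex>

struct SyncState {
	std::mutex              lock;   // stands in for cache->lock
	std::condition_variable event;  // stands in for sync->event
	bool                    in_progress = false;
};

bool cache_fully_synced() { return true; }  // assumed predicate
void sync_one_pass() {}                     // assumed: one flush pass

void sync(SyncState& s, bool wait)
{
	std::unique_lock<std::mutex> guard(s.lock);
	while (s.in_progress) {
		if (!wait) return;          // fts_sync(..., wait=false)
		s.event.wait(guard);
	}
	s.in_progress = true;
	do {
		guard.unlock();
		sync_one_pass();            // may itself unlock/relock the cache
		guard.lock();
	} while (!cache_fully_synced());    // the "goto begin_sync" equivalent
	s.in_progress = false;
	s.event.notify_all();               // os_event_set(sync->event)
}
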
diff --git a/storage/xtradb/fts/fts0opt.cc b/storage/xtradb/fts/fts0opt.cc
index 00b3b4682c3..ccb7090c61d 100644
--- a/storage/xtradb/fts/fts0opt.cc
+++ b/storage/xtradb/fts/fts0opt.cc
@@ -1,6 +1,6 @@
/*****************************************************************************
-Copyright (c) 2007, 2015, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 2007, 2016, Oracle and/or its affiliates. All Rights Reserved.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -87,6 +87,7 @@ enum fts_msg_type_t {
FTS_MSG_DEL_TABLE, /*!< Remove a table from the optimize
threads work queue */
+ FTS_MSG_SYNC_TABLE /*!< Sync fts cache of a table */
};
/** Compressed list of words that have been read from FTS INDEX
@@ -2652,6 +2653,39 @@ fts_optimize_remove_table(
os_event_free(event);
}
+/** Request a SYNC of the FTS cache for the table.
+@param[in] table table to sync */
+UNIV_INTERN
+void
+fts_optimize_request_sync_table(
+ dict_table_t* table)
+{
+ fts_msg_t* msg;
+ table_id_t* table_id;
+
+	/* If the optimize system is not yet initialized, return. */
+ if (!fts_optimize_wq) {
+ return;
+ }
+
+	/* The FTS optimize thread has already exited. */
+ if (fts_opt_start_shutdown) {
+ ib_logf(IB_LOG_LEVEL_INFO,
+			"Trying to sync table %s after the FTS optimize"
+			" thread has exited.", table->name);
+ return;
+ }
+
+ msg = fts_optimize_create_msg(FTS_MSG_SYNC_TABLE, NULL);
+
+ table_id = static_cast<table_id_t*>(
+ mem_heap_alloc(msg->heap, sizeof(table_id_t)));
+ *table_id = table->id;
+ msg->ptr = table_id;
+
+ ib_wqueue_add(fts_optimize_wq, msg, msg->heap);
+}
+
/**********************************************************************//**
Find the slot for a particular table.
@return slot if found else NULL. */
@@ -2932,6 +2966,25 @@ fts_optimize_need_sync(
}
#endif
+/** Sync the FTS cache of a table.
+@param[in] table_id table id */
+void
+fts_optimize_sync_table(
+ table_id_t table_id)
+{
+ dict_table_t* table = NULL;
+
+ table = dict_table_open_on_id(table_id, FALSE, DICT_TABLE_OP_NORMAL);
+
+ if (table) {
+ if (dict_table_has_fts_index(table) && table->fts->cache) {
+ fts_sync_table(table, true, false);
+ }
+
+ dict_table_close(table, FALSE, FALSE);
+ }
+}
+
/**********************************************************************//**
Optimize all FTS tables.
@return Dummy return */
@@ -3053,6 +3106,11 @@ fts_optimize_thread(
((fts_msg_del_t*) msg->ptr)->event);
break;
+ case FTS_MSG_SYNC_TABLE:
+ fts_optimize_sync_table(
+ *static_cast<table_id_t*>(msg->ptr));
+ break;
+
default:
ut_error;
}
@@ -3079,26 +3137,7 @@ fts_optimize_thread(
ib_vector_get(tables, i));
if (slot->state != FTS_STATE_EMPTY) {
- dict_table_t* table = NULL;
-
- /*slot->table may be freed, so we try to open
- table by slot->table_id.*/
- table = dict_table_open_on_id(
- slot->table_id, FALSE,
- DICT_TABLE_OP_NORMAL);
-
- if (table) {
-
- if (dict_table_has_fts_index(table)) {
- fts_sync_table(table);
- }
-
- if (table->fts) {
- fts_free(table);
- }
-
- dict_table_close(table, FALSE, FALSE);
- }
+ fts_optimize_sync_table(slot->table_id);
}
}
}
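
[Editor's note] fts_optimize_request_sync_table() hands the work to the optimize thread
through its queue, copying only the table id into the message heap because the
dict_table_t pointer may be evicted before the message is consumed; the consumer then
re-opens the table by id. A rough sketch of that handoff, with std::queue standing in
for ib_wqueue_t (all names hypothetical):

#include <cstdint>
#include <mutex>
#include <queue>

enum MsgType { MSG_SYNC_TABLE };
struct Msg { MsgType type; std::uint64_t table_id; };  // an id, not a pointer

std::mutex      wq_lock;
std::queue<Msg> wq;                  // stands in for fts_optimize_wq

void request_sync(std::uint64_t table_id)
{
	std::lock_guard<std::mutex> g(wq_lock);
	wq.push(Msg{MSG_SYNC_TABLE, table_id});   // enqueue by value
}

void consume_one()
{
	Msg m{MSG_SYNC_TABLE, 0};
	{
		std::lock_guard<std::mutex> g(wq_lock);
		if (wq.empty()) return;
		m = wq.front();
		wq.pop();
	}
	// Re-open by id, as fts_optimize_sync_table() does with
	// dict_table_open_on_id(); the original pointer may be long gone.
}
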
diff --git a/storage/xtradb/ha/ha0ha.cc b/storage/xtradb/ha/ha0ha.cc
index b79ae922045..3674260f173 100644
--- a/storage/xtradb/ha/ha0ha.cc
+++ b/storage/xtradb/ha/ha0ha.cc
@@ -155,11 +155,15 @@ ha_clear(
switch (table->type) {
case HASH_TABLE_SYNC_MUTEX:
+ for (ulint i = 0; i < table->n_sync_obj; i++)
+ mutex_free(table->sync_obj.mutexes + i);
mem_free(table->sync_obj.mutexes);
table->sync_obj.mutexes = NULL;
break;
case HASH_TABLE_SYNC_RW_LOCK:
+ for (ulint i = 0; i < table->n_sync_obj; i++)
+ rw_lock_free(table->sync_obj.rw_locks + i);
mem_free(table->sync_obj.rw_locks);
table->sync_obj.rw_locks = NULL;
break;
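
[Editor's note] The ha_clear() fix above is a reminder that freeing the backing array is
not enough when each element owns an OS resource (a mutex or rw-lock); each element must
be destroyed first, or the OS objects leak. The same shape in portable C
(hypothetical types, not the InnoDB API):

#include <pthread.h>
#include <cstdlib>

struct table_t {
	pthread_mutex_t* mutexes;    // assumed initialized elsewhere
	size_t           n_sync_obj;
};

void clear(table_t* t)
{
	for (size_t i = 0; i < t->n_sync_obj; i++)
		pthread_mutex_destroy(&t->mutexes[i]);  // release OS resources
	free(t->mutexes);                               // then the array itself
	t->mutexes = nullptr;
}
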
diff --git a/storage/xtradb/handler/ha_innodb.cc b/storage/xtradb/handler/ha_innodb.cc
index 2406ed96716..5e736adb836 100644
--- a/storage/xtradb/handler/ha_innodb.cc
+++ b/storage/xtradb/handler/ha_innodb.cc
@@ -1,6 +1,6 @@
/*****************************************************************************
-Copyright (c) 2000, 2015, Oracle and/or its affiliates.
+Copyright (c) 2000, 2016, Oracle and/or its affiliates.
Copyright (c) 2013, 2016, MariaDB Corporation.
Copyright (c) 2008, 2009 Google Inc.
Copyright (c) 2009, Percona Inc.
@@ -4256,6 +4256,16 @@ innobase_change_buffering_inited_ok:
innobase_open_files = tc_size;
}
}
+
+ if (innobase_open_files > (long) open_files_limit) {
+ fprintf(stderr,
+ "innodb_open_files should not be greater"
+ " than the open_files_limit.\n");
+ if (innobase_open_files > (long) tc_size) {
+ innobase_open_files = tc_size;
+ }
+ }
+
srv_max_n_open_files = (ulint) innobase_open_files;
srv_innodb_status = (ibool) innobase_create_status_file;
@@ -13080,7 +13090,8 @@ ha_innobase::delete_table(
/* Drop the table in InnoDB */
err = row_drop_table_for_mysql(
- norm_name, trx, thd_sql_command(thd) == SQLCOM_DROP_DB);
+ norm_name, trx, thd_sql_command(thd) == SQLCOM_DROP_DB,
+ FALSE);
if (err == DB_TABLE_NOT_FOUND
@@ -13111,7 +13122,8 @@ ha_innobase::delete_table(
#endif
err = row_drop_table_for_mysql(
par_case_name, trx,
- thd_sql_command(thd) == SQLCOM_DROP_DB);
+ thd_sql_command(thd) == SQLCOM_DROP_DB,
+ FALSE);
}
}
@@ -14436,7 +14448,7 @@ ha_innobase::optimize(
if (innodb_optimize_fulltext_only) {
if (prebuilt->table->fts && prebuilt->table->fts->cache
&& !dict_table_is_discarded(prebuilt->table)) {
- fts_sync_table(prebuilt->table);
+ fts_sync_table(prebuilt->table, false, true);
fts_optimize_table(prebuilt->table);
}
return(HA_ADMIN_OK);
@@ -14498,6 +14510,34 @@ ha_innobase::check(
DBUG_RETURN(HA_ADMIN_CORRUPT);
}
+ if (prebuilt->table->corrupted) {
+ char index_name[MAX_FULL_NAME_LEN + 1];
+		/* If some previous operation has marked the table as
+		corrupted in memory, but has not propagated the flag to
+		the clustered index, we do so here. */
+ index = dict_table_get_first_index(prebuilt->table);
+
+ if (!dict_index_is_corrupted(index)) {
+ row_mysql_lock_data_dictionary(prebuilt->trx);
+ dict_set_corrupted(index, prebuilt->trx, "CHECK TABLE");
+ row_mysql_unlock_data_dictionary(prebuilt->trx);
+ }
+
+ innobase_format_name(index_name, sizeof index_name,
+ index->name, TRUE);
+
+ push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN,
+ HA_ERR_INDEX_CORRUPT,
+ "InnoDB: Index %s is marked as"
+ " corrupted", index_name);
+
+ /* Now that the table is already marked as corrupted,
+ there is no need to check any index of this table */
+ prebuilt->trx->op_info = "";
+
+ DBUG_RETURN(HA_ADMIN_CORRUPT);
+ }
+
prebuilt->trx->op_info = "checking table";
old_isolation_level = prebuilt->trx->isolation_level;
@@ -14581,6 +14621,15 @@ ha_innobase::check(
prebuilt->index_usable = row_merge_is_index_usable(
prebuilt->trx, prebuilt->index);
+ DBUG_EXECUTE_IF(
+ "dict_set_index_corrupted",
+ if (!dict_index_is_clust(index)) {
+ prebuilt->index_usable = FALSE;
+ row_mysql_lock_data_dictionary(prebuilt->trx);
+				dict_set_corrupted(index, prebuilt->trx, "dict_set_index_corrupted");
+ row_mysql_unlock_data_dictionary(prebuilt->trx);
+ });
+
if (UNIV_UNLIKELY(!prebuilt->index_usable)) {
innobase_format_name(
index_name, sizeof index_name,
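
[Editor's note] The DBUG_EXECUTE_IF / DEBUG_SYNC_C hooks sprinkled through this merge are
compiled-in test points: a debug build activates them by keyword (in mysqltest, via
SET debug_dbug='+d,dict_set_index_corrupted') and they cost nothing when inactive. A toy
version of such a keyword-gated hook — a hypothetical macro, not the real DBUG
implementation:

#include <set>
#include <string>

// Toy stand-in for the DBUG keyword registry.
static std::set<std::string> active_keywords;

#define TOY_EXECUTE_IF(kw, code) \
	do { if (active_keywords.count(kw)) { code } } while (0)

int check_index(bool clustered)
{
	int usable = 1;
	TOY_EXECUTE_IF("dict_set_index_corrupted",
		if (!clustered) usable = 0;   // force the corrupted path
	);
	return usable;
}

int main()
{
	active_keywords.insert("dict_set_index_corrupted"); // like +d,...
	return check_index(false);    // exercises the injected branch
}
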
diff --git a/storage/xtradb/handler/handler0alter.cc b/storage/xtradb/handler/handler0alter.cc
index 1e57f6c5293..b2e351155a5 100644
--- a/storage/xtradb/handler/handler0alter.cc
+++ b/storage/xtradb/handler/handler0alter.cc
@@ -3940,6 +3940,24 @@ check_if_can_drop_indexes:
drop_index = NULL;
}
+	/* Check if any of the existing indexes is marked as corrupted,
+	and if so, refuse to add more indexes. */
+ if (ha_alter_info->handler_flags & Alter_inplace_info::ADD_INDEX) {
+ for (dict_index_t* index = dict_table_get_first_index(indexed_table);
+ index != NULL; index = dict_table_get_next_index(index)) {
+
+ if (!index->to_be_dropped && dict_index_is_corrupted(index)) {
+ char index_name[MAX_FULL_NAME_LEN + 1];
+
+ innobase_format_name(index_name, sizeof index_name,
+ index->name, TRUE);
+
+ my_error(ER_INDEX_CORRUPT, MYF(0), index_name);
+ DBUG_RETURN(true);
+ }
+ }
+ }
+
n_add_fk = 0;
if (ha_alter_info->handler_flags
diff --git a/storage/xtradb/ibuf/ibuf0ibuf.cc b/storage/xtradb/ibuf/ibuf0ibuf.cc
index bac2a92dd0b..4467b713c88 100644
--- a/storage/xtradb/ibuf/ibuf0ibuf.cc
+++ b/storage/xtradb/ibuf/ibuf0ibuf.cc
@@ -2917,7 +2917,7 @@ ibuf_contract_in_background(
sum_bytes += n_bytes;
sum_pages += n_pag2;
- srv_inc_activity_count();
+ srv_inc_activity_count(true);
}
return(sum_bytes);
diff --git a/storage/xtradb/include/fil0fil.h b/storage/xtradb/include/fil0fil.h
index 973882a50b4..fb9c79a3e5a 100644
--- a/storage/xtradb/include/fil0fil.h
+++ b/storage/xtradb/include/fil0fil.h
@@ -1281,11 +1281,6 @@ fil_system_hash_nodes(void);
/*************************************************************************
functions to access is_corrupt flag of fil_space_t*/
-ibool
-fil_space_is_corrupt(
-/*=================*/
- ulint space_id);
-
void
fil_space_set_corrupt(
/*==================*/
diff --git a/storage/xtradb/include/fts0fts.h b/storage/xtradb/include/fts0fts.h
index d54ed281d9a..9f7b0216d9b 100644
--- a/storage/xtradb/include/fts0fts.h
+++ b/storage/xtradb/include/fts0fts.h
@@ -1,6 +1,6 @@
/*****************************************************************************
-Copyright (c) 2011, 2015, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 2011, 2016, Oracle and/or its affiliates. All Rights Reserved.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -724,6 +724,13 @@ fts_optimize_remove_table(
/*======================*/
dict_table_t* table); /*!< in: table to remove */
+/** Request a SYNC of the FTS cache for the table.
+@param[in] table table to sync */
+UNIV_INTERN
+void
+fts_optimize_request_sync_table(
+ dict_table_t* table);
+
/**********************************************************************//**
Signal the optimize thread to prepare for shutdown. */
UNIV_INTERN
@@ -826,15 +833,18 @@ fts_drop_index_split_tables(
dict_index_t* index) /*!< in: fts instance */
__attribute__((nonnull, warn_unused_result));
-/****************************************************************//**
-Run SYNC on the table, i.e., write out data from the cache to the
-FTS auxiliary INDEX table and clear the cache at the end. */
+/** Run SYNC on the table, i.e., write out data from the cache to the
+FTS auxiliary INDEX table and clear the cache at the end.
+@param[in,out] table fts table
+@param[in]	unlock_cache	whether to unlock the cache when writing a node
+@param[in]	wait		whether to wait for an existing sync to finish
+@return DB_SUCCESS on success, error code on failure. */
UNIV_INTERN
dberr_t
fts_sync_table(
-/*===========*/
- dict_table_t* table) /*!< in: table */
- __attribute__((nonnull));
+ dict_table_t* table,
+ bool unlock_cache,
+ bool wait);
/****************************************************************//**
Free the query graph but check whether dict_sys->mutex is already
diff --git a/storage/xtradb/include/fts0types.h b/storage/xtradb/include/fts0types.h
index 64677428331..e495fe72a60 100644
--- a/storage/xtradb/include/fts0types.h
+++ b/storage/xtradb/include/fts0types.h
@@ -1,6 +1,6 @@
/*****************************************************************************
-Copyright (c) 2007, 2011, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 2007, 2016, Oracle and/or its affiliates. All Rights Reserved.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -122,7 +122,11 @@ struct fts_sync_t {
doc_id_t max_doc_id; /*!< The doc id at which the cache was
noted as being full, we use this to
set the upper_limit field */
- ib_time_t start_time; /*!< SYNC start time */
+ ib_time_t start_time; /*!< SYNC start time */
+	bool		in_progress;	/*!< whether a sync is in progress */
+	bool		unlock_cache;	/*!< whether to unlock the cache when
+					writing an FTS node */
+ os_event_t event; /*!< sync finish event */
};
/** The cache for the FTS system. It is a memory-based inverted index
@@ -165,7 +169,6 @@ struct fts_cache_t {
objects, they are recreated after
a SYNC is completed */
-
ib_alloc_t* self_heap; /*!< This heap is the heap out of
which an instance of the cache itself
was created. Objects created using
@@ -212,6 +215,7 @@ struct fts_node_t {
ulint ilist_size_alloc;
/*!< Allocated size of ilist in
bytes */
+	bool		synced;		/*!< whether the node has been synced to disk */
};
/** A tokenizer word. Contains information about one word. */
diff --git a/storage/xtradb/include/log0log.h b/storage/xtradb/include/log0log.h
index 69bc2705733..470274761fc 100644
--- a/storage/xtradb/include/log0log.h
+++ b/storage/xtradb/include/log0log.h
@@ -352,17 +352,6 @@ log_archive_do(
ulint* n_bytes);/*!< out: archive log buffer size, 0 if nothing to
archive */
/****************************************************************//**
-Writes the log contents to the archive up to the lsn when this function was
-called, and stops the archiving. When archiving is started again, the archived
-log file numbers start from a number one higher, so that the archiving will
-not write again to the archived log files which exist when this function
-returns.
-@return DB_SUCCESS or DB_ERROR */
-UNIV_INTERN
-ulint
-log_archive_stop(void);
-/*==================*/
-/****************************************************************//**
Starts again archiving which has been stopped.
@return DB_SUCCESS or DB_ERROR */
UNIV_INTERN
@@ -624,25 +613,14 @@ log_mem_free(void);
/*==============*/
/****************************************************************//**
-Safely reads the log_sys->tracked_lsn value. Uses atomic operations
-if available, otherwise this field is protected with the log system
-mutex. The writer counterpart function is log_set_tracked_lsn() in
-log0online.c.
+Safely reads the log_sys->tracked_lsn value. The writer counterpart function
+is log_set_tracked_lsn() in log0online.c.
@return log_sys->tracked_lsn value. */
UNIV_INLINE
lsn_t
log_get_tracked_lsn(void);
/*=====================*/
-/****************************************************************//**
-Unsafely reads the log_sys->tracked_lsn value. Uses atomic operations
-if available, or use dirty read. Use for printing only.
-
-@return log_sys->tracked_lsn value. */
-UNIV_INLINE
-lsn_t
-log_get_tracked_lsn_peek(void);
-/*==========================*/
extern log_t* log_sys;
diff --git a/storage/xtradb/include/log0log.ic b/storage/xtradb/include/log0log.ic
index ff5a83be249..70458fa546b 100644
--- a/storage/xtradb/include/log0log.ic
+++ b/storage/xtradb/include/log0log.ic
@@ -553,37 +553,15 @@ log_free_check(void)
#endif /* !UNIV_HOTBACKUP */
/****************************************************************//**
-Unsafely reads the log_sys->tracked_lsn value. Uses atomic operations
-if available, or use dirty read. Use for printing only.
+Safely reads the log_sys->tracked_lsn value. The writer counterpart function
+is log_set_tracked_lsn() in log0online.c.
@return log_sys->tracked_lsn value. */
UNIV_INLINE
lsn_t
-log_get_tracked_lsn_peek(void)
-/*==========================*/
-{
-#ifdef HAVE_ATOMIC_BUILTINS_64
- return os_atomic_increment_uint64(&log_sys->tracked_lsn, 0);
-#else
- return log_sys->tracked_lsn;
-#endif
-}
-
-/****************************************************************//**
-Safely reads the log_sys->tracked_lsn value. Uses atomic operations
-if available, otherwise this field is protected with the log system
-mutex. The writer counterpart function is log_set_tracked_lsn() in
-log0online.c.
-@return log_sys->tracked_lsn value. */
-UNIV_INLINE
-lsn_t
log_get_tracked_lsn(void)
/*=====================*/
{
-#ifdef HAVE_ATOMIC_BUILTINS_64
- return os_atomic_increment_uint64(&log_sys->tracked_lsn, 0);
-#else
- ut_ad(mutex_own(&(log_sys->mutex)));
+ os_rmb;
return log_sys->tracked_lsn;
-#endif
}
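
[Editor's note] log_get_tracked_lsn() used to read the 64-bit value with an atomic
add-of-zero; with a single writer it is enough to pair a plain store plus write barrier
(the os_wmb in log_set_tracked_lsn(), further below) with a read barrier plus plain load.
In C++11 terms this is release/acquire on an atomic; a sketch under that assumption:

#include <atomic>
#include <cstdint>

std::atomic<std::uint64_t> tracked_lsn{0};  // single writer, many readers

// Writer side: log_set_tracked_lsn() analogue (store, then os_wmb).
void set_tracked_lsn(std::uint64_t lsn)
{
	tracked_lsn.store(lsn, std::memory_order_release);
}

// Reader side: log_get_tracked_lsn() analogue (os_rmb, then load).
std::uint64_t get_tracked_lsn()
{
	return tracked_lsn.load(std::memory_order_acquire);
}
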
diff --git a/storage/xtradb/include/log0online.h b/storage/xtradb/include/log0online.h
index 1ef4df7d6da..67dc0d72b4b 100644
--- a/storage/xtradb/include/log0online.h
+++ b/storage/xtradb/include/log0online.h
@@ -73,20 +73,7 @@ UNIV_INTERN
ibool
log_online_purge_changed_page_bitmaps(
/*==================================*/
- ib_uint64_t lsn); /*!<in: LSN to purge files up to */
-
-/************************************************************//**
-Delete all the bitmap files for data less than the specified LSN.
-If called with lsn == 0 (i.e. set by RESET request) or
-IB_ULONGLONG_MAX, restart the bitmap file sequence, otherwise
-continue it.
-
-@return FALSE to indicate success, TRUE for failure. */
-UNIV_INTERN
-ibool
-log_online_purge_changed_page_bitmaps(
-/*==================================*/
- ib_uint64_t lsn); /*!<in: LSN to purge files up to */
+ lsn_t lsn); /*!<in: LSN to purge files up to */
#define LOG_BITMAP_ITERATOR_START_LSN(i) \
((i).start_lsn)
diff --git a/storage/xtradb/include/log0recv.h b/storage/xtradb/include/log0recv.h
index 8fc5daaef1d..1019f43f70c 100644
--- a/storage/xtradb/include/log0recv.h
+++ b/storage/xtradb/include/log0recv.h
@@ -101,15 +101,6 @@ UNIV_INLINE
ibool
recv_recovery_is_on(void);
/*=====================*/
-#ifdef UNIV_LOG_ARCHIVE
-/*******************************************************************//**
-Returns TRUE if recovery from backup is currently running.
-@return recv_recovery_from_backup_on */
-UNIV_INLINE
-ibool
-recv_recovery_from_backup_is_on(void);
-/*=================================*/
-#endif /* UNIV_LOG_ARCHIVE */
/************************************************************************//**
Applies the hashed log records to the page, if the page lsn is less than the
lsn of a log record. This can be called when a buffer page has just been
@@ -331,30 +322,6 @@ void
recv_apply_log_recs_for_backup(void);
/*================================*/
#endif
-#ifdef UNIV_LOG_ARCHIVE
-/********************************************************//**
-Recovers from archived log files, and also from log files, if they exist.
-@return error code or DB_SUCCESS */
-UNIV_INTERN
-dberr_t
-recv_recovery_from_archive_start(
-/*=============================*/
- lsn_t min_flushed_lsn,/*!< in: min flushed lsn field from the
- data files */
- lsn_t limit_lsn, /*!< in: recover up to this lsn if
- possible */
- lsn_t first_log_no); /*!< in: number of the first archived
- log file to use in the recovery; the
- file will be searched from
- INNOBASE_LOG_ARCH_DIR specified in
- server config file */
-/********************************************************//**
-Completes recovery from archive. */
-UNIV_INTERN
-void
-recv_recovery_from_archive_finish(void);
-/*===================================*/
-#endif /* UNIV_LOG_ARCHIVE */
/** Block of log record data */
struct recv_data_t{
diff --git a/storage/xtradb/include/log0recv.ic b/storage/xtradb/include/log0recv.ic
index 32c28dd03e6..b29272f4672 100644
--- a/storage/xtradb/include/log0recv.ic
+++ b/storage/xtradb/include/log0recv.ic
@@ -35,19 +35,3 @@ recv_recovery_is_on(void)
{
return(recv_recovery_on);
}
-
-#ifdef UNIV_LOG_ARCHIVE
-/** TRUE when applying redo log records from an archived log file */
-extern ibool recv_recovery_from_backup_on;
-
-/*******************************************************************//**
-Returns TRUE if recovery from backup is currently running.
-@return recv_recovery_from_backup_on */
-UNIV_INLINE
-ibool
-recv_recovery_from_backup_is_on(void)
-/*=================================*/
-{
- return(recv_recovery_from_backup_on);
-}
-#endif /* UNIV_LOG_ARCHIVE */
diff --git a/storage/xtradb/include/os0file.h b/storage/xtradb/include/os0file.h
index c890f96b0e1..453536beba4 100644
--- a/storage/xtradb/include/os0file.h
+++ b/storage/xtradb/include/os0file.h
@@ -451,12 +451,6 @@ os_get_os_version(void);
/*===================*/
#endif /* __WIN__ */
#ifndef UNIV_HOTBACKUP
-/****************************************************************//**
-Creates the seek mutexes used in positioned reads and writes. */
-UNIV_INTERN
-void
-os_io_init_simple(void);
-/*===================*/
/** Create a temporary file. This function is like tmpfile(3), but
diff --git a/storage/xtradb/include/os0sync.h b/storage/xtradb/include/os0sync.h
index db996b096bb..0f93f3ff074 100644
--- a/storage/xtradb/include/os0sync.h
+++ b/storage/xtradb/include/os0sync.h
@@ -1,6 +1,6 @@
/*****************************************************************************
-Copyright (c) 1995, 2015, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 1995, 2016, Oracle and/or its affiliates. All Rights Reserved.
Copyright (c) 2008, Google Inc.
Portions of this file contain modifications contributed and copyrighted by
@@ -42,7 +42,6 @@ Created 9/6/1995 Heikki Tuuri
|| defined _M_X64 || defined __WIN__
#define IB_STRONG_MEMORY_MODEL
-#undef HAVE_IB_GCC_ATOMIC_TEST_AND_SET // Quick-and-dirty fix for bug 1519094
#endif /* __i386__ || __x86_64__ || _M_IX86 || _M_X64 || __WIN__ */
@@ -94,16 +93,62 @@ struct os_event {
#endif
os_fast_mutex_t os_mutex; /*!< this mutex protects the next
fields */
- ibool is_set; /*!< this is TRUE when the event is
- in the signaled state, i.e., a thread
- does not stop if it tries to wait for
- this event */
- ib_int64_t signal_count; /*!< this is incremented each time
- the event becomes signaled */
+private:
+ /** Masks for the event signal count and set flag in the count_and_set
+ field */
+ static const ib_uint64_t count_mask = 0x7fffffffffffffffULL;
+ static const ib_uint64_t set_mask = 0x8000000000000000ULL;
+
+	/** The MSB is set whenever the event is in the signaled state,
+ i.e. a thread does not stop if it tries to wait for this event. Lower
+ bits are incremented each time the event becomes signaled. */
+ ib_uint64_t count_and_set;
+public:
os_cond_t cond_var; /*!< condition variable is used in
waiting for the event */
- UT_LIST_NODE_T(os_event_t) os_event_list;
- /*!< list of all created events */
+
+ /** Initialise count_and_set field */
+ void init_count_and_set(void)
+ {
+		/* We return this value in os_event_reset(), which can then
+		be passed to os_event_wait_low(). The value of zero
+ is reserved in os_event_wait_low() for the case when the
+ caller does not want to pass any signal_count value. To
+ distinguish between the two cases we initialize signal_count
+ to 1 here. */
+ count_and_set = 1;
+ }
+
+ /** Mark this event as set */
+ void set(void)
+ {
+ count_and_set |= set_mask;
+ }
+
+ /** Unmark this event as set */
+ void reset(void)
+ {
+ count_and_set &= count_mask;
+ }
+
+ /** Return true if this event is set */
+ bool is_set(void) const
+ {
+ return count_and_set & set_mask;
+ }
+
+ /** Bump signal count for this event */
+ void inc_signal_count(void)
+ {
+ ut_ad(static_cast<ib_uint64_t>(signal_count()) < count_mask);
+ count_and_set++;
+ }
+
+ /** Return how many times this event has been signalled */
+ ib_int64_t signal_count(void) const
+ {
+ return (count_and_set & count_mask);
+ }
};
/** Denotes an infinite delay for os_event_wait_time() */
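
[Editor's note] Folding is_set and signal_count into one 64-bit word halves the state
that has to stay consistent under the event's mutex: the MSB is the set flag, the low 63
bits are the counter. A standalone rendering of the same packing (masks copied from the
struct above; the class name is hypothetical):

#include <cassert>
#include <cstdint>

class EventWord {
	static const std::uint64_t count_mask = 0x7fffffffffffffffULL;
	static const std::uint64_t set_mask   = 0x8000000000000000ULL;
	std::uint64_t count_and_set = 1;  // 1, so 0 can mean "no count given"
public:
	void set()          { count_and_set |= set_mask;   }
	void reset()        { count_and_set &= count_mask; }
	bool is_set() const { return count_and_set & set_mask; }
	void inc_signal_count()
	{
		assert(signal_count() < count_mask);
		count_and_set++;  // never touches the MSB below 2^63
	}
	std::uint64_t signal_count() const { return count_and_set & count_mask; }
};

int main()
{
	EventWord e;
	e.set();
	e.inc_signal_count();
	assert(e.is_set() && e.signal_count() == 2);
	e.reset();
	assert(!e.is_set() && e.signal_count() == 2);
	return 0;
}
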
@@ -115,8 +160,7 @@ struct os_event {
/** Operating system mutex handle */
typedef struct os_mutex_t* os_ib_mutex_t;
-/** Mutex protecting counts and the event and OS 'slow' mutex lists */
-extern os_ib_mutex_t os_sync_mutex;
+// All the os_*_count variables are accessed atomically
/** This is incremented by 1 in os_thread_create and decremented by 1 in
os_thread_exit */
@@ -132,12 +176,15 @@ UNIV_INTERN
void
os_sync_init(void);
/*==============*/
-/*********************************************************//**
-Frees created events and OS 'slow' mutexes. */
+
+/** Create an event semaphore, i.e., a semaphore which may just have two
+states: signaled and nonsignaled. The created event is manual reset: it must be
+reset explicitly by calling os_event_reset().
+@param[in,out] event memory block where to create the event */
UNIV_INTERN
void
-os_sync_free(void);
-/*==============*/
+os_event_create(os_event_t event);
+
/*********************************************************//**
Creates an event semaphore, i.e., a semaphore which may just have two states:
signaled and nonsignaled. The created event is manual reset: it must be reset
@@ -173,7 +220,10 @@ UNIV_INTERN
void
os_event_free(
/*==========*/
- os_event_t event); /*!< in: event to free */
+ os_event_t event, /*!< in: event to free */
+ bool free_memory = true);
+ /*!< in: if true, deallocate the event memory
+ block too */
/**********************************************************//**
Waits for an event object until it is in the signaled state.
@@ -467,28 +517,7 @@ amount to decrement. */
# define os_atomic_decrement_uint64(ptr, amount) \
os_atomic_decrement(ptr, amount)
-# if defined(HAVE_IB_GCC_ATOMIC_TEST_AND_SET)
-
-/** Do an atomic test-and-set.
-@param[in,out] ptr Memory location to set to non-zero
-@return the previous value */
-inline
-lock_word_t
-os_atomic_test_and_set(volatile lock_word_t* ptr)
-{
- return(__atomic_test_and_set(ptr, __ATOMIC_ACQUIRE));
-}
-
-/** Do an atomic clear.
-@param[in,out] ptr Memory location to set to zero */
-inline
-void
-os_atomic_clear(volatile lock_word_t* ptr)
-{
- __atomic_clear(ptr, __ATOMIC_RELEASE);
-}
-
-# elif defined(HAVE_ATOMIC_BUILTINS)
+# if defined(HAVE_ATOMIC_BUILTINS)
/** Do an atomic test and set.
@param[in,out] ptr Memory location to set to non-zero
@@ -517,6 +546,27 @@ os_atomic_clear(volatile lock_word_t* ptr)
return(__sync_lock_test_and_set(ptr, 0));
}
+# elif defined(HAVE_IB_GCC_ATOMIC_TEST_AND_SET)
+
+/** Do an atomic test-and-set.
+@param[in,out] ptr Memory location to set to non-zero
+@return the previous value */
+inline
+lock_word_t
+os_atomic_test_and_set(volatile lock_word_t* ptr)
+{
+ return(__atomic_test_and_set(ptr, __ATOMIC_ACQUIRE));
+}
+
+/** Do an atomic clear.
+@param[in,out] ptr Memory location to set to zero */
+inline
+void
+os_atomic_clear(volatile lock_word_t* ptr)
+{
+ __atomic_clear(ptr, __ATOMIC_RELEASE);
+}
+
# else
# error "Unsupported platform"
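
[Editor's note] The hunk above reorders the #if ladder so the __sync builtins are
preferred over the __atomic test-and-set (which had been disabled by the quick fix for
bug 1519094, now also removed). Either primitive suffices to build a minimal spinlock;
a sketch using the GCC/Clang __sync builtins, mirroring os_atomic_test_and_set() and
os_atomic_clear() above:

typedef unsigned long lock_word_t;

static lock_word_t lock_word = 0;

void spin_lock(volatile lock_word_t* ptr)
{
	// __sync_lock_test_and_set returns the previous value;
	// 0 means we took the lock.
	while (__sync_lock_test_and_set(ptr, 1)) {
		// spin; a real implementation would pause/back off here
	}
}

void spin_unlock(volatile lock_word_t* ptr)
{
	__sync_lock_release(ptr);   // release-store of 0
}

int main()
{
	spin_lock(&lock_word);
	spin_unlock(&lock_word);
	return 0;
}
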
diff --git a/storage/xtradb/include/row0mysql.h b/storage/xtradb/include/row0mysql.h
index a0ff6cdad15..e6a201be7a5 100644
--- a/storage/xtradb/include/row0mysql.h
+++ b/storage/xtradb/include/row0mysql.h
@@ -491,6 +491,9 @@ row_drop_table_for_mysql(
const char* name, /*!< in: table name */
trx_t* trx, /*!< in: dictionary transaction handle */
bool drop_db,/*!< in: true=dropping whole database */
+	ibool		create_failed,/*!<in: TRUE=create table failed
+				      because of e.g. a foreign key
+				      column type mismatch. */
bool nonatomic = true)
/*!< in: whether it is permitted
to release and reacquire dict_operation_lock */
diff --git a/storage/xtradb/include/srv0srv.h b/storage/xtradb/include/srv0srv.h
index a2d300957ba..703b1471f81 100644
--- a/storage/xtradb/include/srv0srv.h
+++ b/storage/xtradb/include/srv0srv.h
@@ -480,8 +480,6 @@ extern ulong srv_innodb_stats_method;
#ifdef UNIV_LOG_ARCHIVE
extern ibool srv_log_archive_on;
-extern ibool srv_archive_recovery;
-extern ib_uint64_t srv_archive_recovery_limit_lsn;
#endif /* UNIV_LOG_ARCHIVE */
extern char* srv_file_flush_method_str;
@@ -954,19 +952,29 @@ ulint
srv_get_activity_count(void);
/*========================*/
/*******************************************************************//**
-Check if there has been any activity.
+Check if there has been any activity. Considers background change buffer
+merge as regular server activity unless a non-default
+old_ibuf_merge_activity_count value is passed, in which case the merge will be
+treated as keeping the server idle.
@return FALSE if no change in activity counter. */
UNIV_INTERN
ibool
srv_check_activity(
/*===============*/
- ulint old_activity_count); /*!< old activity count */
+	ulint		old_activity_count,	/*!< old activity count */
+	ulint		old_ibuf_merge_activity_count = ULINT_UNDEFINED);
+						/*!< old change buffer merge
+						activity count, or
+						ULINT_UNDEFINED */
/******************************************************************//**
Increment the server activity counter. */
UNIV_INTERN
void
-srv_inc_activity_count(void);
-/*=========================*/
+srv_inc_activity_count(
+/*===================*/
+ bool ibuf_merge_activity = false); /*!< whether this activity bump
+ is caused by the background
+ change buffer merge */
/**********************************************************************//**
Enqueues a task to server task queue and releases a worker thread, if there
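
[Editor's note] srv_check_activity() now takes an optional change-buffer-merge counter:
background merges bump a separate count, so a caller that passes the old ibuf count can
subtract it and still consider the server idle. A sketch of the two-counter idea
(hypothetical names):

#include <cstdint>

static std::uint64_t activity_count      = 0;  // all activity
static std::uint64_t ibuf_merge_activity = 0;  // background merges only

void inc_activity(bool ibuf_merge)
{
	++activity_count;
	if (ibuf_merge) ++ibuf_merge_activity;
}

// True if something other than background ibuf merge happened.
bool check_activity(std::uint64_t old_activity,
		    std::uint64_t old_ibuf_activity)
{
	std::uint64_t delta      = activity_count - old_activity;
	std::uint64_t ibuf_delta = ibuf_merge_activity - old_ibuf_activity;
	return delta > ibuf_delta;  // merges alone keep the server "idle"
}
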
diff --git a/storage/xtradb/include/sync0rw.h b/storage/xtradb/include/sync0rw.h
index d59613e0abb..cef2008c4ea 100644
--- a/storage/xtradb/include/sync0rw.h
+++ b/storage/xtradb/include/sync0rw.h
@@ -734,8 +734,8 @@ struct rw_lock_t {
/*!< Thread id of writer thread. Is only
guaranteed to have sane and non-stale
value iff recursive flag is set. */
- os_event_t event; /*!< Used by sync0arr.cc for thread queueing */
- os_event_t wait_ex_event;
+ struct os_event event; /*!< Used by sync0arr.cc for thread queueing */
+ struct os_event wait_ex_event;
/*!< Event for next-writer to wait on. A thread
must decrement lock_word before waiting. */
#ifndef INNODB_RW_LOCKS_USE_ATOMICS
@@ -788,12 +788,12 @@ struct prio_rw_lock_t {
volatile ulint high_priority_s_waiters;
/* Number of high priority S
waiters */
- os_event_t high_priority_s_event; /* High priority wait
+ struct os_event high_priority_s_event; /* High priority wait
array event for S waiters */
volatile ulint high_priority_x_waiters;
/* Number of high priority X
waiters */
- os_event_t high_priority_x_event;
+ struct os_event high_priority_x_event;
/* High priority wait arraay
event for X waiters */
volatile ulint high_priority_wait_ex_waiter;
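
[Editor's note] Switching rw_lock_t's events from os_event_t pointers to embedded
struct os_event values removes one heap allocation and one pointer chase per lock; it is
also why os_event_free() above grew a free_memory flag, so embedded events can be torn
down without free()ing their storage. A sketch of the by-value pattern (hypothetical
types):

struct Event {
	// platform condvar/mutex would live here
	void init() {}
	void destroy() {}
};

// Before: one extra allocation and indirection per lock.
struct LockPtr { Event* event; };
// After: the event lives inside the lock itself.
struct LockVal { Event  event; };

void create(LockVal& l)  { l.event.init(); }
void destroy(LockVal& l) { l.event.destroy(); }  // no free(): memory is embedded

int main()
{
	LockVal l;
	create(l);
	destroy(l);
	return 0;
}
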
diff --git a/storage/xtradb/include/sync0rw.ic b/storage/xtradb/include/sync0rw.ic
index f66d435b8fc..b65e48e0881 100644
--- a/storage/xtradb/include/sync0rw.ic
+++ b/storage/xtradb/include/sync0rw.ic
@@ -602,7 +602,7 @@ rw_lock_s_unlock_func(
/* wait_ex waiter exists. It may not be asleep, but we signal
anyway. We do not wake other waiters, because they can't
exist without wait_ex waiter and wait_ex waiter goes first.*/
- os_event_set(lock->wait_ex_event);
+ os_event_set(&lock->wait_ex_event);
sync_array_object_signalled();
}
@@ -642,7 +642,7 @@ rw_lock_s_unlock_func(
/* A waiting next-writer exists, either high priority or
regular, sharing the same wait event. */
- os_event_set(lock->base_lock.wait_ex_event);
+ os_event_set(&lock->base_lock.wait_ex_event);
sync_array_object_signalled();
} else if (lock_word == X_LOCK_DECR) {
@@ -653,7 +653,7 @@ rw_lock_s_unlock_func(
if (lock->base_lock.waiters) {
rw_lock_reset_waiter_flag(&lock->base_lock);
- os_event_set(lock->base_lock.event);
+ os_event_set(&lock->base_lock.event);
sync_array_object_signalled();
}
}
@@ -735,7 +735,7 @@ rw_lock_x_unlock_func(
if (lock->waiters) {
rw_lock_reset_waiter_flag(lock);
- os_event_set(lock->event);
+ os_event_set(&lock->event);
sync_array_object_signalled();
}
}
@@ -778,16 +778,16 @@ rw_lock_x_unlock_func(
if (lock->high_priority_x_waiters) {
- os_event_set(lock->high_priority_x_event);
+ os_event_set(&lock->high_priority_x_event);
sync_array_object_signalled();
} else if (lock->high_priority_s_waiters) {
- os_event_set(lock->high_priority_s_event);
+ os_event_set(&lock->high_priority_s_event);
sync_array_object_signalled();
} else if (lock->base_lock.waiters) {
rw_lock_reset_waiter_flag(&lock->base_lock);
- os_event_set(lock->base_lock.event);
+ os_event_set(&lock->base_lock.event);
sync_array_object_signalled();
}
}
diff --git a/storage/xtradb/include/sync0sync.h b/storage/xtradb/include/sync0sync.h
index 88fe4644a07..2b794059399 100644
--- a/storage/xtradb/include/sync0sync.h
+++ b/storage/xtradb/include/sync0sync.h
@@ -926,7 +926,7 @@ implementation of a mutual exclusion semaphore. */
/** InnoDB mutex */
struct ib_mutex_t {
- os_event_t event; /*!< Used by sync0arr.cc for the wait queue */
+ struct os_event event; /*!< Used by sync0arr.cc for the wait queue */
volatile lock_word_t lock_word; /*!< lock_word is the target
of the atomic test-and-set instruction when
atomic operations are enabled. */
@@ -974,14 +974,13 @@ struct ib_mutex_t {
struct ib_prio_mutex_t {
ib_mutex_t base_mutex; /* The regular mutex provides the lock
word etc. for the priority mutex */
- os_event_t high_priority_event; /* High priority wait array
+ struct os_event high_priority_event; /* High priority wait array
event */
volatile ulint high_priority_waiters; /* Number of threads that asked
for this mutex to be acquired with high
priority in the global wait array
waiting for this mutex to be
released. */
- UT_LIST_NODE_T(ib_prio_mutex_t) list;
};
/** Constant determining how long spin wait is continued before suspending
diff --git a/storage/xtradb/include/sync0sync.ic b/storage/xtradb/include/sync0sync.ic
index fa0724d7996..83f28bfeded 100644
--- a/storage/xtradb/include/sync0sync.ic
+++ b/storage/xtradb/include/sync0sync.ic
@@ -224,7 +224,7 @@ mutex_exit_func(
/* Wake up any high priority waiters first. */
if (mutex->high_priority_waiters != 0) {
- os_event_set(mutex->high_priority_event);
+ os_event_set(&mutex->high_priority_event);
sync_array_object_signalled();
} else if (mutex_get_waiters(&mutex->base_mutex) != 0) {
diff --git a/storage/xtradb/include/univ.i b/storage/xtradb/include/univ.i
index b0fe3f020d2..528abe183c7 100644
--- a/storage/xtradb/include/univ.i
+++ b/storage/xtradb/include/univ.i
@@ -45,10 +45,10 @@ Created 1/20/1994 Heikki Tuuri
#define INNODB_VERSION_MAJOR 5
#define INNODB_VERSION_MINOR 6
-#define INNODB_VERSION_BUGFIX 29
+#define INNODB_VERSION_BUGFIX 30
#ifndef PERCONA_INNODB_VERSION
-#define PERCONA_INNODB_VERSION 76.2
+#define PERCONA_INNODB_VERSION 76.3
#endif
/* Enable UNIV_LOG_ARCHIVE in XtraDB */
diff --git a/storage/xtradb/lock/lock0lock.cc b/storage/xtradb/lock/lock0lock.cc
index 7364c10c08e..dfca7980e23 100644
--- a/storage/xtradb/lock/lock0lock.cc
+++ b/storage/xtradb/lock/lock0lock.cc
@@ -661,6 +661,17 @@ lock_sys_close(void)
mutex_free(&lock_sys->mutex);
mutex_free(&lock_sys->wait_mutex);
+ os_event_free(lock_sys->timeout_event);
+
+ for (srv_slot_t* slot = lock_sys->waiting_threads;
+ slot < lock_sys->waiting_threads + OS_THREAD_MAX_N; slot++) {
+
+ ut_ad(!slot->in_use);
+ ut_ad(!slot->thr);
+ if (slot->event != NULL)
+ os_event_free(slot->event);
+ }
+
mem_free(lock_stack);
mem_free(lock_sys);
diff --git a/storage/xtradb/log/log0log.cc b/storage/xtradb/log/log0log.cc
index 36531f3c6f4..a19cb70e747 100644
--- a/storage/xtradb/log/log0log.cc
+++ b/storage/xtradb/log/log0log.cc
@@ -1086,8 +1086,7 @@ log_group_init(
ulint space_id, /*!< in: space id of the file space
which contains the log files of this
group */
- ulint archive_space_id __attribute__((unused)))
- /*!< in: space id of the file space
+ ulint archive_space_id) /*!< in: space id of the file space
which contains some archived log
files for this group; currently, only
for the first log group this is
@@ -3304,10 +3303,9 @@ log_archive_close_groups(
Writes the log contents to the archive up to the lsn when this function was
called, and stops the archiving. When archiving is started again, the archived
log file numbers start from 2 higher, so that the archiving will not write
-again to the archived log files which exist when this function returns.
-@return DB_SUCCESS or DB_ERROR */
-UNIV_INTERN
-ulint
+again to the archived log files which exist when this function returns. */
+static
+void
log_archive_stop(void)
/*==================*/
{
@@ -3315,13 +3313,7 @@ log_archive_stop(void)
mutex_enter(&(log_sys->mutex));
- if (log_sys->archiving_state != LOG_ARCH_ON) {
-
- mutex_exit(&(log_sys->mutex));
-
- return(DB_ERROR);
- }
-
+ ut_ad(log_sys->archiving_state == LOG_ARCH_ON);
log_sys->archiving_state = LOG_ARCH_STOPPING;
mutex_exit(&(log_sys->mutex));
@@ -3363,8 +3355,6 @@ log_archive_stop(void)
log_sys->archiving_state = LOG_ARCH_STOPPED;
mutex_exit(&(log_sys->mutex));
-
- return(DB_SUCCESS);
}
/****************************************************************//**
@@ -4013,7 +4003,7 @@ log_print(
"Log tracking enabled\n"
"Log tracked up to " LSN_PF "\n"
"Max tracked LSN age " LSN_PF "\n",
- log_get_tracked_lsn_peek(),
+ log_get_tracked_lsn(),
log_sys->max_checkpoint_age);
}
@@ -4110,6 +4100,7 @@ log_shutdown(void)
rw_lock_free(&log_sys->checkpoint_lock);
mutex_free(&log_sys->mutex);
+ mutex_free(&log_sys->log_flush_order_mutex);
#ifdef UNIV_LOG_ARCHIVE
rw_lock_free(&log_sys->archive_lock);
diff --git a/storage/xtradb/log/log0online.cc b/storage/xtradb/log/log0online.cc
index 51a9fa8f6c5..3f53791ed4c 100644
--- a/storage/xtradb/log/log0online.cc
+++ b/storage/xtradb/log/log0online.cc
@@ -290,7 +290,7 @@ log_online_read_bitmap_page(
/* The following call prints an error message */
os_file_get_last_error(TRUE);
ib_logf(IB_LOG_LEVEL_WARN,
- "failed reading changed page bitmap file \'%s\'\n",
+ "failed reading changed page bitmap file \'%s\'",
bitmap_file->name);
return FALSE;
}
@@ -350,7 +350,7 @@ log_online_read_last_tracked_lsn(void)
ib_logf(IB_LOG_LEVEL_WARN,
"corruption detected in \'%s\' at offset "
- UINT64PF "\n",
+ UINT64PF,
log_bmp_sys->out.name, read_offset);
}
};
@@ -364,7 +364,7 @@ log_online_read_last_tracked_lsn(void)
log_bmp_sys->out.offset)) {
ib_logf(IB_LOG_LEVEL_WARN,
"failed truncating changed page bitmap file \'%s\' to "
- UINT64PF " bytes\n",
+ UINT64PF " bytes",
log_bmp_sys->out.name, log_bmp_sys->out.offset);
result = 0;
}
@@ -382,16 +382,8 @@ log_set_tracked_lsn(
/*================*/
lsn_t tracked_lsn) /*!<in: new value */
{
-#ifdef HAVE_ATOMIC_BUILTINS_64
- /* Single writer, no data race here */
- lsn_t old_value = os_atomic_increment_uint64(&log_sys->tracked_lsn, 0);
- (void) os_atomic_increment_uint64(&log_sys->tracked_lsn,
- tracked_lsn - old_value);
-#else
- mutex_enter(&log_sys->mutex);
log_sys->tracked_lsn = tracked_lsn;
- mutex_exit(&log_sys->mutex);
-#endif
+ os_wmb;
}
/*********************************************************************//**
@@ -416,7 +408,7 @@ log_online_can_track_missing(
ib_logf(IB_LOG_LEVEL_ERROR,
"last tracked LSN " LSN_PF " is ahead of tracking "
"start LSN " LSN_PF ". This can be caused by "
- "mismatched bitmap files.\n",
+ "mismatched bitmap files.",
last_tracked_lsn, tracking_start_lsn);
exit(1);
}
@@ -444,7 +436,7 @@ log_online_track_missing_on_startup(
ib_logf(IB_LOG_LEVEL_WARN, "last tracked LSN in \'%s\' is " LSN_PF
", but the last checkpoint LSN is " LSN_PF ". This might be "
- "due to a server crash or a very fast shutdown. ",
+ "due to a server crash or a very fast shutdown.",
log_bmp_sys->out.name, last_tracked_lsn, tracking_start_lsn);
/* See if we can fully recover the missing interval */
@@ -452,7 +444,7 @@ log_online_track_missing_on_startup(
tracking_start_lsn)) {
ib_logf(IB_LOG_LEVEL_INFO,
- "reading the log to advance the last tracked LSN.\n");
+ "reading the log to advance the last tracked LSN.");
log_bmp_sys->start_lsn = ut_max(last_tracked_lsn,
MIN_TRACKED_LSN);
@@ -463,22 +455,22 @@ log_online_track_missing_on_startup(
ut_ad(log_bmp_sys->end_lsn >= tracking_start_lsn);
ib_logf(IB_LOG_LEVEL_INFO,
- "continuing tracking changed pages from LSN " LSN_PF
- "\n", log_bmp_sys->end_lsn);
+ "continuing tracking changed pages from LSN " LSN_PF,
+ log_bmp_sys->end_lsn);
}
else {
ib_logf(IB_LOG_LEVEL_WARN,
"the age of last tracked LSN exceeds log capacity, "
"tracking-based incremental backups will work only "
- "from the higher LSN!\n");
+ "from the higher LSN!");
log_bmp_sys->end_lsn = log_bmp_sys->start_lsn
= tracking_start_lsn;
log_set_tracked_lsn(log_bmp_sys->start_lsn);
ib_logf(IB_LOG_LEVEL_INFO,
- "starting tracking changed pages from LSN " LSN_PF
- "\n", log_bmp_sys->end_lsn);
+ "starting tracking changed pages from LSN " LSN_PF,
+ log_bmp_sys->end_lsn);
}
}
@@ -546,7 +538,7 @@ log_online_start_bitmap_file(void)
/* The following call prints an error message */
os_file_get_last_error(TRUE);
ib_logf(IB_LOG_LEVEL_ERROR,
- "cannot create \'%s\'\n", log_bmp_sys->out.name);
+ "cannot create \'%s\'", log_bmp_sys->out.name);
return FALSE;
}
@@ -682,7 +674,7 @@ log_online_read_init(void)
if (os_file_closedir(bitmap_dir)) {
os_file_get_last_error(TRUE);
- ib_logf(IB_LOG_LEVEL_ERROR, "cannot close \'%s\'\n",
+ ib_logf(IB_LOG_LEVEL_ERROR, "cannot close \'%s\'",
log_bmp_sys->bmp_file_home);
exit(1);
}
@@ -722,7 +714,7 @@ log_online_read_init(void)
ib_logf(IB_LOG_LEVEL_WARN,
"truncated block detected in \'%s\' at offset "
- UINT64PF "\n",
+ UINT64PF,
log_bmp_sys->out.name,
log_bmp_sys->out.offset);
log_bmp_sys->out.offset -=
@@ -760,14 +752,14 @@ log_online_read_init(void)
"last tracked LSN is " LSN_PF ", but the last "
"checkpoint LSN is " LSN_PF ". The "
"tracking-based incremental backups will work "
- "only from the latter LSN!\n",
+ "only from the latter LSN!",
last_tracked_lsn, tracking_start_lsn);
}
}
ib_logf(IB_LOG_LEVEL_INFO, "starting tracking changed pages from LSN "
- LSN_PF "\n", tracking_start_lsn);
+ LSN_PF, tracking_start_lsn);
log_bmp_sys->start_lsn = tracking_start_lsn;
log_set_tracked_lsn(tracking_start_lsn);
}
@@ -911,7 +903,7 @@ log_online_is_valid_log_seg(
ib_logf(IB_LOG_LEVEL_ERROR,
"log block checksum mismatch: expected " ULINTPF ", "
- "calculated checksum " ULINTPF "\n",
+ "calculated checksum " ULINTPF,
log_block_get_checksum(log_block),
log_block_calc_checksum(log_block));
}
@@ -1110,7 +1102,7 @@ log_online_write_bitmap_page(
/* The following call prints an error message */
os_file_get_last_error(TRUE);
ib_logf(IB_LOG_LEVEL_ERROR, "failed writing changed page "
- "bitmap file \'%s\'\n", log_bmp_sys->out.name);
+ "bitmap file \'%s\'", log_bmp_sys->out.name);
return FALSE;
}
@@ -1120,7 +1112,7 @@ log_online_write_bitmap_page(
/* The following call prints an error message */
os_file_get_last_error(TRUE);
ib_logf(IB_LOG_LEVEL_ERROR, "failed flushing changed page "
- "bitmap file \'%s\'\n", log_bmp_sys->out.name);
+ "bitmap file \'%s\'", log_bmp_sys->out.name);
return FALSE;
}
@@ -1267,8 +1259,7 @@ log_online_diagnose_inconsistent_dir(
ib_logf(IB_LOG_LEVEL_WARN,
"InnoDB: Warning: inconsistent bitmap file "
"directory for a "
- "INFORMATION_SCHEMA.INNODB_CHANGED_PAGES query"
- "\n");
+ "INFORMATION_SCHEMA.INNODB_CHANGED_PAGES query");
free(bitmap_files->files);
}
@@ -1310,7 +1301,7 @@ log_online_setup_bitmap_file_range(
if (UNIV_UNLIKELY(!bitmap_dir)) {
ib_logf(IB_LOG_LEVEL_ERROR,
- "failed to open bitmap directory \'%s\'\n",
+ "failed to open bitmap directory \'%s\'",
srv_data_home);
return FALSE;
}
@@ -1360,7 +1351,7 @@ log_online_setup_bitmap_file_range(
if (UNIV_UNLIKELY(os_file_closedir(bitmap_dir))) {
os_file_get_last_error(TRUE);
- ib_logf(IB_LOG_LEVEL_ERROR, "cannot close \'%s\'\n",
+ ib_logf(IB_LOG_LEVEL_ERROR, "cannot close \'%s\'",
srv_data_home);
return FALSE;
}
@@ -1381,7 +1372,7 @@ log_online_setup_bitmap_file_range(
if (UNIV_UNLIKELY(!bitmap_dir)) {
ib_logf(IB_LOG_LEVEL_ERROR,
- "failed to open bitmap directory \'%s\'\n",
+ "failed to open bitmap directory \'%s\'",
srv_data_home);
return FALSE;
}
@@ -1432,7 +1423,7 @@ log_online_setup_bitmap_file_range(
if (UNIV_UNLIKELY(os_file_closedir(bitmap_dir))) {
os_file_get_last_error(TRUE);
- ib_logf(IB_LOG_LEVEL_ERROR, "cannot close \'%s\'\n",
+ ib_logf(IB_LOG_LEVEL_ERROR, "cannot close \'%s\'",
srv_data_home);
free(bitmap_files->files);
return FALSE;
@@ -1507,7 +1498,7 @@ log_online_open_bitmap_file_read_only(
/* Here and below assume that bitmap file names do not
contain apostrophes, thus no need for ut_print_filename(). */
ib_logf(IB_LOG_LEVEL_WARN,
- "error opening the changed page bitmap \'%s\'\n",
+ "error opening the changed page bitmap \'%s\'",
bitmap_file->name);
return FALSE;
}
@@ -1553,7 +1544,7 @@ log_online_diagnose_bitmap_eof(
ib_logf(IB_LOG_LEVEL_WARN,
"junk at the end of changed page bitmap file "
- "\'%s\'.\n", bitmap_file->name);
+ "\'%s\'.", bitmap_file->name);
}
if (UNIV_UNLIKELY(!last_page_in_run)) {
@@ -1564,7 +1555,7 @@ log_online_diagnose_bitmap_eof(
for the whole server */
ib_logf(IB_LOG_LEVEL_WARN,
"changed page bitmap file \'%s\' does not "
- "contain a complete run at the end.\n",
+ "contain a complete run at the end.",
bitmap_file->name);
return FALSE;
}
@@ -1757,7 +1748,7 @@ log_online_bitmap_iterator_next(
os_file_get_last_error(TRUE);
ib_logf(IB_LOG_LEVEL_WARN,
"failed reading changed page bitmap file "
- "\'%s\'\n", i->in_files.files[i->in_i].name);
+ "\'%s\'", i->in_files.files[i->in_i].name);
i->failed = TRUE;
return FALSE;
}
diff --git a/storage/xtradb/log/log0recv.cc b/storage/xtradb/log/log0recv.cc
index 23ca8b1381f..67f4050a1f1 100644
--- a/storage/xtradb/log/log0recv.cc
+++ b/storage/xtradb/log/log0recv.cc
@@ -90,10 +90,6 @@ UNIV_INTERN recv_sys_t* recv_sys = NULL;
otherwise. Note that this is FALSE while a background thread is
rolling back incomplete transactions. */
UNIV_INTERN ibool recv_recovery_on;
-#ifdef UNIV_LOG_ARCHIVE
-/** TRUE when applying redo log records from an archived log file */
-UNIV_INTERN ibool recv_recovery_from_backup_on;
-#endif /* UNIV_LOG_ARCHIVE */
#ifndef UNIV_HOTBACKUP
/** TRUE when recv_init_crash_recovery() has been called. */
@@ -306,10 +302,6 @@ recv_sys_var_init(void)
recv_recovery_on = FALSE;
-#ifdef UNIV_LOG_ARCHIVE
- recv_recovery_from_backup_on = FALSE;
-#endif /* UNIV_LOG_ARCHIVE */
-
recv_needed_recovery = FALSE;
recv_lsn_checks_on = FALSE;
@@ -3779,332 +3771,6 @@ recv_reset_log_files_for_backup(
}
#endif /* UNIV_HOTBACKUP */
-#ifdef UNIV_LOG_ARCHIVE
-/******************************************************//**
-Reads from the archive of a log group and performs recovery.
-@return TRUE if no more complete consistent archive files */
-static
-ibool
-log_group_recover_from_archive_file(
-/*================================*/
- log_group_t* group) /*!< in: log group */
-{
- os_file_t file_handle;
- ib_uint64_t start_lsn;
- ib_uint64_t file_end_lsn;
- ib_uint64_t dummy_lsn;
- ib_uint64_t scanned_lsn;
- ulint len;
- ibool ret;
- byte* buf;
- os_offset_t read_offset;
- os_offset_t file_size;
- int input_char;
- char name[OS_FILE_MAX_PATH];
- dberr_t err;
-
- ut_a(0);
-
-try_open_again:
- buf = log_sys->buf;
-
- /* Add the file to the archive file space; open the file */
-
- log_archived_file_name_gen(name, sizeof(name),
- group->id, group->archived_file_no);
-
- file_handle = os_file_create(innodb_file_log_key,
- name, OS_FILE_OPEN,
- OS_FILE_LOG, OS_FILE_AIO, &ret, FALSE);
-
- if (ret == FALSE) {
-ask_again:
- fprintf(stderr,
- "InnoDB: Do you want to copy additional"
- " archived log files\n"
- "InnoDB: to the directory\n");
- fprintf(stderr,
- "InnoDB: or were these all the files needed"
- " in recovery?\n");
- fprintf(stderr,
- "InnoDB: (Y == copy more files; N == this is all)?");
-
- input_char = getchar();
-
- if (input_char == (int) 'N') {
-
- return(TRUE);
- } else if (input_char == (int) 'Y') {
-
- goto try_open_again;
- } else {
- goto ask_again;
- }
- }
-
- file_size = os_file_get_size(file_handle);
- ut_a(file_size != (os_offset_t) -1);
-
- fprintf(stderr, "InnoDB: Opened archived log file %s\n", name);
-
- ret = os_file_close(file_handle);
-
- if (file_size < LOG_FILE_HDR_SIZE) {
- fprintf(stderr,
- "InnoDB: Archive file header incomplete %s\n", name);
-
- return(TRUE);
- }
-
- ut_a(ret);
-
- /* Add the archive file as a node to the space */
-
- ut_a(fil_node_create(name, 1 + file_size / UNIV_PAGE_SIZE,
- group->archive_space_id, FALSE));
- ut_a(RECV_SCAN_SIZE >= LOG_FILE_HDR_SIZE);
-
- /* Read the archive file header */
- fil_io(OS_FILE_READ | OS_FILE_LOG, true, group->archive_space_id, 0,
- 0, 0,
- LOG_FILE_HDR_SIZE, buf, NULL, 0);
-
- /* Check if the archive file header is consistent */
-
- if (mach_read_from_4(buf + LOG_GROUP_ID) != group->id
- || mach_read_from_8(buf + LOG_FILE_START_LSN)
- != group->archived_file_no) {
- fprintf(stderr,
- "InnoDB: Archive file header inconsistent %s\n", name);
-
- return(TRUE);
- }
-
- if (!mach_read_from_4(buf + LOG_FILE_ARCH_COMPLETED)) {
- fprintf(stderr,
- "InnoDB: Archive file not completely written %s\n",
- name);
-
- return(TRUE);
- }
-
- start_lsn = mach_read_from_8(buf + LOG_FILE_START_LSN);
- file_end_lsn = mach_read_from_8(buf + LOG_FILE_END_LSN);
-
- if (!recv_sys->scanned_lsn) {
-
- if (recv_sys->parse_start_lsn < start_lsn) {
- fprintf(stderr,
- "InnoDB: Archive log file %s"
- " starts from too big a lsn\n",
- name);
- return(TRUE);
- }
-
- recv_sys->scanned_lsn = start_lsn;
- }
-
- if (recv_sys->scanned_lsn != start_lsn) {
-
- fprintf(stderr,
- "InnoDB: Archive log file %s starts from"
- " a wrong lsn\n",
- name);
- return(TRUE);
- }
-
- read_offset = LOG_FILE_HDR_SIZE;
-
- for (;;) {
- len = RECV_SCAN_SIZE;
-
- if (read_offset + len > file_size) {
- len = ut_calc_align_down(file_size - read_offset,
- OS_FILE_LOG_BLOCK_SIZE);
- }
-
- if (len == 0) {
-
- break;
- }
-
-#ifdef UNIV_DEBUG
- if (log_debug_writes) {
- fprintf(stderr,
- "InnoDB: Archive read starting at"
- " lsn " LSN_PF ", len %lu from file %s\n",
- start_lsn,
- (ulong) len, name);
- }
-#endif /* UNIV_DEBUG */
-
- fil_io(OS_FILE_READ | OS_FILE_LOG, true,
- group->archive_space_id, 0,
- read_offset / UNIV_PAGE_SIZE,
- read_offset % UNIV_PAGE_SIZE, len, buf, NULL, 0);
-
- ret = recv_scan_log_recs(
- (buf_pool_get_n_pages()
- - (recv_n_pool_free_frames * srv_buf_pool_instances))
- * UNIV_PAGE_SIZE, TRUE, buf, len, start_lsn,
- &dummy_lsn, &scanned_lsn, &err);
-
- if (err != DB_SUCCESS) {
- return(FALSE);
- }
-
- if (scanned_lsn == file_end_lsn) {
-
- return(FALSE);
- }
-
- if (ret) {
- fprintf(stderr,
- "InnoDB: Archive log file %s"
- " does not scan right\n",
- name);
- return(TRUE);
- }
-
- read_offset += len;
- start_lsn += len;
-
- ut_ad(start_lsn == scanned_lsn);
- }
-
- return(FALSE);
-}
-
-/********************************************************//**
-Recovers from archived log files, and also from log files, if they exist.
-@return error code or DB_SUCCESS */
-UNIV_INTERN
-dberr_t
-recv_recovery_from_archive_start(
-/*=============================*/
- ib_uint64_t min_flushed_lsn,/*!< in: min flushed lsn field from the
- data files */
- ib_uint64_t limit_lsn, /*!< in: recover up to this lsn if
- possible */
- lsn_t first_log_no) /*!< in: number of the first archived
- log file to use in the recovery; the
- file will be searched from
- INNOBASE_LOG_ARCH_DIR specified in
- server config file */
-{
- log_group_t* group;
- ulint group_id;
- ulint trunc_len;
- ibool ret;
- dberr_t err;
-
- ut_a(0);
-
- recv_sys_create();
- recv_sys_init(buf_pool_get_curr_size());
-
- recv_recovery_on = TRUE;
- recv_recovery_from_backup_on = TRUE;
-
- recv_sys->limit_lsn = limit_lsn;
-
- group_id = 0;
-
- group = UT_LIST_GET_FIRST(log_sys->log_groups);
-
- while (group) {
- if (group->id == group_id) {
-
- break;
- }
-
- group = UT_LIST_GET_NEXT(log_groups, group);
- }
-
- if (!group) {
- fprintf(stderr,
- "InnoDB: There is no log group defined with id %lu!\n",
- (ulong) group_id);
- return(DB_ERROR);
- }
-
- group->archived_file_no = first_log_no;
-
- recv_sys->parse_start_lsn = min_flushed_lsn;
-
- recv_sys->scanned_lsn = 0;
- recv_sys->scanned_checkpoint_no = 0;
- recv_sys->recovered_lsn = recv_sys->parse_start_lsn;
-
- recv_sys->archive_group = group;
-
- ret = FALSE;
-
- mutex_enter(&(log_sys->mutex));
-
- while (!ret) {
- ret = log_group_recover_from_archive_file(group);
-
- /* Close and truncate a possible processed archive file
- from the file space */
-
- trunc_len = UNIV_PAGE_SIZE
- * fil_space_get_size(group->archive_space_id);
- if (trunc_len > 0) {
- fil_space_truncate_start(group->archive_space_id,
- trunc_len);
- }
-
- group->archived_file_no += group->file_size - LOG_FILE_HDR_SIZE;
- }
-
- if (recv_sys->recovered_lsn < limit_lsn) {
-
- if (!recv_sys->scanned_lsn) {
-
- recv_sys->scanned_lsn = recv_sys->parse_start_lsn;
- }
-
- mutex_exit(&(log_sys->mutex));
-
- err = recv_recovery_from_checkpoint_start(LOG_ARCHIVE,
- limit_lsn,
- LSN_MAX,
- LSN_MAX);
- if (err != DB_SUCCESS) {
-
- return(err);
- }
-
- mutex_enter(&(log_sys->mutex));
- }
-
- if (limit_lsn != LSN_MAX) {
-
- recv_apply_hashed_log_recs(FALSE);
-
- recv_reset_logs(0, FALSE, recv_sys->recovered_lsn);
- }
-
- mutex_exit(&(log_sys->mutex));
-
- return(DB_SUCCESS);
-}
-
-/********************************************************//**
-Completes recovery from archive. */
-UNIV_INTERN
-void
-recv_recovery_from_archive_finish(void)
-/*===================================*/
-{
- recv_recovery_from_checkpoint_finish();
-
- recv_recovery_from_backup_on = FALSE;
-}
-#endif /* UNIV_LOG_ARCHIVE */
-
-
void recv_dblwr_t::add(byte* page)
{
pages.push_back(page);
diff --git a/storage/xtradb/mem/mem0pool.cc b/storage/xtradb/mem/mem0pool.cc
index fe9a84d21fa..42d0417c768 100644
--- a/storage/xtradb/mem/mem0pool.cc
+++ b/storage/xtradb/mem/mem0pool.cc
@@ -280,6 +280,7 @@ mem_pool_free(
/*==========*/
mem_pool_t* pool) /*!< in, own: memory pool */
{
+ mutex_free(&pool->mutex);
ut_free(pool->buf);
ut_free(pool);
}
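
The added mutex_free() pairs teardown with creation: mem_pool_create() initializes the pool mutex, so mem_pool_free() must destroy it before releasing the pool, or the mutex object leaks at shutdown. A minimal stand-alone sketch of the same create/free symmetry, using pthreads in place of the InnoDB mutex API (illustrative names, not from this tree):

#include <pthread.h>
#include <stdlib.h>

struct pool {
	pthread_mutex_t	mutex;
	unsigned char*	buf;
};

struct pool* pool_create(size_t size)
{
	struct pool* p = malloc(sizeof(*p));
	pthread_mutex_init(&p->mutex, NULL);
	p->buf = malloc(size);
	return p;
}

void pool_free(struct pool* p)
{
	/* tear down in reverse order of creation: the mutex first,
	then the buffer, then the pool object itself */
	pthread_mutex_destroy(&p->mutex);
	free(p->buf);
	free(p);
}
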
diff --git a/storage/xtradb/os/os0file.cc b/storage/xtradb/os/os0file.cc
index 4010ef10359..41a4076c56c 100644
--- a/storage/xtradb/os/os0file.cc
+++ b/storage/xtradb/os/os0file.cc
@@ -554,6 +554,42 @@ PIMAGE_TLS_CALLBACK p_thread_callback_base = win_tls_thread_exit;
#endif /*_WIN32 */
/***********************************************************************//**
+For an EINVAL I/O error, prints a diagnostic message if innodb_flush_method
+== ALL_O_DIRECT.
+@return true if the diagnostic message was printed
+@return false if the diagnostic message does not apply */
+static
+bool
+os_diagnose_all_o_direct_einval(
+/*============================*/
+ ulint err) /*!< in: C error code */
+{
+ if ((err == EINVAL)
+ && (srv_unix_file_flush_method == SRV_UNIX_ALL_O_DIRECT)) {
+ ib_logf(IB_LOG_LEVEL_INFO,
+ "The error might be caused by redo log I/O not "
+ "satisfying innodb_flush_method=ALL_O_DIRECT "
+ "requirements by the underlying file system.");
+ if (srv_log_block_size != 512)
+ ib_logf(IB_LOG_LEVEL_INFO,
+ "This might be caused by an incompatible "
+ "non-default innodb_log_block_size value %lu.",
+ srv_log_block_size);
+ ib_logf(IB_LOG_LEVEL_INFO,
+ "Please file a bug at https://bugs.percona.com and "
+ "include this error message, my.cnf settings, and "
+ "information about the file system where the redo log "
+ "resides.");
+ ib_logf(IB_LOG_LEVEL_INFO,
+ "A possible workaround is to change "
+ "innodb_flush_method value to something else "
+ "than ALL_O_DIRECT.");
+ return(true);
+ }
+ return(false);
+}
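
Background for this diagnostic: O_DIRECT I/O returns EINVAL when the buffer address, file offset, or transfer length violates the file system's alignment rules, which is exactly what a non-512-byte innodb_log_block_size can provoke. A self-contained Linux sketch of the failure mode (assumes a file system that supports O_DIRECT; the 512-byte figure is illustrative, the real constraint is device dependent):

#define _GNU_SOURCE
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	int fd = open("testfile", O_CREAT | O_RDWR | O_DIRECT, 0600);
	if (fd < 0) { perror("open"); return 1; }

	void* buf;
	if (posix_memalign(&buf, 512, 1024) != 0) return 1;

	/* Misaligned source pointer: O_DIRECT pwrite fails with EINVAL. */
	if (pwrite(fd, (char*) buf + 1, 512, 0) < 0)
		printf("misaligned: %s\n", strerror(errno));

	/* Aligned pointer, offset and length: the same write succeeds. */
	if (pwrite(fd, buf, 512, 0) == 512)
		printf("aligned: ok\n");

	free(buf);
	close(fd);
	return 0;
}
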
+
+/***********************************************************************//**
Retrieves the last error number if an error occurs in a file io function.
The number should be retrieved before any other OS calls (because they may
overwrite the error number). If the number is not known to this program,
@@ -717,7 +753,7 @@ os_file_get_last_error_low(
"InnoDB: Error trying to enable atomic writes on "
"non-supported destination!\n");
}
- } else {
+ } else if (!os_diagnose_all_o_direct_einval(err)) {
if (strerror(err) != NULL) {
fprintf(stderr,
"InnoDB: Error number %d"
@@ -974,7 +1010,7 @@ os_file_lock(
#ifndef UNIV_HOTBACKUP
/****************************************************************//**
Creates the seek mutexes used in positioned reads and writes. */
-UNIV_INTERN
+static
void
os_io_init_simple(void)
/*===================*/
@@ -2528,7 +2564,7 @@ os_file_set_size(
ib_logf(IB_LOG_LEVEL_ERROR, "preallocating file "
"space for file \'%s\' failed. Current size "
- INT64PF ", desired size " INT64PF "\n",
+ INT64PF ", desired size " INT64PF,
name, current_size, size);
os_file_handle_error_no_exit (name, "posix_fallocate",
FALSE, __FILE__, __LINE__);
@@ -2998,6 +3034,9 @@ os_file_pwrite(
/* Handle partial writes and signal interruptions correctly */
for (ret = 0; ret < (ssize_t) n; ) {
n_written = pwrite(file, buf, (ssize_t)n - ret, offs);
+ DBUG_EXECUTE_IF("xb_simulate_all_o_direct_write_failure",
+ n_written = -1;
+ errno = EINVAL;);
if (n_written >= 0) {
ret += n_written;
offs += n_written;
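
The surrounding loop keeps writing until all n bytes are on disk, advancing the offset on short writes; the DBUG_EXECUTE_IF hook injects an EINVAL failure so tests can exercise the diagnostic above. A stand-alone sketch of the same short-write/EINTR pattern (not the InnoDB function itself):

#include <errno.h>
#include <sys/types.h>
#include <unistd.h>

/* Write exactly n bytes at offset offs, tolerating short writes and
EINTR. Returns 0 on success, -1 on a real error. */
static int pwrite_full(int fd, const char* buf, size_t n, off_t offs)
{
	size_t	done = 0;

	while (done < n) {
		ssize_t w = pwrite(fd, buf + done, n - done, offs + done);

		if (w >= 0) {
			done += (size_t) w;	/* short write: retry the rest */
		} else if (errno != EINTR) {
			return -1;		/* genuine error */
		}
		/* on EINTR: simply loop and retry */
	}
	return 0;
}
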
@@ -3143,6 +3182,10 @@ try_again:
try_again:
ret = os_file_pread(file, buf, n, offset, trx);
+ DBUG_EXECUTE_IF("xb_simulate_all_o_direct_read_failure",
+ ret = -1;
+ errno = EINVAL;);
+
if ((ulint) ret == n) {
return(TRUE);
} else if (ret == -1) {
@@ -3472,6 +3515,8 @@ retry:
"InnoDB: "
REFMAN "operating-system-error-codes.html\n");
+ os_diagnose_all_o_direct_einval(errno);
+
os_has_said_disk_full = TRUE;
}
@@ -4439,6 +4484,14 @@ os_aio_free(void)
os_event_free(os_aio_segment_wait_events[i]);
}
+#if !defined(HAVE_ATOMIC_BUILTINS) || UNIV_WORD_SIZE < 8
+ os_mutex_free(os_file_count_mutex);
+#endif /* !HAVE_ATOMIC_BUILTINS || UNIV_WORD_SIZE < 8 */
+
+ for (ulint i = 0; i < OS_FILE_N_SEEK_MUTEXES; i++) {
+ os_mutex_free(os_file_seek_mutexes[i]);
+ }
+
ut_free(os_aio_segment_wait_events);
os_aio_segment_wait_events = 0;
os_aio_n_segments = 0;
@@ -6126,7 +6179,7 @@ os_aio_print(
srv_io_thread_function[i]);
#ifndef __WIN__
- if (os_aio_segment_wait_events[i]->is_set) {
+ if (os_aio_segment_wait_events[i]->is_set()) {
fprintf(file, " ev set");
}
#endif /* __WIN__ */
diff --git a/storage/xtradb/os/os0sync.cc b/storage/xtradb/os/os0sync.cc
index 03c53848832..1569fa7a865 100644
--- a/storage/xtradb/os/os0sync.cc
+++ b/storage/xtradb/os/os0sync.cc
@@ -47,27 +47,14 @@ struct os_mutex_t{
do not assume that the OS mutex
supports recursive locking, though
NT seems to do that */
- UT_LIST_NODE_T(os_mutex_t) os_mutex_list;
- /* list of all 'slow' OS mutexes created */
};
-/** Mutex protecting counts and the lists of OS mutexes and events */
-UNIV_INTERN os_ib_mutex_t os_sync_mutex;
-/** TRUE if os_sync_mutex has been initialized */
-static ibool os_sync_mutex_inited = FALSE;
-/** TRUE when os_sync_free() is being executed */
-static ibool os_sync_free_called = FALSE;
+// All the os_*_count variables are accessed atomically
/** This is incremented by 1 in os_thread_create and decremented by 1 in
-os_thread_exit */
+os_thread_exit. */
UNIV_INTERN ulint os_thread_count = 0;
-/** The list of all events created */
-static UT_LIST_BASE_NODE_T(os_event) os_event_list;
-
-/** The list of all OS 'slow' mutexes */
-static UT_LIST_BASE_NODE_T(os_mutex_t) os_mutex_list;
-
UNIV_INTERN ulint os_event_count = 0;
UNIV_INTERN ulint os_mutex_count = 0;
UNIV_INTERN ulint os_fast_mutex_count = 0;
@@ -80,11 +67,6 @@ UNIV_INTERN mysql_pfs_key_t event_os_mutex_key;
UNIV_INTERN mysql_pfs_key_t os_mutex_key;
#endif
-/* Because a mutex is embedded inside an event and there is an
-event embedded inside a mutex, on free, this generates a recursive call.
-This version of the free event function doesn't acquire the global lock */
-static void os_event_free_internal(os_event_t event);
-
/* On Windows (Vista and later), load function pointers for condition
variable handling. Those functions are not available in prior versions,
so we have to use them via runtime loading, as long as we support XP. */
@@ -289,74 +271,21 @@ void
os_sync_init(void)
/*==============*/
{
- UT_LIST_INIT(os_event_list);
- UT_LIST_INIT(os_mutex_list);
-
- os_sync_mutex = NULL;
- os_sync_mutex_inited = FALSE;
-
/* Now for Windows only */
os_cond_module_init();
-
- os_sync_mutex = os_mutex_create();
-
- os_sync_mutex_inited = TRUE;
}
-/*********************************************************//**
-Frees created events and OS 'slow' mutexes. */
+/** Create an event semaphore, i.e., a semaphore which may just have two
+states: signaled and nonsignaled. The created event is manual reset: it must be
+reset explicitly by calling sync_os_reset_event.
+@param[in,out] event memory block where to create the event */
UNIV_INTERN
void
-os_sync_free(void)
-/*==============*/
-{
- os_event_t event;
- os_ib_mutex_t mutex;
-
- os_sync_free_called = TRUE;
- event = UT_LIST_GET_FIRST(os_event_list);
-
- while (event) {
-
- os_event_free(event);
-
- event = UT_LIST_GET_FIRST(os_event_list);
- }
-
- mutex = UT_LIST_GET_FIRST(os_mutex_list);
-
- while (mutex) {
- if (mutex == os_sync_mutex) {
- /* Set the flag to FALSE so that we do not try to
- reserve os_sync_mutex any more in remaining freeing
- operations in shutdown */
- os_sync_mutex_inited = FALSE;
- }
-
- os_mutex_free(mutex);
-
- mutex = UT_LIST_GET_FIRST(os_mutex_list);
- }
- os_sync_free_called = FALSE;
-}
-
-/*********************************************************//**
-Creates an event semaphore, i.e., a semaphore which may just have two
-states: signaled and nonsignaled. The created event is manual reset: it
-must be reset explicitly by calling sync_os_reset_event.
-@return the event handle */
-UNIV_INTERN
-os_event_t
-os_event_create(void)
-/*==================*/
+os_event_create(os_event_t event)
{
- os_event_t event;
-
#ifdef __WIN__
if(!srv_use_native_conditions) {
- event = static_cast<os_event_t>(ut_malloc(sizeof(*event)));
-
event->handle = CreateEvent(NULL, TRUE, FALSE, NULL);
if (!event->handle) {
fprintf(stderr,
@@ -367,8 +296,6 @@ os_event_create(void)
} else /* Windows with condition variables */
#endif
{
- event = static_cast<os_event_t>(ut_malloc(sizeof *event));
-
#ifndef PFS_SKIP_EVENT_MUTEX
os_fast_mutex_init(event_os_mutex_key, &event->os_mutex);
#else
@@ -377,32 +304,25 @@ os_event_create(void)
os_cond_init(&(event->cond_var));
- event->is_set = FALSE;
-
- /* We return this value in os_event_reset(), which can then be
- be used to pass to the os_event_wait_low(). The value of zero
- is reserved in os_event_wait_low() for the case when the
- caller does not want to pass any signal_count value. To
- distinguish between the two cases we initialize signal_count
- to 1 here. */
- event->signal_count = 1;
+ event->init_count_and_set();
}
- /* The os_sync_mutex can be NULL because during startup an event
- can be created [ because it's embedded in the mutex/rwlock ] before
- this module has been initialized */
- if (os_sync_mutex != NULL) {
- os_mutex_enter(os_sync_mutex);
- }
-
- /* Put to the list of events */
- UT_LIST_ADD_FIRST(os_event_list, os_event_list, event);
+ os_atomic_increment_ulint(&os_event_count, 1);
+}
- os_event_count++;
+/*********************************************************//**
+Creates an event semaphore, i.e., a semaphore which may just have two
+states: signaled and nonsignaled. The created event is manual reset: it
+must be reset explicitly by calling sync_os_reset_event.
+@return the event handle */
+UNIV_INTERN
+os_event_t
+os_event_create(void)
+/*==================*/
+{
+	os_event_t	event = static_cast<os_event_t>(ut_malloc(sizeof(*event)));
- if (os_sync_mutex != NULL) {
- os_mutex_exit(os_sync_mutex);
- }
+ os_event_create(event);
return(event);
}
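
The constructor is now split: the new in-place form initializes an event embedded by value in a larger object (mutexes and rw-locks in the hunks below), and the old allocating form survives as a thin wrapper over it. A miniature of the pattern, with illustrative types:

#include <cstdlib>

struct event_t {
	bool	is_set;
	long	signal_count;
};

/* In-place form: initialize caller-provided storage, e.g. a member. */
void event_create(event_t* ev)
{
	ev->is_set = false;
	ev->signal_count = 1;
}

/* Allocating form: a thin wrapper for callers that want a heap event. */
event_t* event_create()
{
	event_t* ev = static_cast<event_t*>(malloc(sizeof(*ev)));
	event_create(ev);
	return ev;
}

/* An enclosing object embeds the event by value: one allocation fewer. */
struct rw_lock_t {
	event_t	event;
};

void rw_lock_init(rw_lock_t* lock)
{
	event_create(&lock->event);
}
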
@@ -427,11 +347,11 @@ os_event_set(
os_fast_mutex_lock(&(event->os_mutex));
- if (event->is_set) {
+ if (UNIV_UNLIKELY(event->is_set())) {
/* Do nothing */
} else {
- event->is_set = TRUE;
- event->signal_count += 1;
+ event->set();
+ event->inc_signal_count();
os_cond_broadcast(&(event->cond_var));
}
@@ -465,55 +385,26 @@ os_event_reset(
os_fast_mutex_lock(&(event->os_mutex));
- if (!event->is_set) {
+ if (UNIV_UNLIKELY(!event->is_set())) {
/* Do nothing */
} else {
- event->is_set = FALSE;
+ event->reset();
}
- ret = event->signal_count;
+ ret = event->signal_count();
os_fast_mutex_unlock(&(event->os_mutex));
return(ret);
}
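
signal_count acts as a generation counter that closes the race between deciding to wait and actually waiting: a caller snapshots it via os_event_reset() and hands the snapshot to os_event_wait_low(), which returns immediately if a set() slipped in between. A runnable C++11 analogue of the protocol (a sketch, not the InnoDB implementation):

#include <condition_variable>
#include <mutex>

class manual_event {
	std::mutex		m;
	std::condition_variable	cv;
	bool			set_   = false;
	long			count_ = 1;	/* 0 is reserved: "no snapshot" */

public:
	void set() {
		std::lock_guard<std::mutex> g(m);
		if (!set_) { set_ = true; ++count_; }
		cv.notify_all();
	}

	long reset() {		/* returns the snapshot to pass to wait_low() */
		std::lock_guard<std::mutex> g(m);
		set_ = false;
		return count_;
	}

	void wait_low(long snapshot) {
		std::unique_lock<std::mutex> g(m);
		if (snapshot == 0) snapshot = count_;
		/* wake when set, or when a set() bumped the generation */
		cv.wait(g, [&] { return set_ || count_ != snapshot; });
	}
};

Typical use mirrors the hunks above: long sig = ev.reset(); if (!ready) ev.wait_low(sig); a set() arriving between the two calls bumps count_, so the wait falls through instead of sleeping forever.
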
/**********************************************************//**
-Frees an event object, without acquiring the global lock. */
-static
-void
-os_event_free_internal(
-/*===================*/
- os_event_t event) /*!< in: event to free */
-{
-#ifdef __WIN__
- if(!srv_use_native_conditions) {
- ut_a(event);
- ut_a(CloseHandle(event->handle));
- } else
-#endif
- {
- ut_a(event);
-
- /* This is to avoid freeing the mutex twice */
- os_fast_mutex_free(&(event->os_mutex));
-
- os_cond_destroy(&(event->cond_var));
- }
-
- /* Remove from the list of events */
- UT_LIST_REMOVE(os_event_list, os_event_list, event);
-
- os_event_count--;
-
- ut_free(event);
-}
-
-/**********************************************************//**
Frees an event object. */
UNIV_INTERN
void
os_event_free(
/*==========*/
- os_event_t event) /*!< in: event to free */
+ os_event_t event, /*!< in: event to free */
+ bool free_memory)/*!< in: if true, deallocate the event
+ memory block too */
{
ut_a(event);
@@ -528,16 +419,10 @@ os_event_free(
os_cond_destroy(&(event->cond_var));
}
- /* Remove from the list of events */
- os_mutex_enter(os_sync_mutex);
+ os_atomic_decrement_ulint(&os_event_count, 1);
- UT_LIST_REMOVE(os_event_list, os_event_list, event);
-
- os_event_count--;
-
- os_mutex_exit(os_sync_mutex);
-
- ut_free(event);
+ if (free_memory)
+ ut_free(event);
}
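
Teardown mirrors the split constructor: the internals are always destroyed, but the storage is released only when the event was heap-allocated. One-argument call sites such as os_event_free(mutex->event) keep working, so free_memory is presumably defaulted to true in the header, which this diff does not show. A sketch under that assumption:

#include <cstdlib>

struct event_t { bool is_set; long signal_count; };

/* free_memory=false for events embedded in a larger object (the storage
belongs to the enclosing mutex/rw-lock); true for heap events. The
default value is an assumption: the declaring header is not in this diff. */
void event_free(event_t* ev, bool free_memory = true)
{
	/* ... destroy the event's mutex and condition variable here ... */
	if (free_memory)
		free(ev);
}
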
/**********************************************************//**
@@ -585,10 +470,10 @@ os_event_wait_low(
os_fast_mutex_lock(&event->os_mutex);
if (!reset_sig_count) {
- reset_sig_count = event->signal_count;
+ reset_sig_count = event->signal_count();
}
- while (!event->is_set && event->signal_count == reset_sig_count) {
+ while (!event->is_set() && event->signal_count() == reset_sig_count) {
os_cond_wait(&(event->cond_var), &(event->os_mutex));
/* Solaris manual said that spurious wakeups may occur: we
@@ -686,11 +571,12 @@ os_event_wait_time_low(
os_fast_mutex_lock(&event->os_mutex);
if (!reset_sig_count) {
- reset_sig_count = event->signal_count;
+ reset_sig_count = event->signal_count();
}
do {
- if (event->is_set || event->signal_count != reset_sig_count) {
+ if (event->is_set()
+ || event->signal_count() != reset_sig_count) {
break;
}
@@ -734,18 +620,7 @@ os_mutex_create(void)
mutex_str->count = 0;
mutex_str->event = os_event_create();
- if (UNIV_LIKELY(os_sync_mutex_inited)) {
- /* When creating os_sync_mutex itself we cannot reserve it */
- os_mutex_enter(os_sync_mutex);
- }
-
- UT_LIST_ADD_FIRST(os_mutex_list, os_mutex_list, mutex_str);
-
- os_mutex_count++;
-
- if (UNIV_LIKELY(os_sync_mutex_inited)) {
- os_mutex_exit(os_sync_mutex);
- }
+ os_atomic_increment_ulint(&os_mutex_count, 1);
return(mutex_str);
}
@@ -791,21 +666,9 @@ os_mutex_free(
{
ut_a(mutex);
- if (UNIV_LIKELY(!os_sync_free_called)) {
- os_event_free_internal(mutex->event);
- }
-
- if (UNIV_LIKELY(os_sync_mutex_inited)) {
- os_mutex_enter(os_sync_mutex);
- }
-
- UT_LIST_REMOVE(os_mutex_list, os_mutex_list, mutex);
+ os_event_free(mutex->event);
- os_mutex_count--;
-
- if (UNIV_LIKELY(os_sync_mutex_inited)) {
- os_mutex_exit(os_sync_mutex);
- }
+ os_atomic_decrement_ulint(&os_mutex_count, 1);
os_fast_mutex_free(static_cast<os_fast_mutex_t*>(mutex->handle));
ut_free(mutex->handle);
@@ -827,18 +690,7 @@ os_fast_mutex_init_func(
#else
ut_a(0 == pthread_mutex_init(fast_mutex, MY_MUTEX_INIT_FAST));
#endif
- if (UNIV_LIKELY(os_sync_mutex_inited)) {
- /* When creating os_sync_mutex itself (in Unix) we cannot
- reserve it */
-
- os_mutex_enter(os_sync_mutex);
- }
-
- os_fast_mutex_count++;
-
- if (UNIV_LIKELY(os_sync_mutex_inited)) {
- os_mutex_exit(os_sync_mutex);
- }
+ os_atomic_increment_ulint(&os_fast_mutex_count, 1);
}
/**********************************************************//**
@@ -919,17 +771,6 @@ os_fast_mutex_free_func(
putc('\n', stderr);
}
#endif
- if (UNIV_LIKELY(os_sync_mutex_inited)) {
- /* When freeing the last mutexes, we have
- already freed os_sync_mutex */
-
- os_mutex_enter(os_sync_mutex);
- }
- ut_ad(os_fast_mutex_count > 0);
- os_fast_mutex_count--;
-
- if (UNIV_LIKELY(os_sync_mutex_inited)) {
- os_mutex_exit(os_sync_mutex);
- }
+ os_atomic_decrement_ulint(&os_fast_mutex_count, 1);
}
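
With the global lists and os_sync_mutex gone, the os_*_count bookkeeping switches to atomic add/sub; os_atomic_increment_ulint() returns the new value, which is what lets os_thread_create_func() bound-check the count without a second, racy read (see the os0thread.cc hunk below). A portable C++11 equivalent (a sketch; the real macros map to compiler builtins):

#include <atomic>
#include <cassert>

static const unsigned long	OS_THREAD_MAX_N = 10000;	/* illustrative */
static std::atomic<unsigned long> os_thread_count{0};

void on_thread_create()
{
	/* fetch_add returns the old value; +1 yields the new count, the
	same contract as os_atomic_increment_ulint() */
	unsigned long new_count = os_thread_count.fetch_add(1) + 1;
	assert(new_count <= OS_THREAD_MAX_N);
}

void on_thread_exit()
{
	os_thread_count.fetch_sub(1);
}
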
diff --git a/storage/xtradb/os/os0thread.cc b/storage/xtradb/os/os0thread.cc
index 8420a94787b..aabdd06d76b 100644
--- a/storage/xtradb/os/os0thread.cc
+++ b/storage/xtradb/os/os0thread.cc
@@ -145,9 +145,7 @@ os_thread_create_func(
os_thread_t thread;
DWORD win_thread_id;
- os_mutex_enter(os_sync_mutex);
- os_thread_count++;
- os_mutex_exit(os_sync_mutex);
+ os_atomic_increment_ulint(&os_thread_count, 1);
thread = CreateThread(NULL, /* no security attributes */
0, /* default size stack */
@@ -186,9 +184,8 @@ os_thread_create_func(
exit(1);
}
#endif
- os_mutex_enter(os_sync_mutex);
- os_thread_count++;
- os_mutex_exit(os_sync_mutex);
+ ulint new_count = os_atomic_increment_ulint(&os_thread_count, 1);
+ ut_a(new_count <= OS_THREAD_MAX_N);
#ifdef UNIV_HPUX10
ret = pthread_create(&pthread, pthread_attr_default, func, arg);
@@ -205,8 +202,6 @@ os_thread_create_func(
pthread_attr_destroy(&attr);
#endif
- ut_a(os_thread_count <= OS_THREAD_MAX_N);
-
if (thread_id) {
*thread_id = pthread;
}
@@ -233,9 +228,7 @@ os_thread_exit(
pfs_delete_thread();
#endif
- os_mutex_enter(os_sync_mutex);
- os_thread_count--;
- os_mutex_exit(os_sync_mutex);
+ os_atomic_decrement_ulint(&os_thread_count, 1);
#ifdef __WIN__
ExitThread((DWORD) exit_value);
diff --git a/storage/xtradb/row/row0merge.cc b/storage/xtradb/row/row0merge.cc
index b45c7f2171b..19e18fd89e7 100644
--- a/storage/xtradb/row/row0merge.cc
+++ b/storage/xtradb/row/row0merge.cc
@@ -1,6 +1,6 @@
/*****************************************************************************
-Copyright (c) 2005, 2015, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 2005, 2016, Oracle and/or its affiliates. All Rights Reserved.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -2176,7 +2176,8 @@ wait_again:
if (max_doc_id && err == DB_SUCCESS) {
/* Sync fts cache for other fts indexes to keep all
fts indexes consistent in sync_doc_id. */
- err = fts_sync_table(const_cast<dict_table_t*>(new_table));
+ err = fts_sync_table(const_cast<dict_table_t*>(new_table),
+ false, true);
if (err == DB_SUCCESS) {
fts_update_next_doc_id(
@@ -3913,7 +3914,7 @@ row_merge_drop_table(
/* There must be no open transactions on the table. */
ut_a(table->n_ref_count == 0);
- return(row_drop_table_for_mysql(table->name, trx, false, false));
+ return(row_drop_table_for_mysql(table->name, trx, false, false, false));
}
/*********************************************************************//**
diff --git a/storage/xtradb/row/row0mysql.cc b/storage/xtradb/row/row0mysql.cc
index 9427b20daf9..ccdfc1332a0 100644
--- a/storage/xtradb/row/row0mysql.cc
+++ b/storage/xtradb/row/row0mysql.cc
@@ -2419,7 +2419,7 @@ err_exit:
dict_table_close(table, TRUE, FALSE);
- row_drop_table_for_mysql(table->name, trx, FALSE);
+ row_drop_table_for_mysql(table->name, trx, FALSE, TRUE);
if (commit) {
trx_commit_for_mysql(trx);
@@ -2579,7 +2579,7 @@ error_handling:
trx_rollback_to_savepoint(trx, NULL);
- row_drop_table_for_mysql(table_name, trx, FALSE);
+ row_drop_table_for_mysql(table_name, trx, FALSE, TRUE);
trx_commit_for_mysql(trx);
@@ -2656,7 +2656,7 @@ row_table_add_foreign_constraints(
trx_rollback_to_savepoint(trx, NULL);
- row_drop_table_for_mysql(name, trx, FALSE);
+ row_drop_table_for_mysql(name, trx, FALSE, TRUE);
trx_commit_for_mysql(trx);
@@ -2697,7 +2697,7 @@ row_drop_table_for_mysql_in_background(
/* Try to drop the table in InnoDB */
- error = row_drop_table_for_mysql(name, trx, FALSE);
+ error = row_drop_table_for_mysql(name, trx, FALSE, FALSE);
/* Flush the log to reduce probability that the .frm files and
the InnoDB data dictionary get out-of-sync if the user runs
@@ -3844,6 +3844,9 @@ row_drop_table_for_mysql(
const char* name, /*!< in: table name */
trx_t* trx, /*!< in: transaction handle */
bool drop_db,/*!< in: true=dropping whole database */
+	ibool		create_failed,/*!< in: TRUE=create table failed,
+				e.g. because of a foreign key
+				column type mismatch */
bool nonatomic)
/*!< in: whether it is permitted
to release and reacquire dict_operation_lock */
@@ -4049,7 +4052,12 @@ row_drop_table_for_mysql(
name,
foreign->foreign_table_name_lookup);
- if (foreign->foreign_table != table && !ref_ok) {
+		/* We should allow dropping a referenced table if creating
+		that referenced table has failed for some reason; for example,
+		if the referenced table was created but the referenced column
+		types do not match. */
+ if (foreign->foreign_table != table &&
+ !create_failed && !ref_ok) {
FILE* ef = dict_foreign_err_file;
@@ -4593,7 +4601,7 @@ row_mysql_drop_temp_tables(void)
table = dict_table_get_low(table_name);
if (table) {
- row_drop_table_for_mysql(table_name, trx, FALSE);
+ row_drop_table_for_mysql(table_name, trx, FALSE, FALSE);
trx_commit_for_mysql(trx);
}
@@ -4762,7 +4770,7 @@ loop:
goto loop;
}
- err = row_drop_table_for_mysql(table_name, trx, TRUE);
+ err = row_drop_table_for_mysql(table_name, trx, TRUE, FALSE);
trx_commit_for_mysql(trx);
if (err != DB_SUCCESS) {
diff --git a/storage/xtradb/srv/srv0conc.cc b/storage/xtradb/srv/srv0conc.cc
index c4cf1b9ab7b..b070a8289df 100644
--- a/storage/xtradb/srv/srv0conc.cc
+++ b/storage/xtradb/srv/srv0conc.cc
@@ -165,6 +165,10 @@ srv_conc_free(void)
{
#ifndef HAVE_ATOMIC_BUILTINS
os_fast_mutex_free(&srv_conc_mutex);
+
+ for (ulint i = 0; i < OS_THREAD_MAX_N; i++)
+ os_event_free(srv_conc_slots[i].event);
+
mem_free(srv_conc_slots);
srv_conc_slots = NULL;
#endif /* !HAVE_ATOMIC_BUILTINS */
diff --git a/storage/xtradb/srv/srv0srv.cc b/storage/xtradb/srv/srv0srv.cc
index c98bfc85086..93e8f3bf2f4 100644
--- a/storage/xtradb/srv/srv0srv.cc
+++ b/storage/xtradb/srv/srv0srv.cc
@@ -372,8 +372,6 @@ UNIV_INTERN ulong srv_read_ahead_threshold = 56;
#ifdef UNIV_LOG_ARCHIVE
UNIV_INTERN ibool srv_log_archive_on = FALSE;
-UNIV_INTERN ibool srv_archive_recovery = 0;
-UNIV_INTERN ib_uint64_t srv_archive_recovery_limit_lsn;
#endif /* UNIV_LOG_ARCHIVE */
/* This parameter is used to throttle the number of insert buffers that are
@@ -810,6 +808,10 @@ struct srv_sys_t{
srv_stats_t::ulint_ctr_1_t
activity_count; /*!< For tracking server
activity */
+ srv_stats_t::ulint_ctr_1_t
+ ibuf_merge_activity_count;/*!< For tracking change
+ buffer merge activity, a subset
+ of overall server activity */
};
#ifndef HAVE_ATOMIC_BUILTINS
@@ -1200,8 +1202,9 @@ srv_init(void)
srv_checkpoint_completed_event = os_event_create();
+ srv_redo_log_tracked_event = os_event_create();
+
if (srv_track_changed_pages) {
- srv_redo_log_tracked_event = os_event_create();
os_event_set(srv_redo_log_tracked_event);
}
@@ -1251,17 +1254,34 @@ srv_free(void)
{
srv_conc_free();
- /* The mutexes srv_sys->mutex and srv_sys->tasks_mutex should have
- been freed by sync_close() already. */
- mem_free(srv_sys);
- srv_sys = NULL;
+ if (!srv_read_only_mode) {
- trx_i_s_cache_free(trx_i_s_cache);
+ for (ulint i = 0; i < srv_sys->n_sys_threads; i++)
+ os_event_free(srv_sys->sys_threads[i].event);
- if (!srv_read_only_mode) {
+ os_event_free(srv_error_event);
+ os_event_free(srv_monitor_event);
os_event_free(srv_buf_dump_event);
- srv_buf_dump_event = NULL;
+ os_event_free(srv_checkpoint_completed_event);
+ os_event_free(srv_redo_log_tracked_event);
+ mutex_free(&srv_sys->mutex);
+ mutex_free(&srv_sys->tasks_mutex);
}
+
+#ifdef WITH_INNODB_DISALLOW_WRITES
+ os_event_free(srv_allow_writes_event);
+#endif /* WITH_INNODB_DISALLOW_WRITES */
+
+#ifndef HAVE_ATOMIC_BUILTINS
+ mutex_free(&server_mutex);
+#endif
+ mutex_free(&srv_innodb_monitor_mutex);
+ mutex_free(&page_zip_stat_per_index_mutex);
+
+ mem_free(srv_sys);
+ srv_sys = NULL;
+
+ trx_i_s_cache_free(trx_i_s_cache);
}
/*********************************************************************//**
@@ -2320,7 +2340,7 @@ loop:
if (sync_array_print_long_waits(&waiter, &sema)
&& sema == old_sema && os_thread_eq(waiter, old_waiter)) {
#if defined(WITH_WSREP) && defined(WITH_INNODB_DISALLOW_WRITES)
- if (srv_allow_writes_event->is_set) {
+ if (srv_allow_writes_event->is_set()) {
#endif /* WITH_WSREP */
fatal_cnt++;
#if defined(WITH_WSREP) && defined(WITH_INNODB_DISALLOW_WRITES)
@@ -2408,10 +2428,15 @@ rescan_idle:
Increment the server activity count. */
UNIV_INTERN
void
-srv_inc_activity_count(void)
-/*========================*/
+srv_inc_activity_count(
+/*===================*/
+	bool	ibuf_merge_activity)	/*!< in: whether this activity bump
+ is caused by the background
+ change buffer merge */
{
srv_sys->activity_count.inc();
+ if (ibuf_merge_activity)
+ srv_sys->ibuf_merge_activity_count.inc();
}
/**********************************************************************//**
@@ -2531,7 +2556,7 @@ DECLARE_THREAD(srv_redo_log_follow_thread)(
/* TODO: sync with I_S log tracking status? */
ib_logf(IB_LOG_LEVEL_ERROR,
"log tracking bitmap write failed, "
- "stopping log tracking thread!\n");
+ "stopping log tracking thread!");
break;
}
os_event_set(srv_redo_log_tracked_event);
@@ -2573,7 +2598,7 @@ purge_archived_logs(
if (!dir) {
ib_logf(IB_LOG_LEVEL_WARN,
"opening archived log directory %s failed. "
- "Purge archived logs are not available\n",
+			"Purging archived logs is not available",
srv_arch_dir);
/* failed to open directory */
return(DB_ERROR);
@@ -2661,7 +2686,7 @@ purge_archived_logs(
archived_log_filename)) {
ib_logf(IB_LOG_LEVEL_WARN,
- "can't delete archived log file %s.\n",
+ "can't delete archived log file %s.",
archived_log_filename);
mutex_exit(&log_sys->mutex);
@@ -2769,16 +2794,49 @@ srv_get_activity_count(void)
return(srv_sys->activity_count);
}
+/** Get current server ibuf merge activity count.
+@return ibuf merge activity count */
+static
+ulint
+srv_get_ibuf_merge_activity_count(void)
+{
+ return(srv_sys->ibuf_merge_activity_count);
+}
+
/*******************************************************************//**
-Check if there has been any activity.
+Check if there has been any activity. Considers background change buffer
+merge as regular server activity unless a non-default
+old_ibuf_merge_activity_count value is passed, in which case the merge is
+treated as keeping the server idle.
@return FALSE if no change in activity counter. */
UNIV_INTERN
ibool
srv_check_activity(
/*===============*/
- ulint old_activity_count) /*!< in: old activity count */
+	ulint		old_activity_count,	/*!< in: old activity count */
+	ulint		old_ibuf_merge_activity_count)
+						/*!< in: old change buffer merge
+						activity count, or
+						ULINT_UNDEFINED */
{
- return(srv_sys->activity_count != old_activity_count);
+ ulint new_activity_count = srv_sys->activity_count;
+ if (old_ibuf_merge_activity_count == ULINT_UNDEFINED)
+ return(new_activity_count != old_activity_count);
+
+ /* If we care about ibuf merge activity, then the server is considered
+ idle if all activity, if any, was due to ibuf merge. */
+ ulint new_ibuf_merge_activity_count
+ = srv_sys->ibuf_merge_activity_count;
+
+ ut_ad(new_ibuf_merge_activity_count <= new_activity_count);
+ ut_ad(new_ibuf_merge_activity_count >= old_ibuf_merge_activity_count);
+ ut_ad(new_activity_count >= old_activity_count);
+
+ ulint ibuf_merge_activity_delta =
+ new_ibuf_merge_activity_count - old_ibuf_merge_activity_count;
+ ulint activity_delta = new_activity_count - old_activity_count;
+
+ return (activity_delta > ibuf_merge_activity_delta);
}
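
A worked example of the new predicate: if activity_count went 1000 -> 1007 between master-thread iterations while ibuf_merge_activity_count went 100 -> 107, the deltas are equal and the server counts as idle; a single user transaction on top makes 8 > 7 and reports activity. In miniature:

#include <cassert>

/* Mirror of the predicate above, with sample values. */
static bool check_activity(unsigned long old_act, unsigned long new_act,
			   unsigned long old_ibuf, unsigned long new_ibuf)
{
	return (new_act - old_act) > (new_ibuf - old_ibuf);
}

int main()
{
	assert(!check_activity(1000, 1007, 100, 107));	/* merges only: idle */
	assert( check_activity(1000, 1008, 100, 107));	/* real work too: active */
	return 0;
}
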
/********************************************************************//**
@@ -3142,6 +3200,8 @@ DECLARE_THREAD(srv_master_thread)(
{
srv_slot_t* slot;
ulint old_activity_count = srv_get_activity_count();
+ ulint old_ibuf_merge_activity_count
+ = srv_get_ibuf_merge_activity_count();
ib_time_t last_print_time;
ut_ad(!srv_read_only_mode);
@@ -3179,8 +3239,12 @@ loop:
srv_current_thread_priority = srv_master_thread_priority;
- if (srv_check_activity(old_activity_count)) {
+ if (srv_check_activity(old_activity_count,
+ old_ibuf_merge_activity_count)) {
+
old_activity_count = srv_get_activity_count();
+ old_ibuf_merge_activity_count
+ = srv_get_ibuf_merge_activity_count();
srv_master_do_active_tasks();
} else {
srv_master_do_idle_tasks();
diff --git a/storage/xtradb/srv/srv0start.cc b/storage/xtradb/srv/srv0start.cc
index 2455dc1c9a9..61ef03cfc03 100644
--- a/storage/xtradb/srv/srv0start.cc
+++ b/storage/xtradb/srv/srv0start.cc
@@ -2483,40 +2483,6 @@ files_checked:
create_log_files_rename(logfilename, dirnamelen,
max_flushed_lsn, logfile0);
-#ifdef UNIV_LOG_ARCHIVE
- } else if (srv_archive_recovery) {
-
- ib_logf(IB_LOG_LEVEL_INFO,
- " Starting archive recovery from a backup...");
-
- err = recv_recovery_from_archive_start(
- min_flushed_lsn, srv_archive_recovery_limit_lsn,
- min_arch_log_no);
- if (err != DB_SUCCESS) {
-
- return(DB_ERROR);
- }
- /* Since ibuf init is in dict_boot, and ibuf is needed
- in any disk i/o, first call dict_boot */
-
- err = dict_boot();
-
- if (err != DB_SUCCESS) {
- return(err);
- }
-
- ib_bh = trx_sys_init_at_db_start();
- n_recovered_trx = UT_LIST_GET_LEN(trx_sys->rw_trx_list);
-
- /* The purge system needs to create the purge view and
- therefore requires that the trx_sys is inited. */
-
- trx_purge_sys_create(srv_n_purge_threads, ib_bh);
-
- srv_startup_is_before_trx_rollback_phase = FALSE;
-
- recv_recovery_from_archive_finish();
-#endif /* UNIV_LOG_ARCHIVE */
} else {
/* Check if we support the max format that is stamped
@@ -3213,15 +3179,13 @@ innobase_shutdown_for_mysql(void)
logs_empty_and_mark_files_at_shutdown() and should have
already quit or is quitting right now. */
-
if (srv_use_mtflush) {
/* g. Exit the multi threaded flush threads */
buf_mtflu_io_thread_exit();
}
- os_mutex_enter(os_sync_mutex);
-
+ os_rmb;
if (os_thread_count == 0) {
/* All the threads have exited or are just exiting;
NOTE that the threads may not have completed their
@@ -3231,15 +3195,11 @@ innobase_shutdown_for_mysql(void)
os_thread_exit(). Now we just sleep 0.1
seconds and hope that is enough! */
- os_mutex_exit(os_sync_mutex);
-
os_thread_sleep(100000);
break;
}
- os_mutex_exit(os_sync_mutex);
-
os_thread_sleep(100000);
}
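
With os_sync_mutex removed, the shutdown loop can no longer rely on mutex acquire/release for visibility of os_thread_count, hence the explicit os_rmb read barrier before each poll. In C++11 terms this is an acquire load of an atomic counter; a sketch:

#include <atomic>
#include <chrono>
#include <thread>

static std::atomic<unsigned long> os_thread_count{0};

/* Poll until every registered thread has exited; the acquire load plays
the role of os_rmb followed by a plain read in the hunk above. */
void wait_for_threads()
{
	while (os_thread_count.load(std::memory_order_acquire) != 0) {
		std::this_thread::sleep_for(std::chrono::milliseconds(100));
	}
}
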
@@ -3348,26 +3308,23 @@ innobase_shutdown_for_mysql(void)
que_close();
row_mysql_close();
srv_mon_free();
- sync_close();
srv_free();
fil_close();
- /* 4. Free the os_conc_mutex and all os_events and os_mutexes */
-
- os_sync_free();
-
- /* 5. Free all allocated memory */
+ /* 4. Free all allocated memory */
pars_lexer_close();
log_mem_free();
buf_pool_free(srv_buf_pool_instances);
mem_close();
+ sync_close();
/* ut_free_all_mem() frees all allocated memory not freed yet
in shutdown, and it will also free the ut_list_mutex, so it
should be the last one for all operation */
ut_free_all_mem();
+ os_rmb;
if (os_thread_count != 0
|| os_event_count != 0
|| os_mutex_count != 0
diff --git a/storage/xtradb/sync/sync0arr.cc b/storage/xtradb/sync/sync0arr.cc
index d881c5de2f5..c311d2cbd7d 100644
--- a/storage/xtradb/sync/sync0arr.cc
+++ b/storage/xtradb/sync/sync0arr.cc
@@ -336,21 +336,21 @@ sync_cell_get_event(
ulint type = cell->request_type;
if (type == SYNC_MUTEX) {
- return(((ib_mutex_t*) cell->wait_object)->event);
+ return(&((ib_mutex_t*) cell->wait_object)->event);
} else if (type == SYNC_PRIO_MUTEX) {
- return(((ib_prio_mutex_t*) cell->wait_object)
+ return(&((ib_prio_mutex_t*) cell->wait_object)
->high_priority_event);
} else if (type == RW_LOCK_WAIT_EX) {
- return(((rw_lock_t*) cell->wait_object)->wait_ex_event);
+ return(&((rw_lock_t*) cell->wait_object)->wait_ex_event);
} else if (type == PRIO_RW_LOCK_SHARED) {
- return(((prio_rw_lock_t *) cell->wait_object)
+ return(&((prio_rw_lock_t *) cell->wait_object)
->high_priority_s_event);
} else if (type == PRIO_RW_LOCK_EX) {
- return(((prio_rw_lock_t *) cell->wait_object)
+ return(&((prio_rw_lock_t *) cell->wait_object)
->high_priority_x_event);
} else { /* RW_LOCK_SHARED and RW_LOCK_EX wait on the same event */
ut_ad(type == RW_LOCK_SHARED || type == RW_LOCK_EX);
- return(((rw_lock_t*) cell->wait_object)->event);
+ return(&((rw_lock_t*) cell->wait_object)->event);
}
}
diff --git a/storage/xtradb/sync/sync0rw.cc b/storage/xtradb/sync/sync0rw.cc
index 00fb5e511a4..729f510013d 100644
--- a/storage/xtradb/sync/sync0rw.cc
+++ b/storage/xtradb/sync/sync0rw.cc
@@ -261,8 +261,8 @@ rw_lock_create_func(
lock->last_x_file_name = "not yet reserved";
lock->last_s_line = 0;
lock->last_x_line = 0;
- lock->event = os_event_create();
- lock->wait_ex_event = os_event_create();
+ os_event_create(&lock->event);
+ os_event_create(&lock->wait_ex_event);
mutex_enter(&rw_lock_list_mutex);
@@ -304,9 +304,9 @@ rw_lock_create_func(
cline);
lock->high_priority_s_waiters = 0;
- lock->high_priority_s_event = os_event_create();
+ os_event_create(&lock->high_priority_s_event);
lock->high_priority_x_waiters = 0;
- lock->high_priority_x_event = os_event_create();
+ os_event_create(&lock->high_priority_x_event);
lock->high_priority_wait_ex_waiter = 0;
}
@@ -334,9 +334,9 @@ rw_lock_free_func(
mutex = rw_lock_get_mutex(lock);
#endif /* !INNODB_RW_LOCKS_USE_ATOMICS */
- os_event_free(lock->event);
+ os_event_free(&lock->event, false);
- os_event_free(lock->wait_ex_event);
+ os_event_free(&lock->wait_ex_event, false);
ut_ad(UT_LIST_GET_PREV(list, lock) == NULL
|| UT_LIST_GET_PREV(list, lock)->magic_n == RW_LOCK_MAGIC_N);
@@ -366,8 +366,8 @@ rw_lock_free_func(
/*==============*/
prio_rw_lock_t* lock) /*!< in: rw-lock */
{
- os_event_free(lock->high_priority_s_event);
- os_event_free(lock->high_priority_x_event);
+ os_event_free(&lock->high_priority_s_event, false);
+ os_event_free(&lock->high_priority_x_event, false);
rw_lock_free_func(&lock->base_lock);
}
diff --git a/storage/xtradb/sync/sync0sync.cc b/storage/xtradb/sync/sync0sync.cc
index 702dd240f16..c699fbf2ded 100644
--- a/storage/xtradb/sync/sync0sync.cc
+++ b/storage/xtradb/sync/sync0sync.cc
@@ -212,10 +212,7 @@ UNIV_INTERN mysql_pfs_key_t sync_thread_mutex_key;
/** Global list of database mutexes (not OS mutexes) created. */
UNIV_INTERN ut_list_base_node_t mutex_list;
-/** Global list of priority mutexes. A subset of mutex_list */
-UNIV_INTERN UT_LIST_BASE_NODE_T(ib_prio_mutex_t) prio_mutex_list;
-
-/** Mutex protecting the mutex_list and prio_mutex_list variables */
+/** Mutex protecting the mutex_list variable */
UNIV_INTERN ib_mutex_t mutex_list_mutex;
#ifdef UNIV_PFS_MUTEX
@@ -280,7 +277,7 @@ mutex_create_func(
os_fast_mutex_init(PFS_NOT_INSTRUMENTED, &mutex->os_fast_mutex);
mutex->lock_word = 0;
#endif
- mutex->event = os_event_create();
+ os_event_create(&mutex->event);
mutex_set_waiters(mutex, 0);
#ifdef UNIV_DEBUG
mutex->magic_n = MUTEX_MAGIC_N;
@@ -349,11 +346,7 @@ mutex_create_func(
cline,
cmutex_name);
mutex->high_priority_waiters = 0;
- mutex->high_priority_event = os_event_create();
-
- mutex_enter(&mutex_list_mutex);
- UT_LIST_ADD_FIRST(list, prio_mutex_list, mutex);
- mutex_exit(&mutex_list_mutex);
+ os_event_create(&mutex->high_priority_event);
}
/******************************************************************//**
@@ -400,7 +393,7 @@ mutex_free_func(
mutex_exit(&mutex_list_mutex);
}
- os_event_free(mutex->event);
+ os_event_free(&mutex->event, false);
#ifdef UNIV_MEM_DEBUG
func_exit:
#endif /* UNIV_MEM_DEBUG */
@@ -427,12 +420,8 @@ mutex_free_func(
/*============*/
ib_prio_mutex_t* mutex) /*!< in: mutex */
{
- mutex_enter(&mutex_list_mutex);
- UT_LIST_REMOVE(list, prio_mutex_list, mutex);
- mutex_exit(&mutex_list_mutex);
-
ut_a(mutex->high_priority_waiters == 0);
- os_event_free(mutex->high_priority_event);
+ os_event_free(&mutex->high_priority_event, false);
mutex_free_func(&mutex->base_mutex);
}
@@ -722,7 +711,7 @@ mutex_signal_object(
/* The memory order of resetting the waiters field and
signaling the object is important. See LEMMA 1 above. */
- os_event_set(mutex->event);
+ os_event_set(&mutex->event);
sync_array_object_signalled();
}
@@ -1555,7 +1544,6 @@ sync_init(void)
/* Init the mutex list and create the mutex to protect it. */
UT_LIST_INIT(mutex_list);
- UT_LIST_INIT(prio_mutex_list);
mutex_create(mutex_list_mutex_key, &mutex_list_mutex,
SYNC_NO_ORDER_CHECK);
#ifdef UNIV_SYNC_DEBUG
@@ -1602,22 +1590,17 @@ sync_thread_level_arrays_free(void)
#endif /* UNIV_SYNC_DEBUG */
/******************************************************************//**
-Frees the resources in InnoDB's own synchronization data structures. Use
-os_sync_free() after calling this. */
+Frees the resources in InnoDB's own synchronization data structures. */
UNIV_INTERN
void
sync_close(void)
/*===========*/
{
ib_mutex_t* mutex;
- ib_prio_mutex_t* prio_mutex;
sync_array_close();
- for (prio_mutex = UT_LIST_GET_FIRST(prio_mutex_list); prio_mutex;) {
- mutex_free(prio_mutex);
- prio_mutex = UT_LIST_GET_FIRST(prio_mutex_list);
- }
+ mutex_free(&rw_lock_list_mutex);
for (mutex = UT_LIST_GET_FIRST(mutex_list);
mutex != NULL;
@@ -1635,7 +1618,6 @@ sync_close(void)
mutex = UT_LIST_GET_FIRST(mutex_list);
}
- mutex_free(&mutex_list_mutex);
#ifdef UNIV_SYNC_DEBUG
mutex_free(&sync_thread_mutex);
@@ -1646,6 +1628,8 @@ sync_close(void)
os_fast_mutex_free(&rw_lock_debug_mutex);
#endif /* UNIV_SYNC_DEBUG */
+ mutex_free(&mutex_list_mutex);
+
sync_initialized = FALSE;
}
diff --git a/storage/xtradb/trx/trx0i_s.cc b/storage/xtradb/trx/trx0i_s.cc
index 3230ac8308c..eacd9212d2f 100644
--- a/storage/xtradb/trx/trx0i_s.cc
+++ b/storage/xtradb/trx/trx0i_s.cc
@@ -1466,6 +1466,8 @@ trx_i_s_cache_free(
/*===============*/
trx_i_s_cache_t* cache) /*!< in, own: cache to free */
{
+ rw_lock_free(&cache->rw_lock);
+ mutex_free(&cache->last_read_mutex);
hash_table_free(cache->locks_hash);
ha_storage_free(cache->storage);
table_cache_free(&cache->innodb_trx);
diff --git a/storage/xtradb/trx/trx0roll.cc b/storage/xtradb/trx/trx0roll.cc
index 67f63d08303..4e8547c7d64 100644
--- a/storage/xtradb/trx/trx0roll.cc
+++ b/storage/xtradb/trx/trx0roll.cc
@@ -653,7 +653,7 @@ trx_rollback_active(
"in recovery",
table->name, trx->table_id);
- err = row_drop_table_for_mysql(table->name, trx, TRUE);
+ err = row_drop_table_for_mysql(table->name, trx, TRUE, FALSE);
trx_commit_for_mysql(trx);
ut_a(err == DB_SUCCESS);