-rwxr-xr-x  .travis.compiler.sh | 4
-rw-r--r--  .travis.yml | 24
-rw-r--r--  VERSION | 2
-rw-r--r--  client/mysql.cc | 1
-rw-r--r--  client/mysql_upgrade.c | 4
-rw-r--r--  client/mysqladmin.cc | 104
-rw-r--r--  client/mysqlbinlog.cc | 2
-rw-r--r--  client/mysqldump.c | 6
-rw-r--r--  client/mysqltest.cc | 5
-rw-r--r--  cmake/wsrep.cmake | 1
-rw-r--r--  config.h.cmake | 17
-rw-r--r--  extra/innochecksum.cc | 14
-rw-r--r--  extra/mariabackup/CMakeLists.txt | 96
-rw-r--r--  extra/mariabackup/backup_copy.cc | 2
-rw-r--r--  extra/mariabackup/changed_page_bitmap.cc | 2
-rw-r--r--  extra/mariabackup/crc/CMakeLists.txt | 2
-rw-r--r--  extra/mariabackup/crc/crc_glue.c | 2
-rw-r--r--  extra/mariabackup/fil_cur.h | 2
-rw-r--r--  extra/mariabackup/innobackupex.cc | 4
-rw-r--r--  extra/mariabackup/xb0xb.h | 10
-rw-r--r--  extra/mariabackup/xbstream.h | 1
-rw-r--r--  extra/mariabackup/xtrabackup.cc | 413
-rw-r--r--  extra/mariabackup/xtrabackup.h | 13
-rw-r--r--  extra/replace.c | 1
-rw-r--r--  include/my_sys.h | 6
-rw-r--r--  libmysqld/libmysql.c | 5
-rw-r--r--  mysql-test/include/innodb_page_size.combinations | 16
-rw-r--r--  mysql-test/include/innodb_page_size.inc | 4
-rw-r--r--  mysql-test/include/innodb_page_size_small.combinations | 8
-rw-r--r--  mysql-test/include/innodb_page_size_small.inc | 4
-rw-r--r--  mysql-test/include/varchar.inc | 1
-rw-r--r--  mysql-test/lib/mtr_report.pm | 4
-rwxr-xr-x  mysql-test/mysql-test-run.pl | 42
-rw-r--r--  mysql-test/r/alter_table_online.result | 35
-rw-r--r--  mysql-test/r/analyze_format_json.result | 33
-rw-r--r--  mysql-test/r/cte_nonrecursive.result | 2
-rw-r--r--  mysql-test/r/derived_view.result | 59
-rw-r--r--  mysql-test/r/func_regexp_pcre.result | 14
-rw-r--r--  mysql-test/r/innodb_ext_key.result | 77
-rw-r--r--  mysql-test/r/join_outer.result | 95
-rw-r--r--  mysql-test/r/join_outer_jcl6.result | 95
-rw-r--r--  mysql-test/r/limit_rows_examined.result | 5
-rw-r--r--  mysql-test/r/log_tables-big.result | 8
-rw-r--r--  mysql-test/r/mix2_myisam.result | 3
-rw-r--r--  mysql-test/r/mrr_icp_extra.result | 3
-rw-r--r--  mysql-test/r/myisam.result | 3
-rw-r--r--  mysql-test/r/myisam_debug.result | 12
-rw-r--r--  mysql-test/r/partition_alter.result | 47
-rw-r--r--  mysql-test/r/subselect_innodb.result | 39
-rw-r--r--  mysql-test/r/subselect_mat_cost_bugs.result | 43
-rw-r--r--  mysql-test/r/subselect_sj.result | 95
-rw-r--r--  mysql-test/r/subselect_sj2_mat.result | 23
-rw-r--r--  mysql-test/r/subselect_sj_jcl6.result | 95
-rw-r--r--  mysql-test/r/trigger.result | 10
-rw-r--r--  mysql-test/r/union.result | 29
-rw-r--r--  mysql-test/suite/binlog/r/mysqladmin.result | 12
-rw-r--r--  mysql-test/suite/binlog/t/mysqladmin.test | 12
-rw-r--r--  mysql-test/suite/csv/read_only.result | 30
-rw-r--r--  mysql-test/suite/csv/read_only.test | 19
-rw-r--r--  mysql-test/suite/encryption/disabled.def | 1
-rw-r--r--  mysql-test/suite/encryption/r/innodb-checksum-algorithm,32k.rdiff | 38
-rw-r--r--  mysql-test/suite/encryption/r/innodb-checksum-algorithm,64k.rdiff | 38
-rw-r--r--  mysql-test/suite/encryption/r/innodb-checksum-algorithm.result | 643
-rw-r--r--  mysql-test/suite/encryption/r/innodb-compressed-blob.result | 1
-rw-r--r--  mysql-test/suite/encryption/r/innodb-first-page-read.result | 89
-rw-r--r--  mysql-test/suite/encryption/r/innodb-key-rotation-disable.result | 1
-rw-r--r--  mysql-test/suite/encryption/r/innodb_encryption-page-compression.result | 2
-rw-r--r--  mysql-test/suite/encryption/r/innodb_lotoftables.result | 28
-rw-r--r--  mysql-test/suite/encryption/t/innodb-checksum-algorithm.test | 120
-rw-r--r--  mysql-test/suite/encryption/t/innodb-compressed-blob.combinations | 12
-rw-r--r--  mysql-test/suite/encryption/t/innodb-compressed-blob.opt | 4
-rw-r--r--  mysql-test/suite/encryption/t/innodb-compressed-blob.test | 3
-rw-r--r--  mysql-test/suite/encryption/t/innodb-first-page-read.opt | 5
-rw-r--r--  mysql-test/suite/encryption/t/innodb-first-page-read.test | 97
-rw-r--r--  mysql-test/suite/encryption/t/innodb-key-rotation-disable.test | 14
-rw-r--r--  mysql-test/suite/federated/assisted_discovery.result | 34
-rw-r--r--  mysql-test/suite/federated/assisted_discovery.test | 24
-rw-r--r--  mysql-test/suite/galera/galera_2nodes.cnf | 2
-rw-r--r--  mysql-test/suite/galera/r/MW-309.result | 22
-rw-r--r--  mysql-test/suite/galera/t/MW-309.test | 32
-rw-r--r--  mysql-test/suite/galera/t/galera_autoinc_sst_xtrabackup.cnf | 5
-rw-r--r--  mysql-test/suite/galera/t/galera_autoinc_sst_xtrabackup.test | 2
-rw-r--r--  mysql-test/suite/galera/t/galera_var_cluster_address.test | 1
-rw-r--r--  mysql-test/suite/galera_3nodes/disabled.def | 1
-rw-r--r--  mysql-test/suite/galera_3nodes/galera_3nodes.cnf | 9
-rw-r--r--  mysql-test/suite/innodb/include/innodb-page-compression.inc | 131
-rw-r--r--  mysql-test/suite/innodb/include/wait_all_purged.inc | 19
-rw-r--r--  mysql-test/suite/innodb/r/doublewrite.result | 12
-rw-r--r--  mysql-test/suite/innodb/r/drop_table_background.result | 9
-rw-r--r--  mysql-test/suite/innodb/r/innodb-page_compression_default.result | 118
-rw-r--r--  mysql-test/suite/innodb/r/innodb-page_compression_snappy.result | 491
-rw-r--r--  mysql-test/suite/innodb/r/innodb.result | 3
-rw-r--r--  mysql-test/suite/innodb/r/innodb_stats_del_mark.result | 91
-rw-r--r--  mysql-test/suite/innodb/r/innodb_stats_persistent.result | 116
-rw-r--r--  mysql-test/suite/innodb/r/row_format_redundant.result | 79
-rw-r--r--  mysql-test/suite/innodb/r/temporary_table.result | 4
-rw-r--r--  mysql-test/suite/innodb/t/101_compatibility.test | 2
-rw-r--r--  mysql-test/suite/innodb/t/alter_missing_tablespace.test | 2
-rw-r--r--  mysql-test/suite/innodb/t/doublewrite.test | 50
-rw-r--r--  mysql-test/suite/innodb/t/drop_table_background.test | 30
-rw-r--r--  mysql-test/suite/innodb/t/innodb-alter-debug.test | 2
-rw-r--r--  mysql-test/suite/innodb/t/innodb-alter-nullable.test | 2
-rw-r--r--  mysql-test/suite/innodb/t/innodb-alter-table.test | 2
-rw-r--r--  mysql-test/suite/innodb/t/innodb-alter-tempfile.test | 3
-rw-r--r--  mysql-test/suite/innodb/t/innodb-page_compression_default.test | 51
-rw-r--r--  mysql-test/suite/innodb/t/innodb-page_compression_snappy.test | 243
-rw-r--r--  mysql-test/suite/innodb/t/innodb_stats_del_mark-master.opt | 1
-rw-r--r--  mysql-test/suite/innodb/t/innodb_stats_del_mark.test | 113
-rw-r--r--  mysql-test/suite/innodb/t/innodb_stats_persistent.test | 95
-rw-r--r--  mysql-test/suite/innodb/t/log_data_file_size.test | 2
-rw-r--r--  mysql-test/suite/innodb/t/row_format_redundant.test | 157
-rw-r--r--  mysql-test/suite/innodb/t/temporary_table.test | 7
-rw-r--r--  mysql-test/suite/innodb/t/truncate_purge_debug.test | 19
-rw-r--r--  mysql-test/suite/innodb_fts/r/innodb_fts_multiple_index.result | 12
-rw-r--r--  mysql-test/suite/innodb_zip/include/have_innodb_zip.inc | 4
-rw-r--r--  mysql-test/suite/innodb_zip/r/wl6344_compress_level.result | 60
-rw-r--r--  mysql-test/suite/innodb_zip/t/bug36169.test | 3
-rw-r--r--  mysql-test/suite/innodb_zip/t/bug52745.test | 3
-rw-r--r--  mysql-test/suite/innodb_zip/t/bug53591.test | 3
-rw-r--r--  mysql-test/suite/innodb_zip/t/bug56680.test | 3
-rw-r--r--  mysql-test/suite/innodb_zip/t/create_options.test | 3
-rw-r--r--  mysql-test/suite/innodb_zip/t/innochecksum.test | 3
-rw-r--r--  mysql-test/suite/innodb_zip/t/innochecksum_2.test | 3
-rw-r--r--  mysql-test/suite/innodb_zip/t/innochecksum_3.test | 3
-rw-r--r--  mysql-test/suite/innodb_zip/t/innodb-zip.test | 4
-rw-r--r--  mysql-test/suite/innodb_zip/t/innodb_bug36169.test | 2
-rw-r--r--  mysql-test/suite/innodb_zip/t/innodb_bug36172.test | 2
-rw-r--r--  mysql-test/suite/innodb_zip/t/innodb_bug52745.test | 2
-rw-r--r--  mysql-test/suite/innodb_zip/t/innodb_bug53591.test | 2
-rw-r--r--  mysql-test/suite/innodb_zip/t/innodb_bug56680.test | 2
-rw-r--r--  mysql-test/suite/innodb_zip/t/recover.test | 7
-rw-r--r--  mysql-test/suite/innodb_zip/t/restart.test | 4
-rw-r--r--  mysql-test/suite/innodb_zip/t/wl5522_debug_zip.test | 3
-rw-r--r--  mysql-test/suite/innodb_zip/t/wl5522_zip.test | 3
-rw-r--r--  mysql-test/suite/innodb_zip/t/wl6344_compress_level.test | 63
-rw-r--r--  mysql-test/suite/maria/maria.result | 3
-rw-r--r--  mysql-test/suite/mariabackup/include/restart_and_restore.inc | 2
-rw-r--r--  mysql-test/suite/mariabackup/suite.pm | 5
-rw-r--r--  mysql-test/suite/mariabackup/tar.result | 12
-rw-r--r--  mysql-test/suite/mariabackup/tar.test | 30
-rw-r--r--  mysql-test/suite/mariabackup/xbstream.test | 2
-rw-r--r--  mysql-test/suite/multi_source/mdev-9544.cnf | 22
-rw-r--r--  mysql-test/suite/multi_source/mdev-9544.result | 90
-rw-r--r--  mysql-test/suite/multi_source/mdev-9544.test | 116
-rw-r--r--  mysql-test/suite/perfschema/r/start_server_1_digest.result | 7
-rw-r--r--  mysql-test/suite/perfschema/t/start_server_1_digest-master.opt | 1
-rw-r--r--  mysql-test/suite/perfschema/t/start_server_1_digest.test | 15
-rw-r--r--  mysql-test/suite/rpl/r/rpl_mdev-11092.result | 21
-rw-r--r--  mysql-test/suite/rpl/t/rpl_mdev-11092.opt | 1
-rw-r--r--  mysql-test/suite/rpl/t/rpl_mdev-11092.test | 53
-rw-r--r--  mysql-test/suite/storage_engine/alter_table.result | 2
-rw-r--r--  mysql-test/suite/storage_engine/alter_tablespace.result | 2
-rw-r--r--  mysql-test/suite/storage_engine/create_table.result | 2
-rw-r--r--  mysql-test/suite/storage_engine/disabled.def | 12
-rw-r--r--  mysql-test/suite/storage_engine/insert_delayed.test | 2
-rw-r--r--  mysql-test/suite/storage_engine/repair_table.inc | 3
-rw-r--r--  mysql-test/suite/storage_engine/repair_table.result | 3
-rw-r--r--  mysql-test/suite/storage_engine/tbl_opt_data_dir.result (renamed from mysql-test/suite/storage_engine/tbl_opt_data_index_dir.result) | 9
-rw-r--r--  mysql-test/suite/storage_engine/tbl_opt_data_dir.test | 52
-rw-r--r--  mysql-test/suite/storage_engine/tbl_opt_data_index_dir.test | 52
-rw-r--r--  mysql-test/suite/storage_engine/tbl_opt_index_dir.result | 19
-rw-r--r--  mysql-test/suite/storage_engine/tbl_opt_index_dir.test | 52
-rw-r--r--  mysql-test/suite/storage_engine/tbl_opt_row_format.result | 20
-rw-r--r--  mysql-test/suite/storage_engine/tbl_opt_row_format.test | 28
-rw-r--r--  mysql-test/suite/storage_engine/type_char_indexes.result | 2
-rw-r--r--  mysql-test/suite/storage_engine/type_date_time.result | 2
-rw-r--r--  mysql-test/suite/storage_engine/vcol.result | 24
-rw-r--r--  mysql-test/suite/sys_vars/r/delay_key_write_func.result | 42
-rw-r--r--  mysql-test/suite/sys_vars/r/innodb_sched_priority_cleaner_basic.result | 2
-rw-r--r--  mysql-test/suite/sys_vars/r/sysvars_innodb,32bit,xtradb.rdiff-disabled | 4
-rw-r--r--  mysql-test/suite/sys_vars/r/sysvars_innodb,xtradb.rdiff-disabled | 4
-rw-r--r--  mysql-test/suite/sys_vars/r/sysvars_innodb.result | 2
-rw-r--r--  mysql-test/suite/sys_vars/t/delay_key_write_func.test | 42
-rw-r--r--  mysql-test/suite/sys_vars/t/innodb_sched_priority_cleaner_basic.test | 10
-rw-r--r--  mysql-test/suite/wsrep/include/check_galera_version.inc | 26
-rw-r--r--  mysql-test/t/alter_table_online.test | 24
-rw-r--r--  mysql-test/t/derived_view.test | 50
-rw-r--r--  mysql-test/t/func_regexp_pcre.test | 11
-rw-r--r--  mysql-test/t/innodb_ext_key.test | 45
-rw-r--r--  mysql-test/t/join_outer.test | 82
-rw-r--r--  mysql-test/t/log_tables-big-master.opt | 2
-rw-r--r--  mysql-test/t/myisam_debug.test | 13
-rw-r--r--  mysql-test/t/mysqld--help.test | 1
-rw-r--r--  mysql-test/t/partition_alter.test | 39
-rw-r--r--  mysql-test/t/subselect_innodb.test | 35
-rw-r--r--  mysql-test/t/subselect_mat_cost_bugs.test | 44
-rw-r--r--  mysql-test/t/subselect_sj.test | 72
-rw-r--r--  mysql-test/t/subselect_sj2_mat.test | 20
-rw-r--r--  mysql-test/t/trigger.test | 24
-rw-r--r--  mysql-test/t/union.test | 25
-rw-r--r--  mysys/lf_alloc-pin.c | 6
-rw-r--r--  mysys/lf_hash.c | 3
-rw-r--r--  mysys/ma_dyncol.c | 2
-rw-r--r--  mysys/waiting_threads.c | 2
-rw-r--r--  plugin/auth_pam/auth_pam.c | 3
-rw-r--r--  plugin/aws_key_management/aws_key_management_plugin.cc | 2
-rw-r--r--  plugin/cracklib_password_check/cracklib_password_check.c | 1
-rw-r--r--  plugin/feedback/sender_thread.cc | 2
-rw-r--r--  plugin/server_audit/server_audit.c | 5
-rwxr-xr-x  scripts/galera_new_cluster.sh | 2
-rw-r--r--  sql-common/client_plugin.c | 11
-rw-r--r--  sql/CMakeLists.txt | 15
-rw-r--r--  sql/discover.cc | 3
-rw-r--r--  sql/events.cc | 2
-rw-r--r--  sql/filesort.cc | 1
-rw-r--r--  sql/handler.cc | 16
-rw-r--r--  sql/item.cc | 5
-rw-r--r--  sql/item_cmpfunc.cc | 83
-rw-r--r--  sql/item_cmpfunc.h | 14
-rw-r--r--  sql/item_func.cc | 5
-rw-r--r--  sql/item_func.h | 31
-rw-r--r--  sql/item_strfunc.cc | 12
-rw-r--r--  sql/item_subselect.cc | 29
-rw-r--r--  sql/item_subselect.h | 8
-rw-r--r--  sql/item_sum.h | 1
-rw-r--r--  sql/lock.cc | 3
-rw-r--r--  sql/log.cc | 9
-rw-r--r--  sql/log_event.cc | 1
-rw-r--r--  sql/log_event_old.cc | 2
-rw-r--r--  sql/mysqld.cc | 9
-rw-r--r--  sql/mysqld.h | 4
-rw-r--r--  sql/opt_range.cc | 7
-rw-r--r--  sql/opt_subselect.cc | 109
-rw-r--r--  sql/opt_sum.cc | 12
-rw-r--r--  sql/rpl_gtid.cc | 2
-rw-r--r--  sql/rpl_mi.cc | 57
-rw-r--r--  sql/rpl_mi.h | 2
-rw-r--r--  sql/slave.cc | 5
-rw-r--r--  sql/sp_head.cc | 2
-rw-r--r--  sql/sql_acl.cc | 6
-rw-r--r--  sql/sql_audit.cc | 9
-rw-r--r--  sql/sql_class.cc | 1
-rw-r--r--  sql/sql_class.h | 2
-rw-r--r--  sql/sql_digest.cc | 4
-rw-r--r--  sql/sql_lex.cc | 6
-rw-r--r--  sql/sql_lex.h | 5
-rw-r--r--  sql/sql_parse.cc | 16
-rw-r--r--  sql/sql_plugin.cc | 5
-rw-r--r--  sql/sql_prepare.cc | 6
-rw-r--r--  sql/sql_reload.cc | 8
-rw-r--r--  sql/sql_repl.cc | 6
-rw-r--r--  sql/sql_select.cc | 27
-rw-r--r--  sql/sql_show.cc | 3
-rw-r--r--  sql/sql_table.cc | 11
-rw-r--r--  sql/sql_yacc.yy | 2
-rw-r--r--  sql/sys_vars.cc | 3
-rw-r--r--  sql/table.cc | 39
-rw-r--r--  sql/wsrep_thd.cc | 3
-rw-r--r--  storage/connect/CMakeLists.txt | 10
-rw-r--r--  storage/connect/array.cpp | 26
-rw-r--r--  storage/connect/array.h | 4
-rw-r--r--  storage/connect/blkfil.cpp | 16
-rw-r--r--  storage/connect/blkfil.h | 8
-rw-r--r--  storage/connect/block.h | 4
-rw-r--r--  storage/connect/catalog.h | 11
-rw-r--r--  storage/connect/colblk.cpp | 26
-rw-r--r--  storage/connect/colblk.h | 12
-rw-r--r--  storage/connect/connect.cc | 601
-rw-r--r--  storage/connect/csort.h | 4
-rw-r--r--  storage/connect/domdoc.cpp | 20
-rw-r--r--  storage/connect/domdoc.h | 8
-rw-r--r--  storage/connect/filamap.cpp | 21
-rw-r--r--  storage/connect/filamdbf.cpp | 10
-rw-r--r--  storage/connect/filamdbf.h | 4
-rw-r--r--  storage/connect/filamfix.cpp | 6
-rw-r--r--  storage/connect/filamgz.cpp | 4
-rw-r--r--  storage/connect/filamtxt.cpp | 36
-rw-r--r--  storage/connect/filamtxt.h | 2
-rwxr-xr-x  storage/connect/filamvct.cpp | 110
-rw-r--r--  storage/connect/filamvct.h | 6
-rw-r--r--  storage/connect/filamzip.cpp | 164
-rw-r--r--  storage/connect/filamzip.h | 47
-rw-r--r--  storage/connect/filter.cpp | 25
-rw-r--r--  storage/connect/filter.h | 4
-rw-r--r--  storage/connect/global.h | 9
-rw-r--r--  storage/connect/ha_connect.cc | 1450
-rw-r--r--  storage/connect/ha_connect.h | 34
-rw-r--r--  storage/connect/jdbccat.h | 20
-rw-r--r--  storage/connect/jdbconn.cpp | 39
-rw-r--r--  storage/connect/jdbconn.h | 30
-rw-r--r--  storage/connect/json.cpp | 339
-rw-r--r--  storage/connect/json.h | 20
-rw-r--r--  storage/connect/jsonudf.cpp | 387
-rw-r--r--  storage/connect/jsonudf.h | 2
-rw-r--r--  storage/connect/libdoc.cpp | 27
-rw-r--r--  storage/connect/macutil.cpp | 2
-rw-r--r--  storage/connect/mycat.cc | 35
-rw-r--r--  storage/connect/mycat.h | 5
-rw-r--r--  storage/connect/myconn.cpp | 48
-rw-r--r--  storage/connect/mysql-test/connect/disabled.def | 4
-rw-r--r--  storage/connect/mysql-test/connect/r/jdbc_new.result | 4
-rw-r--r--  storage/connect/mysql-test/connect/t/jdbc_new.test | 4
-rw-r--r--  storage/connect/myutil.cpp | 21
-rw-r--r--  storage/connect/myutil.h | 8
-rw-r--r--  storage/connect/odbccat.h | 18
-rw-r--r--  storage/connect/odbconn.cpp | 114
-rw-r--r--  storage/connect/odbconn.h | 32
-rw-r--r--  storage/connect/os.h | 3
-rw-r--r--  storage/connect/osutil.c | 52
-rw-r--r--  storage/connect/plgdbsem.h | 19
-rw-r--r--  storage/connect/plgdbutl.cpp | 196
-rw-r--r--  storage/connect/plgxml.cpp | 4
-rw-r--r--  storage/connect/plgxml.h | 12
-rw-r--r--  storage/connect/plugutil.cpp (renamed from storage/connect/plugutil.c) | 39
-rw-r--r--  storage/connect/preparse.h | 4
-rw-r--r--  storage/connect/reldef.cpp | 45
-rw-r--r--  storage/connect/reldef.h | 18
-rw-r--r--  storage/connect/tabcol.cpp | 8
-rw-r--r--  storage/connect/tabcol.h | 8
-rw-r--r--  storage/connect/tabdos.cpp | 95
-rw-r--r--  storage/connect/tabdos.h | 23
-rw-r--r--  storage/connect/tabext.cpp | 103
-rw-r--r--  storage/connect/tabext.h | 38
-rw-r--r--  storage/connect/tabfix.cpp | 42
-rw-r--r--  storage/connect/tabfix.h | 4
-rw-r--r--  storage/connect/tabfmt.cpp | 24
-rw-r--r--  storage/connect/tabfmt.h | 6
-rw-r--r--  storage/connect/tabjdbc.cpp | 202
-rw-r--r--  storage/connect/tabjdbc.h | 60
-rw-r--r--  storage/connect/tabjson.cpp | 147
-rw-r--r--  storage/connect/tabjson.h | 55
-rw-r--r--  storage/connect/table.cpp | 10
-rw-r--r--  storage/connect/tabmac.cpp | 2
-rw-r--r--  storage/connect/tabmul.cpp | 695
-rw-r--r--  storage/connect/tabmul.h | 66
-rw-r--r--  storage/connect/tabmysql.cpp | 150
-rw-r--r--  storage/connect/tabmysql.h | 24
-rw-r--r--  storage/connect/tabodbc.cpp | 469
-rw-r--r--  storage/connect/tabodbc.h | 70
-rw-r--r--  storage/connect/tabpivot.cpp | 389
-rw-r--r--  storage/connect/tabpivot.h | 18
-rw-r--r--  storage/connect/tabsys.cpp | 44
-rw-r--r--  storage/connect/tabsys.h | 10
-rw-r--r--  storage/connect/tabutil.cpp | 18
-rw-r--r--  storage/connect/tabutil.h | 4
-rw-r--r--  storage/connect/tabvct.cpp | 18
-rw-r--r--  storage/connect/tabvir.cpp | 10
-rw-r--r--  storage/connect/tabvir.h | 2
-rw-r--r--  storage/connect/tabwmi.cpp | 4
-rw-r--r--  storage/connect/tabwmi.h | 2
-rw-r--r--  storage/connect/tabxml.cpp | 79
-rw-r--r--  storage/connect/tabxml.h | 14
-rw-r--r--  storage/connect/tabzip.cpp | 4
-rw-r--r--  storage/connect/tabzip.h | 6
-rw-r--r--  storage/connect/valblk.cpp | 60
-rw-r--r--  storage/connect/valblk.h | 24
-rw-r--r--  storage/connect/value.cpp | 325
-rw-r--r--  storage/connect/value.h | 51
-rwxr-xr-x  storage/connect/xindex.cpp | 30
-rw-r--r--  storage/connect/xindex.h | 4
-rw-r--r--  storage/connect/xobject.cpp | 35
-rw-r--r--  storage/connect/xobject.h | 14
-rw-r--r--  storage/connect/xtable.h | 74
-rw-r--r--  storage/csv/ha_tina.cc | 30
-rw-r--r--  storage/federated/ha_federated.cc | 2
-rw-r--r--  storage/federatedx/ha_federatedx.h | 2
-rw-r--r--  storage/heap/hp_create.c | 4
-rw-r--r--  storage/innobase/btr/btr0btr.cc | 37
-rw-r--r--  storage/innobase/btr/btr0cur.cc | 55
-rw-r--r--  storage/innobase/btr/btr0defragment.cc | 17
-rw-r--r--  storage/innobase/buf/buf0buf.cc | 166
-rw-r--r--  storage/innobase/buf/buf0dblwr.cc | 65
-rw-r--r--  storage/innobase/buf/buf0dump.cc | 6
-rw-r--r--  storage/innobase/buf/buf0flu.cc | 17
-rw-r--r--  storage/innobase/buf/buf0rea.cc | 15
-rw-r--r--  storage/innobase/data/data0type.cc | 1
-rw-r--r--  storage/innobase/dict/dict0crea.cc | 7
-rw-r--r--  storage/innobase/dict/dict0dict.cc | 5
-rw-r--r--  storage/innobase/dict/dict0load.cc | 89
-rw-r--r--  storage/innobase/dict/dict0stats.cc | 37
-rw-r--r--  storage/innobase/dict/dict0stats_bg.cc | 53
-rw-r--r--  storage/innobase/fil/fil0crypt.cc | 98
-rw-r--r--  storage/innobase/fil/fil0fil.cc | 117
-rw-r--r--  storage/innobase/fil/fil0pagecompress.cc | 56
-rw-r--r--  storage/innobase/fsp/fsp0fsp.cc | 75
-rw-r--r--  storage/innobase/fsp/fsp0space.cc | 3
-rw-r--r--  storage/innobase/fsp/fsp0sysspace.cc | 15
-rw-r--r--  storage/innobase/fts/fts0fts.cc | 121
-rw-r--r--  storage/innobase/fts/fts0plugin.cc | 1
-rw-r--r--  storage/innobase/fts/fts0que.cc | 5
-rw-r--r--  storage/innobase/handler/ha_innodb.cc | 80
-rw-r--r--  storage/innobase/handler/ha_innodb.h | 15
-rw-r--r--  storage/innobase/handler/handler0alter.cc | 19
-rw-r--r--  storage/innobase/handler/i_s.cc | 242
-rw-r--r--  storage/innobase/handler/i_s.h | 4
-rw-r--r--  storage/innobase/ibuf/ibuf0ibuf.cc | 2
-rw-r--r--  storage/innobase/include/btr0cur.h | 25
-rw-r--r--  storage/innobase/include/btr0defragment.h | 16
-rw-r--r--  storage/innobase/include/buf0buf.h | 7
-rw-r--r--  storage/innobase/include/buf0buf.ic | 3
-rw-r--r--  storage/innobase/include/buf0dblwr.h | 19
-rw-r--r--  storage/innobase/include/data0type.ic | 3
-rw-r--r--  storage/innobase/include/dict0mem.h | 9
-rw-r--r--  storage/innobase/include/dict0stats.h | 7
-rw-r--r--  storage/innobase/include/dict0stats_bg.h | 13
-rw-r--r--  storage/innobase/include/fil0fil.h | 19
-rw-r--r--  storage/innobase/include/fil0pagecompress.h | 3
-rw-r--r--  storage/innobase/include/fsp0file.h | 8
-rw-r--r--  storage/innobase/include/fsp0fsp.h | 14
-rw-r--r--  storage/innobase/include/fts0priv.h | 7
-rw-r--r--  storage/innobase/include/fts0types.h | 4
-rw-r--r--  storage/innobase/include/fts0types.ic | 22
-rw-r--r--  storage/innobase/include/lock0lock.h | 26
-rw-r--r--  storage/innobase/include/log0log.h | 46
-rw-r--r--  storage/innobase/include/log0recv.h | 7
-rw-r--r--  storage/innobase/include/mach0data.ic | 14
-rw-r--r--  storage/innobase/include/os0file.h | 106
-rw-r--r--  storage/innobase/include/os0file.ic | 131
-rw-r--r--  storage/innobase/include/os0thread.h | 13
-rw-r--r--  storage/innobase/include/page0zip.ic | 3
-rw-r--r--  storage/innobase/include/rem0cmp.h | 3
-rw-r--r--  storage/innobase/include/row0merge.h | 14
-rw-r--r--  storage/innobase/include/row0mysql.h | 1
-rw-r--r--  storage/innobase/include/srv0srv.h | 2
-rw-r--r--  storage/innobase/include/srv0start.h | 54
-rw-r--r--  storage/innobase/include/sync0types.h | 6
-rw-r--r--  storage/innobase/include/trx0rec.h | 18
-rw-r--r--  storage/innobase/include/trx0rseg.h | 7
-rw-r--r--  storage/innobase/include/trx0trx.h | 2
-rw-r--r--  storage/innobase/include/trx0xa.h | 7
-rw-r--r--  storage/innobase/include/univ.i | 15
-rw-r--r--  storage/innobase/include/ut0new.h | 51
-rw-r--r--  storage/innobase/include/ut0rnd.ic | 7
-rw-r--r--  storage/innobase/innodb.cmake | 6
-rw-r--r--  storage/innobase/lock/lock0lock.cc | 21
-rw-r--r--  storage/innobase/log/log0crypt.cc | 22
-rw-r--r--  storage/innobase/log/log0log.cc | 138
-rw-r--r--  storage/innobase/log/log0recv.cc | 210
-rw-r--r--  storage/innobase/mysql-test/storage_engine/disabled.def | 4
-rw-r--r--  storage/innobase/mysql-test/storage_engine/repair_table.rdiff | 2
-rw-r--r--  storage/innobase/mysql-test/storage_engine/suite.opt | 2
-rw-r--r--  storage/innobase/mysql-test/storage_engine/tbl_opt_index_dir.rdiff (renamed from storage/innobase/mysql-test/storage_engine/tbl_opt_data_index_dir.rdiff) | 0
-rw-r--r--  storage/innobase/mysql-test/storage_engine/tbl_opt_row_format.rdiff | 48
-rw-r--r--  storage/innobase/mysql-test/storage_engine/type_spatial_indexes.rdiff | 712
-rw-r--r--  storage/innobase/os/os0file.cc | 43
-rw-r--r--  storage/innobase/os/os0thread.cc | 54
-rw-r--r--  storage/innobase/que/que0que.cc | 3
-rw-r--r--  storage/innobase/rem/rem0rec.cc | 6
-rw-r--r--  storage/innobase/row/row0ftsort.cc | 7
-rw-r--r--  storage/innobase/row/row0import.cc | 1
-rw-r--r--  storage/innobase/row/row0ins.cc | 12
-rw-r--r--  storage/innobase/row/row0log.cc | 64
-rw-r--r--  storage/innobase/row/row0merge.cc | 69
-rw-r--r--  storage/innobase/row/row0mysql.cc | 104
-rw-r--r--  storage/innobase/row/row0purge.cc | 13
-rw-r--r--  storage/innobase/row/row0sel.cc | 87
-rw-r--r--  storage/innobase/row/row0uins.cc | 18
-rw-r--r--  storage/innobase/row/row0umod.cc | 34
-rw-r--r--  storage/innobase/row/row0upd.cc | 16
-rw-r--r--  storage/innobase/srv/srv0srv.cc | 394
-rw-r--r--  storage/innobase/srv/srv0start.cc | 277
-rw-r--r--  storage/innobase/sync/sync0debug.cc | 2
-rw-r--r--  storage/innobase/trx/trx0purge.cc | 26
-rw-r--r--  storage/innobase/trx/trx0rec.cc | 53
-rw-r--r--  storage/innobase/trx/trx0roll.cc | 1
-rw-r--r--  storage/innobase/trx/trx0rseg.cc | 4
-rw-r--r--  storage/innobase/trx/trx0sys.cc | 5
-rw-r--r--  storage/innobase/usr/usr0sess.cc | 1
-rw-r--r--  storage/innobase/ut/ut0ut.cc | 11
-rw-r--r--  storage/maria/ha_maria.cc | 8
-rw-r--r--  storage/maria/ma_check.c | 4
-rw-r--r--  storage/maria/ma_extra.c | 3
-rw-r--r--  storage/maria/ma_loghandler.c | 53
-rw-r--r--  storage/maria/ma_packrec.c | 2
-rw-r--r--  storage/maria/ma_pagecache.c | 16
-rw-r--r--  storage/maria/ma_recovery.c | 5
-rw-r--r--  storage/myisam/ha_myisam.cc | 25
-rw-r--r--  storage/myisam/mi_check.c | 3
-rw-r--r--  storage/myisam/mi_extra.c | 3
-rw-r--r--  storage/myisam/mi_locking.c | 20
-rw-r--r--  storage/myisam/mi_open.c | 2
-rw-r--r--  storage/myisam/mysql-test/storage_engine/alter_tablespace.rdiff | 2
-rw-r--r--  storage/myisammrg/mysql-test/storage_engine/alter_tablespace.rdiff | 2
-rw-r--r--  storage/myisammrg/mysql-test/storage_engine/create_table.rdiff | 2
-rw-r--r--  storage/myisammrg/mysql-test/storage_engine/disabled.def | 3
-rw-r--r--  storage/myisammrg/mysql-test/storage_engine/parts/repair_table.rdiff | 7
-rw-r--r--  storage/myisammrg/mysql-test/storage_engine/repair_table.rdiff | 8
-rw-r--r--  storage/myisammrg/mysql-test/storage_engine/tbl_opt_data_dir.rdiff (renamed from storage/myisammrg/mysql-test/storage_engine/tbl_opt_data_index_dir.rdiff) | 16
-rw-r--r--  storage/myisammrg/mysql-test/storage_engine/tbl_opt_index_dir.rdiff | 18
-rw-r--r--  storage/myisammrg/mysql-test/storage_engine/tbl_opt_row_format.rdiff | 28
-rw-r--r--  storage/myisammrg/mysql-test/storage_engine/vcol.rdiff | 30
-rw-r--r--  storage/perfschema/ha_perfschema.cc | 2
-rw-r--r--  storage/perfschema/pfs.cc | 3
-rw-r--r--  storage/perfschema/pfs_digest.cc | 5
-rw-r--r--  storage/perfschema/unittest/pfs-t.cc | 46
-rw-r--r--  storage/perfschema/unittest/pfs_account-oom-t.cc | 4
-rw-r--r--  storage/perfschema/unittest/pfs_connect_attr-t.cc | 4
-rw-r--r--  storage/perfschema/unittest/pfs_host-oom-t.cc | 4
-rw-r--r--  storage/perfschema/unittest/pfs_instr-oom-t.cc | 29
-rw-r--r--  storage/perfschema/unittest/pfs_instr-t.cc | 9
-rw-r--r--  storage/perfschema/unittest/pfs_instr_class-oom-t.cc | 4
-rw-r--r--  storage/perfschema/unittest/pfs_instr_class-t.cc | 4
-rw-r--r--  storage/perfschema/unittest/pfs_misc-t.cc | 4
-rw-r--r--  storage/perfschema/unittest/pfs_timer-t.cc | 4
-rw-r--r--  storage/perfschema/unittest/pfs_user-oom-t.cc | 4
-rw-r--r--  storage/rocksdb/rdb_datadic.h | 2
-rw-r--r--  storage/sphinx/ha_sphinx.cc | 7
-rw-r--r--  storage/spider/spd_db_conn.cc | 2
-rw-r--r--  storage/spider/spd_table.cc | 1
-rw-r--r--  storage/tokudb/CMakeLists.txt | 5
-rw-r--r--  storage/tokudb/PerconaFT/buildheader/make_tdb.cc | 3
-rw-r--r--  storage/tokudb/PerconaFT/ft/ft-ops.cc | 393
-rw-r--r--  storage/tokudb/PerconaFT/src/ydb.cc | 162
-rw-r--r--  storage/tokudb/ha_tokudb.cc | 28
-rw-r--r--  storage/tokudb/mysql-test/tokudb/r/bug-1657908.result | 70
-rw-r--r--  storage/tokudb/mysql-test/tokudb/r/dir_cmd.result | 58
-rw-r--r--  storage/tokudb/mysql-test/tokudb/t/background_job_manager.opt | 1
-rw-r--r--  storage/tokudb/mysql-test/tokudb/t/background_job_manager.test | 1
-rw-r--r--  storage/tokudb/mysql-test/tokudb/t/bug-1657908.test | 73
-rw-r--r--  storage/tokudb/mysql-test/tokudb/t/card_auto_analyze_lots.test | 1
-rw-r--r--  storage/tokudb/mysql-test/tokudb/t/dir_cmd.test | 52
-rw-r--r--  storage/tokudb/mysql-test/tokudb/t/i_s_tokudb_lock_waits_timeout.test | 1
-rw-r--r--  storage/tokudb/mysql-test/tokudb/t/i_s_tokudb_locks.test | 1
-rw-r--r--  storage/tokudb/mysql-test/tokudb/t/i_s_tokudb_trx.test | 1
-rw-r--r--  storage/tokudb/mysql-test/tokudb_backup/r/backup_master_info.result | 26
-rw-r--r--  storage/tokudb/mysql-test/tokudb_backup/r/backup_master_state.result | 36
-rw-r--r--  storage/tokudb/mysql-test/tokudb_backup/r/empty_slave_info_file.result | 1
-rw-r--r--  storage/tokudb/mysql-test/tokudb_backup/r/innodb_use_native_aio_enabled.result | 5
-rw-r--r--  storage/tokudb/mysql-test/tokudb_backup/r/rpl_safe_slave.result | 77
-rw-r--r--  storage/tokudb/mysql-test/tokudb_backup/r/rpl_tokudb_commit_sync.result | 59
-rw-r--r--  storage/tokudb/mysql-test/tokudb_backup/t/backup_master_info.test | 94
-rw-r--r--  storage/tokudb/mysql-test/tokudb_backup/t/backup_master_state.test | 87
-rw-r--r--  storage/tokudb/mysql-test/tokudb_backup/t/empty_slave_info_file.test | 23
-rw-r--r--  storage/tokudb/mysql-test/tokudb_backup/t/innodb_use_native_aio_enabled-master.opt | 1
-rw-r--r--  storage/tokudb/mysql-test/tokudb_backup/t/innodb_use_native_aio_enabled.test | 19
-rw-r--r--  storage/tokudb/mysql-test/tokudb_backup/t/rpl_safe_slave-master.opt | 1
-rw-r--r--  storage/tokudb/mysql-test/tokudb_backup/t/rpl_safe_slave-slave.opt | 1
-rw-r--r--  storage/tokudb/mysql-test/tokudb_backup/t/rpl_safe_slave.cnf | 14
-rw-r--r--  storage/tokudb/mysql-test/tokudb_backup/t/rpl_safe_slave.inc | 112
-rw-r--r--  storage/tokudb/mysql-test/tokudb_backup/t/rpl_safe_slave.test | 49
-rw-r--r--  storage/tokudb/mysql-test/tokudb_backup/t/rpl_tokudb_commit_sync-slave.opt | 1
-rw-r--r--  storage/tokudb/mysql-test/tokudb_backup/t/rpl_tokudb_commit_sync.test | 72
-rw-r--r--  storage/tokudb/mysql-test/tokudb_backup/t/suite.opt | 2
-rw-r--r--  storage/tokudb/mysql-test/tokudb_bugs/r/db233.result | 10
-rw-r--r--  storage/tokudb/mysql-test/tokudb_bugs/t/db233.test | 18
-rw-r--r--  storage/tokudb/mysql-test/tokudb_bugs/t/db397_delete_trigger.test | 1
-rw-r--r--  storage/tokudb/mysql-test/tokudb_bugs/t/db397_insert_trigger.test | 1
-rw-r--r--  storage/tokudb/mysql-test/tokudb_bugs/t/db397_update_trigger.test | 1
-rw-r--r--  storage/tokudb/mysql-test/tokudb_bugs/t/db938.test | 1
-rw-r--r--  storage/tokudb/mysql-test/tokudb_bugs/t/db945.test | 1
-rw-r--r--  storage/tokudb/mysql-test/tokudb_bugs/t/leak172.test | 2
-rw-r--r--  storage/tokudb/mysql-test/tokudb_bugs/t/tokudb718.test | 1
-rw-r--r--  storage/tokudb/tokudb_dir_cmd.cc | 331
-rw-r--r--  storage/tokudb/tokudb_dir_cmd.h | 46
-rw-r--r--  storage/tokudb/tokudb_sysvars.cc | 7
-rw-r--r--  storage/xtradb/btr/btr0btr.cc | 47
-rw-r--r--  storage/xtradb/btr/btr0cur.cc | 72
-rw-r--r--  storage/xtradb/btr/btr0defragment.cc | 18
-rw-r--r--  storage/xtradb/btr/btr0sea.cc | 2
-rw-r--r--  storage/xtradb/buf/buf0buf.cc | 208
-rw-r--r--  storage/xtradb/buf/buf0dblwr.cc | 80
-rw-r--r--  storage/xtradb/buf/buf0dump.cc | 5
-rw-r--r--  storage/xtradb/buf/buf0flu.cc | 50
-rw-r--r--  storage/xtradb/dict/dict0dict.cc | 6
-rw-r--r--  storage/xtradb/dict/dict0mem.cc | 4
-rw-r--r--  storage/xtradb/dict/dict0stats.cc | 5
-rw-r--r--  storage/xtradb/dict/dict0stats_bg.cc | 35
-rw-r--r--  storage/xtradb/fil/fil0crypt.cc | 83
-rw-r--r--  storage/xtradb/fil/fil0fil.cc | 408
-rw-r--r--  storage/xtradb/fil/fil0pagecompress.cc | 84
-rw-r--r--  storage/xtradb/fsp/fsp0fsp.cc | 51
-rw-r--r--  storage/xtradb/fts/fts0que.cc | 20
-rw-r--r--  storage/xtradb/handler/ha_innodb.cc | 235
-rw-r--r--  storage/xtradb/handler/ha_innodb.h | 2
-rw-r--r--  storage/xtradb/handler/handler0alter.cc | 12
-rw-r--r--  storage/xtradb/handler/i_s.cc | 287
-rw-r--r--  storage/xtradb/handler/i_s.h | 4
-rw-r--r--  storage/xtradb/ibuf/ibuf0ibuf.cc | 5
-rw-r--r--  storage/xtradb/include/btr0cur.h | 25
-rw-r--r--  storage/xtradb/include/btr0defragment.h | 15
-rw-r--r--  storage/xtradb/include/buf0buf.h | 7
-rw-r--r--  storage/xtradb/include/buf0dblwr.h | 20
-rw-r--r--  storage/xtradb/include/buf0flu.h | 2
-rw-r--r--  storage/xtradb/include/data0type.ic | 3
-rw-r--r--  storage/xtradb/include/dict0dict.h | 4
-rw-r--r--  storage/xtradb/include/dict0stats_bg.h | 6
-rw-r--r--  storage/xtradb/include/fil0fil.h | 72
-rw-r--r--  storage/xtradb/include/fil0pagecompress.h | 3
-rw-r--r--  storage/xtradb/include/fsp0fsp.h | 14
-rw-r--r--  storage/xtradb/include/ha0ha.h | 6
-rw-r--r--  storage/xtradb/include/ha_prototypes.h | 3
-rw-r--r--  storage/xtradb/include/log0online.h | 2
-rw-r--r--  storage/xtradb/include/log0recv.h | 36
-rw-r--r--  storage/xtradb/include/mach0data.ic | 15
-rw-r--r--  storage/xtradb/include/os0file.h | 288
-rw-r--r--  storage/xtradb/include/os0file.ic | 203
-rw-r--r--  storage/xtradb/include/os0sync.h | 9
-rw-r--r--  storage/xtradb/include/page0zip.ic | 4
-rw-r--r--  storage/xtradb/include/row0mysql.h | 2
-rw-r--r--  storage/xtradb/include/srv0srv.h | 9
-rw-r--r--  storage/xtradb/include/srv0start.h | 24
-rw-r--r--  storage/xtradb/include/trx0rec.h | 11
-rw-r--r--  storage/xtradb/include/trx0rseg.h | 11
-rw-r--r--  storage/xtradb/include/trx0trx.h | 21
-rw-r--r--  storage/xtradb/include/trx0xa.h | 15
-rw-r--r--  storage/xtradb/include/univ.i | 14
-rw-r--r--  storage/xtradb/include/ut0rnd.ic | 7
-rw-r--r--  storage/xtradb/lock/lock0lock.cc | 22
-rw-r--r--  storage/xtradb/log/log0log.cc | 25
-rw-r--r--  storage/xtradb/log/log0online.cc | 61
-rw-r--r--  storage/xtradb/log/log0recv.cc | 56
-rw-r--r--  storage/xtradb/mysql-test/storage_engine/suite.pm | 8
-rw-r--r--  storage/xtradb/mysql-test/storage_engine/tbl_opt_index_dir.rdiff (renamed from storage/xtradb/mysql-test/storage_engine/tbl_opt_data_index_dir.rdiff) | 0
-rw-r--r--  storage/xtradb/os/os0file.cc | 202
-rw-r--r--  storage/xtradb/rem/rem0rec.cc | 6
-rw-r--r--  storage/xtradb/row/row0ftsort.cc | 3
-rw-r--r--  storage/xtradb/row/row0import.cc | 1
-rw-r--r--  storage/xtradb/row/row0ins.cc | 9
-rw-r--r--  storage/xtradb/row/row0log.cc | 19
-rw-r--r--  storage/xtradb/row/row0merge.cc | 37
-rw-r--r--  storage/xtradb/row/row0mysql.cc | 3
-rw-r--r--  storage/xtradb/row/row0purge.cc | 4
-rw-r--r--  storage/xtradb/row/row0sel.cc | 103
-rw-r--r--  storage/xtradb/srv/srv0srv.cc | 394
-rw-r--r--  storage/xtradb/srv/srv0start.cc | 419
-rw-r--r--  storage/xtradb/sync/sync0sync.cc | 5
-rw-r--r--  storage/xtradb/trx/trx0i_s.cc | 4
-rw-r--r--  storage/xtradb/trx/trx0purge.cc | 18
-rw-r--r--  storage/xtradb/trx/trx0rec.cc | 42
-rw-r--r--  storage/xtradb/trx/trx0roll.cc | 4
-rw-r--r--  storage/xtradb/trx/trx0rseg.cc | 31
-rw-r--r--  storage/xtradb/trx/trx0sys.cc | 18
-rw-r--r--  storage/xtradb/trx/trx0trx.cc | 3
-rw-r--r--  storage/xtradb/usr/usr0sess.cc | 3
-rw-r--r--  strings/ctype-utf8.c | 10
-rw-r--r--  strings/ctype.c | 2
-rw-r--r--  strings/dtoa.c | 8
-rw-r--r--  tests/mysql_client_test.c | 1
-rw-r--r--  unittest/mysys/ma_dyncol-t.c | 3
629 files changed, 14689 insertions, 11104 deletions
diff --git a/.travis.compiler.sh b/.travis.compiler.sh
index 35e79e177ef..6058d95a170 100755
--- a/.travis.compiler.sh
+++ b/.travis.compiler.sh
@@ -39,8 +39,4 @@ else
fi
fi
-# main.mysqlhotcopy_myisam consitently failed in travis containers
-# https://travis-ci.org/grooverdan/mariadb-server/builds/217661580
-echo 'main.mysqlhotcopy_myisam : unstable in containers' >> ${TRAVIS_BUILD_DIR}/mysql-test/unstable-tests
-echo 'archive.mysqlhotcopy_archive : unstable in containers' >> ${TRAVIS_BUILD_DIR}/mysql-test/unstable-tests
set +v +x
diff --git a/.travis.yml b/.travis.yml
index f33de076289..d7657509297 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -42,13 +42,33 @@ matrix:
compiler: gcc
script:
- ${CC} --version ; ${CXX} --version
- # Just for disabling hotcopy tests for now
- source .travis.compiler.sh
# https://github.com/travis-ci/travis-ci/issues/7062 - /run/shm isn't writable or executable
# in trusty containers
- export MTR_MEM=/tmp
- env DEB_BUILD_OPTIONS="parallel=6" debian/autobake-deb.sh;
- ccache --show-stats
+ # Until OSX becomes a bit more stable: MDEV-12435
+ allow_failures:
+ - os: osx
+ compiler: clang
+ env: GCC_VERSION=4.8 TYPE=RelWithDebInfo MYSQL_TEST_SUITES=rpl
+ - os: osx
+ compiler: clang
+ env: GCC_VERSION=5 TYPE=RelWithDebInfo MYSQL_TEST_SUITES=main,archive,optimizer_unfixed_bugs,parts,sys_vars,unit,vcol,innodb,innodb_gis,innodb_zip,innodb_fts
+ - os: osx
+ compiler: clang
+ env: GCC_VERSION=6 TYPE=RelWithDebInfo MYSQL_TEST_SUITES=binlog,binlog_encryption,encryption,rocksdb
+ - os: osx
+ compiler: clang
+ env: GCC_VERSION=6 TYPE=RelWithDebInfo MYSQL_TEST_SUITES=csv,federated,funcs_1,funcs_2,gcol,handler,heap,json,maria,percona,perfschema,plugins,multi_source,roles
+ # MDEV-13002 plugins.server_audit and plugins.thread_pool_server_audit test fail due to mysqltest error
+ - os: linux
+ compiler: gcc
+ env: GCC_VERSION=6 TYPE=RelWithDebInfo MYSQL_TEST_SUITES=csv,federated,funcs_1,funcs_2,gcol,handler,heap,json,maria,percona,perfschema,plugins,multi_source,roles
+ - os: linux
+ compiler: clang
+ env: GCC_VERSION=6 TYPE=RelWithDebInfo MYSQL_TEST_SUITES=csv,federated,funcs_1,funcs_2,gcol,handler,heap,json,maria,percona,perfschema,plugins,multi_source,roles
# Matrix include for coverity
# - env:
@@ -108,6 +128,7 @@ addons:
- libaio-dev
- libboost-dev
- libcurl3-dev
+ - libdbd-mysql
- libjudy-dev
- libncurses5-dev
- libpam0g-dev
@@ -128,6 +149,7 @@ addons:
- liblzma-dev
- libzmq-dev
- libdistro-info-perl
+ - uuid-dev
- devscripts # implicit for any build on Ubuntu
# libsystemd-daemon-dev # https://github.com/travis-ci/apt-package-whitelist/issues/3882
diff --git a/VERSION b/VERSION
index ed825f4c577..1fe32ad474c 100644
--- a/VERSION
+++ b/VERSION
@@ -1,3 +1,3 @@
MYSQL_VERSION_MAJOR=10
MYSQL_VERSION_MINOR=2
-MYSQL_VERSION_PATCH=6
+MYSQL_VERSION_PATCH=7
diff --git a/client/mysql.cc b/client/mysql.cc
index 22f518ed70f..f9aba0fd62a 100644
--- a/client/mysql.cc
+++ b/client/mysql.cc
@@ -3061,7 +3061,6 @@ static int com_server_help(String *buffer __attribute__((unused)),
{
unsigned int num_fields= mysql_num_fields(result);
my_ulonglong num_rows= mysql_num_rows(result);
- mysql_fetch_fields(result);
if (num_fields==3 && num_rows==1)
{
if (!(cur= mysql_fetch_row(result)))
diff --git a/client/mysql_upgrade.c b/client/mysql_upgrade.c
index 9ac438ff6ea..4e4970bacab 100644
--- a/client/mysql_upgrade.c
+++ b/client/mysql_upgrade.c
@@ -1130,7 +1130,7 @@ static int check_version_match(void)
int main(int argc, char **argv)
{
- char self_name[FN_REFLEN];
+ char self_name[FN_REFLEN + 1];
MY_INIT(argv[0]);
@@ -1138,7 +1138,7 @@ int main(int argc, char **argv)
if (GetModuleFileName(NULL, self_name, FN_REFLEN) == 0)
#endif
{
- strncpy(self_name, argv[0], FN_REFLEN);
+ strmake_buf(self_name, argv[0]);
}
if (init_dynamic_string(&ds_args, "", 512, 256) ||
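Background for the change above: strncpy() does not write a terminating '\0' when the source is at least as long as the destination, so copying argv[0] with strncpy(self_name, argv[0], FN_REFLEN) into a FN_REFLEN-sized buffer could leave it unterminated; MariaDB's strmake-style helpers always terminate. A minimal standalone C illustration of that contract (names here are illustrative, not MariaDB APIs):

#include <stdio.h>
#include <string.h>

/* Copy at most dst_size - 1 characters and always NUL-terminate,
   which is roughly the guarantee strmake_buf() provides. */
static void copy_truncating(char *dst, size_t dst_size, const char *src)
{
    size_t len = strlen(src);
    if (len >= dst_size)
        len = dst_size - 1;
    memcpy(dst, src, len);
    dst[len] = '\0';
}

int main(void)
{
    char buf[8];
    copy_truncating(buf, sizeof buf, "/a/very/long/program/path");
    printf("%s\n", buf);   /* prints "/a/very": truncated but terminated */
    return 0;
}
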
diff --git a/client/mysqladmin.cc b/client/mysqladmin.cc
index 1eba3f25ee8..45b332a6cd6 100644
--- a/client/mysqladmin.cc
+++ b/client/mysqladmin.cc
@@ -47,6 +47,7 @@ static uint opt_count_iterations= 0, my_end_arg;
static ulong opt_connect_timeout, opt_shutdown_timeout;
static char * unix_port=0;
static char *opt_plugin_dir= 0, *opt_default_auth= 0;
+static bool sql_log_bin_off= false;
#ifdef HAVE_SMEM
static char *shared_memory_base_name=0;
@@ -600,6 +601,31 @@ static my_bool sql_connect(MYSQL *mysql, uint wait)
}
+static int maybe_disable_binlog(MYSQL *mysql)
+{
+ if (opt_local && !sql_log_bin_off)
+ {
+ if (mysql_query(mysql, "set local sql_log_bin=0"))
+ {
+ my_printf_error(0, "SET LOCAL SQL_LOG_BIN=0 failed; error: '%-.200s'",
+ error_flags, mysql_error(mysql));
+ return -1;
+ }
+ }
+ sql_log_bin_off= true;
+ return 0;
+}
+
+
+int flush(MYSQL *mysql, const char *what)
+{
+ char buf[FN_REFLEN];
+ my_snprintf(buf, sizeof(buf), "flush %s%s",
+ (opt_local && !sql_log_bin_off ? "local " : ""), what);
+ return mysql_query(mysql, buf);
+}
+
+
/**
@brief Execute all commands
@@ -616,6 +642,7 @@ static my_bool sql_connect(MYSQL *mysql, uint wait)
static int execute_commands(MYSQL *mysql,int argc, char **argv)
{
+ int ret = 0;
const char *status;
/*
MySQL documentation relies on the fact that mysqladmin will
@@ -628,17 +655,6 @@ static int execute_commands(MYSQL *mysql,int argc, char **argv)
struct my_rnd_struct rand_st;
char buff[FN_REFLEN + 20];
- if (opt_local)
- {
- sprintf(buff, "set local sql_log_bin=0");
- if (mysql_query(mysql, buff))
- {
- my_printf_error(0, "SET LOCAL SQL_LOG_BIN=0 failed; error: '%-.200s'",
- error_flags, mysql_error(mysql));
- return -1;
- }
- }
-
for (; argc > 0 ; argv++,argc--)
{
int command;
@@ -650,6 +666,8 @@ static int execute_commands(MYSQL *mysql,int argc, char **argv)
my_printf_error(0, "Too few arguments to create", error_flags);
return 1;
}
+ if (maybe_disable_binlog(mysql))
+ return -1;
sprintf(buff,"create database `%.*s`",FN_REFLEN,argv[1]);
if (mysql_query(mysql,buff))
{
@@ -667,6 +685,8 @@ static int execute_commands(MYSQL *mysql,int argc, char **argv)
my_printf_error(0, "Too few arguments to drop", error_flags);
return 1;
}
+ if (maybe_disable_binlog(mysql))
+ return -1;
if (drop_db(mysql,argv[1]))
return -1;
argc--; argv++;
@@ -707,7 +727,7 @@ static int execute_commands(MYSQL *mysql,int argc, char **argv)
}
case ADMIN_FLUSH_PRIVILEGES:
case ADMIN_RELOAD:
- if (mysql_query(mysql,"flush privileges"))
+ if (flush(mysql, "privileges"))
{
my_printf_error(0, "reload failed; error: '%s'", error_flags,
mysql_error(mysql));
@@ -911,7 +931,7 @@ static int execute_commands(MYSQL *mysql,int argc, char **argv)
}
case ADMIN_FLUSH_LOGS:
{
- if (mysql_query(mysql,"flush logs"))
+ if (flush(mysql, "logs"))
{
my_printf_error(0, "flush failed; error: '%s'", error_flags,
mysql_error(mysql));
@@ -921,7 +941,7 @@ static int execute_commands(MYSQL *mysql,int argc, char **argv)
}
case ADMIN_FLUSH_BINARY_LOG:
{
- if (mysql_query(mysql, "flush binary logs"))
+ if (flush(mysql, "binary logs"))
{
my_printf_error(0, "flush failed; error: '%s'", error_flags,
mysql_error(mysql));
@@ -931,7 +951,7 @@ static int execute_commands(MYSQL *mysql,int argc, char **argv)
}
case ADMIN_FLUSH_ENGINE_LOG:
{
- if (mysql_query(mysql,"flush engine logs"))
+ if (flush(mysql, "engine logs"))
{
my_printf_error(0, "flush failed; error: '%s'", error_flags,
mysql_error(mysql));
@@ -941,7 +961,7 @@ static int execute_commands(MYSQL *mysql,int argc, char **argv)
}
case ADMIN_FLUSH_ERROR_LOG:
{
- if (mysql_query(mysql, "flush error logs"))
+ if (flush(mysql, "error logs"))
{
my_printf_error(0, "flush failed; error: '%s'", error_flags,
mysql_error(mysql));
@@ -951,7 +971,7 @@ static int execute_commands(MYSQL *mysql,int argc, char **argv)
}
case ADMIN_FLUSH_GENERAL_LOG:
{
- if (mysql_query(mysql, "flush general logs"))
+ if (flush(mysql, "general logs"))
{
my_printf_error(0, "flush failed; error: '%s'", error_flags,
mysql_error(mysql));
@@ -961,7 +981,7 @@ static int execute_commands(MYSQL *mysql,int argc, char **argv)
}
case ADMIN_FLUSH_RELAY_LOG:
{
- if (mysql_query(mysql, "flush relay logs"))
+ if (flush(mysql, "relay logs"))
{
my_printf_error(0, "flush failed; error: '%s'", error_flags,
mysql_error(mysql));
@@ -971,7 +991,7 @@ static int execute_commands(MYSQL *mysql,int argc, char **argv)
}
case ADMIN_FLUSH_SLOW_LOG:
{
- if (mysql_query(mysql,"flush slow logs"))
+ if (flush(mysql, "slow logs"))
{
my_printf_error(0, "flush failed; error: '%s'", error_flags,
mysql_error(mysql));
@@ -981,7 +1001,7 @@ static int execute_commands(MYSQL *mysql,int argc, char **argv)
}
case ADMIN_FLUSH_HOSTS:
{
- if (mysql_query(mysql,"flush hosts"))
+ if (flush(mysql, "hosts"))
{
my_printf_error(0, "flush failed; error: '%s'", error_flags,
mysql_error(mysql));
@@ -991,7 +1011,7 @@ static int execute_commands(MYSQL *mysql,int argc, char **argv)
}
case ADMIN_FLUSH_TABLES:
{
- if (mysql_query(mysql,"flush tables"))
+ if (flush(mysql, "tables"))
{
my_printf_error(0, "flush failed; error: '%s'", error_flags,
mysql_error(mysql));
@@ -1001,7 +1021,7 @@ static int execute_commands(MYSQL *mysql,int argc, char **argv)
}
case ADMIN_FLUSH_STATUS:
{
- if (mysql_query(mysql,"flush status"))
+ if (flush(mysql, "status"))
{
my_printf_error(0, "flush failed; error: '%s'", error_flags,
mysql_error(mysql));
@@ -1011,7 +1031,7 @@ static int execute_commands(MYSQL *mysql,int argc, char **argv)
}
case ADMIN_FLUSH_TABLE_STATISTICS:
{
- if (mysql_query(mysql,"flush table_statistics"))
+ if (flush(mysql, "table_statistics"))
{
my_printf_error(0, "flush failed; error: '%s'", error_flags,
mysql_error(mysql));
@@ -1021,7 +1041,7 @@ static int execute_commands(MYSQL *mysql,int argc, char **argv)
}
case ADMIN_FLUSH_INDEX_STATISTICS:
{
- if (mysql_query(mysql,"flush index_statistics"))
+ if (flush(mysql, "index_statistics"))
{
my_printf_error(0, "flush failed; error: '%s'", error_flags,
mysql_error(mysql));
@@ -1031,7 +1051,7 @@ static int execute_commands(MYSQL *mysql,int argc, char **argv)
}
case ADMIN_FLUSH_USER_STATISTICS:
{
- if (mysql_query(mysql,"flush user_statistics"))
+ if (flush(mysql, "user_statistics"))
{
my_printf_error(0, "flush failed; error: '%s'", error_flags,
mysql_error(mysql));
@@ -1041,7 +1061,7 @@ static int execute_commands(MYSQL *mysql,int argc, char **argv)
}
case ADMIN_FLUSH_USER_RESOURCES:
{
- if (mysql_query(mysql, "flush user_resources"))
+ if (flush(mysql, "user_resources"))
{
my_printf_error(0, "flush failed; error: '%s'", error_flags,
mysql_error(mysql));
@@ -1051,7 +1071,7 @@ static int execute_commands(MYSQL *mysql,int argc, char **argv)
}
case ADMIN_FLUSH_CLIENT_STATISTICS:
{
- if (mysql_query(mysql,"flush client_statistics"))
+ if (flush(mysql, "client_statistics"))
{
my_printf_error(0, "flush failed; error: '%s'", error_flags,
mysql_error(mysql));
@@ -1061,9 +1081,8 @@ static int execute_commands(MYSQL *mysql,int argc, char **argv)
}
case ADMIN_FLUSH_ALL_STATISTICS:
{
- if (mysql_query(mysql,
- "flush table_statistics,index_statistics,"
- "user_statistics,client_statistics"))
+ if (flush(mysql, "table_statistics,index_statistics,"
+ "user_statistics,client_statistics"))
{
my_printf_error(0, "flush failed; error: '%s'", error_flags,
mysql_error(mysql));
@@ -1073,9 +1092,8 @@ static int execute_commands(MYSQL *mysql,int argc, char **argv)
}
case ADMIN_FLUSH_ALL_STATUS:
{
- if (mysql_query(mysql,
- "flush status,table_statistics,index_statistics,"
- "user_statistics,client_statistics"))
+ if (flush(mysql, "status,table_statistics,index_statistics,"
+ "user_statistics,client_statistics"))
{
my_printf_error(0, "flush failed; error: '%s'", error_flags,
mysql_error(mysql));
@@ -1093,6 +1111,8 @@ static int execute_commands(MYSQL *mysql,int argc, char **argv)
start_time=time((time_t*) 0);
my_rnd_init(&rand_st,(ulong) start_time,(ulong) start_time/2);
+ if (maybe_disable_binlog(mysql))
+ return -1;
if (argc < 1)
{
my_printf_error(0, "Too few arguments to change password", error_flags);
@@ -1106,7 +1126,8 @@ static int execute_commands(MYSQL *mysql,int argc, char **argv)
if (strcmp(typed_password, verified) != 0)
{
my_printf_error(0,"Passwords don't match",MYF(ME_BELL));
- return -1;
+ ret = -1;
+ goto password_done;
}
}
else
@@ -1133,7 +1154,8 @@ static int execute_commands(MYSQL *mysql,int argc, char **argv)
{
my_printf_error(0, "Could not determine old_passwords setting from server; error: '%s'",
error_flags, mysql_error(mysql));
- return -1;
+ ret = -1;
+ goto password_done;
}
else
{
@@ -1144,7 +1166,8 @@ static int execute_commands(MYSQL *mysql,int argc, char **argv)
"Could not get old_passwords setting from "
"server; error: '%s'",
error_flags, mysql_error(mysql));
- return -1;
+ ret = -1;
+ goto password_done;
}
if (!mysql_num_rows(res))
old= 1;
@@ -1169,15 +1192,15 @@ static int execute_commands(MYSQL *mysql,int argc, char **argv)
{
my_printf_error(0, "Can't turn off logging; error: '%s'",
error_flags, mysql_error(mysql));
- return -1;
+ ret = -1;
}
+ else
if (mysql_query(mysql,buff))
{
if (mysql_errno(mysql)!=1290)
{
my_printf_error(0,"unable to change password; error: '%s'",
error_flags, mysql_error(mysql));
- return -1;
}
else
{
@@ -1191,9 +1214,10 @@ static int execute_commands(MYSQL *mysql,int argc, char **argv)
" --skip-grant-tables).\n"
"Use: \"mysqladmin flush-privileges password '*'\""
" instead", error_flags);
- return -1;
}
+ ret = -1;
}
+password_done:
/* free up memory from prompted password */
if (typed_password != argv[1])
{
@@ -1300,7 +1324,7 @@ static int execute_commands(MYSQL *mysql,int argc, char **argv)
return 1;
}
}
- return 0;
+ return ret;
}
/**
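Background for the mysqladmin change above: instead of unconditionally sending SET LOCAL sql_log_bin=0 up front, the new flush() helper prefixes FLUSH statements with LOCAL when --local is given, and LOCAL (alias of NO_WRITE_TO_BINLOG) keeps that single FLUSH out of the binary log so it is not replicated. A small sketch of the string the helper builds (simplified: the real code also skips the prefix once sql_log_bin has already been turned off; function names here are illustrative):

#include <stdio.h>

/* Build the FLUSH statement the way mysqladmin's flush() helper does. */
static void build_flush(char *buf, size_t n, int opt_local, const char *what)
{
    snprintf(buf, n, "flush %s%s", opt_local ? "local " : "", what);
}

int main(void)
{
    char q[128];
    build_flush(q, sizeof q, 1, "logs");   /* -> "flush local logs" */
    puts(q);
    build_flush(q, sizeof q, 0, "logs");   /* -> "flush logs" */
    puts(q);
    return 0;
}
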
diff --git a/client/mysqlbinlog.cc b/client/mysqlbinlog.cc
index 92534501fb1..34e810f7b6b 100644
--- a/client/mysqlbinlog.cc
+++ b/client/mysqlbinlog.cc
@@ -2480,7 +2480,7 @@ static Exit_status dump_remote_log_entries(PRINT_EVENT_INFO *print_event_info,
int2store(buf + BIN_LOG_HEADER_SIZE, binlog_flags);
size_t tlen = strlen(logname);
- if (tlen > UINT_MAX)
+ if (tlen > sizeof(buf) - 10)
{
error("Log name too long.");
DBUG_RETURN(ERROR_STOP);
diff --git a/client/mysqldump.c b/client/mysqldump.c
index 8235090ccf3..2028606cd5a 100644
--- a/client/mysqldump.c
+++ b/client/mysqldump.c
@@ -2174,6 +2174,7 @@ static void print_xml_comment(FILE *xml_file, size_t len,
case '-':
if (*(comment_string + 1) == '-') /* Only one hyphen allowed. */
break;
+ /* fall through */
default:
fputc(*comment_string, xml_file);
break;
@@ -2850,6 +2851,8 @@ static uint get_table_structure(char *table, char *db, char *table_type,
my_free(scv_buff);
+ if (path)
+ my_fclose(sql_file, MYF(MY_WME));
DBUG_RETURN(0);
}
else
@@ -5940,8 +5943,7 @@ static my_bool get_view_structure(char *table, char* db)
dynstr_free(&ds_view);
}
- if (switch_character_set_results(mysql, default_charset))
- DBUG_RETURN(1);
+ switch_character_set_results(mysql, default_charset);
/* If a separate .sql file was opened, close it now */
if (sql_file != md_result_file)
diff --git a/client/mysqltest.cc b/client/mysqltest.cc
index c92281c59e9..4c14234174e 100644
--- a/client/mysqltest.cc
+++ b/client/mysqltest.cc
@@ -1720,13 +1720,12 @@ void log_msg(const char *fmt, ...)
int cat_file(DYNAMIC_STRING* ds, const char* filename)
{
int fd;
- size_t len;
+ int len;
char buff[16384];
if ((fd= my_open(filename, O_RDONLY, MYF(0))) < 0)
return 1;
- while((len= my_read(fd, (uchar*)&buff,
- sizeof(buff)-1, MYF(0))) > 0)
+ while((len= (int)my_read(fd, (uchar*)&buff, sizeof(buff)-1, MYF(0))) > 0)
{
char *p= buff, *start= buff,*end=buff+len;
while (p < end)
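A plausible reason for the int cast above: my_read() returns size_t and reports failure as (size_t)-1 (MY_FILE_ERROR), so with an unsigned len the condition len > 0 still holds on error and the loop keeps running; casting the result to a signed int makes the error value negative and terminates the loop. Standalone illustration (fake_read is a stand-in, not a MariaDB function):

#include <stdio.h>

static size_t fake_read(void) { return (size_t)-1; }  /* simulated I/O error */

int main(void)
{
    size_t ulen = fake_read();
    int    slen = (int)fake_read();

    /* unsigned comparison stays true, signed comparison becomes false */
    printf("unsigned loops: %d, signed stops: %d\n", ulen > 0, slen > 0);
    return 0;
}
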
diff --git a/cmake/wsrep.cmake b/cmake/wsrep.cmake
index 0a1c7dd9697..be56a4c0772 100644
--- a/cmake/wsrep.cmake
+++ b/cmake/wsrep.cmake
@@ -41,4 +41,3 @@ SET(WSREP_PROC_INFO ${WITH_WSREP})
IF(WITH_WSREP)
SET(WSREP_PATCH_VERSION "wsrep_${WSREP_VERSION}")
ENDIF()
-
diff --git a/config.h.cmake b/config.h.cmake
index c0fad4a3efe..51abd4b11d5 100644
--- a/config.h.cmake
+++ b/config.h.cmake
@@ -576,21 +576,4 @@
#define __STDC_FORMAT_MACROS
#endif
-/*
- stat structure (from <sys/stat.h>) is conditionally defined
- to have different layout and size depending on the defined macros.
- The correct macro is defined in my_config.h, which means it MUST be
- included first (or at least before <features.h> - so, practically,
- before including any system headers).
-
- Check the include order by looking at __GLIBC__ (defined in <features.h>)
-
- But we cannot force all third-party clients/connectors to include
- my_config.h first. So, their crashes are their responsibility,
- we enable this check only for MariaDB sources (SAFE_MUTEX check).
-*/
-#if defined(__GLIBC__) && defined(SAFE_MUTEX)
-#error <my_config.h> MUST be included first!
-#endif
-
#endif
diff --git a/extra/innochecksum.cc b/extra/innochecksum.cc
index 238a5a80ac5..fe828026b4b 100644
--- a/extra/innochecksum.cc
+++ b/extra/innochecksum.cc
@@ -82,7 +82,7 @@ uintmax_t cur_page_num;
/* Skip the checksum verification. */
static bool no_check;
/* Enabled for strict checksum verification. */
-bool strict_verify = 0;
+bool strict_verify;
/* Enabled for rewrite checksum. */
static bool do_write;
/* Mismatches count allowed (0 by default). */
@@ -280,7 +280,8 @@ void print_index_leaf_stats(
fprintf(fil_out, "page_no\tdata_size\tn_recs\n");
while (it_page != index.leaves.end()) {
const per_page_stats& stat = it_page->second;
- fprintf(fil_out, "%llu\t%lu\t%lu\n", it_page->first, stat.data_size, stat.n_recs);
+ fprintf(fil_out, "%llu\t" ULINTPF "\t" ULINTPF "\n",
+ it_page->first, stat.data_size, stat.n_recs);
page_no = stat.right_page_no;
it_page = index.leaves.find(page_no);
}
@@ -315,12 +316,15 @@ void defrag_analysis(
}
if (index.leaf_pages) {
- fprintf(fil_out, "count = %lu free = %lu\n", index.count, index.free_pages);
+ fprintf(fil_out, "count = " ULINTPF " free = " ULINTPF "\n",
+ index.count, index.free_pages);
}
- fprintf(fil_out, "%llu\t\t%llu\t\t%lu\t\t%lu\t\t%lu\t\t%.2f\t%lu\n",
+ fprintf(fil_out, "%llu\t\t%llu\t\t"
+ ULINTPF "\t\t%lu\t\t" ULINTPF "\t\t%.2f\t" ULINTPF "\n",
id, index.leaf_pages, n_leaf_pages, n_merge, n_pages,
- 1.0 - (double)n_pages / (double)n_leaf_pages, index.max_data_size);
+ 1.0 - (double)n_pages / (double)n_leaf_pages,
+ index.max_data_size);
}
void print_leaf_stats(
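Context for the format-string changes above: InnoDB's ulint is pointer-sized, so on 64-bit Windows it is 64 bits wide while unsigned long stays 32 bits, and a hard-coded "%lu" would be wrong there; ULINTPF (defined in InnoDB's univ.i) picks the matching conversion. A minimal sketch of the same idea with stand-in names (my_ulint and MY_ULINTPF are illustrative, not the InnoDB definitions):

#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>

typedef uintptr_t my_ulint;        /* stand-in for InnoDB ulint */
#define MY_ULINTPF "%" PRIuPTR     /* stand-in for ULINTPF */

int main(void)
{
    my_ulint pages = 42;
    /* Adjacent string literals splice the macro into the format string. */
    printf("count = " MY_ULINTPF "\n", pages);
    return 0;
}
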
diff --git a/extra/mariabackup/CMakeLists.txt b/extra/mariabackup/CMakeLists.txt
index 693082b765a..ac15460660c 100644
--- a/extra/mariabackup/CMakeLists.txt
+++ b/extra/mariabackup/CMakeLists.txt
@@ -27,101 +27,6 @@ IF(NOT WIN32)
ENDIF()
ENDIF()
-IF(WITH_LIBARCHIVE STREQUAL "STATIC")
- SET(CMAKE_FIND_LIBRARY_SUFFIXES .a .lib)
-ENDIF()
-
-FIND_PACKAGE(LibArchive)
-
-IF(NOT DEFINED WITH_LIBARCHIVE)
- IF(LibArchive_FOUND)
- SET(WITH_LIBARCHIVE_DEFAULT ON)
- ELSE()
- SET(WITH_LIBARCHIVE_DEFAULT OFF)
- ENDIF()
- SET(WITH_LIBARCHIVE ${WITH_LIBARCHIVE_DEFAULT} CACHE STRING "Use libarchive for streaming features (ON, OFF or STATIC)" )
-ENDIF()
-
-IF(NOT WITH_LIBARCHIVE MATCHES "^(ON|OFF|STATIC)$")
- MESSAGE(FATAL_ERROR "Invalid value for WITH_LIBARCHIVE: '${WITH_LIBARCHIVE}'. Use one of ON, OFF or STATIC")
-ENDIF()
-
-IF(UNIX)
- SET(PIC_FLAG -fPIC)
-ENDIF()
-
-IF((NOT WITH_LIBARCHIVE STREQUAL "OFF") AND (NOT LibArchive_FOUND))
- IF(CMAKE_VERSION VERSION_LESS "2.8.12")
- MESSAGE("libarchive can't be built, old cmake")
- ELSE()
- # Build a local version
- INCLUDE(ExternalProject)
- SET(LIBARCHIVE_DIR ${CMAKE_CURRENT_BINARY_DIR}/libarchive)
- SET(libarchive_PREFIX ${CMAKE_CURRENT_BINARY_DIR}/libarchive)
- SET(libarchive_CMAKE_ARGS
- -DCMAKE_INSTALL_PREFIX:PATH=<INSTALL_DIR>
- -DCMAKE_BUILD_TYPE=${CMAKE_BUILD_TYPE}
- -DENABLE_ICONV=OFF
- -DENABLE_TAR=ON
- -DENABLE_OPENSSL=OFF
- -DENABLE_TEST=OFF
- "-DCMAKE_C_FLAGS_DEBUG=${CMAKE_C_FLAGS_DEBUG} ${PIC_FLAG}"
- "-DCMAKE_C_FLAGS_RELWITHDEBINFO=${CMAKE_C_FLAGS_RELWITHDEBINFO} ${PIC_FLAG}"
- "-DCMAKE_C_FLAGS_RELEASE=${CMAKE_C_FLAGS_RELEASE} ${PIC_FLAG}"
- "-DCMAKE_C_FLAGS_MINSIZEREL=${CMAKE_C_FLAGS_MINSIZEREL} ${PIC_FLAG}"
- )
- IF(WIN32)
- SET(libarchive_CMAKE_ARGS ${libarchive_CMAKE_ARGS} -DWINDOWS_VERSION=WIN7 -DCMAKE_DEBUG_POSTFIX=d)
- SET(LIBARCHIVE_RELEASE_LIB ${LIBARCHIVE_DIR}/lib/${CMAKE_STATIC_LIBRARY_PREFIX}archive_static${CMAKE_STATIC_LIBRARY_SUFFIX})
- SET(LIBARCHIVE_DEBUG_LIB ${LIBARCHIVE_DIR}/lib/${CMAKE_STATIC_LIBRARY_PREFIX}archive_staticd${CMAKE_STATIC_LIBRARY_SUFFIX})
- SET(byproducts ${LIBARCHIVE_RELEASE_LIB} ${LIBARCHIVE_DEBUG_LIB})
- ELSE()
- SET(LIBARCHIVE_LIB ${LIBARCHIVE_DIR}/lib/${CMAKE_STATIC_LIBRARY_PREFIX}archive${CMAKE_STATIC_LIBRARY_SUFFIX})
- SET(byproducts ${LIBARCHIVE_LIB})
- ENDIF()
-
- IF(CMAKE_VERSION VERSION_GREATER "3.1")
- SET(byproducts BUILD_BYPRODUCTS ${byproducts})
- ENDIF()
-
- ExternalProject_Add(libarchive
- PREFIX ${libarchive_PREFIX}
- DOWNLOAD_DIR ${LIBARCHIVE_DIR}
- URL http://www.libarchive.org/downloads/libarchive-3.2.2.tar.gz
- INSTALL_DIR ${LIBARCHIVE_DIR}
- CMAKE_ARGS ${libarchive_CMAKE_ARGS}
- ${byproducts}
- )
- ADD_LIBRARY(archive_static STATIC IMPORTED)
- ADD_DEPENDENCIES(archive_static libarchive)
- IF(WIN32)
- SET_PROPERTY(TARGET archive_static PROPERTY IMPORTED_LOCATION_RELWITHDEBINFO ${LIBARCHIVE_RELEASE_LIB})
- SET_PROPERTY(TARGET archive_static PROPERTY IMPORTED_LOCATION_RELEASE ${LIBARCHIVE_RELEASE_LIB})
- SET_PROPERTY(TARGET archive_static PROPERTY IMPORTED_LOCATION_DEBUG ${LIBARCHIVE_DEBUG_LIB})
- SET_PROPERTY(TARGET archive_static PROPERTY IMPORTED_LOCATION_MINSIZEREL ${LIBARCHIVE_RELEASE_LIB})
- ELSE()
- SET_PROPERTY(TARGET archive_static PROPERTY IMPORTED_LOCATION ${LIBARCHIVE_LIB})
- ENDIF()
-
- SET(LibArchive_FOUND ON )
- SET(LibArchive_INCLUDE_DIRS ${LIBARCHIVE_DIR}/include )
- SET(LibArchive_LIBRARIES archive_static)
- IF(WIN32)
- SET(LIBARCHIVE_STATIC 1)
- ENDIF()
- ENDIF()
-ENDIF()
-
-
-IF(WITH_LIBARCHIVE AND LibArchive_FOUND)
- ADD_DEFINITIONS(-DHAVE_LIBARCHIVE)
- IF(LIBARCHIVE_STATIC)
- ADD_DEFINITIONS(-DLIBARCHIVE_STATIC)
- ENDIF()
- INCLUDE_DIRECTORIES(${LibArchive_INCLUDE_DIRS})
- LINK_LIBRARIES(${LibArchive_LIBRARIES})
- SET(DS_ARCHIVE_SOURCE ds_archive.c)
-ENDIF()
INCLUDE_DIRECTORIES(
${CMAKE_SOURCE_DIR}/include
@@ -154,7 +59,6 @@ MYSQL_ADD_EXECUTABLE(mariabackup
innobackupex.cc
changed_page_bitmap.cc
datasink.c
- ${DS_ARCHIVE_SOURCE}
ds_buffer.c
ds_compress.c
ds_local.c
diff --git a/extra/mariabackup/backup_copy.cc b/extra/mariabackup/backup_copy.cc
index 1565e20d732..306009e2139 100644
--- a/extra/mariabackup/backup_copy.cc
+++ b/extra/mariabackup/backup_copy.cc
@@ -450,7 +450,7 @@ datadir_iter_free(datadir_iter_t *it)
/************************************************************************
Holds the state needed to copy single data file. */
struct datafile_cur_t {
- os_file_t file;
+ pfs_os_file_t file;
char rel_path[FN_REFLEN];
char abs_path[FN_REFLEN];
MY_STAT statinfo;
diff --git a/extra/mariabackup/changed_page_bitmap.cc b/extra/mariabackup/changed_page_bitmap.cc
index 435b7fb6172..86a873ef69c 100644
--- a/extra/mariabackup/changed_page_bitmap.cc
+++ b/extra/mariabackup/changed_page_bitmap.cc
@@ -35,7 +35,7 @@ Remove these on the first opportunity, i.e. single-binary XtraBackup. */
/** Single bitmap file information */
struct log_online_bitmap_file_t {
char name[FN_REFLEN]; /*!< Name with full path */
- os_file_t file; /*!< Handle to opened file */
+ pfs_os_file_t file; /*!< Handle to opened file */
ib_uint64_t size; /*!< Size of the file */
ib_uint64_t offset; /*!< Offset of the next read,
or count of already-read bytes
diff --git a/extra/mariabackup/crc/CMakeLists.txt b/extra/mariabackup/crc/CMakeLists.txt
index 577cab6080c..91758cdf520 100644
--- a/extra/mariabackup/crc/CMakeLists.txt
+++ b/extra/mariabackup/crc/CMakeLists.txt
@@ -30,4 +30,4 @@ ENDIF()
IF(HAVE_CLMUL_INSTRUCTION)
ADD_DEFINITIONS(-DHAVE_CLMUL_INSTRUCTION)
ENDIF()
-ADD_LIBRARY(crc crc_glue.c crc-intel-pclmul.c)
+ADD_LIBRARY(crc STATIC crc_glue.c crc-intel-pclmul.c)
diff --git a/extra/mariabackup/crc/crc_glue.c b/extra/mariabackup/crc/crc_glue.c
index ae3fa91c1b0..c301cb01e2e 100644
--- a/extra/mariabackup/crc/crc_glue.c
+++ b/extra/mariabackup/crc/crc_glue.c
@@ -17,7 +17,7 @@ along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
*******************************************************/
-
+#include "my_config.h"
#include "crc_glue.h"
#include "crc-intel-pclmul.h"
#include <stdint.h>
diff --git a/extra/mariabackup/fil_cur.h b/extra/mariabackup/fil_cur.h
index 88239efd2bb..f3601823a5a 100644
--- a/extra/mariabackup/fil_cur.h
+++ b/extra/mariabackup/fil_cur.h
@@ -29,7 +29,7 @@ Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
#include "read_filt.h"
struct xb_fil_cur_t {
- os_file_t file; /*!< source file handle */
+ pfs_os_file_t file; /*!< source file handle */
fil_node_t* node; /*!< source tablespace node */
char rel_path[FN_REFLEN];
/*!< normalized file path */
diff --git a/extra/mariabackup/innobackupex.cc b/extra/mariabackup/innobackupex.cc
index 59fb8fb5565..2114e546440 100644
--- a/extra/mariabackup/innobackupex.cc
+++ b/extra/mariabackup/innobackupex.cc
@@ -848,9 +848,7 @@ ibx_get_one_option(int optid,
opt_ibx_decrypt = true;
break;
case OPT_STREAM:
- if (!strcasecmp(argument, "tar"))
- xtrabackup_stream_fmt = XB_STREAM_FMT_TAR;
- else if (!strcasecmp(argument, "xbstream"))
+ if (!strcasecmp(argument, "xbstream"))
xtrabackup_stream_fmt = XB_STREAM_FMT_XBSTREAM;
else {
ibx_msg("Invalid --stream argument: %s\n", argument);
diff --git a/extra/mariabackup/xb0xb.h b/extra/mariabackup/xb0xb.h
index 659ab8ea5d0..cb1aedd5d13 100644
--- a/extra/mariabackup/xb0xb.h
+++ b/extra/mariabackup/xb0xb.h
@@ -23,17 +23,16 @@ Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
extern void os_io_init_simple(void);
-extern os_file_t files[1000];
+extern pfs_os_file_t files[1000];
extern const char *innodb_checksum_algorithm_names[];
extern TYPELIB innodb_checksum_algorithm_typelib;
extern dberr_t open_or_create_data_files(
- ibool* create_new_db,
+ bool* create_new_db,
#ifdef UNIV_LOG_ARCHIVE
lsn_t* min_arch_log_no,
lsn_t* max_arch_log_no,
-#endif
- lsn_t* min_flushed_lsn,
- lsn_t* max_flushed_lsn,
+#endif
+ lsn_t* flushed_lsn,
ulint* sum_of_new_sizes)
;
int
@@ -66,7 +65,6 @@ void
innodb_log_checksum_func_update(
/*============================*/
ulint algorithm) /*!< in: algorithm */;
-dberr_t recv_find_max_checkpoint(log_group_t** max_group, ulint* max_field);
dberr_t
srv_undo_tablespaces_init(
/*======================*/
diff --git a/extra/mariabackup/xbstream.h b/extra/mariabackup/xbstream.h
index ac1bf05e321..08b017ca5ce 100644
--- a/extra/mariabackup/xbstream.h
+++ b/extra/mariabackup/xbstream.h
@@ -42,7 +42,6 @@ typedef struct xb_wstream_file_struct xb_wstream_file_t;
typedef enum {
XB_STREAM_FMT_NONE,
- XB_STREAM_FMT_TAR,
XB_STREAM_FMT_XBSTREAM
} xb_stream_fmt_t;
diff --git a/extra/mariabackup/xtrabackup.cc b/extra/mariabackup/xtrabackup.cc
index c116d119cee..28de33a5524 100644
--- a/extra/mariabackup/xtrabackup.cc
+++ b/extra/mariabackup/xtrabackup.cc
@@ -309,8 +309,7 @@ my_bool xtrabackup_rebuild_indexes = FALSE;
my_bool xtrabackup_incremental_force_scan = FALSE;
/* The flushed lsn which is read from data files */
-lsn_t min_flushed_lsn= 0;
-lsn_t max_flushed_lsn= 0;
+lsn_t flushed_lsn= 0;
/* The size of archived log file */
ib_int64_t xtrabackup_arch_file_size = 0ULL;
@@ -564,7 +563,6 @@ enum options_xtrabackup
OPT_INNODB_LOG_BUFFER_SIZE,
OPT_INNODB_LOG_FILE_SIZE,
OPT_INNODB_LOG_FILES_IN_GROUP,
- OPT_INNODB_MIRRORED_LOG_GROUPS,
OPT_INNODB_OPEN_FILES,
OPT_INNODB_SYNC_SPIN_LOOPS,
OPT_INNODB_THREAD_CONCURRENCY,
@@ -704,11 +702,7 @@ struct my_option xb_client_options[] =
{"stream", OPT_XTRA_STREAM, "Stream all backup files to the standard output "
"in the specified format."
-#ifdef HAVE_LIBARCHIVE
- "Supported formats are 'tar' and 'xbstream'."
-#else
"Supported format is 'xbstream'."
-#endif
,
(G_PTR*) &xtrabackup_stream_str, (G_PTR*) &xtrabackup_stream_str, 0, GET_STR,
REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
@@ -1453,9 +1447,7 @@ xb_get_one_option(int optid,
xtrabackup_target_dir= xtrabackup_real_target_dir;
break;
case OPT_XTRA_STREAM:
- if (!strcasecmp(argument, "tar"))
- xtrabackup_stream_fmt = XB_STREAM_FMT_TAR;
- else if (!strcasecmp(argument, "xbstream"))
+ if (!strcasecmp(argument, "xbstream"))
xtrabackup_stream_fmt = XB_STREAM_FMT_XBSTREAM;
else
{
@@ -1906,8 +1898,8 @@ error:
return(TRUE);
}
-static my_bool
-innodb_end(void)
+static void
+innodb_end()
{
srv_fast_shutdown = (ulint) innobase_fast_shutdown;
innodb_inited = 0;
@@ -1915,9 +1907,7 @@ innodb_end(void)
msg("xtrabackup: starting shutdown with innodb_fast_shutdown = %lu\n",
srv_fast_shutdown);
- if (innobase_shutdown_for_mysql() != DB_SUCCESS) {
- goto error;
- }
+ innodb_shutdown();
free(internal_innobase_data_file_path);
internal_innobase_data_file_path = NULL;
@@ -1928,12 +1918,6 @@ innodb_end(void)
// pthread_mutex_destroy(&commit_threads_m);
// pthread_mutex_destroy(&commit_cond_m);
// pthread_cond_destroy(&commit_cond);
-
- return(FALSE);
-
-error:
- msg("xtrabackup: innodb_end(): Error occured.\n");
- return(TRUE);
}
/* ================= common ================= */
@@ -2169,7 +2153,7 @@ xb_write_delta_metadata(const char *filename, const xb_delta_info_t *info)
void
xtrabackup_io_throttling(void)
{
- if (xtrabackup_throttle && (io_ticket--) < 0) {
+ if (xtrabackup_backup && xtrabackup_throttle && (io_ticket--) < 0) {
os_event_reset(wait_throttle);
os_event_wait(wait_throttle);
}
@@ -2412,7 +2396,7 @@ check_if_skip_table(
Reads the space flags from a given data file and returns the compressed
page size, or 0 if the space is not compressed. */
ulint
-xb_get_zip_size(os_file_t file)
+xb_get_zip_size(pfs_os_file_t file)
{
byte *buf;
byte *page;
@@ -2611,96 +2595,6 @@ skip:
return(FALSE);
}
-static
-void
-xtrabackup_choose_lsn_offset(lsn_t start_lsn)
-{
-#if SUPPORT_PERCONA_5_5
- ulint no, alt_no, expected_no;
- ulint blocks_in_group;
- lsn_t tmp_offset, end_lsn;
- int lsn_chosen = 0;
- log_group_t *group;
-
- start_lsn = ut_uint64_align_down(start_lsn, OS_FILE_LOG_BLOCK_SIZE);
- end_lsn = start_lsn + RECV_SCAN_SIZE;
-
- group = UT_LIST_GET_FIRST(log_sys->log_groups);
-
- if (mysql_server_version < 50500 || mysql_server_version > 50600) {
- /* only make sense for Percona Server 5.5 */
- return;
- }
-
- if (server_flavor == FLAVOR_PERCONA_SERVER) {
- /* it is Percona Server 5.5 */
- group->alt_offset_chosen = true;
- group->lsn_offset = group->lsn_offset_alt;
- return;
- }
-
- if (group->lsn_offset_alt == group->lsn_offset ||
- group->lsn_offset_alt == (lsn_t) -1) {
- /* we have only one option */
- return;
- }
-
- no = alt_no = (ulint) -1;
- lsn_chosen = 0;
-
- blocks_in_group = log_block_convert_lsn_to_no(
- log_group_get_capacity(group)) - 1;
-
- /* read log block number from usual offset */
- if (group->lsn_offset < group->file_size * group->n_files &&
- (log_group_calc_lsn_offset(start_lsn, group) %
- UNIV_PAGE_SIZE) % OS_MIN_LOG_BLOCK_SIZE == 0) {
- log_group_read_log_seg(LOG_RECOVER, log_sys->buf,
- group, start_lsn, end_lsn);
- no = log_block_get_hdr_no(log_sys->buf);
- }
-
- /* read log block number from Percona Server 5.5 offset */
- tmp_offset = group->lsn_offset;
- group->lsn_offset = group->lsn_offset_alt;
-
- if (group->lsn_offset < group->file_size * group->n_files &&
- (log_group_calc_lsn_offset(start_lsn, group) %
- UNIV_PAGE_SIZE) % OS_MIN_LOG_BLOCK_SIZE == 0) {
- log_group_read_log_seg(LOG_RECOVER, log_sys->buf,
- group, start_lsn, end_lsn);
- alt_no = log_block_get_hdr_no(log_sys->buf);
- }
-
- expected_no = log_block_convert_lsn_to_no(start_lsn);
-
- ut_a(!(no == expected_no && alt_no == expected_no));
-
- group->lsn_offset = tmp_offset;
-
- if ((no <= expected_no &&
- ((expected_no - no) % blocks_in_group) == 0) ||
- ((expected_no | 0x40000000UL) - no) % blocks_in_group == 0) {
- /* default offset looks ok */
- ++lsn_chosen;
- }
-
- if ((alt_no <= expected_no &&
- ((expected_no - alt_no) % blocks_in_group) == 0) ||
- ((expected_no | 0x40000000UL) - alt_no) % blocks_in_group == 0) {
- /* PS 5.5 style offset looks ok */
- ++lsn_chosen;
- group->alt_offset_chosen = true;
- group->lsn_offset = group->lsn_offset_alt;
- }
-
- /* We are in trouble, because we can not make a
- decision to choose one over the other. Die just
- like a Buridan's ass */
- ut_a(lsn_chosen == 1);
-#endif
-}
-
extern ibool log_block_checksum_is_ok_or_old_format(const byte* block);
/*******************************************************//**
@@ -2868,8 +2762,6 @@ static my_bool
xtrabackup_copy_logfile(lsn_t from_lsn, my_bool is_last)
{
/* definition from recv_recovery_from_checkpoint_start() */
- log_group_t* group;
- lsn_t group_scanned_lsn;
lsn_t contiguous_lsn;
ut_a(dst_log_file != NULL);
@@ -2879,66 +2771,50 @@ xtrabackup_copy_logfile(lsn_t from_lsn, my_bool is_last)
/* TODO: We must check the contiguous_lsn still exists in log file.. */
- group = UT_LIST_GET_FIRST(log_sys->log_groups);
+ bool finished;
+ lsn_t start_lsn;
+ lsn_t end_lsn;
- while (group) {
- bool finished;
- lsn_t start_lsn;
- lsn_t end_lsn;
+ /* reference recv_group_scan_log_recs() */
- /* reference recv_group_scan_log_recs() */
- finished = false;
+ start_lsn = contiguous_lsn;
- start_lsn = contiguous_lsn;
+ do {
+ end_lsn = start_lsn + RECV_SCAN_SIZE;
- while (!finished) {
+ xtrabackup_io_throttling();
- end_lsn = start_lsn + RECV_SCAN_SIZE;
+ log_mutex_enter();
- xtrabackup_io_throttling();
-
- mutex_enter(&log_sys->mutex);
-
- log_group_read_log_seg(LOG_RECOVER, log_sys->buf,
- group, start_lsn, end_lsn, false);
-
- if (!xtrabackup_scan_log_recs(group, is_last,
- start_lsn, &contiguous_lsn, &group_scanned_lsn,
- &finished)) {
- goto error;
- }
+ log_group_read_log_seg(LOG_RECOVER, log_sys->buf,
+ &log_sys->log, start_lsn, end_lsn);
- mutex_exit(&log_sys->mutex);
+ bool success = xtrabackup_scan_log_recs(
+ &log_sys->log, is_last,
+ start_lsn, &contiguous_lsn,
+ &log_sys->log.scanned_lsn,
+ &finished);
- start_lsn = end_lsn;
+ log_mutex_exit();
+ if (!success) {
+ ds_close(dst_log_file);
+ msg("xtrabackup: Error: xtrabackup_copy_logfile()"
+ " failed.\n");
+ return(TRUE);
}
- group->scanned_lsn = group_scanned_lsn;
+ start_lsn = end_lsn;
+ } while (!finished);
- msg_ts(">> log scanned up to (" LSN_PF ")\n",
- group->scanned_lsn);
-
- group = UT_LIST_GET_NEXT(log_groups, group);
-
- /* update global variable*/
- log_copy_scanned_lsn = group_scanned_lsn;
-
- /* innodb_mirrored_log_groups must be 1, no other groups */
- ut_a(group == NULL);
-
- debug_sync_point("xtrabackup_copy_logfile_pause");
-
- }
+ msg_ts(">> log scanned up to (" LSN_PF ")\n",
+ log_sys->log.scanned_lsn);
+ /* update global variable*/
+ log_copy_scanned_lsn = log_sys->log.scanned_lsn;
+ debug_sync_point("xtrabackup_copy_logfile_pause");
return(FALSE);
-
-error:
- mutex_exit(&log_sys->mutex);
- ds_close(dst_log_file);
- msg("xtrabackup: Error: xtrabackup_copy_logfile() failed.\n");
- return(TRUE);
}
static
@@ -3106,14 +2982,6 @@ files first, and then streams them in a serialized way when closed. */
static void
xtrabackup_init_datasinks(void)
{
- if (xtrabackup_parallel > 1 && xtrabackup_stream &&
- xtrabackup_stream_fmt == XB_STREAM_FMT_TAR) {
- msg("xtrabackup: warning: the --parallel option does not have "
- "any effect when streaming in the 'tar' format. "
- "You can use the 'xbstream' format instead.\n");
- xtrabackup_parallel = 1;
- }
-
/* Start building out the pipelines from the terminus back */
if (xtrabackup_stream) {
/* All streaming goes to stdout */
@@ -3131,30 +2999,17 @@ xtrabackup_init_datasinks(void)
/* Stream formatting */
if (xtrabackup_stream) {
ds_ctxt_t *ds;
- if (xtrabackup_stream_fmt == XB_STREAM_FMT_TAR) {
- ds = ds_create(xtrabackup_target_dir, DS_TYPE_ARCHIVE);
- } else if (xtrabackup_stream_fmt == XB_STREAM_FMT_XBSTREAM) {
- ds = ds_create(xtrabackup_target_dir, DS_TYPE_XBSTREAM);
- } else {
- /* bad juju... */
- ds = NULL;
- }
+
+ ut_a(xtrabackup_stream_fmt == XB_STREAM_FMT_XBSTREAM);
+ ds = ds_create(xtrabackup_target_dir, DS_TYPE_XBSTREAM);
xtrabackup_add_datasink(ds);
ds_set_pipe(ds, ds_data);
ds_data = ds;
- if (xtrabackup_stream_fmt != XB_STREAM_FMT_XBSTREAM) {
- /* 'tar' does not allow parallel streams */
- ds_redo = ds_meta = ds_create(xtrabackup_target_dir,
- DS_TYPE_TMPFILE);
- xtrabackup_add_datasink(ds_meta);
- ds_set_pipe(ds_meta, ds);
- } else {
- ds_redo = ds_meta = ds_data;
- }
+ ds_redo = ds_meta = ds_data;
}
/* Encryption */
@@ -3271,13 +3126,12 @@ xb_fil_io_init(void)
Populates the tablespace memory cache by scanning for and opening data files.
@returns DB_SUCCESS or error code.*/
static
-ulint
-xb_load_tablespaces(void)
-/*=====================*/
+dberr_t
+xb_load_tablespaces()
{
ulint i;
- ibool create_new_db;
- ulint err;
+ bool create_new_db;
+ dberr_t err;
ulint sum_of_new_sizes;
lsn_t min_arch_logno, max_arch_logno;
@@ -3292,7 +3146,7 @@ xb_load_tablespaces(void)
err = open_or_create_data_files(&create_new_db,
&min_arch_logno, &max_arch_logno,
- &min_flushed_lsn, &max_flushed_lsn,
+ &flushed_lsn,
&sum_of_new_sizes);
if (err != DB_SUCCESS) {
msg("xtrabackup: Could not open or create data files.\n"
@@ -3348,9 +3202,9 @@ xb_load_tablespaces(void)
Initialize the tablespace memory cache and populate it by scanning for and
opening data files.
@returns DB_SUCCESS or error code.*/
-ulint
-xb_data_files_init(void)
-/*====================*/
+static
+dberr_t
+xb_data_files_init()
{
xb_fil_io_init();
@@ -3359,9 +3213,9 @@ xb_data_files_init(void)
/************************************************************************
Destroy the tablespace memory cache. */
+static
void
-xb_data_files_close(void)
-/*====================*/
+xb_data_files_close()
{
ulint i;
@@ -3772,7 +3626,6 @@ open_or_create_log_file(
ibool log_file_has_been_opened,/*!< in: TRUE if a log file has been
opened before: then it is an error
to try to create another log file */
- ulint k, /*!< in: log group number */
ulint i) /*!< in: log file number in group */
{
ibool ret;
@@ -3782,8 +3635,6 @@ open_or_create_log_file(
UT_NOT_USED(create_new_db);
UT_NOT_USED(log_file_has_been_opened);
- UT_NOT_USED(k);
- ut_ad(k == 0);
*log_file_created = FALSE;
@@ -3831,20 +3682,14 @@ open_or_create_log_file(
which is for this log group */
fil_space_create(name,
- 2 * k + SRV_LOG_SPACE_FIRST_ID, 0, FIL_LOG, 0, 0);
+ SRV_LOG_SPACE_FIRST_ID, 0, FIL_TYPE_LOG, 0, 0);
+ log_init(srv_n_log_files, srv_log_file_size * UNIV_PAGE_SIZE);
}
ut_a(fil_validate());
ut_a(fil_node_create(name, (ulint)srv_log_file_size,
- 2 * k + SRV_LOG_SPACE_FIRST_ID, FALSE));
- if (i == 0) {
- log_group_init(k, srv_n_log_files,
- srv_log_file_size * UNIV_PAGE_SIZE,
- 2 * k + SRV_LOG_SPACE_FIRST_ID,
- SRV_LOG_SPACE_FIRST_ID + 1); /* dummy arch
- space id */
- }
+ SRV_LOG_SPACE_FIRST_ID, FALSE));
return(DB_SUCCESS);
}
@@ -3938,7 +3783,7 @@ xtrabackup_backup_func(void)
lsn_t latest_cp;
uint i;
uint count;
- os_ib_mutex_t count_mutex;
+ pthread_mutex_t count_mutex;
data_thread_ctxt_t *data_threads;
#ifdef USE_POSIX_FADVISE
@@ -3962,6 +3807,7 @@ xtrabackup_backup_func(void)
mysql_data_home[0]=FN_CURLIB; // all paths are relative from here
mysql_data_home[1]=0;
+ srv_n_purge_threads = 1;
srv_read_only_mode = TRUE;
srv_backup_mode = TRUE;
@@ -4057,13 +3903,13 @@ xtrabackup_backup_func(void)
xb_fil_io_init();
- log_init();
+ log_sys_init();
lock_sys_create(srv_lock_table_size);
for (i = 0; i < srv_n_log_files; i++) {
err = open_or_create_log_file(FALSE, &log_file_created,
- log_opened, 0, i);
+ log_opened, i);
if (err != DB_SUCCESS) {
//return((int) err);
@@ -4118,72 +3964,60 @@ xtrabackup_backup_func(void)
fil_system_t* f_system = fil_system;
/* definition from recv_recovery_from_checkpoint_start() */
- log_group_t* max_cp_group;
ulint max_cp_field;
- byte* buf;
- byte* log_hdr_buf_;
- byte* log_hdr_buf;
- ulint err;
/* start back ground thread to copy newer log */
os_thread_id_t log_copying_thread_id;
datafiles_iter_t *it;
- log_hdr_buf_ = static_cast<byte *>
- (ut_malloc(LOG_FILE_HDR_SIZE + UNIV_PAGE_SIZE_MAX));
- log_hdr_buf = static_cast<byte *>
- (ut_align(log_hdr_buf_, UNIV_PAGE_SIZE_MAX));
-
/* get current checkpoint_lsn */
/* Look for the latest checkpoint from any of the log groups */
- mutex_enter(&log_sys->mutex);
+ log_mutex_enter();
- err = recv_find_max_checkpoint(&max_cp_group, &max_cp_field);
+ dberr_t err = recv_find_max_checkpoint(&max_cp_field);
if (err != DB_SUCCESS) {
+ exit(EXIT_FAILURE);
+ }
- ut_free(log_hdr_buf_);
+ if (log_sys->log.format == 0) {
+old_format:
+ msg("xtrabackup: Error: cannot process redo log"
+ " before MariaDB 10.2.2\n");
exit(EXIT_FAILURE);
}
- log_group_read_checkpoint_info(max_cp_group, max_cp_field);
- buf = log_sys->checkpoint_buf;
+ ut_ad(!((log_sys->log.format ^ LOG_HEADER_FORMAT_CURRENT)
+ & ~LOG_HEADER_FORMAT_ENCRYPTED));
+
+ const byte* buf = log_sys->checkpoint_buf;
checkpoint_lsn_start = mach_read_from_8(buf + LOG_CHECKPOINT_LSN);
checkpoint_no_start = mach_read_from_8(buf + LOG_CHECKPOINT_NO);
- mutex_exit(&log_sys->mutex);
-
reread_log_header:
- fil_io(OS_FILE_READ | OS_FILE_LOG, true, max_cp_group->space_id,
- 0,
- 0, 0, LOG_FILE_HDR_SIZE,
- log_hdr_buf, max_cp_group, NULL);
-
- /* check consistency of log file header to copy */
- mutex_enter(&log_sys->mutex);
-
- err = recv_find_max_checkpoint(&max_cp_group, &max_cp_field);
+ err = recv_find_max_checkpoint(&max_cp_field);
- if (err != DB_SUCCESS) {
+ if (err != DB_SUCCESS) {
+ exit(EXIT_FAILURE);
+ }
- ut_free(log_hdr_buf_);
- exit(EXIT_FAILURE);
- }
+ if (log_sys->log.format == 0) {
+ goto old_format;
+ }
- log_group_read_checkpoint_info(max_cp_group, max_cp_field);
- buf = log_sys->checkpoint_buf;
+ ut_ad(!((log_sys->log.format ^ LOG_HEADER_FORMAT_CURRENT)
+ & ~LOG_HEADER_FORMAT_ENCRYPTED));
if(checkpoint_no_start != mach_read_from_8(buf + LOG_CHECKPOINT_NO)) {
checkpoint_lsn_start = mach_read_from_8(buf + LOG_CHECKPOINT_LSN);
checkpoint_no_start = mach_read_from_8(buf + LOG_CHECKPOINT_NO);
- mutex_exit(&log_sys->mutex);
goto reread_log_header;
}
- mutex_exit(&log_sys->mutex);
+ log_mutex_exit();
xtrabackup_init_datasinks();
@@ -4230,10 +4064,6 @@ reread_log_header:
&io_watching_thread_id);
}
- mutex_enter(&log_sys->mutex);
- xtrabackup_choose_lsn_offset(checkpoint_lsn_start);
- mutex_exit(&log_sys->mutex);
-
/* copy log file by current position */
if(xtrabackup_copy_logfile(checkpoint_lsn_start, FALSE))
exit(EXIT_FAILURE);
@@ -4246,7 +4076,7 @@ reread_log_header:
err = xb_load_tablespaces();
if (err != DB_SUCCESS) {
msg("xtrabackup: error: xb_load_tablespaces() failed with"
- "error code %lu\n", err);
+ "error code %u\n", err);
exit(EXIT_FAILURE);
}
@@ -4323,35 +4153,24 @@ reread_log_header:
}
/* read the latest checkpoint lsn */
- latest_cp = 0;
{
- log_group_t* max_cp_group;
ulint max_cp_field;
- ulint err;
- mutex_enter(&log_sys->mutex);
+ log_mutex_enter();
- err = recv_find_max_checkpoint(&max_cp_group, &max_cp_field);
-
- if (err != DB_SUCCESS) {
+ if (recv_find_max_checkpoint(&max_cp_field) == DB_SUCCESS
+ && log_sys->log.format != 0) {
+ latest_cp = mach_read_from_8(log_sys->checkpoint_buf +
+ LOG_CHECKPOINT_LSN);
+ msg("xtrabackup: The latest check point"
+ " (for incremental): '" LSN_PF "'\n", latest_cp);
+ } else {
+ latest_cp = 0;
msg("xtrabackup: Error: recv_find_max_checkpoint() failed.\n");
- mutex_exit(&log_sys->mutex);
- goto skip_last_cp;
}
-
- log_group_read_checkpoint_info(max_cp_group, max_cp_field);
-
- xtrabackup_choose_lsn_offset(checkpoint_lsn_start);
-
- latest_cp = mach_read_from_8(log_sys->checkpoint_buf +
- LOG_CHECKPOINT_LSN);
-
- mutex_exit(&log_sys->mutex);
-
- msg("xtrabackup: The latest check point (for incremental): "
- "'" LSN_PF "'\n", latest_cp);
+ log_mutex_exit();
}
-skip_last_cp:
+
/* stop log_copying_thread */
log_copying = FALSE;
os_event_set(log_copying_stop);
@@ -4650,6 +4469,7 @@ xtrabackup_stats_func(int argc, char **argv)
mysql_data_home[0]=FN_CURLIB; // all paths are relative from here
mysql_data_home[1]=0;
+ srv_n_purge_threads = 1;
/* set read only */
srv_read_only_mode = TRUE;
@@ -4845,8 +4665,7 @@ end:
xb_filters_free();
/* shutdown InnoDB */
- if(innodb_end())
- exit(EXIT_FAILURE);
+ innodb_end();
}
/* ================= prepare ================= */
@@ -4854,7 +4673,7 @@ end:
static my_bool
xtrabackup_init_temp_log(void)
{
- os_file_t src_file = XB_FILE_UNDEFINED;
+ pfs_os_file_t src_file;
char src_path[FN_REFLEN];
char dst_path[FN_REFLEN];
ibool success;
@@ -5181,7 +5000,7 @@ xb_space_create_file(
ulint space_id, /*!<in: space id */
ulint flags __attribute__((unused)),/*!<in: tablespace
flags */
- os_file_t* file) /*!<out: file handle */
+ pfs_os_file_t* file) /*!<out: file handle */
{
ibool ret;
byte* buf;
@@ -5260,7 +5079,7 @@ mismatching ID, renames it to xtrabackup_tmp_#ID.ibd. If there was no
matching file, creates a new tablespace.
@return file handle of matched or created file */
static
-os_file_t
+pfs_os_file_t
xb_delta_open_matching_space(
const char* dbname, /* in: path to destination database dir */
const char* name, /* in: name of delta file (without .delta) */
@@ -5274,7 +5093,7 @@ xb_delta_open_matching_space(
char dest_space_name[FN_REFLEN];
ibool ok;
fil_space_t* fil_space;
- os_file_t file = 0;
+ pfs_os_file_t file;
ulint tablespace_flags;
xb_filter_entry_t* table;
@@ -5438,8 +5257,8 @@ xtrabackup_apply_delta(
including the .delta extension */
void* /*data*/)
{
- os_file_t src_file = XB_FILE_UNDEFINED;
- os_file_t dst_file = XB_FILE_UNDEFINED;
+ pfs_os_file_t src_file;
+ pfs_os_file_t dst_file;
char src_path[FN_REFLEN];
char dst_path[FN_REFLEN];
char meta_path[FN_REFLEN];
@@ -5813,7 +5632,7 @@ xtrabackup_apply_deltas()
static my_bool
xtrabackup_close_temp_log(my_bool clear_flag)
{
- os_file_t src_file = XB_FILE_UNDEFINED;
+ pfs_os_file_t src_file;
char src_path[FN_REFLEN];
char dst_path[FN_REFLEN];
ibool success;
@@ -6407,6 +6226,7 @@ skip_check:
/* Create logfiles for recovery from 'xtrabackup_logfile', before start InnoDB */
srv_max_n_threads = 1000;
+ srv_n_purge_threads = 1;
ut_mem_init();
/* temporally dummy value to avoid crash */
srv_page_size_shift = 14;
@@ -6499,13 +6319,13 @@ skip_check:
metadata_last_lsn);
xtrabackup_archived_to_lsn = metadata_last_lsn;
}
- if (xtrabackup_archived_to_lsn < min_flushed_lsn) {
+ if (xtrabackup_archived_to_lsn < flushed_lsn) {
msg("xtrabackup: error: logs applying "
"lsn limit " UINT64PF " is less than "
"min_flushed_lsn " UINT64PF
", there is nothing to do\n",
xtrabackup_archived_to_lsn,
- min_flushed_lsn);
+ flushed_lsn);
goto error_cleanup;
}
}
@@ -6516,7 +6336,7 @@ skip_check:
*/
xtrabackup_apply_log_only = srv_apply_log_only = true;
- if (!xtrabackup_arch_search_files(min_flushed_lsn)) {
+ if (!xtrabackup_arch_search_files(flushed_lsn)) {
goto error_cleanup;
}
@@ -6594,7 +6414,7 @@ skip_check:
if (xtrabackup_export) {
msg("xtrabackup: export option is specified.\n");
- os_file_t info_file = XB_FILE_UNDEFINED;
+ pfs_os_file_t info_file;
char info_file_path[FN_REFLEN];
ibool success;
char table_name[FN_REFLEN];
@@ -6788,8 +6608,7 @@ next_node:
xb_write_galera_info(xtrabackup_incremental);
#endif
- if(innodb_end())
- goto error_cleanup;
+ innodb_end();
innodb_free_param();
@@ -6875,9 +6694,7 @@ next_node:
if(innodb_init())
goto error;
- if(innodb_end())
- goto error;
-
+ innodb_end();
innodb_free_param();
}
@@ -7403,22 +7220,6 @@ int main(int argc, char **argv)
innobase_file_per_table = TRUE;
}
- if (xtrabackup_incremental && xtrabackup_stream &&
- xtrabackup_stream_fmt == XB_STREAM_FMT_TAR) {
- msg("xtrabackup: error: "
- "streaming incremental backups are incompatible with the \n"
- "'tar' streaming format. Use --stream=xbstream instead.\n");
- exit(EXIT_FAILURE);
- }
-
- if ((xtrabackup_compress || xtrabackup_encrypt) && xtrabackup_stream &&
- xtrabackup_stream_fmt == XB_STREAM_FMT_TAR) {
- msg("xtrabackup: error: "
- "compressed and encrypted backups are incompatible with the \n"
- "'tar' streaming format. Use --stream=xbstream instead.\n");
- exit(EXIT_FAILURE);
- }
-
if (!xtrabackup_prepare &&
(innobase_log_arch_dir || xtrabackup_archived_to_lsn)) {
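The checkpoint handling rewritten above pulls checkpoint_lsn_start and checkpoint_no_start straight out of log_sys->checkpoint_buf with mach_read_from_8(), InnoDB's big-endian 8-byte load. For readers without the InnoDB headers at hand, a stand-alone equivalent looks roughly like this (sketch only; the function in the tree differs in naming details and inlining):

#include <stdint.h>

/* Read a 64-bit value stored most-significant byte first, as InnoDB does
   for the LOG_CHECKPOINT_LSN and LOG_CHECKPOINT_NO fields. */
static uint64_t read_big_endian_8(const unsigned char* b)
{
	uint64_t v = 0;
	for (int i = 0; i < 8; i++) {
		v = (v << 8) | b[i];
	}
	return v;
}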
diff --git a/extra/mariabackup/xtrabackup.h b/extra/mariabackup/xtrabackup.h
index 51491ce1f00..371466aad2c 100644
--- a/extra/mariabackup/xtrabackup.h
+++ b/extra/mariabackup/xtrabackup.h
@@ -27,7 +27,7 @@ Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
#include "changed_page_bitmap.h"
#ifdef __WIN__
-#define XB_FILE_UNDEFINED NULL
+#define XB_FILE_UNDEFINED INVALID_HANDLE_VALUE
#else
#define XB_FILE_UNDEFINED (-1)
#endif
@@ -182,19 +182,10 @@ datafiles_iter_t *datafiles_iter_new(fil_system_t *f_system);
fil_node_t *datafiles_iter_next(datafiles_iter_t *it);
void datafiles_iter_free(datafiles_iter_t *it);
-/************************************************************************
-Initialize the tablespace memory cache and populate it by scanning for and
-opening data files */
-ulint xb_data_files_init(void);
-
-/************************************************************************
-Destroy the tablespace memory cache. */
-void xb_data_files_close(void);
-
/***********************************************************************
Reads the space flags from a given data file and returns the compressed
page size, or 0 if the space is not compressed. */
-ulint xb_get_zip_size(os_file_t file);
+ulint xb_get_zip_size(pfs_os_file_t file);
/************************************************************************
Checks if a table specified as a name in the form "database/name" (InnoDB 5.6)
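The XB_FILE_UNDEFINED change earlier in this file (NULL to INVALID_HANDLE_VALUE under __WIN__) matters because Win32 file APIs report a failed open with INVALID_HANDLE_VALUE rather than NULL, so a NULL sentinel can mistake a failed open for a live handle; that is likely why the sentinel is corrected here. A small illustration, not code from the patch:

#ifdef _WIN32
#include <windows.h>

/* CreateFile() returns INVALID_HANDLE_VALUE (not NULL) on failure, so that
   value is the only safe "no file" marker for a raw HANDLE. */
static bool file_handle_is_open(HANDLE h)
{
	return h != NULL && h != INVALID_HANDLE_VALUE;
}
#endif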
diff --git a/extra/replace.c b/extra/replace.c
index a9982670384..b8c328f2902 100644
--- a/extra/replace.c
+++ b/extra/replace.c
@@ -174,6 +174,7 @@ register char **argv[];
break;
case 'V':
version=1;
+ /* fall through */
case 'I':
case '?':
help=1; /* Help text written */
diff --git a/include/my_sys.h b/include/my_sys.h
index dfabda42022..ed5a7200ec8 100644
--- a/include/my_sys.h
+++ b/include/my_sys.h
@@ -949,6 +949,12 @@ extern ulonglong my_getcputime(void);
#define hrtime_sec_part(X) ((ulong)((X).val % HRTIME_RESOLUTION))
#define my_time(X) hrtime_to_time(my_hrtime())
+#if STACK_DIRECTION < 0
+#define available_stack_size(CUR,END) (long) ((char*)(CUR) - (char*)(END))
+#else
+#define available_stack_size(CUR,END) (long) ((char*)(END) - (char*)(CUR))
+#endif
+
#ifdef HAVE_SYS_MMAN_H
#include <sys/mman.h>
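The available_stack_size() macro added to my_sys.h above returns the number of bytes between the current stack position and a recorded stack end, with the subtraction order chosen by STACK_DIRECTION. A hypothetical caller is sketched below; the helper name and the idea of passing a saved stack-end pointer are illustrative (the server's real user is its stack-overrun check), and the usual my_global.h/my_sys.h include order is assumed.

#include "my_sys.h"               /* brings in available_stack_size() */

/* Return true if at least 'needed' bytes of stack remain, given the stack
   end pointer the thread recorded at startup (sketch only). */
static bool enough_stack_left(void* stack_end, long needed)
{
	char here;                /* address of a local marks the current depth */
	return available_stack_size(&here, stack_end) > needed;
}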
diff --git a/libmysqld/libmysql.c b/libmysqld/libmysql.c
index 09d5abbbb6a..09a477d04ee 100644
--- a/libmysqld/libmysql.c
+++ b/libmysqld/libmysql.c
@@ -1404,7 +1404,7 @@ void set_stmt_errmsg(MYSQL_STMT *stmt, NET *net)
DBUG_ASSERT(stmt != 0);
stmt->last_errno= net->last_errno;
- if (net->last_error && net->last_error[0])
+ if (net->last_error[0])
strmov(stmt->last_error, net->last_error);
strmov(stmt->sqlstate, net->sqlstate);
@@ -4707,8 +4707,7 @@ my_bool STDCALL mysql_stmt_close(MYSQL_STMT *stmt)
{
uchar buff[MYSQL_STMT_HEADER]; /* 4 bytes - stmt id */
- if ((rc= reset_stmt_handle(stmt, RESET_ALL_BUFFERS | RESET_CLEAR_ERROR)))
- return rc;
+ reset_stmt_handle(stmt, RESET_ALL_BUFFERS | RESET_CLEAR_ERROR);
int4store(buff, stmt->stmt_id);
if ((rc= stmt_command(mysql, COM_STMT_CLOSE, buff, 4, stmt)))
diff --git a/mysql-test/include/innodb_page_size.combinations b/mysql-test/include/innodb_page_size.combinations
new file mode 100644
index 00000000000..b9b1e1b106f
--- /dev/null
+++ b/mysql-test/include/innodb_page_size.combinations
@@ -0,0 +1,16 @@
+[64k]
+innodb-page-size=64K
+innodb-buffer-pool-size=24M
+
+[32k]
+innodb-page-size=32K
+innodb-buffer-pool-size=24M
+
+[16k]
+innodb-page-size=16K
+
+[8k]
+innodb-page-size=8K
+
+[4k]
+innodb-page-size=4K
diff --git a/mysql-test/include/innodb_page_size.inc b/mysql-test/include/innodb_page_size.inc
new file mode 100644
index 00000000000..754b640da86
--- /dev/null
+++ b/mysql-test/include/innodb_page_size.inc
@@ -0,0 +1,4 @@
+# The goal of including this file is to enable innodb_page_size combinations
+# (see include/innodb_page_size.combinations)
+
+--source include/have_innodb.inc
diff --git a/mysql-test/include/innodb_page_size_small.combinations b/mysql-test/include/innodb_page_size_small.combinations
new file mode 100644
index 00000000000..a9b7c7ce14d
--- /dev/null
+++ b/mysql-test/include/innodb_page_size_small.combinations
@@ -0,0 +1,8 @@
+[16k]
+innodb-page-size=16K
+
+[8k]
+innodb-page-size=8K
+
+[4k]
+innodb-page-size=4K
diff --git a/mysql-test/include/innodb_page_size_small.inc b/mysql-test/include/innodb_page_size_small.inc
new file mode 100644
index 00000000000..754b640da86
--- /dev/null
+++ b/mysql-test/include/innodb_page_size_small.inc
@@ -0,0 +1,4 @@
+# The goal of including this file is to enable innodb_page_size combinations
+# (see include/innodb_page_size.combinations)
+
+--source include/have_innodb.inc
diff --git a/mysql-test/include/varchar.inc b/mysql-test/include/varchar.inc
index 50741130895..7add7113b8d 100644
--- a/mysql-test/include/varchar.inc
+++ b/mysql-test/include/varchar.inc
@@ -90,6 +90,7 @@ explain select count(*) from t1 where v between 'a' and 'a ' and v between 'a '
--replace_regex /Duplicate entry '[^']+' for key/Duplicate entry '{ ' for key/
--error ER_DUP_ENTRY
alter table t1 add unique(v);
+show warnings;
alter table t1 add key(v);
select concat('*',v,'*',c,'*',t,'*') as qq from t1 where v='a';
--replace_column 6 # 9 # 10 #
diff --git a/mysql-test/lib/mtr_report.pm b/mysql-test/lib/mtr_report.pm
index 97ace54f0fb..d93d8adf34c 100644
--- a/mysql-test/lib/mtr_report.pm
+++ b/mysql-test/lib/mtr_report.pm
@@ -208,6 +208,10 @@ sub mtr_report_test ($) {
{
mtr_report("[ skipped ]");
}
+ if ( $tinfo->{'warnings'} )
+ {
+ mtr_report($tinfo->{'warnings'});
+ }
}
elsif ($result eq 'MTR_RES_PASSED')
{
diff --git a/mysql-test/mysql-test-run.pl b/mysql-test/mysql-test-run.pl
index 1d41067bb1e..4fc0c90c07d 100755
--- a/mysql-test/mysql-test-run.pl
+++ b/mysql-test/mysql-test-run.pl
@@ -2794,15 +2794,26 @@ sub mysql_server_start($) {
}
my $mysqld_basedir= $mysqld->value('basedir');
+ my $extra_opts= get_extra_opts($mysqld, $tinfo);
+
if ( $basedir eq $mysqld_basedir )
{
if (! $opt_start_dirty) # If dirty, keep possibly grown system db
{
- # Copy datadir from installed system db
- my $path= ($opt_parallel == 1) ? "$opt_vardir" : "$opt_vardir/..";
- my $install_db= "$path/install.db";
- copytree($install_db, $datadir) if -d $install_db;
- mtr_error("Failed to copy system db to '$datadir'") unless -d $datadir;
+ # Some InnoDB options are incompatible with the default bootstrap.
+ # If they are used, re-bootstrap
+ if ( $extra_opts and
+ "@$extra_opts" =~ /--innodb[-_](?:page[-_]size|checksum[-_]algorithm|undo[-_]tablespaces|log[-_]group[-_]home[-_]dir|data[-_]home[-_]dir)/ )
+ {
+ mysql_install_db($mysqld, undef, $extra_opts);
+ }
+ else {
+ # Copy datadir from installed system db
+ my $path= ($opt_parallel == 1) ? "$opt_vardir" : "$opt_vardir/..";
+ my $install_db= "$path/install.db";
+ copytree($install_db, $datadir) if -d $install_db;
+ mtr_error("Failed to copy system db to '$datadir'") unless -d $datadir;
+ }
}
}
else
@@ -2841,7 +2852,6 @@ sub mysql_server_start($) {
if (!$opt_embedded_server)
{
- my $extra_opts= get_extra_opts($mysqld, $tinfo);
mysqld_start($mysqld,$extra_opts);
# Save this test case information, so next can examine it
@@ -3065,7 +3075,7 @@ sub default_mysqld {
sub mysql_install_db {
- my ($mysqld, $datadir)= @_;
+ my ($mysqld, $datadir, $extra_opts)= @_;
my $install_datadir= $datadir || $mysqld->value('datadir');
my $install_basedir= $mysqld->value('basedir');
@@ -3106,6 +3116,13 @@ sub mysql_install_db {
mtr_add_arg($args, $extra_opt);
}
}
+ # InnoDB options can come not only from the command line, but also
+ # from option files or combinations
+ foreach my $extra_opt ( @$extra_opts ) {
+ if ($extra_opt =~ /--innodb/) {
+ mtr_add_arg($args, $extra_opt);
+ }
+ }
# If DISABLE_GRANT_OPTIONS is defined when the server is compiled (e.g.,
# configure --disable-grant-options), mysqld will not recognize the
@@ -3990,12 +4007,13 @@ sub run_testcase ($$) {
{
my $res= $test->exit_status();
- if ($res == 0 and $opt_warnings and check_warnings($tinfo) )
+ if (($res == 0 or $res == 62) and $opt_warnings and check_warnings($tinfo) )
{
- # Test case suceeded, but it has produced unexpected
- # warnings, continue in $res == 1
- $res= 1;
- resfile_output($tinfo->{'warnings'}) if $opt_resfile;
+ # If test case suceeded, but it has produced unexpected
+ # warnings, continue with $res == 1;
+ # but if the test was skipped, it should remain skipped
+ $res= 1 if $res == 0;
+ resfile_output($tinfo->{'warnings'}) if $opt_resfile;
}
if ( $res == 0 )
diff --git a/mysql-test/r/alter_table_online.result b/mysql-test/r/alter_table_online.result
index b3ef9c354f7..54df4e0c96c 100644
--- a/mysql-test/r/alter_table_online.result
+++ b/mysql-test/r/alter_table_online.result
@@ -184,6 +184,35 @@ CREATE TABLE t1 (a LONGTEXT COLLATE latin1_general_ci);
ALTER TABLE t1 MODIFY a LONGTEXT COLLATE latin1_swedish_ci, ALGORITHM=INPLACE;
ERROR 0A000: ALGORITHM=INPLACE is not supported for this operation. Try ALGORITHM=COPY
DROP TABLE t1;
-#
-# End of MDEV-8948 ALTER ... INPLACE does work for BINARY, BLOB
-#
+select @@global.delay_key_write;
+@@global.delay_key_write
+ON
+create table t1 (a int, b int, key(b));
+flush tables;
+flush status;
+show status like 'Feature_delay_key_write';
+Variable_name Value
+Feature_delay_key_write 0
+insert t1 values (1,2),(2,3),(3,4);
+show status like 'Feature_delay_key_write';
+Variable_name Value
+Feature_delay_key_write 0
+alter online table t1 delay_key_write=1;
+show status like 'Feature_delay_key_write';
+Variable_name Value
+Feature_delay_key_write 1
+flush tables;
+insert t1 values (1,2),(2,3),(3,4);
+show status like 'Feature_delay_key_write';
+Variable_name Value
+Feature_delay_key_write 2
+alter online table t1 delay_key_write=0;
+show status like 'Feature_delay_key_write';
+Variable_name Value
+Feature_delay_key_write 2
+flush tables;
+insert t1 values (1,2),(2,3),(3,4);
+show status like 'Feature_delay_key_write';
+Variable_name Value
+Feature_delay_key_write 2
+drop table t1;
diff --git a/mysql-test/r/analyze_format_json.result b/mysql-test/r/analyze_format_json.result
index e5edc1e344b..e077f919aa0 100644
--- a/mysql-test/r/analyze_format_json.result
+++ b/mysql-test/r/analyze_format_json.result
@@ -614,22 +614,24 @@ ANALYZE
},
"block-nl-join": {
"table": {
- "table_name": "<subquery2>",
+ "table_name": "t2",
"access_type": "ALL",
- "possible_keys": ["distinct_key"],
"r_loops": 1,
"rows": 2,
"r_rows": 2,
"r_total_time_ms": "REPLACED",
"filtered": 100,
- "r_filtered": 100
+ "r_filtered": 0,
+ "attached_condition": "<in_optimizer>(t2.b,t2.b in (subquery#2))"
},
"buffer_type": "flat",
"buffer_size": "256Kb",
"join_type": "BNL",
- "r_filtered": 100,
- "materialized": {
- "unique": 1,
+ "attached_condition": "<in_optimizer>(t2.b,t2.b in (subquery#2))",
+ "r_filtered": null
+ },
+ "subqueries": [
+ {
"query_block": {
"select_id": 2,
"r_loops": 1,
@@ -646,24 +648,7 @@ ANALYZE
}
}
}
- },
- "block-nl-join": {
- "table": {
- "table_name": "t2",
- "access_type": "ALL",
- "r_loops": 1,
- "rows": 2,
- "r_rows": 2,
- "r_total_time_ms": "REPLACED",
- "filtered": 100,
- "r_filtered": 100
- },
- "buffer_type": "incremental",
- "buffer_size": "256Kb",
- "join_type": "BNL",
- "attached_condition": "t2.b = `<subquery2>`.a",
- "r_filtered": 0
- }
+ ]
}
}
drop table t1,t2;
diff --git a/mysql-test/r/cte_nonrecursive.result b/mysql-test/r/cte_nonrecursive.result
index 317d8bd05df..262b49e9a60 100644
--- a/mysql-test/r/cte_nonrecursive.result
+++ b/mysql-test/r/cte_nonrecursive.result
@@ -507,6 +507,7 @@ select t.a, count(*) from t1,t where t1.a=t.a group by t.a;
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t1 ALL NULL NULL NULL NULL 8 Using where; Using temporary; Using filesort
1 PRIMARY t1 ALL NULL NULL NULL NULL 8 Using where; Using join buffer (flat, BNL join)
+1 PRIMARY <subquery3> eq_ref distinct_key distinct_key 35 func 1
3 MATERIALIZED t2 ALL NULL NULL NULL NULL 4 Using where
3 MATERIALIZED t1 ALL NULL NULL NULL NULL 8 Using where; Using join buffer (flat, BNL join)
explain
@@ -522,6 +523,7 @@ where t1.a=t.a group by t.a;
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t1 ALL NULL NULL NULL NULL 8 Using where; Using temporary; Using filesort
1 PRIMARY t1 ALL NULL NULL NULL NULL 8 Using where; Using join buffer (flat, BNL join)
+1 PRIMARY <subquery3> eq_ref distinct_key distinct_key 35 func 1
3 MATERIALIZED t2 ALL NULL NULL NULL NULL 4 Using where
3 MATERIALIZED t1 ALL NULL NULL NULL NULL 8 Using where; Using join buffer (flat, BNL join)
# with clause in the specification of a derived table
diff --git a/mysql-test/r/derived_view.result b/mysql-test/r/derived_view.result
index e986b486bf2..deba9b1f09a 100644
--- a/mysql-test/r/derived_view.result
+++ b/mysql-test/r/derived_view.result
@@ -2916,5 +2916,64 @@ Handler_read_rnd_deleted 0
Handler_read_rnd_next 27
deallocate prepare stmt1;
drop table t1,t2;
+#
+# Bug mdev-12670: mergeable derived / view with subqueries
+# subject to semi-join optimizations
+# (actually this is a 5.3 bug.)
+#
+create table t1 (a int) engine=myisam;
+insert into t1 values (5),(3),(2),(7),(2),(5),(1);
+create table t2 (b int, index idx(b)) engine=myisam;
+insert into t2 values (2),(3),(2),(1),(3),(4);
+insert into t2 select b+10 from t2;
+insert into t2 select b+10 from t2;
+insert into t2 select b+10 from t2;
+insert into t2 select b+10 from t2;
+insert into t2 select b+10 from t2;
+insert into t2 select b+10 from t2;
+insert into t2 select b+10 from t2;
+insert into t2 select b+10 from t2;
+insert into t2 select b+10 from t2;
+insert into t2 select b+10 from t2;
+analyze table t1,t2;
+Table Op Msg_type Msg_text
+test.t1 analyze status OK
+test.t2 analyze status OK
+explain select a from t1 where a in (select b from t2);
+id select_type table type possible_keys key key_len ref rows Extra
+1 PRIMARY t1 ALL NULL NULL NULL NULL 7 Using where
+1 PRIMARY t2 ref idx idx 5 test.t1.a 140 Using index; FirstMatch(t1)
+explain select * from (select a from t1 where a in (select b from t2)) t;
+id select_type table type possible_keys key key_len ref rows Extra
+1 PRIMARY t1 ALL NULL NULL NULL NULL 7 Using where
+1 PRIMARY t2 ref idx idx 5 test.t1.a 140 Using index; FirstMatch(t1)
+create view v1 as select a from t1 where a in (select b from t2);
+explain select * from v1;
+id select_type table type possible_keys key key_len ref rows Extra
+1 PRIMARY t1 ALL NULL NULL NULL NULL 7 Using where
+1 PRIMARY t2 ref idx idx 5 test.t1.a 140 Using index; FirstMatch(t1)
+drop view v1;
+drop table t1,t2;
+#
+# Bug mdev-12812: mergeable derived / view with subqueries
+# NOT subject to semi-join optimizations
+#
+CREATE TABLE t1 (c1 varchar(3)) ENGINE=MyISAM;
+INSERT INTO t1 VALUES ('foo'),('foo');
+CREATE TABLE t2 (c2 varchar(3)) ENGINE=MyISAM;
+INSERT INTO t2 VALUES ('bar'),('qux'),('foo');
+SELECT STRAIGHT_JOIN *
+FROM ( SELECT * FROM t1 WHERE c1 IN ( SELECT c2 FROM t2 ) ) AS sq;
+c1
+foo
+foo
+EXPLAIN EXTENDED SELECT STRAIGHT_JOIN *
+FROM ( SELECT * FROM t1 WHERE c1 IN ( SELECT c2 FROM t2 ) ) AS sq;
+id select_type table type possible_keys key key_len ref rows filtered Extra
+1 PRIMARY t1 ALL NULL NULL NULL NULL 2 100.00 Using where
+3 DEPENDENT SUBQUERY t2 ALL NULL NULL NULL NULL 3 100.00 Using where
+Warnings:
+Note 1003 /* select#1 */ select straight_join `test`.`t1`.`c1` AS `c1` from `test`.`t1` where <in_optimizer>(`test`.`t1`.`c1`,<exists>(/* select#3 */ select `test`.`t2`.`c2` from `test`.`t2` where <cache>(`test`.`t1`.`c1`) = `test`.`t2`.`c2`))
+DROP TABLE t1, t2;
set optimizer_switch=@exit_optimizer_switch;
set join_cache_level=@exit_join_cache_level;
diff --git a/mysql-test/r/func_regexp_pcre.result b/mysql-test/r/func_regexp_pcre.result
index 18aa7ed8379..266ea6c5eef 100644
--- a/mysql-test/r/func_regexp_pcre.result
+++ b/mysql-test/r/func_regexp_pcre.result
@@ -880,3 +880,17 @@ SET @regCheck= '\\xE0\\x01';
SELECT CAST(0xE001 AS BINARY) REGEXP @regCheck;
CAST(0xE001 AS BINARY) REGEXP @regCheck
1
+# MDEV-12420: Testing recursion overflow
+SELECT 1 FROM dual WHERE ('Alpha,Bravo,Charlie,Delta,Echo,Foxtrot,StrataCentral,Golf,Hotel,India,Juliet,Kilo,Lima,Mike,StrataL3,November,Oscar,StrataL2,Sand,P3,P4SwitchTest,Arsys,Poppa,ExtensionMgr,Arp,Quebec,Romeo,StrataApiV2,PtReyes,Sierra,SandAcl,Arrow,Artools,BridgeTest,Tango,SandT,PAlaska,Namespace,Agent,Qos,PatchPanel,ProjectReport,Ark,Gimp,Agent,SliceAgent,Arnet,Bgp,Ale,Tommy,Central,AsicPktTestLib,Hsc,SandL3,Abuild,Pca9555,Standby,ControllerDut,CalSys,SandLib,Sb820,PointV2,BfnLib,Evpn,BfnSdk,Sflow,ManagementActive,AutoTest,GatedTest,Bgp,Sand,xinetd,BfnAgentLib,bf-utils,Hello,BfnState,Eos,Artest,Qos,Scd,ThermoMgr,Uniform,EosUtils,Eb,FanController,Central,BfnL3,BfnL2,tcp_wrappers,Victor,Environment,Route,Failover,Whiskey,Xray,Gimp,BfnFixed,Strata,SoCal,XApi,Msrp,XpProfile,tcpdump,PatchPanel,ArosTest,FhTest,Arbus,XpAcl,MacConc,XpApi,telnet,QosTest,Alpha2,BfnVlan,Stp,VxlanControllerTest,MplsAgent,Bravo2,Lanz,BfnMbb,Intf,XCtrl,Unicast,SandTunnel,L3Unicast,Ipsec,MplsTest,Rsvp,EthIntf,StageMgr,Sol,MplsUtils,Nat,Ira,P4NamespaceDut,Counters,Charlie2,Aqlc,Mlag,Power,OpenFlow,Lag,RestApi,BfdTest,strongs,Sfa,CEosUtils,Adt746,MaintenanceMode,MlagDut,EosImage,IpEth,MultiProtocol,Launcher,Max3179,Snmp,Acl,IpEthTest,PhyEee,bf-syslibs,tacc,XpL2,p4-ar-switch,p4-bf-switch,LdpTest,BfnPhy,Mirroring,Phy6,Ptp' REGEXP '^((?!\b(Strata|StrataApi|StrataApiV2)\b).)*$');
+1
+Warnings:
+Warning 1139 Got error 'pcre_exec: recursion limit of NUM exceeded' from regexp
+SELECT REGEXP_INSTR('a_kollision', 'oll');
+REGEXP_INSTR('a_kollision', 'oll')
+4
+SELECT REGEXP_INSTR('a_kollision', '(oll)');
+REGEXP_INSTR('a_kollision', '(oll)')
+4
+SELECT REGEXP_INSTR('a_kollision', 'o([lm])\\1');
+REGEXP_INSTR('a_kollision', 'o([lm])\\1')
+4
diff --git a/mysql-test/r/innodb_ext_key.result b/mysql-test/r/innodb_ext_key.result
index 1305be86e5a..c55e8d138f8 100644
--- a/mysql-test/r/innodb_ext_key.result
+++ b/mysql-test/r/innodb_ext_key.result
@@ -1133,5 +1133,78 @@ where index_date_updated= 10 and index_id < 800;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t2 range index_date_updated index_date_updated 13 NULL # Using index condition
drop table t0,t1,t2;
-set optimizer_switch=@save_ext_key_optimizer_switch;
-SET SESSION STORAGE_ENGINE=DEFAULT;
+#
+# MDEV-11196: Error:Run-Time Check Failure #2 - Stack around the variable 'key_buff'
+# was corrupted, server crashes in opt_sum_query
+CREATE TABLE t1 (
+pk INT,
+f1 VARCHAR(3),
+f2 VARCHAR(1024),
+PRIMARY KEY (pk),
+KEY(f2)
+) ENGINE=InnoDB CHARSET utf8 ROW_FORMAT= DYNAMIC;
+INSERT INTO t1 VALUES (1,'foo','abc'),(2,'bar','def');
+SELECT MAX(t2.pk) FROM t1 t2 INNER JOIN t1 t3 ON t2.f1 = t3.f1 WHERE t2.pk <= 4;
+MAX(t2.pk)
+2
+drop table t1;
+CREATE TABLE t1 (
+pk1 INT,
+pk2 INT,
+f1 VARCHAR(3),
+f2 VARCHAR(1021),
+PRIMARY KEY (pk1,pk2),
+KEY(f2)
+) ENGINE=InnoDB CHARSET utf8 ROW_FORMAT= DYNAMIC;
+INSERT INTO t1 VALUES (1,2,'2','abc'),(2,3,'3','def');
+explain format= json
+select * from t1 force index(f2) where pk1 <= 5 and pk2 <=5 and f2 = 'abc' and f1 <= '3';
+EXPLAIN
+{
+ "query_block": {
+ "select_id": 1,
+ "table": {
+ "table_name": "t1",
+ "access_type": "range",
+ "possible_keys": ["f2"],
+ "key": "f2",
+ "key_length": "3070",
+ "used_key_parts": ["f2", "pk1"],
+ "rows": 1,
+ "filtered": 100,
+ "index_condition": "t1.pk1 <= 5 and t1.pk2 <= 5 and t1.f2 = 'abc'",
+ "attached_condition": "t1.f1 <= '3'"
+ }
+ }
+}
+drop table t1;
+CREATE TABLE t1 (
+f2 INT,
+pk2 INT,
+f1 VARCHAR(3),
+pk1 VARCHAR(1000),
+PRIMARY KEY (pk1,pk2),
+KEY k1(pk1,f2)
+) ENGINE=InnoDB CHARSET utf8 ROW_FORMAT= DYNAMIC;
+INSERT INTO t1 VALUES (1,2,'2','abc'),(2,3,'3','def');
+explain format= json
+select * from t1 force index(k1) where f2 <= 5 and pk2 <=5 and pk1 = 'abc' and f1 <= '3';
+EXPLAIN
+{
+ "query_block": {
+ "select_id": 1,
+ "table": {
+ "table_name": "t1",
+ "access_type": "range",
+ "possible_keys": ["k1"],
+ "key": "k1",
+ "key_length": "3011",
+ "used_key_parts": ["pk1", "f2", "pk2"],
+ "rows": 1,
+ "filtered": 100,
+ "index_condition": "t1.f2 <= 5 and t1.pk2 <= 5 and t1.pk1 = 'abc'",
+ "attached_condition": "t1.f1 <= '3'"
+ }
+ }
+}
+drop table t1;
diff --git a/mysql-test/r/join_outer.result b/mysql-test/r/join_outer.result
index d7b4e107a8d..46e542910a1 100644
--- a/mysql-test/r/join_outer.result
+++ b/mysql-test/r/join_outer.result
@@ -2337,4 +2337,99 @@ id select_type table type possible_keys key key_len ref rows filtered Extra
Warnings:
Note 1003 select `test`.`t1`.`i1` AS `i1`,`test`.`t1`.`v1` AS `v1`,`test`.`t2`.`i2` AS `i2`,`test`.`t2`.`v2` AS `v2`,`test`.`t3`.`i3` AS `i3`,`test`.`t3`.`v3` AS `v3` from `test`.`t1` join `test`.`t2` join `test`.`t3` where `test`.`t3`.`v3` = 4 and `test`.`t1`.`i1` = `test`.`t3`.`i3` and `test`.`t2`.`i2` = `test`.`t3`.`i3`
drop table t1,t2,t3;
+#
+# MDEV-11958: LEFT JOIN with stored routine produces incorrect result
+#
+CREATE TABLE t (x INT);
+INSERT INTO t VALUES(1),(NULL);
+CREATE FUNCTION f (val INT, ret INT) RETURNS INT DETERMINISTIC RETURN IFNULL(val, ret);
+SELECT t1.x, t2.x, IFNULL(t2.x,0), f(t2.x,0)
+FROM t t1 LEFT JOIN t t2
+ON t1.x = t2.x
+WHERE IFNULL(t2.x,0)=0;
+x x IFNULL(t2.x,0) f(t2.x,0)
+NULL NULL 0 0
+explain extended
+SELECT t1.x, t2.x, IFNULL(t2.x,0), f(t2.x,0)
+FROM t t1 LEFT JOIN t t2
+ON t1.x = t2.x
+WHERE IFNULL(t2.x,0)=0;
+id select_type table type possible_keys key key_len ref rows filtered Extra
+1 SIMPLE t1 ALL NULL NULL NULL NULL 2 100.00
+1 SIMPLE t2 ALL NULL NULL NULL NULL 2 100.00 Using where
+Warnings:
+Note 1003 select `test`.`t1`.`x` AS `x`,`test`.`t2`.`x` AS `x`,ifnull(`test`.`t2`.`x`,0) AS `IFNULL(t2.x,0)`,`f`(`test`.`t2`.`x`,0) AS `f(t2.x,0)` from `test`.`t` `t1` left join `test`.`t` `t2` on(`test`.`t2`.`x` = `test`.`t1`.`x`) where ifnull(`test`.`t2`.`x`,0) = 0
+SELECT t1.x, t2.x, IFNULL(t2.x,0), f(t2.x,0)
+FROM t t1 LEFT JOIN t t2
+ON t1.x = t2.x
+WHERE f(t2.x,0)=0;
+x x IFNULL(t2.x,0) f(t2.x,0)
+NULL NULL 0 0
+explain extended
+SELECT t1.x, t2.x, IFNULL(t2.x,0), f(t2.x,0)
+FROM t t1 LEFT JOIN t t2
+ON t1.x = t2.x
+WHERE f(t2.x,0)=0;
+id select_type table type possible_keys key key_len ref rows filtered Extra
+1 SIMPLE t1 ALL NULL NULL NULL NULL 2 100.00
+1 SIMPLE t2 ALL NULL NULL NULL NULL 2 100.00 Using where
+Warnings:
+Note 1003 select `test`.`t1`.`x` AS `x`,`test`.`t2`.`x` AS `x`,ifnull(`test`.`t2`.`x`,0) AS `IFNULL(t2.x,0)`,`f`(`test`.`t2`.`x`,0) AS `f(t2.x,0)` from `test`.`t` `t1` left join `test`.`t` `t2` on(`test`.`t2`.`x` = `test`.`t1`.`x`) where `f`(`test`.`t2`.`x`,0) = 0
+drop function f;
+drop table t;
+CREATE TABLE t1 (
+col1 DECIMAL(33,5) NULL DEFAULT NULL,
+col2 DECIMAL(33,5) NULL DEFAULT NULL
+);
+CREATE TABLE t2 (
+col1 DECIMAL(33,5) NULL DEFAULT NULL,
+col2 DECIMAL(33,5) NULL DEFAULT NULL,
+col3 DECIMAL(33,5) NULL DEFAULT NULL
+);
+INSERT INTO t1 VALUES (2, 1.1), (2, 2.1);
+INSERT INTO t2 VALUES (3, 3.1, 4), (1, 1, NULL);
+CREATE FUNCTION f1 ( p_num DECIMAL(45,15), p_return DECIMAL(45,15))
+RETURNS decimal(33,5)
+LANGUAGE SQL
+DETERMINISTIC
+CONTAINS SQL
+SQL SECURITY INVOKER
+BEGIN
+IF p_num IS NULL THEN
+RETURN p_return;
+ELSE
+RETURN p_num;
+END IF;
+END |
+SELECT t1.col1, t2.col1, t2.col3
+FROM t1 LEFT OUTER JOIN t2 ON t1.col1 = t2.col2
+WHERE IFNULL(t2.col3,0) = 0;
+col1 col1 col3
+2.00000 NULL NULL
+2.00000 NULL NULL
+EXPLAIN EXTENDED SELECT t1.col1, t2.col1, t2.col3
+FROM t1 LEFT OUTER JOIN t2 ON t1.col1 = t2.col2
+WHERE IFNULL(t2.col3,0) = 0;
+id select_type table type possible_keys key key_len ref rows filtered Extra
+1 SIMPLE t1 ALL NULL NULL NULL NULL 2 100.00
+1 SIMPLE t2 ALL NULL NULL NULL NULL 2 100.00 Using where
+Warnings:
+Note 1003 select `test`.`t1`.`col1` AS `col1`,`test`.`t2`.`col1` AS `col1`,`test`.`t2`.`col3` AS `col3` from `test`.`t1` left join `test`.`t2` on(`test`.`t2`.`col2` = `test`.`t1`.`col1`) where ifnull(`test`.`t2`.`col3`,0) = 0
+SELECT t1.col1, t2.col1, t2.col3
+FROM t1 LEFT OUTER JOIN t2 ON t1.col1 = t2.col2
+WHERE f1(t2.col3,0) = 0;
+col1 col1 col3
+2.00000 NULL NULL
+2.00000 NULL NULL
+EXPLAIN EXTENDED SELECT t1.col1, t2.col1, t2.col3
+FROM t1 LEFT OUTER JOIN t2 ON t1.col1 = t2.col2
+WHERE f1(t2.col3,0) = 0;
+id select_type table type possible_keys key key_len ref rows filtered Extra
+1 SIMPLE t1 ALL NULL NULL NULL NULL 2 100.00
+1 SIMPLE t2 ALL NULL NULL NULL NULL 2 100.00 Using where
+Warnings:
+Note 1003 select `test`.`t1`.`col1` AS `col1`,`test`.`t2`.`col1` AS `col1`,`test`.`t2`.`col3` AS `col3` from `test`.`t1` left join `test`.`t2` on(`test`.`t2`.`col2` = `test`.`t1`.`col1`) where `f1`(`test`.`t2`.`col3`,0) = 0
+DROP FUNCTION f1;
+DROP TABLE t1,t2;
+# end of 5.5 tests
SET optimizer_switch=@save_optimizer_switch;
diff --git a/mysql-test/r/join_outer_jcl6.result b/mysql-test/r/join_outer_jcl6.result
index cab4c78eb3a..65e2dde409e 100644
--- a/mysql-test/r/join_outer_jcl6.result
+++ b/mysql-test/r/join_outer_jcl6.result
@@ -2348,6 +2348,101 @@ id select_type table type possible_keys key key_len ref rows filtered Extra
Warnings:
Note 1003 select `test`.`t1`.`i1` AS `i1`,`test`.`t1`.`v1` AS `v1`,`test`.`t2`.`i2` AS `i2`,`test`.`t2`.`v2` AS `v2`,`test`.`t3`.`i3` AS `i3`,`test`.`t3`.`v3` AS `v3` from `test`.`t1` join `test`.`t2` join `test`.`t3` where `test`.`t3`.`v3` = 4 and `test`.`t1`.`i1` = `test`.`t3`.`i3` and `test`.`t2`.`i2` = `test`.`t3`.`i3`
drop table t1,t2,t3;
+#
+# MDEV-11958: LEFT JOIN with stored routine produces incorrect result
+#
+CREATE TABLE t (x INT);
+INSERT INTO t VALUES(1),(NULL);
+CREATE FUNCTION f (val INT, ret INT) RETURNS INT DETERMINISTIC RETURN IFNULL(val, ret);
+SELECT t1.x, t2.x, IFNULL(t2.x,0), f(t2.x,0)
+FROM t t1 LEFT JOIN t t2
+ON t1.x = t2.x
+WHERE IFNULL(t2.x,0)=0;
+x x IFNULL(t2.x,0) f(t2.x,0)
+NULL NULL 0 0
+explain extended
+SELECT t1.x, t2.x, IFNULL(t2.x,0), f(t2.x,0)
+FROM t t1 LEFT JOIN t t2
+ON t1.x = t2.x
+WHERE IFNULL(t2.x,0)=0;
+id select_type table type possible_keys key key_len ref rows filtered Extra
+1 SIMPLE t1 ALL NULL NULL NULL NULL 2 100.00
+1 SIMPLE t2 ALL NULL NULL NULL NULL 2 100.00 Using where; Using join buffer (flat, BNL join)
+Warnings:
+Note 1003 select `test`.`t1`.`x` AS `x`,`test`.`t2`.`x` AS `x`,ifnull(`test`.`t2`.`x`,0) AS `IFNULL(t2.x,0)`,`f`(`test`.`t2`.`x`,0) AS `f(t2.x,0)` from `test`.`t` `t1` left join `test`.`t` `t2` on(`test`.`t2`.`x` = `test`.`t1`.`x`) where ifnull(`test`.`t2`.`x`,0) = 0
+SELECT t1.x, t2.x, IFNULL(t2.x,0), f(t2.x,0)
+FROM t t1 LEFT JOIN t t2
+ON t1.x = t2.x
+WHERE f(t2.x,0)=0;
+x x IFNULL(t2.x,0) f(t2.x,0)
+NULL NULL 0 0
+explain extended
+SELECT t1.x, t2.x, IFNULL(t2.x,0), f(t2.x,0)
+FROM t t1 LEFT JOIN t t2
+ON t1.x = t2.x
+WHERE f(t2.x,0)=0;
+id select_type table type possible_keys key key_len ref rows filtered Extra
+1 SIMPLE t1 ALL NULL NULL NULL NULL 2 100.00
+1 SIMPLE t2 ALL NULL NULL NULL NULL 2 100.00 Using where; Using join buffer (flat, BNL join)
+Warnings:
+Note 1003 select `test`.`t1`.`x` AS `x`,`test`.`t2`.`x` AS `x`,ifnull(`test`.`t2`.`x`,0) AS `IFNULL(t2.x,0)`,`f`(`test`.`t2`.`x`,0) AS `f(t2.x,0)` from `test`.`t` `t1` left join `test`.`t` `t2` on(`test`.`t2`.`x` = `test`.`t1`.`x`) where `f`(`test`.`t2`.`x`,0) = 0
+drop function f;
+drop table t;
+CREATE TABLE t1 (
+col1 DECIMAL(33,5) NULL DEFAULT NULL,
+col2 DECIMAL(33,5) NULL DEFAULT NULL
+);
+CREATE TABLE t2 (
+col1 DECIMAL(33,5) NULL DEFAULT NULL,
+col2 DECIMAL(33,5) NULL DEFAULT NULL,
+col3 DECIMAL(33,5) NULL DEFAULT NULL
+);
+INSERT INTO t1 VALUES (2, 1.1), (2, 2.1);
+INSERT INTO t2 VALUES (3, 3.1, 4), (1, 1, NULL);
+CREATE FUNCTION f1 ( p_num DECIMAL(45,15), p_return DECIMAL(45,15))
+RETURNS decimal(33,5)
+LANGUAGE SQL
+DETERMINISTIC
+CONTAINS SQL
+SQL SECURITY INVOKER
+BEGIN
+IF p_num IS NULL THEN
+RETURN p_return;
+ELSE
+RETURN p_num;
+END IF;
+END |
+SELECT t1.col1, t2.col1, t2.col3
+FROM t1 LEFT OUTER JOIN t2 ON t1.col1 = t2.col2
+WHERE IFNULL(t2.col3,0) = 0;
+col1 col1 col3
+2.00000 NULL NULL
+2.00000 NULL NULL
+EXPLAIN EXTENDED SELECT t1.col1, t2.col1, t2.col3
+FROM t1 LEFT OUTER JOIN t2 ON t1.col1 = t2.col2
+WHERE IFNULL(t2.col3,0) = 0;
+id select_type table type possible_keys key key_len ref rows filtered Extra
+1 SIMPLE t1 ALL NULL NULL NULL NULL 2 100.00
+1 SIMPLE t2 ALL NULL NULL NULL NULL 2 100.00 Using where; Using join buffer (flat, BNL join)
+Warnings:
+Note 1003 select `test`.`t1`.`col1` AS `col1`,`test`.`t2`.`col1` AS `col1`,`test`.`t2`.`col3` AS `col3` from `test`.`t1` left join `test`.`t2` on(`test`.`t2`.`col2` = `test`.`t1`.`col1`) where ifnull(`test`.`t2`.`col3`,0) = 0
+SELECT t1.col1, t2.col1, t2.col3
+FROM t1 LEFT OUTER JOIN t2 ON t1.col1 = t2.col2
+WHERE f1(t2.col3,0) = 0;
+col1 col1 col3
+2.00000 NULL NULL
+2.00000 NULL NULL
+EXPLAIN EXTENDED SELECT t1.col1, t2.col1, t2.col3
+FROM t1 LEFT OUTER JOIN t2 ON t1.col1 = t2.col2
+WHERE f1(t2.col3,0) = 0;
+id select_type table type possible_keys key key_len ref rows filtered Extra
+1 SIMPLE t1 ALL NULL NULL NULL NULL 2 100.00
+1 SIMPLE t2 ALL NULL NULL NULL NULL 2 100.00 Using where; Using join buffer (flat, BNL join)
+Warnings:
+Note 1003 select `test`.`t1`.`col1` AS `col1`,`test`.`t2`.`col1` AS `col1`,`test`.`t2`.`col3` AS `col3` from `test`.`t1` left join `test`.`t2` on(`test`.`t2`.`col2` = `test`.`t1`.`col1`) where `f1`(`test`.`t2`.`col3`,0) = 0
+DROP FUNCTION f1;
+DROP TABLE t1,t2;
+# end of 5.5 tests
SET optimizer_switch=@save_optimizer_switch;
set join_cache_level=default;
show variables like 'join_cache_level';
diff --git a/mysql-test/r/limit_rows_examined.result b/mysql-test/r/limit_rows_examined.result
index 3bc97859303..c94599235b1 100644
--- a/mysql-test/r/limit_rows_examined.result
+++ b/mysql-test/r/limit_rows_examined.result
@@ -425,7 +425,7 @@ c1
bb
cc
Warnings:
-Warning 1931 Query execution was interrupted. The query examined at least 18 rows, which exceeds LIMIT ROWS EXAMINED (16). The query result may be incomplete
+Warning 1931 Query execution was interrupted. The query examined at least 17 rows, which exceeds LIMIT ROWS EXAMINED (16). The query result may be incomplete
select * from v1 LIMIT ROWS EXAMINED 11;
c1
bb
@@ -438,7 +438,8 @@ from (select * from t1
where c1 IN (select * from t2 where c2 > ' ' LIMIT ROWS EXAMINED 0)) as tmp
LIMIT ROWS EXAMINED 11;
id select_type table type possible_keys key key_len ref rows Extra
-1 PRIMARY t1 ALL NULL NULL NULL NULL 4 Using where
+1 PRIMARY t1 ALL NULL NULL NULL NULL 4
+1 PRIMARY <subquery3> eq_ref distinct_key distinct_key 2 func 1
3 MATERIALIZED t2 ALL NULL NULL NULL NULL 4 Using where
select *
from (select * from t1
diff --git a/mysql-test/r/log_tables-big.result b/mysql-test/r/log_tables-big.result
index 1e189a7726f..0a33510fe77 100644
--- a/mysql-test/r/log_tables-big.result
+++ b/mysql-test/r/log_tables-big.result
@@ -1,8 +1,12 @@
set @@global.log_output = 'TABLE';
+connect con1,localhost,root,,;
+connect con2,localhost,root,,;
+connection con1;
set session long_query_time=10;
select get_lock('bug27638', 1);
get_lock('bug27638', 1)
1
+connection con2;
set session long_query_time=1;
select get_lock('bug27638', 2);
get_lock('bug27638', 2)
@@ -25,7 +29,11 @@ select if (query_time >= '00:01:40', 'OK', 'WRONG') as qt, sql_text from mysql.s
where sql_text = 'select get_lock(\'bug27638\', 101)';
qt sql_text
OK select get_lock('bug27638', 101)
+connection con1;
select release_lock('bug27638');
release_lock('bug27638')
1
+connection default;
+disconnect con1;
+disconnect con2;
set @@global.log_output=default;
diff --git a/mysql-test/r/mix2_myisam.result b/mysql-test/r/mix2_myisam.result
index b282be15a56..34764466d2a 100644
--- a/mysql-test/r/mix2_myisam.result
+++ b/mysql-test/r/mix2_myisam.result
@@ -1549,6 +1549,9 @@ id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 ref v v 13 const # Using where; Using index
alter table t1 add unique(v);
ERROR 23000: Duplicate entry '{ ' for key 'v_2'
+show warnings;
+Level Code Message
+Error 1062 Duplicate entry 'a' for key 'v_2'
alter table t1 add key(v);
Warnings:
Note 1831 Duplicate index `v_2`. This is deprecated and will be disallowed in a future release
diff --git a/mysql-test/r/mrr_icp_extra.result b/mysql-test/r/mrr_icp_extra.result
index 5706bf21895..95788b393dd 100644
--- a/mysql-test/r/mrr_icp_extra.result
+++ b/mysql-test/r/mrr_icp_extra.result
@@ -350,6 +350,9 @@ id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 ref v v 13 const # Using where; Using index
alter table t1 add unique(v);
ERROR 23000: Duplicate entry '{ ' for key 'v_2'
+show warnings;
+Level Code Message
+Error 1062 Duplicate entry 'a' for key 'v_2'
alter table t1 add key(v);
Warnings:
Note 1831 Duplicate index `v_2`. This is deprecated and will be disallowed in a future release
diff --git a/mysql-test/r/myisam.result b/mysql-test/r/myisam.result
index f7eb5db6468..5b41b35c1ba 100644
--- a/mysql-test/r/myisam.result
+++ b/mysql-test/r/myisam.result
@@ -1255,6 +1255,9 @@ id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 ref v v 13 const # Using where; Using index
alter table t1 add unique(v);
ERROR 23000: Duplicate entry '{ ' for key 'v_2'
+show warnings;
+Level Code Message
+Error 1062 Duplicate entry 'a' for key 'v_2'
alter table t1 add key(v);
Warnings:
Note 1831 Duplicate index `v_2`. This is deprecated and will be disallowed in a future release
diff --git a/mysql-test/r/myisam_debug.result b/mysql-test/r/myisam_debug.result
index 39cbd69cdb0..6232e3eac0e 100644
--- a/mysql-test/r/myisam_debug.result
+++ b/mysql-test/r/myisam_debug.result
@@ -29,3 +29,15 @@ Table Op Msg_type Msg_text
test.t1 check status OK
DROP TABLE t1,t2;
disconnect insertConn;
+call mtr.add_suppression("Index for table '.*test.t1\\.MYI' is corrupt; try to repair it");
+create table t1 (a int, index(a));
+lock tables t1 write;
+insert t1 values (1),(2),(1);
+set @old_dbug=@@debug_dbug;
+set debug_dbug='+d,mi_lock_database_failure';
+unlock tables;
+Warnings:
+Error 126 Index for table './test/t1.MYI' is corrupt; try to repair it
+Error 1030 Got error 22 "Invalid argument" from storage engine MyISAM
+set debug_dbug=@old_dbug;
+drop table t1;
diff --git a/mysql-test/r/partition_alter.result b/mysql-test/r/partition_alter.result
index cbd90b5ba7c..76b55cefb07 100644
--- a/mysql-test/r/partition_alter.result
+++ b/mysql-test/r/partition_alter.result
@@ -51,3 +51,50 @@ execute stmt;
execute stmt;
deallocate prepare stmt;
drop table test_data;
+create table t1(id int, d date not null, b bool not null default 0, primary key(id,d))
+engine=innodb
+partition by range columns (d) (
+partition p1 values less than ('2016-10-18'),
+partition p2 values less than ('2020-10-19'));
+insert t1 values (0, '2000-01-02', 0);
+insert t1 values (1, '2020-01-02', 10);
+alter table t1 add check (b in (0, 1));
+ERROR 23000: CONSTRAINT `CONSTRAINT_1` failed for `test`.`#sql-temporary`
+alter table t1 add check (b in (0, 10));
+show create table t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `id` int(11) NOT NULL,
+ `d` date NOT NULL,
+ `b` tinyint(1) NOT NULL DEFAULT 0,
+ PRIMARY KEY (`id`,`d`),
+ CONSTRAINT `CONSTRAINT_1` CHECK (`b` in (0,10))
+) ENGINE=InnoDB DEFAULT CHARSET=latin1
+ PARTITION BY RANGE COLUMNS(d)
+(PARTITION p1 VALUES LESS THAN ('2016-10-18') ENGINE = InnoDB,
+ PARTITION p2 VALUES LESS THAN ('2020-10-19') ENGINE = InnoDB)
+insert t1 values (2, '2020-01-03', 20);
+ERROR 23000: CONSTRAINT `CONSTRAINT_1` failed for `test`.`t1`
+drop table t1;
+create table t1(id int, d date not null, b bool not null default 0, primary key(id,d))
+partition by range columns (d) (
+partition p1 values less than ('2016-10-18'),
+partition p2 values less than ('2020-10-19'));
+insert t1 values (0, '2000-01-02', 0);
+insert t1 values (1, '2020-01-02', 10);
+alter table t1 add check (b in (0, 1));
+show create table t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `id` int(11) NOT NULL,
+ `d` date NOT NULL,
+ `b` tinyint(1) NOT NULL DEFAULT 0,
+ PRIMARY KEY (`id`,`d`),
+ CONSTRAINT `CONSTRAINT_1` CHECK (`b` in (0,1))
+) ENGINE=MyISAM DEFAULT CHARSET=latin1
+ PARTITION BY RANGE COLUMNS(d)
+(PARTITION p1 VALUES LESS THAN ('2016-10-18') ENGINE = MyISAM,
+ PARTITION p2 VALUES LESS THAN ('2020-10-19') ENGINE = MyISAM)
+insert t1 values (2, '2020-01-03', 20);
+ERROR 23000: CONSTRAINT `CONSTRAINT_1` failed for `test`.`t1`
+drop table t1;
diff --git a/mysql-test/r/subselect_innodb.result b/mysql-test/r/subselect_innodb.result
index 01257c33361..ec7f2c0a3d5 100644
--- a/mysql-test/r/subselect_innodb.result
+++ b/mysql-test/r/subselect_innodb.result
@@ -576,3 +576,42 @@ id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t1 ALL NULL NULL NULL NULL #
2 DEPENDENT SUBQUERY t2 ref key1 key1 5 test.t1.a # Using where; Using filesort
drop table t1,t2;
+#
+# mdev-12931: semi-join in ON expression of STRAIGHT_JOIN
+# joining a base table and a mergeable derived table
+#
+CREATE TABLE t1 (f1 int) ENGINE=InnoDB;
+INSERT INTO t1 VALUES (3),(2);
+CREATE TABLE t2 (f2 int) ENGINE=InnoDB;
+INSERT INTO t2 VALUES (1),(4);
+CREATE TABLE t3 (f3 int) ENGINE=InnoDB;
+INSERT INTO t3 VALUES (5),(6);
+CREATE TABLE t4 (f4 int) ENGINE=InnoDB;
+INSERT INTO t4 VALUES (1),(8);
+SELECT *
+FROM t1
+INNER JOIN
+( t2 STRAIGHT_JOIN ( SELECT * FROM t3 ) AS sq
+ON ( 1 IN ( SELECT f4 FROM t4 ) ) )
+ON ( f1 >= f2 );
+f1 f2 f3
+3 1 5
+2 1 5
+3 1 6
+2 1 6
+EXPLAIN EXTENDED
+SELECT *
+FROM t1
+INNER JOIN
+( t2 STRAIGHT_JOIN ( SELECT * FROM t3 ) AS sq
+ON ( 1 IN ( SELECT f4 FROM t4 ) ) )
+ON ( f1 >= f2 );
+id select_type table type possible_keys key key_len ref rows filtered Extra
+1 PRIMARY <subquery3> eq_ref distinct_key distinct_key 4 func 1 100.00
+1 PRIMARY t1 ALL NULL NULL NULL NULL 2 100.00 Using join buffer (flat, BNL join)
+1 PRIMARY t2 ALL NULL NULL NULL NULL 2 100.00 Using where; Using join buffer (incremental, BNL join)
+1 PRIMARY t3 ALL NULL NULL NULL NULL 2 100.00 Using join buffer (incremental, BNL join)
+3 MATERIALIZED t4 ALL NULL NULL NULL NULL 2 100.00 Using where
+Warnings:
+Note 1003 select `test`.`t1`.`f1` AS `f1`,`test`.`t2`.`f2` AS `f2`,`test`.`t3`.`f3` AS `f3` from `test`.`t1` join `test`.`t2` semi join (`test`.`t4`) join `test`.`t3` where `test`.`t4`.`f4` = 1 and `test`.`t1`.`f1` >= `test`.`t2`.`f2`
+DROP TABLE t1,t2,t3,t4;
diff --git a/mysql-test/r/subselect_mat_cost_bugs.result b/mysql-test/r/subselect_mat_cost_bugs.result
index b4ddd5e5849..f5d5be8f726 100644
--- a/mysql-test/r/subselect_mat_cost_bugs.result
+++ b/mysql-test/r/subselect_mat_cost_bugs.result
@@ -379,6 +379,7 @@ drop table t3, t4, t5;
#
# LP BUG#858038 The result of a query with NOT IN subquery depends on the state of the optimizer switch
#
+set @optimizer_switch_save= @@optimizer_switch;
create table t1 (c1 char(2) not null, c2 char(2));
create table t2 (c3 char(2), c4 char(2));
insert into t1 values ('a1', 'b1');
@@ -400,6 +401,7 @@ id select_type table type possible_keys key key_len ref rows Extra
select * from t1 where c1 = 'a2' and (c1, c2) not in (select * from t2);
c1 c2
drop table t1, t2;
+set optimizer_switch= @optimizer_switch_save;
#
# MDEV-12673: cost-based choice between materialization and in-to-exists
#
@@ -442,3 +444,44 @@ id select_type table type possible_keys key key_len ref rows Extra
2 DEPENDENT SUBQUERY t3 const PRIMARY PRIMARY 4 const 1
2 DEPENDENT SUBQUERY t2 index NULL i2 11 NULL 2 Using where; Using index
DROP TABLE t1,t2,t3;
+#
+# MDEV-7599: in-to-exists chosen after min/max optimization
+#
+set @optimizer_switch_save= @@optimizer_switch;
+CREATE TABLE t1 (a INT, KEY(a)) ENGINE=MyISAM;
+INSERT INTO t1 VALUES (1),(2);
+CREATE TABLE t2 (b INT, c INT) ENGINE=MyISAM;
+INSERT INTO t2 VALUES (1,6),(2,4), (8,9);
+SELECT * FROM t2 WHERE b != ALL (SELECT MIN(a) FROM t1, t2 WHERE t2.c = t2.b);
+b c
+EXPLAIN EXTENDED SELECT * FROM t2 WHERE b != ALL (SELECT MIN(a) FROM t1, t2 WHERE t2.c = t2.b);
+id select_type table type possible_keys key key_len ref rows filtered Extra
+1 PRIMARY t2 ALL NULL NULL NULL NULL 3 100.00 Using where
+2 MATERIALIZED t1 index NULL a 5 NULL 2 100.00 Using index
+2 MATERIALIZED t2 ALL NULL NULL NULL NULL 3 100.00 Using where; Using join buffer (flat, BNL join)
+Warnings:
+Note 1003 /* select#1 */ select `test`.`t2`.`b` AS `b`,`test`.`t2`.`c` AS `c` from `test`.`t2` where !<expr_cache><`test`.`t2`.`b`>(<in_optimizer>(`test`.`t2`.`b`,`test`.`t2`.`b` in ( <materialize> (/* select#2 */ select min(`test`.`t1`.`a`) from `test`.`t1` join `test`.`t2` where `test`.`t2`.`c` = `test`.`t2`.`b` ), <primary_index_lookup>(`test`.`t2`.`b` in <temporary table> on distinct_key where `test`.`t2`.`b` = `<subquery2>`.`MIN(a)`))))
+set optimizer_switch= 'materialization=off';
+SELECT * FROM t2 WHERE b != ALL (SELECT MIN(a) FROM t1, t2 WHERE t2.c = t2.b);
+b c
+EXPLAIN EXTENDED SELECT * FROM t2 WHERE b != ALL (SELECT MIN(a) FROM t1, t2 WHERE t2.c = t2.b);
+id select_type table type possible_keys key key_len ref rows filtered Extra
+1 PRIMARY t2 ALL NULL NULL NULL NULL 3 100.00 Using where
+2 DEPENDENT SUBQUERY t1 index NULL a 5 NULL 2 100.00 Using index
+2 DEPENDENT SUBQUERY t2 ALL NULL NULL NULL NULL 3 100.00 Using where; Using join buffer (flat, BNL join)
+Warnings:
+Note 1003 /* select#1 */ select `test`.`t2`.`b` AS `b`,`test`.`t2`.`c` AS `c` from `test`.`t2` where !<expr_cache><`test`.`t2`.`b`>(<in_optimizer>(`test`.`t2`.`b`,<exists>(/* select#2 */ select min(`test`.`t1`.`a`) from `test`.`t1` join `test`.`t2` where `test`.`t2`.`c` = `test`.`t2`.`b` having trigcond(<cache>(`test`.`t2`.`b`) = <ref_null_helper>(min(`test`.`t1`.`a`))))))
+set optimizer_switch= @optimizer_switch_save;
+DROP TABLE t1,t2;
+CREATE TABLE t1 (f1 varchar(10)) ENGINE=MyISAM;
+INSERT INTO t1 VALUES ('foo'),('bar');
+CREATE TABLE t2 (f2 varchar(10), key(f2)) ENGINE=MyISAM;
+INSERT INTO t2 VALUES ('baz'),('qux');
+CREATE TABLE t3 (f3 varchar(10)) ENGINE=MyISAM;
+INSERT INTO t3 VALUES ('abc'),('def');
+SELECT * FROM t1
+WHERE f1 = ALL( SELECT MAX(t2a.f2)
+FROM t2 AS t2a INNER JOIN t2 t2b INNER JOIN t3
+ON (f3 = t2b.f2) );
+f1
+DROP TABLE t1,t2,t3;
diff --git a/mysql-test/r/subselect_sj.result b/mysql-test/r/subselect_sj.result
index 861360ddf9a..a601dac5337 100644
--- a/mysql-test/r/subselect_sj.result
+++ b/mysql-test/r/subselect_sj.result
@@ -1652,9 +1652,9 @@ CREATE VIEW v1 AS SELECT 1;
EXPLAIN
SELECT * FROM t1 INNER JOIN t2 ON t2.a != 0 AND t2.a IN (SELECT * FROM v1);
id select_type table type possible_keys key key_len ref rows Extra
+1 PRIMARY <derived3> system NULL NULL NULL NULL 1
1 PRIMARY t1 ALL NULL NULL NULL NULL 2
1 PRIMARY t2 ALL NULL NULL NULL NULL 2 Using where; Using join buffer (flat, BNL join)
-2 MATERIALIZED <derived3> system NULL NULL NULL NULL 1
3 DERIVED NULL NULL NULL NULL NULL NULL NULL No tables used
SELECT * FROM t1 INNER JOIN t2 ON t2.a != 0 AND t2.a IN (SELECT * FROM v1);
a a
@@ -3072,4 +3072,97 @@ project_number
aaa
drop table t1, t2, t3;
set optimizer_switch= @tmp_mdev6859;
+#
+# MDEV-12675: subquery subject to semi-join optimizations
+# in ON expression of INNER JOIN
+#
+set @tmp_mdev12675=@@optimizer_switch;
+set optimizer_switch=default;
+create table t1 (a int) engine=myisam;
+insert into t1 values (5),(3),(2),(7),(2),(5),(1);
+create table t2 (b int, index idx(b)) engine=myisam;
+insert into t2 values (2),(3),(2),(1),(3),(4);
+insert into t2 select b+10 from t2;
+insert into t2 select b+10 from t2;
+insert into t2 select b+10 from t2;
+insert into t2 select b+10 from t2;
+insert into t2 select b+10 from t2;
+insert into t2 select b+10 from t2;
+insert into t2 select b+10 from t2;
+insert into t2 select b+10 from t2;
+insert into t2 select b+10 from t2;
+insert into t2 select b+10 from t2;
+insert into t2 select b+10 from t2;
+analyze table t1,t2;
+Table Op Msg_type Msg_text
+test.t1 analyze status OK
+test.t2 analyze status OK
+explain
+select a from t1, t2 where b between 1 and 2 and a in (select b from t2);
+id select_type table type possible_keys key key_len ref rows Extra
+1 PRIMARY t1 ALL NULL NULL NULL NULL 7 Using where
+1 PRIMARY t2 ref idx idx 5 test.t1.a 256 Using index; FirstMatch(t1)
+1 PRIMARY t2 range idx idx 5 NULL 2 Using where; Using index; Using join buffer (flat, BNL join)
+explain
+select a from t1 join t2 on b between 1 and 2 and a in (select b from t2);
+id select_type table type possible_keys key key_len ref rows Extra
+1 PRIMARY t1 ALL NULL NULL NULL NULL 7 Using where
+1 PRIMARY t2 ref idx idx 5 test.t1.a 256 Using index; FirstMatch(t1)
+1 PRIMARY t2 range idx idx 5 NULL 2 Using where; Using index; Using join buffer (flat, BNL join)
+drop table t1,t2;
+set optimizer_switch= @tmp_mdev12675;
+#
+# MDEV-12817: subquery NOT subject to semi-join optimizations
+# in ON expression of INNER JOIN
+#
+CREATE TABLE t1 (c1 int) ENGINE=MyISAM;
+INSERT INTO t1 VALUES (1),(2);
+CREATE TABLE t2 (c2 int) ENGINE=MyISAM;
+INSERT INTO t2 VALUES (3),(4);
+CREATE TABLE t3 (c3 int) ENGINE=MyISAM;
+INSERT INTO t3 VALUES (5),(6);
+CREATE TABLE t4 (c4 int) ENGINE=MyISAM;
+INSERT INTO t4 VALUES (7),(8);
+SELECT c1
+FROM t1
+LEFT JOIN
+( t2 INNER JOIN t3 ON ( 1 IN ( SELECT c4 FROM t4 ) ) )
+ON (c1 = c3);
+c1
+1
+2
+EXPLAIN EXTENDED SELECT c1
+FROM t1
+LEFT JOIN
+( t2 INNER JOIN t3 ON ( 1 IN ( SELECT c4 FROM t4 ) ) )
+ON (c1 = c3);
+id select_type table type possible_keys key key_len ref rows filtered Extra
+1 PRIMARY t1 ALL NULL NULL NULL NULL 2 100.00
+1 PRIMARY t2 ALL NULL NULL NULL NULL 2 100.00 Using where
+1 PRIMARY t3 ALL NULL NULL NULL NULL 2 100.00 Using where
+2 SUBQUERY t4 ALL NULL NULL NULL NULL 2 100.00 Using where
+Warnings:
+Note 1003 /* select#1 */ select `test`.`t1`.`c1` AS `c1` from `test`.`t1` left join (`test`.`t2` join `test`.`t3`) on(`test`.`t3`.`c3` = `test`.`t1`.`c1` and <cache>(<in_optimizer>(1,<exists>(/* select#2 */ select `test`.`t4`.`c4` from `test`.`t4` where 1 = `test`.`t4`.`c4`)))) where 1
+# mdev-12820
+SELECT *
+FROM t1
+LEFT JOIN
+( ( SELECT * FROM t2 WHERE c2 IN ( SELECT c3 FROM t3 ) ) AS sq INNER JOIN t4 )
+ON (c1 = c2);
+c1 c2 c4
+1 NULL NULL
+2 NULL NULL
+EXPLAIN EXTENDED SELECT *
+FROM t1
+LEFT JOIN
+( ( SELECT * FROM t2 WHERE c2 IN ( SELECT c3 FROM t3 ) ) AS sq INNER JOIN t4 )
+ON (c1 = c2);
+id select_type table type possible_keys key key_len ref rows filtered Extra
+1 PRIMARY t1 ALL NULL NULL NULL NULL 2 100.00
+1 PRIMARY t2 ALL NULL NULL NULL NULL 2 100.00 Using where
+1 PRIMARY t4 ALL NULL NULL NULL NULL 2 100.00
+3 DEPENDENT SUBQUERY t3 ALL NULL NULL NULL NULL 2 100.00 Using where
+Warnings:
+Note 1003 /* select#1 */ select `test`.`t1`.`c1` AS `c1`,`test`.`t2`.`c2` AS `c2`,`test`.`t4`.`c4` AS `c4` from `test`.`t1` left join (`test`.`t2` join `test`.`t4`) on(`test`.`t2`.`c2` = `test`.`t1`.`c1` and <in_optimizer>(`test`.`t1`.`c1`,<exists>(/* select#3 */ select `test`.`t3`.`c3` from `test`.`t3` where <cache>(`test`.`t2`.`c2`) = `test`.`t3`.`c3`))) where 1
+DROP TABLE t1,t2,t3,t4;
set optimizer_switch=@subselect_sj_tmp;
diff --git a/mysql-test/r/subselect_sj2_mat.result b/mysql-test/r/subselect_sj2_mat.result
index 07c9e74b4d7..8466f9fba4e 100644
--- a/mysql-test/r/subselect_sj2_mat.result
+++ b/mysql-test/r/subselect_sj2_mat.result
@@ -1625,3 +1625,26 @@ i1
DROP TABLE t1,t2,t3;
set join_cache_level= @save_join_cache_level;
set optimizer_switch=@save_optimizer_switch;
+#
+# mdev-7791: materialization of a semi-join subquery +
+# RAND() in WHERE
+# (materialized table is accessed last)
+#
+set @save_optimizer_switch=@@optimizer_switch;
+set optimizer_switch='materialization=on';
+create table t1(i int);
+insert into t1 values (1), (2), (3), (7), (9), (10);
+create table t2(i int);
+insert into t2 values (1), (2), (3), (4), (5), (6), (7), (8), (9), (10);
+select * from t1 where (rand() < 0) and i in (select i from t2);
+i
+explain extended
+select * from t1 where (rand() < 0) and i in (select i from t2);
+id select_type table type possible_keys key key_len ref rows filtered Extra
+1 PRIMARY t1 ALL NULL NULL NULL NULL 6 100.00 Using where
+1 PRIMARY <subquery2> eq_ref distinct_key distinct_key 4 func 1 100.00
+2 MATERIALIZED t2 ALL NULL NULL NULL NULL 10 100.00
+Warnings:
+Note 1003 select `test`.`t1`.`i` AS `i` from `test`.`t1` semi join (`test`.`t2`) where rand() < 0
+drop table t1,t2;
+set optimizer_switch=@save_optimizer_switch;
diff --git a/mysql-test/r/subselect_sj_jcl6.result b/mysql-test/r/subselect_sj_jcl6.result
index bcfa1b14be9..77a073ea2d3 100644
--- a/mysql-test/r/subselect_sj_jcl6.result
+++ b/mysql-test/r/subselect_sj_jcl6.result
@@ -1665,9 +1665,9 @@ CREATE VIEW v1 AS SELECT 1;
EXPLAIN
SELECT * FROM t1 INNER JOIN t2 ON t2.a != 0 AND t2.a IN (SELECT * FROM v1);
id select_type table type possible_keys key key_len ref rows Extra
+1 PRIMARY <derived3> system NULL NULL NULL NULL 1
1 PRIMARY t1 ALL NULL NULL NULL NULL 2
1 PRIMARY t2 ALL NULL NULL NULL NULL 2 Using where; Using join buffer (flat, BNL join)
-2 MATERIALIZED <derived3> system NULL NULL NULL NULL 1
3 DERIVED NULL NULL NULL NULL NULL NULL NULL No tables used
SELECT * FROM t1 INNER JOIN t2 ON t2.a != 0 AND t2.a IN (SELECT * FROM v1);
a a
@@ -3086,6 +3086,99 @@ project_number
aaa
drop table t1, t2, t3;
set optimizer_switch= @tmp_mdev6859;
+#
+# MDEV-12675: subquery subject to semi-join optimizations
+# in ON expression of INNER JOIN
+#
+set @tmp_mdev12675=@@optimizer_switch;
+set optimizer_switch=default;
+create table t1 (a int) engine=myisam;
+insert into t1 values (5),(3),(2),(7),(2),(5),(1);
+create table t2 (b int, index idx(b)) engine=myisam;
+insert into t2 values (2),(3),(2),(1),(3),(4);
+insert into t2 select b+10 from t2;
+insert into t2 select b+10 from t2;
+insert into t2 select b+10 from t2;
+insert into t2 select b+10 from t2;
+insert into t2 select b+10 from t2;
+insert into t2 select b+10 from t2;
+insert into t2 select b+10 from t2;
+insert into t2 select b+10 from t2;
+insert into t2 select b+10 from t2;
+insert into t2 select b+10 from t2;
+insert into t2 select b+10 from t2;
+analyze table t1,t2;
+Table Op Msg_type Msg_text
+test.t1 analyze status OK
+test.t2 analyze status OK
+explain
+select a from t1, t2 where b between 1 and 2 and a in (select b from t2);
+id select_type table type possible_keys key key_len ref rows Extra
+1 PRIMARY t1 ALL NULL NULL NULL NULL 7 Using where
+1 PRIMARY t2 ref idx idx 5 test.t1.a 256 Using index; FirstMatch(t1)
+1 PRIMARY t2 range idx idx 5 NULL 2 Using where; Using index; Using join buffer (flat, BNL join)
+explain
+select a from t1 join t2 on b between 1 and 2 and a in (select b from t2);
+id select_type table type possible_keys key key_len ref rows Extra
+1 PRIMARY t1 ALL NULL NULL NULL NULL 7 Using where
+1 PRIMARY t2 ref idx idx 5 test.t1.a 256 Using index; FirstMatch(t1)
+1 PRIMARY t2 range idx idx 5 NULL 2 Using where; Using index; Using join buffer (flat, BNL join)
+drop table t1,t2;
+set optimizer_switch= @tmp_mdev12675;
+#
+# MDEV-12817: subquery NOT subject to semi-join optimizations
+# in ON expression of INNER JOIN
+#
+CREATE TABLE t1 (c1 int) ENGINE=MyISAM;
+INSERT INTO t1 VALUES (1),(2);
+CREATE TABLE t2 (c2 int) ENGINE=MyISAM;
+INSERT INTO t2 VALUES (3),(4);
+CREATE TABLE t3 (c3 int) ENGINE=MyISAM;
+INSERT INTO t3 VALUES (5),(6);
+CREATE TABLE t4 (c4 int) ENGINE=MyISAM;
+INSERT INTO t4 VALUES (7),(8);
+SELECT c1
+FROM t1
+LEFT JOIN
+( t2 INNER JOIN t3 ON ( 1 IN ( SELECT c4 FROM t4 ) ) )
+ON (c1 = c3);
+c1
+1
+2
+EXPLAIN EXTENDED SELECT c1
+FROM t1
+LEFT JOIN
+( t2 INNER JOIN t3 ON ( 1 IN ( SELECT c4 FROM t4 ) ) )
+ON (c1 = c3);
+id select_type table type possible_keys key key_len ref rows filtered Extra
+1 PRIMARY t1 ALL NULL NULL NULL NULL 2 100.00
+1 PRIMARY t2 ALL NULL NULL NULL NULL 2 100.00 Using where; Using join buffer (flat, BNL join)
+1 PRIMARY t3 ALL NULL NULL NULL NULL 2 100.00 Using where; Using join buffer (incremental, BNL join)
+2 SUBQUERY t4 ALL NULL NULL NULL NULL 2 100.00 Using where
+Warnings:
+Note 1003 /* select#1 */ select `test`.`t1`.`c1` AS `c1` from `test`.`t1` left join (`test`.`t2` join `test`.`t3`) on(`test`.`t3`.`c3` = `test`.`t1`.`c1` and <cache>(<in_optimizer>(1,<exists>(/* select#2 */ select `test`.`t4`.`c4` from `test`.`t4` where 1 = `test`.`t4`.`c4`)))) where 1
+# mdev-12820
+SELECT *
+FROM t1
+LEFT JOIN
+( ( SELECT * FROM t2 WHERE c2 IN ( SELECT c3 FROM t3 ) ) AS sq INNER JOIN t4 )
+ON (c1 = c2);
+c1 c2 c4
+1 NULL NULL
+2 NULL NULL
+EXPLAIN EXTENDED SELECT *
+FROM t1
+LEFT JOIN
+( ( SELECT * FROM t2 WHERE c2 IN ( SELECT c3 FROM t3 ) ) AS sq INNER JOIN t4 )
+ON (c1 = c2);
+id select_type table type possible_keys key key_len ref rows filtered Extra
+1 PRIMARY t1 ALL NULL NULL NULL NULL 2 100.00
+1 PRIMARY t2 ALL NULL NULL NULL NULL 2 100.00 Using where; Using join buffer (flat, BNL join)
+1 PRIMARY t4 ALL NULL NULL NULL NULL 2 100.00 Using join buffer (incremental, BNL join)
+3 DEPENDENT SUBQUERY t3 ALL NULL NULL NULL NULL 2 100.00 Using where
+Warnings:
+Note 1003 /* select#1 */ select `test`.`t1`.`c1` AS `c1`,`test`.`t2`.`c2` AS `c2`,`test`.`t4`.`c4` AS `c4` from `test`.`t1` left join (`test`.`t2` join `test`.`t4`) on(`test`.`t2`.`c2` = `test`.`t1`.`c1` and <in_optimizer>(`test`.`t1`.`c1`,<exists>(/* select#3 */ select `test`.`t3`.`c3` from `test`.`t3` where <cache>(`test`.`t2`.`c2`) = `test`.`t3`.`c3`))) where 1
+DROP TABLE t1,t2,t3,t4;
set optimizer_switch=@subselect_sj_tmp;
#
# BUG#49129: Wrong result with IN-subquery with join_cache_level=6 and firstmatch=off
diff --git a/mysql-test/r/trigger.result b/mysql-test/r/trigger.result
index 407f8534672..b751632307a 100644
--- a/mysql-test/r/trigger.result
+++ b/mysql-test/r/trigger.result
@@ -2368,6 +2368,16 @@ tr1 1 2016-01-01 10:10:10.33
tr2 2 2016-01-01 10:10:10.99
drop table t1;
set time_zone= @@global.time_zone;
+# MDEV-12992: Increasing memory consumption
+# with each invocation of trigger
+#
+CREATE TABLE t1 (a INT);
+INSERT INTO t1 VALUES (1);
+CREATE TABLE t2 (b INT);
+CREATE TRIGGER tr
+AFTER UPDATE ON t1 FOR EACH ROW SELECT (SELECT b FROM t2) INTO @x;
+# Running 20000 queries
+DROP TABLE t1,t2;
#
# Start of 10.3 tests
#
diff --git a/mysql-test/r/union.result b/mysql-test/r/union.result
index e0aa93d5c97..956703237a1 100644
--- a/mysql-test/r/union.result
+++ b/mysql-test/r/union.result
@@ -2178,6 +2178,35 @@ WHERE t1_2.b NOT IN ( SELECT 4 UNION ALL SELECT 5 );
a b a b
1 1 1 1
DROP TABLE t1;
+# Bug mdev-12788: UNION ALL + impossible having for derived
+# with IN subquery in WHERE
+#
+CREATE TABLE t1 (i int) ENGINE=MyISAM;
+INSERT INTO t1 VALUES (1);
+CREATE TABLE t2 (pk int PRIMARY KEY) ENGINE=MyISAM;
+INSERT INTO t2 VALUES (1),(2);
+SELECT 1, 2
+UNION ALL
+SELECT i, COUNT(*) FROM (
+SELECT * FROM t1 WHERE i IN ( SELECT pk FROM t2 )
+) AS sq
+GROUP BY i
+HAVING i = 10;
+1 2
+1 2
+EXPLAIN EXTENDED SELECT 1, 2
+UNION ALL
+SELECT i, COUNT(*) FROM (
+SELECT * FROM t1 WHERE i IN ( SELECT pk FROM t2 )
+) AS sq
+GROUP BY i
+HAVING i = 10;
+id select_type table type possible_keys key key_len ref rows filtered Extra
+1 PRIMARY NULL NULL NULL NULL NULL NULL NULL NULL No tables used
+2 UNION NULL NULL NULL NULL NULL NULL NULL NULL Impossible HAVING noticed after reading const tables
+Warnings:
+Note 1003 /* select#1 */ select 1 AS `1`,2 AS `2` union all /* select#2 */ select 1 AS `i`,count(0) AS `COUNT(*)` from `test`.`t2` where 1 group by 1 having 0
+DROP TABLE t1,t2;
#
# Start of 10.3 tests
#
diff --git a/mysql-test/suite/binlog/r/mysqladmin.result b/mysql-test/suite/binlog/r/mysqladmin.result
new file mode 100644
index 00000000000..4be6c96d55b
--- /dev/null
+++ b/mysql-test/suite/binlog/r/mysqladmin.result
@@ -0,0 +1,12 @@
+create user adm@localhost identified by 'foobar';
+grant reload on *.* to adm@localhost;
+reset master;
+include/show_binlog_events.inc
+Log_name Pos Event_type Server_id End_log_pos Info
+master-bin.000001 # Gtid # # GTID #-#-#
+master-bin.000001 # Query # # flush status
+include/show_binlog_events.inc
+Log_name Pos Event_type Server_id End_log_pos Info
+master-bin.000001 # Gtid # # GTID #-#-#
+master-bin.000001 # Query # # flush status
+drop user adm@localhost;
diff --git a/mysql-test/suite/binlog/t/mysqladmin.test b/mysql-test/suite/binlog/t/mysqladmin.test
new file mode 100644
index 00000000000..3c2fbc0a708
--- /dev/null
+++ b/mysql-test/suite/binlog/t/mysqladmin.test
@@ -0,0 +1,12 @@
+source include/have_binlog_format_statement.inc;
+#
+# MDEV-12612 mysqladmin --local flush... to use FLUSH LOCAL
+#
+create user adm@localhost identified by 'foobar';
+grant reload on *.* to adm@localhost;
+reset master;
+exec $MYSQLADMIN -uadm -pfoobar flush-status;
+source include/show_binlog_events.inc;
+exec $MYSQLADMIN --local -uadm -pfoobar flush-status;
+source include/show_binlog_events.inc;
+drop user adm@localhost;
diff --git a/mysql-test/suite/csv/read_only.result b/mysql-test/suite/csv/read_only.result
new file mode 100644
index 00000000000..d6936681f65
--- /dev/null
+++ b/mysql-test/suite/csv/read_only.result
@@ -0,0 +1,30 @@
+create table t1 (a int not null) engine=csv;
+insert t1 values (1),(2);
+flush tables;
+select * from information_schema.tables where table_schema='test';
+TABLE_CATALOG def
+TABLE_SCHEMA test
+TABLE_NAME t1
+TABLE_TYPE BASE TABLE
+ENGINE NULL
+VERSION NULL
+ROW_FORMAT NULL
+TABLE_ROWS NULL
+AVG_ROW_LENGTH NULL
+DATA_LENGTH NULL
+MAX_DATA_LENGTH NULL
+INDEX_LENGTH NULL
+DATA_FREE NULL
+AUTO_INCREMENT NULL
+CREATE_TIME NULL
+UPDATE_TIME NULL
+CHECK_TIME NULL
+TABLE_COLLATION NULL
+CHECKSUM NULL
+CREATE_OPTIONS NULL
+TABLE_COMMENT File './test/t1.CSM' not found (Errcode: 13 "Permission denied")
+Warnings:
+Level Warning
+Code 29
+Message File './test/t1.CSM' not found (Errcode: 13 "Permission denied")
+drop table t1;
diff --git a/mysql-test/suite/csv/read_only.test b/mysql-test/suite/csv/read_only.test
new file mode 100644
index 00000000000..2af209182d0
--- /dev/null
+++ b/mysql-test/suite/csv/read_only.test
@@ -0,0 +1,19 @@
+#
+# MDEV-11883 MariaDB crashes with out-of-memory when query information_schema
+#
+source include/have_csv.inc;
+
+let datadir=`select @@datadir`;
+
+create table t1 (a int not null) engine=csv;
+insert t1 values (1),(2);
+flush tables;
+
+chmod 0400 $datadir/test/t1.CSM;
+chmod 0400 $datadir/test/t1.CSV;
+
+--replace_result $datadir ./
+query_vertical select * from information_schema.tables where table_schema='test';
+
+drop table t1;
+
diff --git a/mysql-test/suite/encryption/disabled.def b/mysql-test/suite/encryption/disabled.def
index 8c263c6a458..d92d3495cb8 100644
--- a/mysql-test/suite/encryption/disabled.def
+++ b/mysql-test/suite/encryption/disabled.def
@@ -12,4 +12,3 @@
innodb_scrub : MDEV-8139 scrubbing does not work reliably
innodb_scrub_background : MDEV-8139 scrubbing does not work reliably
-innodb_encryption-page-compression : MDEV-11420
diff --git a/mysql-test/suite/encryption/r/innodb-checksum-algorithm,32k.rdiff b/mysql-test/suite/encryption/r/innodb-checksum-algorithm,32k.rdiff
new file mode 100644
index 00000000000..cd66df7440b
--- /dev/null
+++ b/mysql-test/suite/encryption/r/innodb-checksum-algorithm,32k.rdiff
@@ -0,0 +1,38 @@
+--- suite/encryption/r/innodb-checksum-algorithm.result
++++ suite/encryption/r/innodb-checksum-algorithm,32k.reject
+@@ -13,9 +13,9 @@
+ SET GLOBAL innodb_default_encryption_key_id=4;
+ SET GLOBAL innodb_checksum_algorithm=crc32;
+ create table tce_crc32(a serial, b blob, index(b(10))) engine=innodb
+-ROW_FORMAT=COMPRESSED encrypted=yes;
++ROW_FORMAT=DYNAMIC encrypted=yes;
+ create table tc_crc32(a serial, b blob, index(b(10))) engine=innodb
+-ROW_FORMAT=COMPRESSED encrypted=no;
++ROW_FORMAT=DYNAMIC encrypted=no;
+ create table te_crc32(a serial, b blob, index(b(10))) engine=innodb
+ encrypted=yes;
+ create table t_crc32(a serial, b blob, index(b(10))) engine=innodb
+@@ -222,9 +222,9 @@
+ t_crc32, tpe_crc32, tp_crc32;
+ SET GLOBAL innodb_checksum_algorithm=innodb;
+ create table tce_innodb(a serial, b blob, index(b(10))) engine=innodb
+-ROW_FORMAT=COMPRESSED encrypted=yes;
++ROW_FORMAT=DYNAMIC encrypted=yes;
+ create table tc_innodb(a serial, b blob, index(b(10))) engine=innodb
+-ROW_FORMAT=COMPRESSED encrypted=no;
++ROW_FORMAT=DYNAMIC encrypted=no;
+ create table te_innodb(a serial, b blob, index(b(10))) engine=innodb
+ encrypted=yes;
+ create table t_innodb(a serial, b blob, index(b(10))) engine=innodb
+@@ -431,9 +431,9 @@
+ t_innodb, tpe_innodb, tp_innodb;
+ SET GLOBAL innodb_checksum_algorithm=none;
+ create table tce_none(a serial, b blob, index(b(10))) engine=innodb
+-ROW_FORMAT=COMPRESSED encrypted=yes;
++ROW_FORMAT=DYNAMIC encrypted=yes;
+ create table tc_none(a serial, b blob, index(b(10))) engine=innodb
+-ROW_FORMAT=COMPRESSED encrypted=no;
++ROW_FORMAT=DYNAMIC encrypted=no;
+ create table te_none(a serial, b blob, index(b(10))) engine=innodb
+ encrypted=yes;
+ create table t_none(a serial, b blob, index(b(10))) engine=innodb
diff --git a/mysql-test/suite/encryption/r/innodb-checksum-algorithm,64k.rdiff b/mysql-test/suite/encryption/r/innodb-checksum-algorithm,64k.rdiff
new file mode 100644
index 00000000000..523074297da
--- /dev/null
+++ b/mysql-test/suite/encryption/r/innodb-checksum-algorithm,64k.rdiff
@@ -0,0 +1,38 @@
+--- suite/encryption/r/innodb-checksum-algorithm.result
++++ suite/encryption/r/innodb-checksum-algorithm,64k.reject
+@@ -13,9 +13,9 @@
+ SET GLOBAL innodb_default_encryption_key_id=4;
+ SET GLOBAL innodb_checksum_algorithm=crc32;
+ create table tce_crc32(a serial, b blob, index(b(10))) engine=innodb
+-ROW_FORMAT=COMPRESSED encrypted=yes;
++ROW_FORMAT=DYNAMIC encrypted=yes;
+ create table tc_crc32(a serial, b blob, index(b(10))) engine=innodb
+-ROW_FORMAT=COMPRESSED encrypted=no;
++ROW_FORMAT=DYNAMIC encrypted=no;
+ create table te_crc32(a serial, b blob, index(b(10))) engine=innodb
+ encrypted=yes;
+ create table t_crc32(a serial, b blob, index(b(10))) engine=innodb
+@@ -222,9 +222,9 @@
+ t_crc32, tpe_crc32, tp_crc32;
+ SET GLOBAL innodb_checksum_algorithm=innodb;
+ create table tce_innodb(a serial, b blob, index(b(10))) engine=innodb
+-ROW_FORMAT=COMPRESSED encrypted=yes;
++ROW_FORMAT=DYNAMIC encrypted=yes;
+ create table tc_innodb(a serial, b blob, index(b(10))) engine=innodb
+-ROW_FORMAT=COMPRESSED encrypted=no;
++ROW_FORMAT=DYNAMIC encrypted=no;
+ create table te_innodb(a serial, b blob, index(b(10))) engine=innodb
+ encrypted=yes;
+ create table t_innodb(a serial, b blob, index(b(10))) engine=innodb
+@@ -431,9 +431,9 @@
+ t_innodb, tpe_innodb, tp_innodb;
+ SET GLOBAL innodb_checksum_algorithm=none;
+ create table tce_none(a serial, b blob, index(b(10))) engine=innodb
+-ROW_FORMAT=COMPRESSED encrypted=yes;
++ROW_FORMAT=DYNAMIC encrypted=yes;
+ create table tc_none(a serial, b blob, index(b(10))) engine=innodb
+-ROW_FORMAT=COMPRESSED encrypted=no;
++ROW_FORMAT=DYNAMIC encrypted=no;
+ create table te_none(a serial, b blob, index(b(10))) engine=innodb
+ encrypted=yes;
+ create table t_none(a serial, b blob, index(b(10))) engine=innodb
diff --git a/mysql-test/suite/encryption/r/innodb-checksum-algorithm.result b/mysql-test/suite/encryption/r/innodb-checksum-algorithm.result
new file mode 100644
index 00000000000..a12b47ef31d
--- /dev/null
+++ b/mysql-test/suite/encryption/r/innodb-checksum-algorithm.result
@@ -0,0 +1,643 @@
+SET @saved_file_per_table = @@global.innodb_file_per_table;
+SET @saved_checksum_algorithm = @@global.innodb_checksum_algorithm;
+SET @saved_encrypt_tables = @@global.innodb_encrypt_tables;
+SET @saved_encryption_threads = @@global.innodb_encryption_threads;
+SET @saved_encryption_key_id = @@global.innodb_default_encryption_key_id;
+SET GLOBAL innodb_file_per_table = ON;
+SET GLOBAL innodb_encrypt_tables = ON;
+SET GLOBAL innodb_encryption_threads = 4;
+call mtr.add_suppression("InnoDB: innodb_checksum_algorithm is set to \"strict_(crc32|none|innodb)\" but the page \\[page id: space=[0-9]+, page number=[0-9]+\\] contains a valid checksum \"(innodb|none|crc32)\"");
+SET GLOBAL innodb_checksum_algorithm = innodb;
+SET GLOBAL innodb_default_encryption_key_id=4;
+SET GLOBAL innodb_checksum_algorithm=crc32;
+create table tce_crc32(a serial, b blob, index(b(10))) engine=innodb
+ROW_FORMAT=COMPRESSED encrypted=yes;
+create table tc_crc32(a serial, b blob, index(b(10))) engine=innodb
+ROW_FORMAT=COMPRESSED encrypted=no;
+create table te_crc32(a serial, b blob, index(b(10))) engine=innodb
+encrypted=yes;
+create table t_crc32(a serial, b blob, index(b(10))) engine=innodb
+encrypted=no;
+create table tpe_crc32(a serial, b blob, index(b(10))) engine=innodb
+page_compressed=yes encrypted=yes;
+create table tp_crc32(a serial, b blob, index(b(10))) engine=innodb
+page_compressed=yes encrypted=no;
+begin;
+insert into tce_crc32(b) values (repeat('secret',20));
+insert into tc_crc32(b) values (repeat('secret',20));
+insert into te_crc32(b) values (repeat('secret',20));
+insert into t_crc32(b) values (repeat('secret',20));
+insert into tpe_crc32(b) values (repeat('secret',20));
+insert into tp_crc32(b) values (repeat('secret',20));
+commit;
+FLUSH TABLES tce_crc32, tc_crc32, te_crc32,
+t_crc32, tpe_crc32, tp_crc32 FOR EXPORT;
+backup: tce_crc32
+backup: tc_crc32
+backup: te_crc32
+backup: t_crc32
+backup: tpe_crc32
+backup: tp_crc32
+t_crc32.cfg
+t_crc32.frm
+t_crc32.ibd
+tc_crc32.cfg
+tc_crc32.frm
+tc_crc32.ibd
+tce_crc32.cfg
+tce_crc32.frm
+tce_crc32.ibd
+te_crc32.cfg
+te_crc32.frm
+te_crc32.ibd
+tp_crc32.cfg
+tp_crc32.frm
+tp_crc32.ibd
+tpe_crc32.cfg
+tpe_crc32.frm
+tpe_crc32.ibd
+UNLOCK TABLES;
+SET GLOBAL innodb_checksum_algorithm=strict_crc32;
+ALTER TABLE tce_crc32 DISCARD TABLESPACE;
+ALTER TABLE tc_crc32 DISCARD TABLESPACE;
+ALTER TABLE te_crc32 DISCARD TABLESPACE;
+ALTER TABLE t_crc32 DISCARD TABLESPACE;
+ALTER TABLE tpe_crc32 DISCARD TABLESPACE;
+ALTER TABLE tp_crc32 DISCARD TABLESPACE;
+restore: tce_crc32 .ibd and .cfg files
+restore: tc_crc32 .ibd and .cfg files
+restore: te_crc32 .ibd and .cfg files
+restore: t_crc32 .ibd and .cfg files
+restore: tpe_crc32 .ibd and .cfg files
+restore: tp_crc32 .ibd and .cfg files
+ALTER TABLE tce_crc32 IMPORT TABLESPACE;
+update tce_crc32 set b=substr(b,1);
+ALTER TABLE tc_crc32 IMPORT TABLESPACE;
+update tc_crc32 set b=substr(b,1);
+ALTER TABLE te_crc32 IMPORT TABLESPACE;
+update te_crc32 set b=substr(b,1);
+ALTER TABLE t_crc32 IMPORT TABLESPACE;
+update t_crc32 set b=substr(b,1);
+ALTER TABLE tpe_crc32 IMPORT TABLESPACE;
+update tpe_crc32 set b=substr(b,1);
+ALTER TABLE tp_crc32 IMPORT TABLESPACE;
+update tp_crc32 set b=substr(b,1);
+SET GLOBAL innodb_checksum_algorithm=crc32;
+ALTER TABLE tce_crc32 DISCARD TABLESPACE;
+ALTER TABLE tc_crc32 DISCARD TABLESPACE;
+ALTER TABLE te_crc32 DISCARD TABLESPACE;
+ALTER TABLE t_crc32 DISCARD TABLESPACE;
+ALTER TABLE tpe_crc32 DISCARD TABLESPACE;
+ALTER TABLE tp_crc32 DISCARD TABLESPACE;
+restore: tce_crc32 .ibd and .cfg files
+restore: tc_crc32 .ibd and .cfg files
+restore: te_crc32 .ibd and .cfg files
+restore: t_crc32 .ibd and .cfg files
+restore: tpe_crc32 .ibd and .cfg files
+restore: tp_crc32 .ibd and .cfg files
+ALTER TABLE tce_crc32 IMPORT TABLESPACE;
+update tce_crc32 set b=substr(b,1);
+ALTER TABLE tc_crc32 IMPORT TABLESPACE;
+update tc_crc32 set b=substr(b,1);
+ALTER TABLE te_crc32 IMPORT TABLESPACE;
+update te_crc32 set b=substr(b,1);
+ALTER TABLE t_crc32 IMPORT TABLESPACE;
+update t_crc32 set b=substr(b,1);
+ALTER TABLE tpe_crc32 IMPORT TABLESPACE;
+update tpe_crc32 set b=substr(b,1);
+ALTER TABLE tp_crc32 IMPORT TABLESPACE;
+update tp_crc32 set b=substr(b,1);
+SET GLOBAL innodb_checksum_algorithm=strict_innodb;
+ALTER TABLE tce_crc32 DISCARD TABLESPACE;
+ALTER TABLE tc_crc32 DISCARD TABLESPACE;
+ALTER TABLE te_crc32 DISCARD TABLESPACE;
+ALTER TABLE t_crc32 DISCARD TABLESPACE;
+ALTER TABLE tpe_crc32 DISCARD TABLESPACE;
+ALTER TABLE tp_crc32 DISCARD TABLESPACE;
+restore: tce_crc32 .ibd and .cfg files
+restore: tc_crc32 .ibd and .cfg files
+restore: te_crc32 .ibd and .cfg files
+restore: t_crc32 .ibd and .cfg files
+restore: tpe_crc32 .ibd and .cfg files
+restore: tp_crc32 .ibd and .cfg files
+ALTER TABLE tce_crc32 IMPORT TABLESPACE;
+update tce_crc32 set b=substr(b,1);
+ALTER TABLE tc_crc32 IMPORT TABLESPACE;
+update tc_crc32 set b=substr(b,1);
+ALTER TABLE te_crc32 IMPORT TABLESPACE;
+update te_crc32 set b=substr(b,1);
+ALTER TABLE t_crc32 IMPORT TABLESPACE;
+update t_crc32 set b=substr(b,1);
+ALTER TABLE tpe_crc32 IMPORT TABLESPACE;
+update tpe_crc32 set b=substr(b,1);
+ALTER TABLE tp_crc32 IMPORT TABLESPACE;
+update tp_crc32 set b=substr(b,1);
+SET GLOBAL innodb_checksum_algorithm=innodb;
+ALTER TABLE tce_crc32 DISCARD TABLESPACE;
+ALTER TABLE tc_crc32 DISCARD TABLESPACE;
+ALTER TABLE te_crc32 DISCARD TABLESPACE;
+ALTER TABLE t_crc32 DISCARD TABLESPACE;
+ALTER TABLE tpe_crc32 DISCARD TABLESPACE;
+ALTER TABLE tp_crc32 DISCARD TABLESPACE;
+restore: tce_crc32 .ibd and .cfg files
+restore: tc_crc32 .ibd and .cfg files
+restore: te_crc32 .ibd and .cfg files
+restore: t_crc32 .ibd and .cfg files
+restore: tpe_crc32 .ibd and .cfg files
+restore: tp_crc32 .ibd and .cfg files
+ALTER TABLE tce_crc32 IMPORT TABLESPACE;
+update tce_crc32 set b=substr(b,1);
+ALTER TABLE tc_crc32 IMPORT TABLESPACE;
+update tc_crc32 set b=substr(b,1);
+ALTER TABLE te_crc32 IMPORT TABLESPACE;
+update te_crc32 set b=substr(b,1);
+ALTER TABLE t_crc32 IMPORT TABLESPACE;
+update t_crc32 set b=substr(b,1);
+ALTER TABLE tpe_crc32 IMPORT TABLESPACE;
+update tpe_crc32 set b=substr(b,1);
+ALTER TABLE tp_crc32 IMPORT TABLESPACE;
+update tp_crc32 set b=substr(b,1);
+SET GLOBAL innodb_checksum_algorithm=strict_none;
+ALTER TABLE tce_crc32 DISCARD TABLESPACE;
+ALTER TABLE tc_crc32 DISCARD TABLESPACE;
+ALTER TABLE te_crc32 DISCARD TABLESPACE;
+ALTER TABLE t_crc32 DISCARD TABLESPACE;
+ALTER TABLE tpe_crc32 DISCARD TABLESPACE;
+ALTER TABLE tp_crc32 DISCARD TABLESPACE;
+restore: tce_crc32 .ibd and .cfg files
+restore: tc_crc32 .ibd and .cfg files
+restore: te_crc32 .ibd and .cfg files
+restore: t_crc32 .ibd and .cfg files
+restore: tpe_crc32 .ibd and .cfg files
+restore: tp_crc32 .ibd and .cfg files
+ALTER TABLE tce_crc32 IMPORT TABLESPACE;
+update tce_crc32 set b=substr(b,1);
+ALTER TABLE tc_crc32 IMPORT TABLESPACE;
+update tc_crc32 set b=substr(b,1);
+ALTER TABLE te_crc32 IMPORT TABLESPACE;
+update te_crc32 set b=substr(b,1);
+ALTER TABLE t_crc32 IMPORT TABLESPACE;
+update t_crc32 set b=substr(b,1);
+ALTER TABLE tpe_crc32 IMPORT TABLESPACE;
+update tpe_crc32 set b=substr(b,1);
+ALTER TABLE tp_crc32 IMPORT TABLESPACE;
+update tp_crc32 set b=substr(b,1);
+SET GLOBAL innodb_checksum_algorithm=none;
+ALTER TABLE tce_crc32 DISCARD TABLESPACE;
+ALTER TABLE tc_crc32 DISCARD TABLESPACE;
+ALTER TABLE te_crc32 DISCARD TABLESPACE;
+ALTER TABLE t_crc32 DISCARD TABLESPACE;
+ALTER TABLE tpe_crc32 DISCARD TABLESPACE;
+ALTER TABLE tp_crc32 DISCARD TABLESPACE;
+restore: tce_crc32 .ibd and .cfg files
+restore: tc_crc32 .ibd and .cfg files
+restore: te_crc32 .ibd and .cfg files
+restore: t_crc32 .ibd and .cfg files
+restore: tpe_crc32 .ibd and .cfg files
+restore: tp_crc32 .ibd and .cfg files
+ALTER TABLE tce_crc32 IMPORT TABLESPACE;
+update tce_crc32 set b=substr(b,1);
+ALTER TABLE tc_crc32 IMPORT TABLESPACE;
+update tc_crc32 set b=substr(b,1);
+ALTER TABLE te_crc32 IMPORT TABLESPACE;
+update te_crc32 set b=substr(b,1);
+ALTER TABLE t_crc32 IMPORT TABLESPACE;
+update t_crc32 set b=substr(b,1);
+ALTER TABLE tpe_crc32 IMPORT TABLESPACE;
+update tpe_crc32 set b=substr(b,1);
+ALTER TABLE tp_crc32 IMPORT TABLESPACE;
+update tp_crc32 set b=substr(b,1);
+CHECK TABLE tce_crc32, tc_crc32, te_crc32,
+t_crc32, tpe_crc32, tp_crc32;
+Table Op Msg_type Msg_text
+test.tce_crc32 check status OK
+test.tc_crc32 check status OK
+test.te_crc32 check status OK
+test.t_crc32 check status OK
+test.tpe_crc32 check status OK
+test.tp_crc32 check status OK
+DROP TABLE tce_crc32, tc_crc32, te_crc32,
+t_crc32, tpe_crc32, tp_crc32;
+SET GLOBAL innodb_checksum_algorithm=innodb;
+create table tce_innodb(a serial, b blob, index(b(10))) engine=innodb
+ROW_FORMAT=COMPRESSED encrypted=yes;
+create table tc_innodb(a serial, b blob, index(b(10))) engine=innodb
+ROW_FORMAT=COMPRESSED encrypted=no;
+create table te_innodb(a serial, b blob, index(b(10))) engine=innodb
+encrypted=yes;
+create table t_innodb(a serial, b blob, index(b(10))) engine=innodb
+encrypted=no;
+create table tpe_innodb(a serial, b blob, index(b(10))) engine=innodb
+page_compressed=yes encrypted=yes;
+create table tp_innodb(a serial, b blob, index(b(10))) engine=innodb
+page_compressed=yes encrypted=no;
+begin;
+insert into tce_innodb(b) values (repeat('secret',20));
+insert into tc_innodb(b) values (repeat('secret',20));
+insert into te_innodb(b) values (repeat('secret',20));
+insert into t_innodb(b) values (repeat('secret',20));
+insert into tpe_innodb(b) values (repeat('secret',20));
+insert into tp_innodb(b) values (repeat('secret',20));
+commit;
+FLUSH TABLES tce_innodb, tc_innodb, te_innodb,
+t_innodb, tpe_innodb, tp_innodb FOR EXPORT;
+backup: tce_innodb
+backup: tc_innodb
+backup: te_innodb
+backup: t_innodb
+backup: tpe_innodb
+backup: tp_innodb
+t_innodb.cfg
+t_innodb.frm
+t_innodb.ibd
+tc_innodb.cfg
+tc_innodb.frm
+tc_innodb.ibd
+tce_innodb.cfg
+tce_innodb.frm
+tce_innodb.ibd
+te_innodb.cfg
+te_innodb.frm
+te_innodb.ibd
+tp_innodb.cfg
+tp_innodb.frm
+tp_innodb.ibd
+tpe_innodb.cfg
+tpe_innodb.frm
+tpe_innodb.ibd
+UNLOCK TABLES;
+SET GLOBAL innodb_checksum_algorithm=strict_crc32;
+ALTER TABLE tce_innodb DISCARD TABLESPACE;
+ALTER TABLE tc_innodb DISCARD TABLESPACE;
+ALTER TABLE te_innodb DISCARD TABLESPACE;
+ALTER TABLE t_innodb DISCARD TABLESPACE;
+ALTER TABLE tpe_innodb DISCARD TABLESPACE;
+ALTER TABLE tp_innodb DISCARD TABLESPACE;
+restore: tce_innodb .ibd and .cfg files
+restore: tc_innodb .ibd and .cfg files
+restore: te_innodb .ibd and .cfg files
+restore: t_innodb .ibd and .cfg files
+restore: tpe_innodb .ibd and .cfg files
+restore: tp_innodb .ibd and .cfg files
+ALTER TABLE tce_innodb IMPORT TABLESPACE;
+update tce_innodb set b=substr(b,1);
+ALTER TABLE tc_innodb IMPORT TABLESPACE;
+update tc_innodb set b=substr(b,1);
+ALTER TABLE te_innodb IMPORT TABLESPACE;
+update te_innodb set b=substr(b,1);
+ALTER TABLE t_innodb IMPORT TABLESPACE;
+update t_innodb set b=substr(b,1);
+ALTER TABLE tpe_innodb IMPORT TABLESPACE;
+update tpe_innodb set b=substr(b,1);
+ALTER TABLE tp_innodb IMPORT TABLESPACE;
+update tp_innodb set b=substr(b,1);
+SET GLOBAL innodb_checksum_algorithm=crc32;
+ALTER TABLE tce_innodb DISCARD TABLESPACE;
+ALTER TABLE tc_innodb DISCARD TABLESPACE;
+ALTER TABLE te_innodb DISCARD TABLESPACE;
+ALTER TABLE t_innodb DISCARD TABLESPACE;
+ALTER TABLE tpe_innodb DISCARD TABLESPACE;
+ALTER TABLE tp_innodb DISCARD TABLESPACE;
+restore: tce_innodb .ibd and .cfg files
+restore: tc_innodb .ibd and .cfg files
+restore: te_innodb .ibd and .cfg files
+restore: t_innodb .ibd and .cfg files
+restore: tpe_innodb .ibd and .cfg files
+restore: tp_innodb .ibd and .cfg files
+ALTER TABLE tce_innodb IMPORT TABLESPACE;
+update tce_innodb set b=substr(b,1);
+ALTER TABLE tc_innodb IMPORT TABLESPACE;
+update tc_innodb set b=substr(b,1);
+ALTER TABLE te_innodb IMPORT TABLESPACE;
+update te_innodb set b=substr(b,1);
+ALTER TABLE t_innodb IMPORT TABLESPACE;
+update t_innodb set b=substr(b,1);
+ALTER TABLE tpe_innodb IMPORT TABLESPACE;
+update tpe_innodb set b=substr(b,1);
+ALTER TABLE tp_innodb IMPORT TABLESPACE;
+update tp_innodb set b=substr(b,1);
+SET GLOBAL innodb_checksum_algorithm=strict_innodb;
+ALTER TABLE tce_innodb DISCARD TABLESPACE;
+ALTER TABLE tc_innodb DISCARD TABLESPACE;
+ALTER TABLE te_innodb DISCARD TABLESPACE;
+ALTER TABLE t_innodb DISCARD TABLESPACE;
+ALTER TABLE tpe_innodb DISCARD TABLESPACE;
+ALTER TABLE tp_innodb DISCARD TABLESPACE;
+restore: tce_innodb .ibd and .cfg files
+restore: tc_innodb .ibd and .cfg files
+restore: te_innodb .ibd and .cfg files
+restore: t_innodb .ibd and .cfg files
+restore: tpe_innodb .ibd and .cfg files
+restore: tp_innodb .ibd and .cfg files
+ALTER TABLE tce_innodb IMPORT TABLESPACE;
+update tce_innodb set b=substr(b,1);
+ALTER TABLE tc_innodb IMPORT TABLESPACE;
+update tc_innodb set b=substr(b,1);
+ALTER TABLE te_innodb IMPORT TABLESPACE;
+update te_innodb set b=substr(b,1);
+ALTER TABLE t_innodb IMPORT TABLESPACE;
+update t_innodb set b=substr(b,1);
+ALTER TABLE tpe_innodb IMPORT TABLESPACE;
+update tpe_innodb set b=substr(b,1);
+ALTER TABLE tp_innodb IMPORT TABLESPACE;
+update tp_innodb set b=substr(b,1);
+SET GLOBAL innodb_checksum_algorithm=innodb;
+ALTER TABLE tce_innodb DISCARD TABLESPACE;
+ALTER TABLE tc_innodb DISCARD TABLESPACE;
+ALTER TABLE te_innodb DISCARD TABLESPACE;
+ALTER TABLE t_innodb DISCARD TABLESPACE;
+ALTER TABLE tpe_innodb DISCARD TABLESPACE;
+ALTER TABLE tp_innodb DISCARD TABLESPACE;
+restore: tce_innodb .ibd and .cfg files
+restore: tc_innodb .ibd and .cfg files
+restore: te_innodb .ibd and .cfg files
+restore: t_innodb .ibd and .cfg files
+restore: tpe_innodb .ibd and .cfg files
+restore: tp_innodb .ibd and .cfg files
+ALTER TABLE tce_innodb IMPORT TABLESPACE;
+update tce_innodb set b=substr(b,1);
+ALTER TABLE tc_innodb IMPORT TABLESPACE;
+update tc_innodb set b=substr(b,1);
+ALTER TABLE te_innodb IMPORT TABLESPACE;
+update te_innodb set b=substr(b,1);
+ALTER TABLE t_innodb IMPORT TABLESPACE;
+update t_innodb set b=substr(b,1);
+ALTER TABLE tpe_innodb IMPORT TABLESPACE;
+update tpe_innodb set b=substr(b,1);
+ALTER TABLE tp_innodb IMPORT TABLESPACE;
+update tp_innodb set b=substr(b,1);
+SET GLOBAL innodb_checksum_algorithm=strict_none;
+ALTER TABLE tce_innodb DISCARD TABLESPACE;
+ALTER TABLE tc_innodb DISCARD TABLESPACE;
+ALTER TABLE te_innodb DISCARD TABLESPACE;
+ALTER TABLE t_innodb DISCARD TABLESPACE;
+ALTER TABLE tpe_innodb DISCARD TABLESPACE;
+ALTER TABLE tp_innodb DISCARD TABLESPACE;
+restore: tce_innodb .ibd and .cfg files
+restore: tc_innodb .ibd and .cfg files
+restore: te_innodb .ibd and .cfg files
+restore: t_innodb .ibd and .cfg files
+restore: tpe_innodb .ibd and .cfg files
+restore: tp_innodb .ibd and .cfg files
+ALTER TABLE tce_innodb IMPORT TABLESPACE;
+update tce_innodb set b=substr(b,1);
+ALTER TABLE tc_innodb IMPORT TABLESPACE;
+update tc_innodb set b=substr(b,1);
+ALTER TABLE te_innodb IMPORT TABLESPACE;
+update te_innodb set b=substr(b,1);
+ALTER TABLE t_innodb IMPORT TABLESPACE;
+update t_innodb set b=substr(b,1);
+ALTER TABLE tpe_innodb IMPORT TABLESPACE;
+update tpe_innodb set b=substr(b,1);
+ALTER TABLE tp_innodb IMPORT TABLESPACE;
+update tp_innodb set b=substr(b,1);
+SET GLOBAL innodb_checksum_algorithm=none;
+ALTER TABLE tce_innodb DISCARD TABLESPACE;
+ALTER TABLE tc_innodb DISCARD TABLESPACE;
+ALTER TABLE te_innodb DISCARD TABLESPACE;
+ALTER TABLE t_innodb DISCARD TABLESPACE;
+ALTER TABLE tpe_innodb DISCARD TABLESPACE;
+ALTER TABLE tp_innodb DISCARD TABLESPACE;
+restore: tce_innodb .ibd and .cfg files
+restore: tc_innodb .ibd and .cfg files
+restore: te_innodb .ibd and .cfg files
+restore: t_innodb .ibd and .cfg files
+restore: tpe_innodb .ibd and .cfg files
+restore: tp_innodb .ibd and .cfg files
+ALTER TABLE tce_innodb IMPORT TABLESPACE;
+update tce_innodb set b=substr(b,1);
+ALTER TABLE tc_innodb IMPORT TABLESPACE;
+update tc_innodb set b=substr(b,1);
+ALTER TABLE te_innodb IMPORT TABLESPACE;
+update te_innodb set b=substr(b,1);
+ALTER TABLE t_innodb IMPORT TABLESPACE;
+update t_innodb set b=substr(b,1);
+ALTER TABLE tpe_innodb IMPORT TABLESPACE;
+update tpe_innodb set b=substr(b,1);
+ALTER TABLE tp_innodb IMPORT TABLESPACE;
+update tp_innodb set b=substr(b,1);
+CHECK TABLE tce_innodb, tc_innodb, te_innodb,
+t_innodb, tpe_innodb, tp_innodb;
+Table Op Msg_type Msg_text
+test.tce_innodb check status OK
+test.tc_innodb check status OK
+test.te_innodb check status OK
+test.t_innodb check status OK
+test.tpe_innodb check status OK
+test.tp_innodb check status OK
+DROP TABLE tce_innodb, tc_innodb, te_innodb,
+t_innodb, tpe_innodb, tp_innodb;
+SET GLOBAL innodb_checksum_algorithm=none;
+create table tce_none(a serial, b blob, index(b(10))) engine=innodb
+ROW_FORMAT=COMPRESSED encrypted=yes;
+create table tc_none(a serial, b blob, index(b(10))) engine=innodb
+ROW_FORMAT=COMPRESSED encrypted=no;
+create table te_none(a serial, b blob, index(b(10))) engine=innodb
+encrypted=yes;
+create table t_none(a serial, b blob, index(b(10))) engine=innodb
+encrypted=no;
+create table tpe_none(a serial, b blob, index(b(10))) engine=innodb
+page_compressed=yes encrypted=yes;
+create table tp_none(a serial, b blob, index(b(10))) engine=innodb
+page_compressed=yes encrypted=no;
+begin;
+insert into tce_none(b) values (repeat('secret',20));
+insert into tc_none(b) values (repeat('secret',20));
+insert into te_none(b) values (repeat('secret',20));
+insert into t_none(b) values (repeat('secret',20));
+insert into tpe_none(b) values (repeat('secret',20));
+insert into tp_none(b) values (repeat('secret',20));
+commit;
+FLUSH TABLES tce_none, tc_none, te_none,
+t_none, tpe_none, tp_none FOR EXPORT;
+backup: tce_none
+backup: tc_none
+backup: te_none
+backup: t_none
+backup: tpe_none
+backup: tp_none
+t_none.cfg
+t_none.frm
+t_none.ibd
+tc_none.cfg
+tc_none.frm
+tc_none.ibd
+tce_none.cfg
+tce_none.frm
+tce_none.ibd
+te_none.cfg
+te_none.frm
+te_none.ibd
+tp_none.cfg
+tp_none.frm
+tp_none.ibd
+tpe_none.cfg
+tpe_none.frm
+tpe_none.ibd
+UNLOCK TABLES;
+SET GLOBAL innodb_checksum_algorithm=strict_crc32;
+ALTER TABLE tce_none DISCARD TABLESPACE;
+ALTER TABLE tc_none DISCARD TABLESPACE;
+ALTER TABLE te_none DISCARD TABLESPACE;
+ALTER TABLE t_none DISCARD TABLESPACE;
+ALTER TABLE tpe_none DISCARD TABLESPACE;
+ALTER TABLE tp_none DISCARD TABLESPACE;
+restore: tce_none .ibd and .cfg files
+restore: tc_none .ibd and .cfg files
+restore: te_none .ibd and .cfg files
+restore: t_none .ibd and .cfg files
+restore: tpe_none .ibd and .cfg files
+restore: tp_none .ibd and .cfg files
+ALTER TABLE tce_none IMPORT TABLESPACE;
+update tce_none set b=substr(b,1);
+ALTER TABLE tc_none IMPORT TABLESPACE;
+update tc_none set b=substr(b,1);
+ALTER TABLE te_none IMPORT TABLESPACE;
+update te_none set b=substr(b,1);
+ALTER TABLE t_none IMPORT TABLESPACE;
+update t_none set b=substr(b,1);
+ALTER TABLE tpe_none IMPORT TABLESPACE;
+update tpe_none set b=substr(b,1);
+ALTER TABLE tp_none IMPORT TABLESPACE;
+update tp_none set b=substr(b,1);
+SET GLOBAL innodb_checksum_algorithm=crc32;
+ALTER TABLE tce_none DISCARD TABLESPACE;
+ALTER TABLE tc_none DISCARD TABLESPACE;
+ALTER TABLE te_none DISCARD TABLESPACE;
+ALTER TABLE t_none DISCARD TABLESPACE;
+ALTER TABLE tpe_none DISCARD TABLESPACE;
+ALTER TABLE tp_none DISCARD TABLESPACE;
+restore: tce_none .ibd and .cfg files
+restore: tc_none .ibd and .cfg files
+restore: te_none .ibd and .cfg files
+restore: t_none .ibd and .cfg files
+restore: tpe_none .ibd and .cfg files
+restore: tp_none .ibd and .cfg files
+ALTER TABLE tce_none IMPORT TABLESPACE;
+update tce_none set b=substr(b,1);
+ALTER TABLE tc_none IMPORT TABLESPACE;
+update tc_none set b=substr(b,1);
+ALTER TABLE te_none IMPORT TABLESPACE;
+update te_none set b=substr(b,1);
+ALTER TABLE t_none IMPORT TABLESPACE;
+update t_none set b=substr(b,1);
+ALTER TABLE tpe_none IMPORT TABLESPACE;
+update tpe_none set b=substr(b,1);
+ALTER TABLE tp_none IMPORT TABLESPACE;
+update tp_none set b=substr(b,1);
+SET GLOBAL innodb_checksum_algorithm=strict_innodb;
+ALTER TABLE tce_none DISCARD TABLESPACE;
+ALTER TABLE tc_none DISCARD TABLESPACE;
+ALTER TABLE te_none DISCARD TABLESPACE;
+ALTER TABLE t_none DISCARD TABLESPACE;
+ALTER TABLE tpe_none DISCARD TABLESPACE;
+ALTER TABLE tp_none DISCARD TABLESPACE;
+restore: tce_none .ibd and .cfg files
+restore: tc_none .ibd and .cfg files
+restore: te_none .ibd and .cfg files
+restore: t_none .ibd and .cfg files
+restore: tpe_none .ibd and .cfg files
+restore: tp_none .ibd and .cfg files
+ALTER TABLE tce_none IMPORT TABLESPACE;
+update tce_none set b=substr(b,1);
+ALTER TABLE tc_none IMPORT TABLESPACE;
+update tc_none set b=substr(b,1);
+ALTER TABLE te_none IMPORT TABLESPACE;
+update te_none set b=substr(b,1);
+ALTER TABLE t_none IMPORT TABLESPACE;
+update t_none set b=substr(b,1);
+ALTER TABLE tpe_none IMPORT TABLESPACE;
+update tpe_none set b=substr(b,1);
+ALTER TABLE tp_none IMPORT TABLESPACE;
+update tp_none set b=substr(b,1);
+SET GLOBAL innodb_checksum_algorithm=innodb;
+ALTER TABLE tce_none DISCARD TABLESPACE;
+ALTER TABLE tc_none DISCARD TABLESPACE;
+ALTER TABLE te_none DISCARD TABLESPACE;
+ALTER TABLE t_none DISCARD TABLESPACE;
+ALTER TABLE tpe_none DISCARD TABLESPACE;
+ALTER TABLE tp_none DISCARD TABLESPACE;
+restore: tce_none .ibd and .cfg files
+restore: tc_none .ibd and .cfg files
+restore: te_none .ibd and .cfg files
+restore: t_none .ibd and .cfg files
+restore: tpe_none .ibd and .cfg files
+restore: tp_none .ibd and .cfg files
+ALTER TABLE tce_none IMPORT TABLESPACE;
+update tce_none set b=substr(b,1);
+ALTER TABLE tc_none IMPORT TABLESPACE;
+update tc_none set b=substr(b,1);
+ALTER TABLE te_none IMPORT TABLESPACE;
+update te_none set b=substr(b,1);
+ALTER TABLE t_none IMPORT TABLESPACE;
+update t_none set b=substr(b,1);
+ALTER TABLE tpe_none IMPORT TABLESPACE;
+update tpe_none set b=substr(b,1);
+ALTER TABLE tp_none IMPORT TABLESPACE;
+update tp_none set b=substr(b,1);
+SET GLOBAL innodb_checksum_algorithm=strict_none;
+ALTER TABLE tce_none DISCARD TABLESPACE;
+ALTER TABLE tc_none DISCARD TABLESPACE;
+ALTER TABLE te_none DISCARD TABLESPACE;
+ALTER TABLE t_none DISCARD TABLESPACE;
+ALTER TABLE tpe_none DISCARD TABLESPACE;
+ALTER TABLE tp_none DISCARD TABLESPACE;
+restore: tce_none .ibd and .cfg files
+restore: tc_none .ibd and .cfg files
+restore: te_none .ibd and .cfg files
+restore: t_none .ibd and .cfg files
+restore: tpe_none .ibd and .cfg files
+restore: tp_none .ibd and .cfg files
+ALTER TABLE tce_none IMPORT TABLESPACE;
+update tce_none set b=substr(b,1);
+ALTER TABLE tc_none IMPORT TABLESPACE;
+update tc_none set b=substr(b,1);
+ALTER TABLE te_none IMPORT TABLESPACE;
+update te_none set b=substr(b,1);
+ALTER TABLE t_none IMPORT TABLESPACE;
+update t_none set b=substr(b,1);
+ALTER TABLE tpe_none IMPORT TABLESPACE;
+update tpe_none set b=substr(b,1);
+ALTER TABLE tp_none IMPORT TABLESPACE;
+update tp_none set b=substr(b,1);
+SET GLOBAL innodb_checksum_algorithm=none;
+ALTER TABLE tce_none DISCARD TABLESPACE;
+ALTER TABLE tc_none DISCARD TABLESPACE;
+ALTER TABLE te_none DISCARD TABLESPACE;
+ALTER TABLE t_none DISCARD TABLESPACE;
+ALTER TABLE tpe_none DISCARD TABLESPACE;
+ALTER TABLE tp_none DISCARD TABLESPACE;
+restore: tce_none .ibd and .cfg files
+restore: tc_none .ibd and .cfg files
+restore: te_none .ibd and .cfg files
+restore: t_none .ibd and .cfg files
+restore: tpe_none .ibd and .cfg files
+restore: tp_none .ibd and .cfg files
+ALTER TABLE tce_none IMPORT TABLESPACE;
+update tce_none set b=substr(b,1);
+ALTER TABLE tc_none IMPORT TABLESPACE;
+update tc_none set b=substr(b,1);
+ALTER TABLE te_none IMPORT TABLESPACE;
+update te_none set b=substr(b,1);
+ALTER TABLE t_none IMPORT TABLESPACE;
+update t_none set b=substr(b,1);
+ALTER TABLE tpe_none IMPORT TABLESPACE;
+update tpe_none set b=substr(b,1);
+ALTER TABLE tp_none IMPORT TABLESPACE;
+update tp_none set b=substr(b,1);
+CHECK TABLE tce_none, tc_none, te_none,
+t_none, tpe_none, tp_none;
+Table Op Msg_type Msg_text
+test.tce_none check status OK
+test.tc_none check status OK
+test.te_none check status OK
+test.t_none check status OK
+test.tpe_none check status OK
+test.tp_none check status OK
+DROP TABLE tce_none, tc_none, te_none,
+t_none, tpe_none, tp_none;
+SET GLOBAL innodb_file_per_table = @saved_file_per_table;
+SET GLOBAL innodb_checksum_algorithm = @saved_checksum_algorithm;
+SET GLOBAL innodb_encrypt_tables = @saved_encrypt_tables;
+SET GLOBAL innodb_encryption_threads = @saved_encryption_threads;
+SET GLOBAL innodb_default_encryption_key_id = @saved_encryption_key_id;
diff --git a/mysql-test/suite/encryption/r/innodb-compressed-blob.result b/mysql-test/suite/encryption/r/innodb-compressed-blob.result
index 5753188b168..bf43e1b30d6 100644
--- a/mysql-test/suite/encryption/r/innodb-compressed-blob.result
+++ b/mysql-test/suite/encryption/r/innodb-compressed-blob.result
@@ -1,4 +1,5 @@
call mtr.add_suppression("InnoDB: The page \\[page id: space=[1-9][0-9]*, page number=[1-9][0-9]*\\] in file '.*test.t[123]\\.ibd' cannot be decrypted\\.");
+call mtr.add_suppression("InnoDB: Unable to decompress ..test.t[1-3]\\.ibd\\[page id: space=[1-9][0-9]*, page number=[0-9]+\\]");
# Restart mysqld --file-key-management-filename=keys2.txt
SET GLOBAL innodb_file_per_table = ON;
set GLOBAL innodb_default_encryption_key_id=4;
diff --git a/mysql-test/suite/encryption/r/innodb-first-page-read.result b/mysql-test/suite/encryption/r/innodb-first-page-read.result
new file mode 100644
index 00000000000..9b57b9de5f9
--- /dev/null
+++ b/mysql-test/suite/encryption/r/innodb-first-page-read.result
@@ -0,0 +1,89 @@
+SET GLOBAL innodb_file_format = `Barracuda`;
+SET GLOBAL innodb_file_per_table = ON;
+create database innodb_test;
+use innodb_test;
+create table innodb_normal(c1 bigint not null, b char(200)) engine=innodb;
+create table innodb_compact(c1 bigint not null, b char(200)) engine=innodb row_format=compact;
+create table innodb_dynamic(c1 bigint not null, b char(200)) engine=innodb row_format=dynamic;
+create table innodb_compressed(c1 bigint not null, b char(200)) engine=innodb row_format=compressed;
+create table innodb_compressed1(c1 bigint not null, b char(200)) engine=innodb row_format=compressed key_block_size=1;
+create table innodb_compressed2(c1 bigint not null, b char(200)) engine=innodb row_format=compressed key_block_size=2;
+create table innodb_compressed4(c1 bigint not null, b char(200)) engine=innodb row_format=compressed key_block_size=4;
+create table innodb_compressed8(c1 bigint not null, b char(200)) engine=innodb row_format=compressed key_block_size=8;
+create table innodb_compressed16(c1 bigint not null, b char(200)) engine=innodb row_format=compressed key_block_size=16;
+create table innodb_redundant(c1 bigint not null, b char(200)) engine=innodb row_format=redundant;
+create table innodb_pagecomp(c1 bigint not null, b char(200)) engine=innodb page_compressed=yes;
+create table innodb_pagecomp1(c1 bigint not null, b char(200)) engine=innodb page_compressed=yes page_compression_level=1;
+create table innodb_pagecomp2(c1 bigint not null, b char(200)) engine=innodb page_compressed=yes page_compression_level=2;
+create table innodb_pagecomp3(c1 bigint not null, b char(200)) engine=innodb page_compressed=yes page_compression_level=3;
+create table innodb_pagecomp4(c1 bigint not null, b char(200)) engine=innodb page_compressed=yes page_compression_level=4;
+create table innodb_pagecomp5(c1 bigint not null, b char(200)) engine=innodb page_compressed=yes page_compression_level=5;
+create table innodb_pagecomp6(c1 bigint not null, b char(200)) engine=innodb page_compressed=yes page_compression_level=6;
+create table innodb_pagecomp7(c1 bigint not null, b char(200)) engine=innodb page_compressed=yes page_compression_level=7;
+create table innodb_pagecomp8(c1 bigint not null, b char(200)) engine=innodb page_compressed=yes page_compression_level=8;
+create table innodb_pagecomp9(c1 bigint not null, b char(200)) engine=innodb page_compressed=yes page_compression_level=9;
+create table innodb_datadir1(c1 bigint not null, b char(200)) engine=innodb DATA DIRECTORY='MYSQL_TMP_DIR';
+create table innodb_datadir2(c1 bigint not null, b char(200)) engine=innodb row_format=compressed DATA DIRECTORY='MYSQL_TMP_DIR';
+create table innodb_datadir3(c1 bigint not null, b char(200)) engine=innodb page_compressed=yes DATA DIRECTORY='MYSQL_TMP_DIR';
+begin;
+insert into innodb_normal values (1,'secret');
+insert into innodb_compact select * from innodb_normal;
+insert into innodb_dynamic select * from innodb_normal;
+insert into innodb_compressed select * from innodb_normal;
+insert into innodb_compressed1 select * from innodb_normal;
+insert into innodb_compressed2 select * from innodb_normal;
+insert into innodb_compressed4 select * from innodb_normal;
+insert into innodb_compressed8 select * from innodb_normal;
+insert into innodb_compressed16 select * from innodb_normal;
+insert into innodb_redundant select * from innodb_normal;
+insert into innodb_pagecomp select * from innodb_normal;
+insert into innodb_pagecomp1 select * from innodb_normal;
+insert into innodb_pagecomp2 select * from innodb_normal;
+insert into innodb_pagecomp3 select * from innodb_normal;
+insert into innodb_pagecomp4 select * from innodb_normal;
+insert into innodb_pagecomp5 select * from innodb_normal;
+insert into innodb_pagecomp6 select * from innodb_normal;
+insert into innodb_pagecomp7 select * from innodb_normal;
+insert into innodb_pagecomp8 select * from innodb_normal;
+insert into innodb_pagecomp9 select * from innodb_normal;
+insert into innodb_datadir1 select * from innodb_normal;
+insert into innodb_datadir2 select * from innodb_normal;
+insert into innodb_datadir3 select * from innodb_normal;
+commit;
+# Restart server and see how many page 0's are read
+# result should not exceed the actual number of tablespaces
+# i.e. <= 23 + 3 = 26
+show status like 'innodb_pages0_read%';
+Variable_name Value
+Innodb_pages0_read 26
+use innodb_test;
+show status like 'innodb_pages0_read%';
+Variable_name Value
+Innodb_pages0_read 26
+use test;
+show status like 'innodb_pages0_read%';
+Variable_name Value
+Innodb_pages0_read 26
+set global innodb_encrypt_tables=OFF;
+# wait until tables are decrypted
+show status like 'innodb_pages0_read%';
+Variable_name Value
+Innodb_pages0_read 26
+use innodb_test;
+show status like 'innodb_pages0_read%';
+Variable_name Value
+Innodb_pages0_read 26
+use test;
+# restart and see how many page 0's are read
+show status like 'innodb_pages0_read%';
+Variable_name Value
+Innodb_pages0_read 26
+use innodb_test;
+show status like 'innodb_pages0_read%';
+Variable_name Value
+Innodb_pages0_read 26
+use test;
+drop database innodb_test;
+show status like 'innodb_pages0_read%';
+Variable_name Value
+Innodb_pages0_read 26
diff --git a/mysql-test/suite/encryption/r/innodb-key-rotation-disable.result b/mysql-test/suite/encryption/r/innodb-key-rotation-disable.result
index feaede20f2a..a642ad20183 100644
--- a/mysql-test/suite/encryption/r/innodb-key-rotation-disable.result
+++ b/mysql-test/suite/encryption/r/innodb-key-rotation-disable.result
@@ -62,5 +62,4 @@ FOUND 1 /public/ in t7.ibd
FOUND 1 /public/ in t8.ibd
# t9 page compressed expecting NOT FOUND
NOT FOUND /public/ in t9.ibd
-use test;
drop database enctests;
diff --git a/mysql-test/suite/encryption/r/innodb_encryption-page-compression.result b/mysql-test/suite/encryption/r/innodb_encryption-page-compression.result
index ec92825ac8e..359f285901c 100644
--- a/mysql-test/suite/encryption/r/innodb_encryption-page-compression.result
+++ b/mysql-test/suite/encryption/r/innodb_encryption-page-compression.result
@@ -1,5 +1,3 @@
-SET GLOBAL innodb_file_format = `Barracuda`;
-SET GLOBAL innodb_file_per_table = ON;
SET GLOBAL innodb_encryption_threads = 4;
SET GLOBAL innodb_encrypt_tables = on;
set global innodb_compression_algorithm = 1;
diff --git a/mysql-test/suite/encryption/r/innodb_lotoftables.result b/mysql-test/suite/encryption/r/innodb_lotoftables.result
index cf5724b527a..06b832ea6fc 100644
--- a/mysql-test/suite/encryption/r/innodb_lotoftables.result
+++ b/mysql-test/suite/encryption/r/innodb_lotoftables.result
@@ -10,13 +10,13 @@ create database innodb_encrypted_1;
use innodb_encrypted_1;
show status like 'innodb_pages0_read%';
Variable_name Value
-Innodb_pages0_read 3
+Innodb_pages0_read 1
set autocommit=0;
set autocommit=1;
commit work;
show status like 'innodb_pages0_read%';
Variable_name Value
-Innodb_pages0_read 3
+Innodb_pages0_read 1
# should be 100
SELECT COUNT(*) FROM INFORMATION_SCHEMA.INNODB_TABLESPACES_ENCRYPTION WHERE NAME LIKE 'innodb_encrypted%';
COUNT(*)
@@ -86,47 +86,47 @@ Innodb_pages0_read 3
# Restart Success!
show status like 'innodb_pages0_read%';
Variable_name Value
-Innodb_pages0_read 303
+Innodb_pages0_read 1
show status like 'innodb_pages0_read%';
Variable_name Value
-Innodb_pages0_read 303
+Innodb_pages0_read 1
use test;
show status like 'innodb_pages0_read%';
Variable_name Value
-Innodb_pages0_read 303
+Innodb_pages0_read 1
use innodb_encrypted_1;
show status like 'innodb_pages0_read%';
Variable_name Value
-Innodb_pages0_read 303
+Innodb_pages0_read 1
use innodb_encrypted_2;
show status like 'innodb_pages0_read%';
Variable_name Value
-Innodb_pages0_read 303
+Innodb_pages0_read 1
use innodb_encrypted_3;
show status like 'innodb_pages0_read%';
Variable_name Value
-Innodb_pages0_read 303
+Innodb_pages0_read 1
use innodb_encrypted_1;
show status like 'innodb_pages0_read%';
Variable_name Value
-Innodb_pages0_read 303
+Innodb_pages0_read 1
show status like 'innodb_pages0_read%';
Variable_name Value
-Innodb_pages0_read 303
+Innodb_pages0_read 101
use innodb_encrypted_2;
show status like 'innodb_pages0_read%';
Variable_name Value
-Innodb_pages0_read 303
+Innodb_pages0_read 101
show status like 'innodb_pages0_read%';
Variable_name Value
-Innodb_pages0_read 303
+Innodb_pages0_read 201
use innodb_encrypted_3;
show status like 'innodb_pages0_read%';
Variable_name Value
-Innodb_pages0_read 303
+Innodb_pages0_read 201
show status like 'innodb_pages0_read%';
Variable_name Value
-Innodb_pages0_read 303
+Innodb_pages0_read 301
SELECT COUNT(*) FROM INFORMATION_SCHEMA.INNODB_TABLESPACES_ENCRYPTION WHERE MIN_KEY_VERSION = 0 AND NAME LIKE 'innodb_encrypted%';
COUNT(*)
100
diff --git a/mysql-test/suite/encryption/t/innodb-checksum-algorithm.test b/mysql-test/suite/encryption/t/innodb-checksum-algorithm.test
new file mode 100644
index 00000000000..7eaa1bd64c6
--- /dev/null
+++ b/mysql-test/suite/encryption/t/innodb-checksum-algorithm.test
@@ -0,0 +1,120 @@
+-- source include/innodb_page_size.inc
+-- source include/have_file_key_management_plugin.inc
+
+SET @saved_file_per_table = @@global.innodb_file_per_table;
+SET @saved_checksum_algorithm = @@global.innodb_checksum_algorithm;
+SET @saved_encrypt_tables = @@global.innodb_encrypt_tables;
+SET @saved_encryption_threads = @@global.innodb_encryption_threads;
+SET @saved_encryption_key_id = @@global.innodb_default_encryption_key_id;
+
+SET GLOBAL innodb_file_per_table = ON;
+SET GLOBAL innodb_encrypt_tables = ON;
+SET GLOBAL innodb_encryption_threads = 4;
+
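+# Pages written under one checksum algorithm are read back under the strict_*
+# variants of the others; suppress the resulting server log messages.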
+call mtr.add_suppression("InnoDB: innodb_checksum_algorithm is set to \"strict_(crc32|none|innodb)\" but the page \\[page id: space=[0-9]+, page number=[0-9]+\\] contains a valid checksum \"(innodb|none|crc32)\"");
+
+SET GLOBAL innodb_checksum_algorithm = innodb;
+SET GLOBAL innodb_default_encryption_key_id=4;
+
+let MYSQLD_DATADIR =`SELECT @@datadir`;
+
+# ROW_FORMAT=COMPRESSED is unavailable with innodb_page_size=32k or 64k
+let $row_format_compressed= `select case when @@global.innodb_page_size>16384
+then 'ROW_FORMAT=DYNAMIC' else 'ROW_FORMAT=COMPRESSED' end`;
+
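+# Outer loop: create and populate the tables under each of the three
+# non-strict checksum algorithms, then export their tablespaces.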
+let $from = 3;
+while ($from)
+{
+dec $from;
+let checksum = `select case $from
+ when 0 then 'none'
+ when 1 then 'innodb'
+ when 2 then 'crc32'
+ end`;
+eval SET GLOBAL innodb_checksum_algorithm=$checksum;
+
+eval create table tce_$checksum(a serial, b blob, index(b(10))) engine=innodb
+$row_format_compressed encrypted=yes;
+eval create table tc_$checksum(a serial, b blob, index(b(10))) engine=innodb
+$row_format_compressed encrypted=no;
+eval create table te_$checksum(a serial, b blob, index(b(10))) engine=innodb
+encrypted=yes;
+eval create table t_$checksum(a serial, b blob, index(b(10))) engine=innodb
+encrypted=no;
+eval create table tpe_$checksum(a serial, b blob, index(b(10))) engine=innodb
+page_compressed=yes encrypted=yes;
+eval create table tp_$checksum(a serial, b blob, index(b(10))) engine=innodb
+page_compressed=yes encrypted=no;
+
+begin;
+eval insert into tce_$checksum(b) values (repeat('secret',20));
+eval insert into tc_$checksum(b) values (repeat('secret',20));
+eval insert into te_$checksum(b) values (repeat('secret',20));
+eval insert into t_$checksum(b) values (repeat('secret',20));
+eval insert into tpe_$checksum(b) values (repeat('secret',20));
+eval insert into tp_$checksum(b) values (repeat('secret',20));
+commit;
+
+eval FLUSH TABLES tce_$checksum, tc_$checksum, te_$checksum,
+t_$checksum, tpe_$checksum, tp_$checksum FOR EXPORT;
+perl;
+do "$ENV{MTR_SUITE_DIR}/include/innodb-util.pl";
+my @tables = ("tce_", "tc_", "te_", "t_", "tpe_", "tp_");
+ib_backup_tablespaces("test", map{ $_ . $ENV{checksum} } @tables);
+EOF
+--list_files $MYSQLD_DATADIR/test
+UNLOCK TABLES;
+
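+# Inner loop: re-import the exported tablespaces under each of the six
+# checksum settings and update them.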
+let $to = 6;
+while ($to)
+{
+dec $to;
+let $tocksum = `select case $to
+ when 0 then 'none'
+ when 1 then 'strict_none'
+ when 2 then 'innodb'
+ when 3 then 'strict_innodb'
+ when 4 then 'crc32'
+ when 5 then 'strict_crc32'
+ end`;
+
+eval SET GLOBAL innodb_checksum_algorithm=$tocksum;
+
+eval ALTER TABLE tce_$checksum DISCARD TABLESPACE;
+eval ALTER TABLE tc_$checksum DISCARD TABLESPACE;
+eval ALTER TABLE te_$checksum DISCARD TABLESPACE;
+eval ALTER TABLE t_$checksum DISCARD TABLESPACE;
+eval ALTER TABLE tpe_$checksum DISCARD TABLESPACE;
+eval ALTER TABLE tp_$checksum DISCARD TABLESPACE;
+
+perl;
+do "$ENV{MTR_SUITE_DIR}/include/innodb-util.pl";
+my @tables = ("tce_", "tc_", "te_", "t_", "tpe_", "tp_");
+ib_restore_tablespaces("test", map{ $_ . $ENV{checksum} } @tables);
+EOF
+
+eval ALTER TABLE tce_$checksum IMPORT TABLESPACE;
+eval update tce_$checksum set b=substr(b,1);
+eval ALTER TABLE tc_$checksum IMPORT TABLESPACE;
+eval update tc_$checksum set b=substr(b,1);
+eval ALTER TABLE te_$checksum IMPORT TABLESPACE;
+eval update te_$checksum set b=substr(b,1);
+eval ALTER TABLE t_$checksum IMPORT TABLESPACE;
+eval update t_$checksum set b=substr(b,1);
+eval ALTER TABLE tpe_$checksum IMPORT TABLESPACE;
+eval update tpe_$checksum set b=substr(b,1);
+eval ALTER TABLE tp_$checksum IMPORT TABLESPACE;
+eval update tp_$checksum set b=substr(b,1);
+}
+
+eval CHECK TABLE tce_$checksum, tc_$checksum, te_$checksum,
+t_$checksum, tpe_$checksum, tp_$checksum;
+eval DROP TABLE tce_$checksum, tc_$checksum, te_$checksum,
+t_$checksum, tpe_$checksum, tp_$checksum;
+}
+
+SET GLOBAL innodb_file_per_table = @saved_file_per_table;
+SET GLOBAL innodb_checksum_algorithm = @saved_checksum_algorithm;
+SET GLOBAL innodb_encrypt_tables = @saved_encrypt_tables;
+SET GLOBAL innodb_encryption_threads = @saved_encryption_threads;
+SET GLOBAL innodb_default_encryption_key_id = @saved_encryption_key_id;
diff --git a/mysql-test/suite/encryption/t/innodb-compressed-blob.combinations b/mysql-test/suite/encryption/t/innodb-compressed-blob.combinations
new file mode 100644
index 00000000000..e096b023b52
--- /dev/null
+++ b/mysql-test/suite/encryption/t/innodb-compressed-blob.combinations
@@ -0,0 +1,12 @@
+[crc32]
+loose-innodb-tablespaces-encryption
+loose-innodb-encrypt-tables=on
+loose-innodb-encryption-threads=4
+max_allowed_packet=64K
+loose-innodb-checksum-algorithm=crc32
+[none]
+loose-innodb-tablespaces-encryption
+loose-innodb-encrypt-tables=on
+loose-innodb-encryption-threads=4
+max_allowed_packet=64K
+loose-innodb-checksum-algorithm=none
diff --git a/mysql-test/suite/encryption/t/innodb-compressed-blob.opt b/mysql-test/suite/encryption/t/innodb-compressed-blob.opt
deleted file mode 100644
index 36dcb6c6f26..00000000000
--- a/mysql-test/suite/encryption/t/innodb-compressed-blob.opt
+++ /dev/null
@@ -1,4 +0,0 @@
---innodb-tablespaces-encryption
---innodb-encrypt-tables=on
---innodb-encryption-threads=2
---max_allowed_packet=64K
diff --git a/mysql-test/suite/encryption/t/innodb-compressed-blob.test b/mysql-test/suite/encryption/t/innodb-compressed-blob.test
index 6256aaf5c33..4f28f8e183d 100644
--- a/mysql-test/suite/encryption/t/innodb-compressed-blob.test
+++ b/mysql-test/suite/encryption/t/innodb-compressed-blob.test
@@ -1,10 +1,11 @@
--- source include/have_innodb.inc
+-- source include/innodb_page_size_small.inc
-- source include/have_file_key_management_plugin.inc
# embedded does not support restart
-- source include/not_embedded.inc
call mtr.add_suppression("InnoDB: The page \\[page id: space=[1-9][0-9]*, page number=[1-9][0-9]*\\] in file '.*test.t[123]\\.ibd' cannot be decrypted\\.");
+call mtr.add_suppression("InnoDB: Unable to decompress ..test.t[1-3]\\.ibd\\[page id: space=[1-9][0-9]*, page number=[0-9]+\\]");
--echo # Restart mysqld --file-key-management-filename=keys2.txt
-- let $restart_parameters=--file-key-management-filename=$MYSQL_TEST_DIR/std_data/keys2.txt
diff --git a/mysql-test/suite/encryption/t/innodb-first-page-read.opt b/mysql-test/suite/encryption/t/innodb-first-page-read.opt
new file mode 100644
index 00000000000..38d69691ed6
--- /dev/null
+++ b/mysql-test/suite/encryption/t/innodb-first-page-read.opt
@@ -0,0 +1,5 @@
+--innodb-encrypt-tables=ON
+--innodb-encrypt-log=ON
+--innodb-encryption-rotate-key-age=15
+--innodb-encryption-threads=4
+--innodb-tablespaces-encryption
diff --git a/mysql-test/suite/encryption/t/innodb-first-page-read.test b/mysql-test/suite/encryption/t/innodb-first-page-read.test
new file mode 100644
index 00000000000..1fc07159e05
--- /dev/null
+++ b/mysql-test/suite/encryption/t/innodb-first-page-read.test
@@ -0,0 +1,97 @@
+-- source include/have_innodb.inc
+-- source include/have_file_key_management_plugin.inc
+-- source include/not_embedded.inc
+
+--disable_warnings
+SET GLOBAL innodb_file_format = `Barracuda`;
+SET GLOBAL innodb_file_per_table = ON;
+--enable_warnings
+
+create database innodb_test;
+use innodb_test;
+create table innodb_normal(c1 bigint not null, b char(200)) engine=innodb;
+create table innodb_compact(c1 bigint not null, b char(200)) engine=innodb row_format=compact;
+create table innodb_dynamic(c1 bigint not null, b char(200)) engine=innodb row_format=dynamic;
+create table innodb_compressed(c1 bigint not null, b char(200)) engine=innodb row_format=compressed;
+create table innodb_compressed1(c1 bigint not null, b char(200)) engine=innodb row_format=compressed key_block_size=1;
+create table innodb_compressed2(c1 bigint not null, b char(200)) engine=innodb row_format=compressed key_block_size=2;
+create table innodb_compressed4(c1 bigint not null, b char(200)) engine=innodb row_format=compressed key_block_size=4;
+create table innodb_compressed8(c1 bigint not null, b char(200)) engine=innodb row_format=compressed key_block_size=8;
+create table innodb_compressed16(c1 bigint not null, b char(200)) engine=innodb row_format=compressed key_block_size=16;
+create table innodb_redundant(c1 bigint not null, b char(200)) engine=innodb row_format=redundant;
+create table innodb_pagecomp(c1 bigint not null, b char(200)) engine=innodb page_compressed=yes;
+create table innodb_pagecomp1(c1 bigint not null, b char(200)) engine=innodb page_compressed=yes page_compression_level=1;
+create table innodb_pagecomp2(c1 bigint not null, b char(200)) engine=innodb page_compressed=yes page_compression_level=2;
+create table innodb_pagecomp3(c1 bigint not null, b char(200)) engine=innodb page_compressed=yes page_compression_level=3;
+create table innodb_pagecomp4(c1 bigint not null, b char(200)) engine=innodb page_compressed=yes page_compression_level=4;
+create table innodb_pagecomp5(c1 bigint not null, b char(200)) engine=innodb page_compressed=yes page_compression_level=5;
+create table innodb_pagecomp6(c1 bigint not null, b char(200)) engine=innodb page_compressed=yes page_compression_level=6;
+create table innodb_pagecomp7(c1 bigint not null, b char(200)) engine=innodb page_compressed=yes page_compression_level=7;
+create table innodb_pagecomp8(c1 bigint not null, b char(200)) engine=innodb page_compressed=yes page_compression_level=8;
+create table innodb_pagecomp9(c1 bigint not null, b char(200)) engine=innodb page_compressed=yes page_compression_level=9;
+
+--replace_result $MYSQL_TMP_DIR MYSQL_TMP_DIR
+eval create table innodb_datadir1(c1 bigint not null, b char(200)) engine=innodb DATA DIRECTORY='$MYSQL_TMP_DIR';
+--replace_result $MYSQL_TMP_DIR MYSQL_TMP_DIR
+eval create table innodb_datadir2(c1 bigint not null, b char(200)) engine=innodb row_format=compressed DATA DIRECTORY='$MYSQL_TMP_DIR';
+--replace_result $MYSQL_TMP_DIR MYSQL_TMP_DIR
+eval create table innodb_datadir3(c1 bigint not null, b char(200)) engine=innodb page_compressed=yes DATA DIRECTORY='$MYSQL_TMP_DIR';
+
+begin;
+insert into innodb_normal values (1,'secret');
+insert into innodb_compact select * from innodb_normal;
+insert into innodb_dynamic select * from innodb_normal;
+insert into innodb_compressed select * from innodb_normal;
+insert into innodb_compressed1 select * from innodb_normal;
+insert into innodb_compressed2 select * from innodb_normal;
+insert into innodb_compressed4 select * from innodb_normal;
+insert into innodb_compressed8 select * from innodb_normal;
+insert into innodb_compressed16 select * from innodb_normal;
+insert into innodb_redundant select * from innodb_normal;
+insert into innodb_pagecomp select * from innodb_normal;
+insert into innodb_pagecomp1 select * from innodb_normal;
+insert into innodb_pagecomp2 select * from innodb_normal;
+insert into innodb_pagecomp3 select * from innodb_normal;
+insert into innodb_pagecomp4 select * from innodb_normal;
+insert into innodb_pagecomp5 select * from innodb_normal;
+insert into innodb_pagecomp6 select * from innodb_normal;
+insert into innodb_pagecomp7 select * from innodb_normal;
+insert into innodb_pagecomp8 select * from innodb_normal;
+insert into innodb_pagecomp9 select * from innodb_normal;
+insert into innodb_datadir1 select * from innodb_normal;
+insert into innodb_datadir2 select * from innodb_normal;
+insert into innodb_datadir3 select * from innodb_normal;
+commit;
+
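+# 23 user tablespaces were created above: 20 in the datadir and 3 under MYSQL_TMP_DIR.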
+--echo # Restart server and see how many page 0's are read
+--source include/restart_mysqld.inc
+
+--echo # result should not exceed the actual number of tablespaces
+--echo # i.e. <= 23 + 3 = 26
+show status like 'innodb_pages0_read%';
+use innodb_test;
+show status like 'innodb_pages0_read%';
+use test;
+show status like 'innodb_pages0_read%';
+
+set global innodb_encrypt_tables=OFF;
+
+--echo # wait until tables are decrypted
+--let $wait_condition=SELECT COUNT(*) = 0 FROM INFORMATION_SCHEMA.INNODB_TABLESPACES_ENCRYPTION WHERE MIN_KEY_VERSION <> 0
+--source include/wait_condition.inc
+
+show status like 'innodb_pages0_read%';
+use innodb_test;
+show status like 'innodb_pages0_read%';
+use test;
+
+--echo # restart and see how many page 0's are read
+-- source include/restart_mysqld.inc
+
+show status like 'innodb_pages0_read%';
+use innodb_test;
+show status like 'innodb_pages0_read%';
+use test;
+
+drop database innodb_test;
+show status like 'innodb_pages0_read%';
diff --git a/mysql-test/suite/encryption/t/innodb-key-rotation-disable.test b/mysql-test/suite/encryption/t/innodb-key-rotation-disable.test
index fdbd6c8da7c..574e0c3becc 100644
--- a/mysql-test/suite/encryption/t/innodb-key-rotation-disable.test
+++ b/mysql-test/suite/encryption/t/innodb-key-rotation-disable.test
@@ -8,9 +8,6 @@ SELECT NAME FROM INFORMATION_SCHEMA.INNODB_TABLESPACES_ENCRYPTION WHERE MIN_KEY_
--disable_query_log
--disable_warnings
-let $innodb_compression_algorithm_orig=`SELECT @@innodb_compression_algorithm`;
-let $innodb_file_format_orig = `SELECT @@innodb_file_format`;
-let $innodb_file_per_table_orig = `SELECT @@innodb_file_per_table`;
let $encryption = `SELECT @@innodb_encrypt_tables`;
SET GLOBAL innodb_file_format = `Barracuda`;
SET GLOBAL innodb_file_per_table = ON;
@@ -88,15 +85,4 @@ SET GLOBAL innodb_encrypt_tables=ON;
-- source include/start_mysqld.inc
-use test;
drop database enctests;
-# reset system
-
---disable_query_log
---disable_warnings
-EVAL SET GLOBAL innodb_compression_algorithm = $innodb_compression_algorithm_orig;
-EVAL SET GLOBAL innodb_file_per_table = $innodb_file_per_table_orig;
-EVAL SET GLOBAL innodb_file_format = $innodb_file_format_orig;
-set global innodb_compression_algorithm = DEFAULT;
---enable_warnings
---enable_query_log
diff --git a/mysql-test/suite/federated/assisted_discovery.result b/mysql-test/suite/federated/assisted_discovery.result
index f79e47da8b4..4818ff7bb02 100644
--- a/mysql-test/suite/federated/assisted_discovery.result
+++ b/mysql-test/suite/federated/assisted_discovery.result
@@ -38,6 +38,40 @@ id group a\\b a\\ name
1 1 2 NULL foo
2 1 2 NULL fee
DROP TABLE t1;
+create table t1 (
+a bigint(20) not null auto_increment,
+b bigint(20) not null,
+c tinyint(4) not null,
+d varchar(4096) not null,
+primary key (a),
+key (b,c,d(255))
+);
+show create table t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `a` bigint(20) NOT NULL AUTO_INCREMENT,
+ `b` bigint(20) NOT NULL,
+ `c` tinyint(4) NOT NULL,
+ `d` varchar(4096) NOT NULL,
+ PRIMARY KEY (`a`),
+ KEY `b` (`b`,`c`,`d`(255))
+) ENGINE=MyISAM DEFAULT CHARSET=latin1
+connection master;
+create table t1 engine=federated
+connection='mysql://root@127.0.0.1:SLAVE_PORT/test/t1';
+show create table t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `a` bigint(20) NOT NULL AUTO_INCREMENT,
+ `b` bigint(20) NOT NULL,
+ `c` tinyint(4) NOT NULL,
+ `d` varchar(4096) NOT NULL,
+ PRIMARY KEY (`a`),
+ KEY `b` (`b`,`c`,`d`(255))
+) ENGINE=FEDERATED DEFAULT CHARSET=latin1 CONNECTION='mysql://root@127.0.0.1:SLAVE_PORT/test/t1'
+drop table t1;
+connection slave;
+drop table t1;
connection master;
DROP TABLE IF EXISTS federated.t1;
DROP DATABASE IF EXISTS federated;
diff --git a/mysql-test/suite/federated/assisted_discovery.test b/mysql-test/suite/federated/assisted_discovery.test
index 9f3abe74ecc..fa83a2a8e19 100644
--- a/mysql-test/suite/federated/assisted_discovery.test
+++ b/mysql-test/suite/federated/assisted_discovery.test
@@ -30,5 +30,29 @@ connection slave;
SELECT * FROM t1;
DROP TABLE t1;
+#
+#
+#
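+# Discovery of a remote table whose key uses a prefix on a long VARCHAR column.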
+create table t1 (
+ a bigint(20) not null auto_increment,
+ b bigint(20) not null,
+ c tinyint(4) not null,
+ d varchar(4096) not null,
+ primary key (a),
+ key (b,c,d(255))
+);
+show create table t1;
+
+connection master;
+--replace_result $SLAVE_MYPORT SLAVE_PORT
+eval create table t1 engine=federated
+ connection='mysql://root@127.0.0.1:$SLAVE_MYPORT/test/t1';
+--replace_result $SLAVE_MYPORT SLAVE_PORT
+show create table t1;
+drop table t1;
+
+connection slave;
+drop table t1;
+
source include/federated_cleanup.inc;
diff --git a/mysql-test/suite/galera/galera_2nodes.cnf b/mysql-test/suite/galera/galera_2nodes.cnf
index 34bf1fc58fe..f120775d433 100644
--- a/mysql-test/suite/galera/galera_2nodes.cnf
+++ b/mysql-test/suite/galera/galera_2nodes.cnf
@@ -16,7 +16,7 @@ wsrep-sync-wait=7
#ist_port=@OPT.port
#sst_port=@OPT.port
wsrep-cluster-address=gcomm://
-wsrep_provider_options='repl.causal_read_timeout=PT90S;base_port=@mysqld.1.#galera_port;gcache.size=10M;evs.suspect_timeout=PT10S'
+wsrep_provider_options='repl.causal_read_timeout=PT90S;base_port=@mysqld.1.#galera_port;evs.suspect_timeout=PT10S;evs.inactive_timeout=PT30S;evs.install_timeout=PT15S;gcache.size=10M'
wsrep_node_incoming_address=127.0.0.1:@mysqld.1.port
wsrep_sst_receive_address='127.0.0.1:@mysqld.1.#sst_port'
diff --git a/mysql-test/suite/galera/r/MW-309.result b/mysql-test/suite/galera/r/MW-309.result
new file mode 100644
index 00000000000..3dd49a041ee
--- /dev/null
+++ b/mysql-test/suite/galera/r/MW-309.result
@@ -0,0 +1,22 @@
+CREATE TABLE t1 (f1 INTEGER) ENGINE=InnoDB;
+INSERT INTO t1 VALUES (1);
+INSERT INTO t1 SELECT * FROM t1;
+INSERT INTO t1 SELECT * FROM t1;
+INSERT INTO t1 SELECT * FROM t1;
+INSERT INTO t1 SELECT * FROM t1;
+INSERT INTO t1 SELECT * FROM t1;
+SET GLOBAL wsrep_max_ws_rows = 2;
+SET AUTOCOMMIT=OFF;
+START TRANSACTION;
+SELECT * FROM t1 GROUP BY f1;
+f1
+1
+SELECT * FROM t1 GROUP BY f1;
+f1
+1
+SELECT * FROM t1 GROUP BY f1;
+f1
+1
+SHOW STATUS LIKE '%wsrep%';
+SET GLOBAL wsrep_max_ws_rows = 0;
+DROP TABLE t1;
diff --git a/mysql-test/suite/galera/t/MW-309.test b/mysql-test/suite/galera/t/MW-309.test
new file mode 100644
index 00000000000..351a508ecec
--- /dev/null
+++ b/mysql-test/suite/galera/t/MW-309.test
@@ -0,0 +1,32 @@
+#
+# MW-309 Regression: wsrep_max_ws_rows limit also applies to certain SELECT queries
+#
+
+--source include/galera_cluster.inc
+--source include/have_innodb.inc
+
+CREATE TABLE t1 (f1 INTEGER) ENGINE=InnoDB;
+INSERT INTO t1 VALUES (1);
+INSERT INTO t1 SELECT * FROM t1;
+INSERT INTO t1 SELECT * FROM t1;
+INSERT INTO t1 SELECT * FROM t1;
+INSERT INTO t1 SELECT * FROM t1;
+INSERT INTO t1 SELECT * FROM t1;
+
+SET GLOBAL wsrep_max_ws_rows = 2;
+
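+# t1 now holds 32 rows, more than wsrep_max_ws_rows; the SELECTs below must
+# still succeed.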
+SET AUTOCOMMIT=OFF;
+START TRANSACTION;
+SELECT * FROM t1 GROUP BY f1;
+SELECT * FROM t1 GROUP BY f1;
+
+--error 0
+SELECT * FROM t1 GROUP BY f1;
+
+--disable_result_log
+--error 0
+SHOW STATUS LIKE '%wsrep%';
+--enable_result_log
+
+SET GLOBAL wsrep_max_ws_rows = 0;
+DROP TABLE t1;
diff --git a/mysql-test/suite/galera/t/galera_autoinc_sst_xtrabackup.cnf b/mysql-test/suite/galera/t/galera_autoinc_sst_xtrabackup.cnf
index 7d684cef67d..b4bf5f02171 100644
--- a/mysql-test/suite/galera/t/galera_autoinc_sst_xtrabackup.cnf
+++ b/mysql-test/suite/galera/t/galera_autoinc_sst_xtrabackup.cnf
@@ -1,9 +1,8 @@
!include ../galera_2nodes.cnf
[mysqld]
-#wsrep_sst_method=xtrabackup-v2
-#wsrep_sst_auth="root:"
-#wsrep_debug=ON
+wsrep_sst_method=xtrabackup-v2
+wsrep_sst_auth="root:"
[mysqld.1]
wsrep_provider_options='base_port=@mysqld.1.#galera_port;gcache.size=1;pc.ignore_sb=true'
diff --git a/mysql-test/suite/galera/t/galera_autoinc_sst_xtrabackup.test b/mysql-test/suite/galera/t/galera_autoinc_sst_xtrabackup.test
index 251450f7099..30ce9bc4ceb 100644
--- a/mysql-test/suite/galera/t/galera_autoinc_sst_xtrabackup.test
+++ b/mysql-test/suite/galera/t/galera_autoinc_sst_xtrabackup.test
@@ -1,6 +1,6 @@
#
# Test that autoincrement works correctly while the cluster membership
-# is changing and IST takes place.
+# is changing and SST takes place.
#
--source include/big_test.inc
diff --git a/mysql-test/suite/galera/t/galera_var_cluster_address.test b/mysql-test/suite/galera/t/galera_var_cluster_address.test
index 0f857eb1aac..03706bbbb12 100644
--- a/mysql-test/suite/galera/t/galera_var_cluster_address.test
+++ b/mysql-test/suite/galera/t/galera_var_cluster_address.test
@@ -76,4 +76,3 @@ CALL mtr.add_suppression("WSREP: wsrep::connect\\(gcomm://192.0.2.1\\) failed: 7
--source include/galera_end.inc
--echo # End of test
-
diff --git a/mysql-test/suite/galera_3nodes/disabled.def b/mysql-test/suite/galera_3nodes/disabled.def
index ca55c41ff72..502e7bfba68 100644
--- a/mysql-test/suite/galera_3nodes/disabled.def
+++ b/mysql-test/suite/galera_3nodes/disabled.def
@@ -5,3 +5,4 @@ galera_slave_options_do :MDEV-8798
galera_slave_options_ignore : MDEV-8798
galera_pc_bootstrap : TODO: Investigate: Timeout in wait_condition.inc
galera_pc_weight : Test times out
+galera_safe_to_bootstrap : Fails for an unknown reason; needs investigation
diff --git a/mysql-test/suite/galera_3nodes/galera_3nodes.cnf b/mysql-test/suite/galera_3nodes/galera_3nodes.cnf
index 1ed273fdcb5..305bdaaae3a 100644
--- a/mysql-test/suite/galera_3nodes/galera_3nodes.cnf
+++ b/mysql-test/suite/galera_3nodes/galera_3nodes.cnf
@@ -14,11 +14,10 @@ wsrep-causal-reads=ON
wsrep-sync-wait=7
[mysqld.1]
-#galera_port=@OPT.port
-#ist_port=@OPT.port
-#sst_port=@OPT.port
-wsrep-cluster-address=gcomm://
-wsrep_provider_options='base_port=@mysqld.1.#galera_port;gcache.size=10M;evs.suspect_timeout=PT10S'
+wsrep-cluster-address='gcomm://'
+wsrep_provider_options='base_port=@mysqld.1.#galera_port;evs.suspect_timeout=PT10S;evs.inactive_timeout=PT30S;evs.install_timeout=PT15S'
+
+wsrep_sst_receive_address=127.0.0.2:@mysqld.1.#sst_port
wsrep_node_incoming_address=127.0.0.1:@mysqld.1.port
wsrep_sst_receive_address='127.0.0.1:@mysqld.1.#sst_port'
diff --git a/mysql-test/suite/innodb/include/innodb-page-compression.inc b/mysql-test/suite/innodb/include/innodb-page-compression.inc
new file mode 100644
index 00000000000..3acbeaf0988
--- /dev/null
+++ b/mysql-test/suite/innodb/include/innodb-page-compression.inc
@@ -0,0 +1,131 @@
+--disable_warnings
+set global innodb_file_format = `Barracuda`;
+set global innodb_file_per_table = on;
+--enable_warnings
+
+create table innodb_normal (c1 int not null auto_increment primary key, b char(200)) engine=innodb;
+create table innodb_page_compressed1 (c1 int not null auto_increment primary key, b char(200)) engine=innodb page_compressed=1 page_compression_level=1;
+create table innodb_page_compressed2 (c1 int not null auto_increment primary key, b char(200)) engine=innodb page_compressed=1 page_compression_level=2;
+create table innodb_page_compressed3 (c1 int not null auto_increment primary key, b char(200)) engine=innodb page_compressed=1 page_compression_level=3;
+create table innodb_page_compressed4 (c1 int not null auto_increment primary key, b char(200)) engine=innodb page_compressed=1 page_compression_level=4;
+create table innodb_page_compressed5 (c1 int not null auto_increment primary key, b char(200)) engine=innodb page_compressed=1 page_compression_level=5;
+create table innodb_page_compressed6 (c1 int not null auto_increment primary key, b char(200)) engine=innodb page_compressed=1 page_compression_level=6;
+create table innodb_page_compressed7 (c1 int not null auto_increment primary key, b char(200)) engine=innodb page_compressed=1 page_compression_level=7;
+create table innodb_page_compressed8 (c1 int not null auto_increment primary key, b char(200)) engine=innodb page_compressed=1 page_compression_level=8;
+create table innodb_page_compressed9 (c1 int not null auto_increment primary key, b char(200)) engine=innodb page_compressed=1 page_compression_level=9;
+
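+# Fill innodb_normal with 2000 x 5 = 10000 rows of compressible data, then copy
+# the same rows into every page_compressed table.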
+--disable_query_log
+begin;
+let $i = 2000;
+while ($i)
+{
+ insert into innodb_normal(b) values(REPEAT('Aa',50));
+ insert into innodb_normal(b) values(REPEAT('a',100));
+ insert into innodb_normal(b) values(REPEAT('b',100));
+ insert into innodb_normal(b) values(REPEAT('0',100));
+ insert into innodb_normal(b) values(REPEAT('1',100));
+ dec $i;
+}
+
+insert into innodb_page_compressed1 select * from innodb_normal;
+insert into innodb_page_compressed2 select * from innodb_normal;
+insert into innodb_page_compressed3 select * from innodb_normal;
+insert into innodb_page_compressed4 select * from innodb_normal;
+insert into innodb_page_compressed5 select * from innodb_normal;
+insert into innodb_page_compressed6 select * from innodb_normal;
+insert into innodb_page_compressed7 select * from innodb_normal;
+insert into innodb_page_compressed8 select * from innodb_normal;
+insert into innodb_page_compressed9 select * from innodb_normal;
+commit;
+--enable_query_log
+
+select count(*) from innodb_page_compressed1;
+select count(*) from innodb_page_compressed3;
+select count(*) from innodb_page_compressed4;
+select count(*) from innodb_page_compressed5;
+select count(*) from innodb_page_compressed6;
+select count(*) from innodb_page_compressed6;
+select count(*) from innodb_page_compressed7;
+select count(*) from innodb_page_compressed8;
+select count(*) from innodb_page_compressed9;
+
+#
+# Wait until pages are really compressed
+#
+let $wait_condition= select variable_value > 0 from information_schema.global_status where variable_name = 'INNODB_NUM_PAGES_PAGE_COMPRESSED';
+--source include/wait_condition.inc
+
+--let $MYSQLD_DATADIR=`select @@datadir`
+
+# shutdown before grep
+
+--source include/shutdown_mysqld.inc
+
+--let t1_IBD = $MYSQLD_DATADIR/test/innodb_normal.ibd
+--let SEARCH_RANGE = 10000000
+--let SEARCH_PATTERN=AaAaAaAa
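+# The repeated pattern must be found in the uncompressed table file but not in
+# any of the page_compressed table files.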
+--echo # innodb_normal expected FOUND
+-- let SEARCH_FILE=$t1_IBD
+-- source include/search_pattern_in_file.inc
+--let t1_IBD = $MYSQLD_DATADIR/test/innodb_page_compressed1.ibd
+--echo # innodb_page_compressed1 page compressed expected NOT FOUND
+-- let SEARCH_FILE=$t1_IBD
+-- source include/search_pattern_in_file.inc
+--let t1_IBD = $MYSQLD_DATADIR/test/innodb_page_compressed2.ibd
+--echo # innodb_page_compressed2 page compressed expected NOT FOUND
+-- let SEARCH_FILE=$t1_IBD
+-- source include/search_pattern_in_file.inc
+--let t1_IBD = $MYSQLD_DATADIR/test/innodb_page_compressed3.ibd
+--echo # innodb_page_compressed3 page compressed expected NOT FOUND
+-- let SEARCH_FILE=$t1_IBD
+-- source include/search_pattern_in_file.inc
+--let t1_IBD = $MYSQLD_DATADIR/test/innodb_page_compressed4.ibd
+--echo # innodb_page_compressed4 page compressed expected NOT FOUND
+-- let SEARCH_FILE=$t1_IBD
+-- source include/search_pattern_in_file.inc
+--let t1_IBD = $MYSQLD_DATADIR/test/innodb_page_compressed5.ibd
+--echo # innodb_page_compressed5 page compressed expected NOT FOUND
+-- let SEARCH_FILE=$t1_IBD
+-- source include/search_pattern_in_file.inc
+--let t1_IBD = $MYSQLD_DATADIR/test/innodb_page_compressed6.ibd
+--echo # innodb_page_compressed6 page compressed expected NOT FOUND
+-- let SEARCH_FILE=$t1_IBD
+-- source include/search_pattern_in_file.inc
+--let t1_IBD = $MYSQLD_DATADIR/test/innodb_page_compressed7.ibd
+--echo # innodb_page_compressed7 page compressed expected NOT FOUND
+-- let SEARCH_FILE=$t1_IBD
+-- source include/search_pattern_in_file.inc
+--let t1_IBD = $MYSQLD_DATADIR/test/innodb_page_compressed8.ibd
+--echo # innodb_page_compressed8 page compressed expected NOT FOUND
+-- let SEARCH_FILE=$t1_IBD
+-- source include/search_pattern_in_file.inc
+--let t1_IBD = $MYSQLD_DATADIR/test/innodb_page_compressed9.ibd
+--echo # innodb_page_compressed9 page compressed expected NOT FOUND
+-- let SEARCH_FILE=$t1_IBD
+-- source include/search_pattern_in_file.inc
+
+-- source include/start_mysqld.inc
+
+select count(*) from innodb_page_compressed1;
+select count(*) from innodb_page_compressed3;
+select count(*) from innodb_page_compressed4;
+select count(*) from innodb_page_compressed5;
+select count(*) from innodb_page_compressed6;
+select count(*) from innodb_page_compressed6;
+select count(*) from innodb_page_compressed7;
+select count(*) from innodb_page_compressed8;
+select count(*) from innodb_page_compressed9;
+
+let $wait_condition= select variable_value > 0 from information_schema.global_status where variable_name = 'INNODB_NUM_PAGES_PAGE_DECOMPRESSED';
+--source include/wait_condition.inc
+
+drop table innodb_normal;
+drop table innodb_page_compressed1;
+drop table innodb_page_compressed2;
+drop table innodb_page_compressed3;
+drop table innodb_page_compressed4;
+drop table innodb_page_compressed5;
+drop table innodb_page_compressed6;
+drop table innodb_page_compressed7;
+drop table innodb_page_compressed8;
+drop table innodb_page_compressed9;
diff --git a/mysql-test/suite/innodb/include/wait_all_purged.inc b/mysql-test/suite/innodb/include/wait_all_purged.inc
new file mode 100644
index 00000000000..7dbb59a5d32
--- /dev/null
+++ b/mysql-test/suite/innodb/include/wait_all_purged.inc
@@ -0,0 +1,19 @@
+# Wait for everything to be purged.
+# The user should have set innodb_purge_rseg_truncate_frequency=1.
+
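+# Poll SHOW ENGINE INNODB STATUS up to 300 times, sleeping 0.1s between
+# attempts (about 30 seconds), until the history list length reaches 0.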
+let $wait_counter= 300;
+while ($wait_counter)
+{
+ --replace_regex /.*History list length ([0-9]+).*/\1/
+ let $remaining= `SHOW ENGINE INNODB STATUS`;
+ if ($remaining == 'InnoDB 0')
+ {
+ let $wait_counter= 0;
+ }
+ if ($wait_counter)
+ {
+ real_sleep 0.1;
+ dec $wait_counter;
+ }
+}
+echo $remaining transactions not purged;
diff --git a/mysql-test/suite/innodb/r/doublewrite.result b/mysql-test/suite/innodb/r/doublewrite.result
index 6b913f49972..61c81ee9dff 100644
--- a/mysql-test/suite/innodb/r/doublewrite.result
+++ b/mysql-test/suite/innodb/r/doublewrite.result
@@ -231,6 +231,7 @@ set global innodb_buf_flush_list_now = 1;
check table t1;
Table Op Msg_type Msg_text
test.t1 check status OK
+FOUND 1 /\[ERROR\] InnoDB: .*test.t1\.ibd.*/ in mysqld.1.err
select f1, f2 from t1;
f1 f2
1 ############
@@ -238,6 +239,13 @@ f1 f2
3 ////////////
4 ------------
5 ............
-# Test End
-# ---------------------------------------------------------------
drop table t1;
+#
+# MDEV-12600 crash during install_db with innodb_page_size=32K
+# and ibdata1=3M
+#
+SELECT * FROM INFORMATION_SCHEMA.ENGINES
+WHERE engine = 'innodb'
+AND support IN ('YES', 'DEFAULT', 'ENABLED');
+ENGINE SUPPORT COMMENT TRANSACTIONS XA SAVEPOINTS
+FOUND 1 /\[ERROR\] InnoDB: Cannot create doublewrite buffer/ in mysqld.1.err
diff --git a/mysql-test/suite/innodb/r/drop_table_background.result b/mysql-test/suite/innodb/r/drop_table_background.result
new file mode 100644
index 00000000000..a6f5672ba7f
--- /dev/null
+++ b/mysql-test/suite/innodb/r/drop_table_background.result
@@ -0,0 +1,9 @@
+CREATE TABLE t(c0 SERIAL, c1 INT, c2 INT, c3 INT, c4 INT,
+KEY(c1), KEY(c2), KEY(c2,c1),
+KEY(c3), KEY(c3,c1), KEY(c3,c2), KEY(c3,c2,c1),
+KEY(c4), KEY(c4,c1), KEY(c4,c2), KEY(c4,c2,c1),
+KEY(c4,c3), KEY(c4,c3,c1), KEY(c4,c3,c2), KEY(c4,c3,c2,c1)) ENGINE=InnoDB;
+SET DEBUG_DBUG='+d,row_drop_table_add_to_background';
+DROP TABLE t;
+CREATE TABLE t (a INT) ENGINE=InnoDB;
+DROP TABLE t;
diff --git a/mysql-test/suite/innodb/r/innodb-page_compression_default.result b/mysql-test/suite/innodb/r/innodb-page_compression_default.result
index 10e1d6c766c..413450e1a6d 100644
--- a/mysql-test/suite/innodb/r/innodb-page_compression_default.result
+++ b/mysql-test/suite/innodb/r/innodb-page_compression_default.result
@@ -1,20 +1,98 @@
-SET GLOBAL innodb_file_format = `Barracuda`;
-SET GLOBAL innodb_file_per_table = ON;
-create table t1 (c1 int not null primary key auto_increment, b char(200)) engine=innodb page_compressed=1;
-insert into t1 values(NULL,'compressed_text_aaaaaaaaabbbbbbbbbbbbbccccccccccccc');
-insert into t1(b) select b from t1;
-insert into t1(b) select b from t1;
-insert into t1(b) select b from t1;
-insert into t1(b) select b from t1;
-insert into t1(b) select b from t1;
-insert into t1(b) select b from t1;
-insert into t1(b) select b from t1;
-insert into t1(b) select b from t1;
-insert into t1(b) select b from t1;
-insert into t1(b) select b from t1;
-insert into t1(b) select b from t1;
-insert into t1(b) select b from t1;
-insert into t1(b) select b from t1;
-# t1 compressed expected NOT FOUND
-NOT FOUND /compressed_text/ in t1.ibd
-drop table t1;
+call mtr.add_suppression("InnoDB: Compression failed for space [0-9]+ name test/innodb_page_compressed[0-9] len [0-9]+ err 2 write_size [0-9]+.");
+set global innodb_file_format = `Barracuda`;
+set global innodb_file_per_table = on;
+create table innodb_normal (c1 int not null auto_increment primary key, b char(200)) engine=innodb;
+create table innodb_page_compressed1 (c1 int not null auto_increment primary key, b char(200)) engine=innodb page_compressed=1 page_compression_level=1;
+create table innodb_page_compressed2 (c1 int not null auto_increment primary key, b char(200)) engine=innodb page_compressed=1 page_compression_level=2;
+create table innodb_page_compressed3 (c1 int not null auto_increment primary key, b char(200)) engine=innodb page_compressed=1 page_compression_level=3;
+create table innodb_page_compressed4 (c1 int not null auto_increment primary key, b char(200)) engine=innodb page_compressed=1 page_compression_level=4;
+create table innodb_page_compressed5 (c1 int not null auto_increment primary key, b char(200)) engine=innodb page_compressed=1 page_compression_level=5;
+create table innodb_page_compressed6 (c1 int not null auto_increment primary key, b char(200)) engine=innodb page_compressed=1 page_compression_level=6;
+create table innodb_page_compressed7 (c1 int not null auto_increment primary key, b char(200)) engine=innodb page_compressed=1 page_compression_level=7;
+create table innodb_page_compressed8 (c1 int not null auto_increment primary key, b char(200)) engine=innodb page_compressed=1 page_compression_level=8;
+create table innodb_page_compressed9 (c1 int not null auto_increment primary key, b char(200)) engine=innodb page_compressed=1 page_compression_level=9;
+select count(*) from innodb_page_compressed1;
+count(*)
+10000
+select count(*) from innodb_page_compressed3;
+count(*)
+10000
+select count(*) from innodb_page_compressed4;
+count(*)
+10000
+select count(*) from innodb_page_compressed5;
+count(*)
+10000
+select count(*) from innodb_page_compressed6;
+count(*)
+10000
+select count(*) from innodb_page_compressed6;
+count(*)
+10000
+select count(*) from innodb_page_compressed7;
+count(*)
+10000
+select count(*) from innodb_page_compressed8;
+count(*)
+10000
+select count(*) from innodb_page_compressed9;
+count(*)
+10000
+# innodb_normal expected FOUND
+FOUND 24084 /AaAaAaAa/ in innodb_normal.ibd
+# innodb_page_compressed1 page compressed expected NOT FOUND
+NOT FOUND /AaAaAaAa/ in innodb_page_compressed1.ibd
+# innodb_page_compressed2 page compressed expected NOT FOUND
+NOT FOUND /AaAaAaAa/ in innodb_page_compressed2.ibd
+# innodb_page_compressed3 page compressed expected NOT FOUND
+NOT FOUND /AaAaAaAa/ in innodb_page_compressed3.ibd
+# innodb_page_compressed4 page compressed expected NOT FOUND
+NOT FOUND /AaAaAaAa/ in innodb_page_compressed4.ibd
+# innodb_page_compressed5 page compressed expected NOT FOUND
+NOT FOUND /AaAaAaAa/ in innodb_page_compressed5.ibd
+# innodb_page_compressed6 page compressed expected NOT FOUND
+NOT FOUND /AaAaAaAa/ in innodb_page_compressed6.ibd
+# innodb_page_compressed7 page compressed expected NOT FOUND
+NOT FOUND /AaAaAaAa/ in innodb_page_compressed7.ibd
+# innodb_page_compressed8 page compressed expected NOT FOUND
+NOT FOUND /AaAaAaAa/ in innodb_page_compressed8.ibd
+# innodb_page_compressed9 page compressed expected NOT FOUND
+NOT FOUND /AaAaAaAa/ in innodb_page_compressed9.ibd
+select count(*) from innodb_page_compressed1;
+count(*)
+10000
+select count(*) from innodb_page_compressed3;
+count(*)
+10000
+select count(*) from innodb_page_compressed4;
+count(*)
+10000
+select count(*) from innodb_page_compressed5;
+count(*)
+10000
+select count(*) from innodb_page_compressed6;
+count(*)
+10000
+select count(*) from innodb_page_compressed6;
+count(*)
+10000
+select count(*) from innodb_page_compressed7;
+count(*)
+10000
+select count(*) from innodb_page_compressed8;
+count(*)
+10000
+select count(*) from innodb_page_compressed9;
+count(*)
+10000
+drop table innodb_normal;
+drop table innodb_page_compressed1;
+drop table innodb_page_compressed2;
+drop table innodb_page_compressed3;
+drop table innodb_page_compressed4;
+drop table innodb_page_compressed5;
+drop table innodb_page_compressed6;
+drop table innodb_page_compressed7;
+drop table innodb_page_compressed8;
+drop table innodb_page_compressed9;
+#done
diff --git a/mysql-test/suite/innodb/r/innodb-page_compression_snappy.result b/mysql-test/suite/innodb/r/innodb-page_compression_snappy.result
index 442885755fd..83a17f678e4 100644
--- a/mysql-test/suite/innodb/r/innodb-page_compression_snappy.result
+++ b/mysql-test/suite/innodb/r/innodb-page_compression_snappy.result
@@ -1,430 +1,92 @@
-call mtr.add_suppression("InnoDB: Compression failed for space.*");
-set global innodb_compression_algorithm = 6;
-create table innodb_compressed(c1 int, b char(200)) engine=innodb row_format=compressed key_block_size=8;
-show warnings;
-Level Code Message
-create table innodb_normal (c1 int, b char(200)) engine=innodb;
-show warnings;
-Level Code Message
-create table innodb_page_compressed1 (c1 int, b char(200)) engine=innodb page_compressed=1 page_compression_level=1;
-show warnings;
-Level Code Message
-show create table innodb_page_compressed1;
-Table Create Table
-innodb_page_compressed1 CREATE TABLE `innodb_page_compressed1` (
- `c1` int(11) DEFAULT NULL,
- `b` char(200) DEFAULT NULL
-) ENGINE=InnoDB DEFAULT CHARSET=latin1 `page_compressed`=1 `page_compression_level`=1
-create table innodb_page_compressed2 (c1 int, b char(200)) engine=innodb page_compressed=1 page_compression_level=2;
-show warnings;
-Level Code Message
-show create table innodb_page_compressed2;
-Table Create Table
-innodb_page_compressed2 CREATE TABLE `innodb_page_compressed2` (
- `c1` int(11) DEFAULT NULL,
- `b` char(200) DEFAULT NULL
-) ENGINE=InnoDB DEFAULT CHARSET=latin1 `page_compressed`=1 `page_compression_level`=2
-create table innodb_page_compressed3 (c1 int, b char(200)) engine=innodb page_compressed=1 page_compression_level=3;
-show warnings;
-Level Code Message
-show create table innodb_page_compressed3;
-Table Create Table
-innodb_page_compressed3 CREATE TABLE `innodb_page_compressed3` (
- `c1` int(11) DEFAULT NULL,
- `b` char(200) DEFAULT NULL
-) ENGINE=InnoDB DEFAULT CHARSET=latin1 `page_compressed`=1 `page_compression_level`=3
-create table innodb_page_compressed4 (c1 int, b char(200)) engine=innodb page_compressed=1 page_compression_level=4;
-show warnings;
-Level Code Message
-show create table innodb_page_compressed4;
-Table Create Table
-innodb_page_compressed4 CREATE TABLE `innodb_page_compressed4` (
- `c1` int(11) DEFAULT NULL,
- `b` char(200) DEFAULT NULL
-) ENGINE=InnoDB DEFAULT CHARSET=latin1 `page_compressed`=1 `page_compression_level`=4
-create table innodb_page_compressed5 (c1 int, b char(200)) engine=innodb page_compressed=1 page_compression_level=5;
-show warnings;
-Level Code Message
-show create table innodb_page_compressed5;
-Table Create Table
-innodb_page_compressed5 CREATE TABLE `innodb_page_compressed5` (
- `c1` int(11) DEFAULT NULL,
- `b` char(200) DEFAULT NULL
-) ENGINE=InnoDB DEFAULT CHARSET=latin1 `page_compressed`=1 `page_compression_level`=5
-create table innodb_page_compressed6 (c1 int, b char(200)) engine=innodb page_compressed=1 page_compression_level=6;
-show warnings;
-Level Code Message
-show create table innodb_page_compressed6;
-Table Create Table
-innodb_page_compressed6 CREATE TABLE `innodb_page_compressed6` (
- `c1` int(11) DEFAULT NULL,
- `b` char(200) DEFAULT NULL
-) ENGINE=InnoDB DEFAULT CHARSET=latin1 `page_compressed`=1 `page_compression_level`=6
-create table innodb_page_compressed7 (c1 int, b char(200)) engine=innodb page_compressed=1 page_compression_level=7;
-show warnings;
-Level Code Message
-show create table innodb_page_compressed7;
-Table Create Table
-innodb_page_compressed7 CREATE TABLE `innodb_page_compressed7` (
- `c1` int(11) DEFAULT NULL,
- `b` char(200) DEFAULT NULL
-) ENGINE=InnoDB DEFAULT CHARSET=latin1 `page_compressed`=1 `page_compression_level`=7
-create table innodb_page_compressed8 (c1 int, b char(200)) engine=innodb page_compressed=1 page_compression_level=8;
-show warnings;
-Level Code Message
-show create table innodb_page_compressed8;
-Table Create Table
-innodb_page_compressed8 CREATE TABLE `innodb_page_compressed8` (
- `c1` int(11) DEFAULT NULL,
- `b` char(200) DEFAULT NULL
-) ENGINE=InnoDB DEFAULT CHARSET=latin1 `page_compressed`=1 `page_compression_level`=8
-create table innodb_page_compressed9 (c1 int, b char(200)) engine=innodb page_compressed=1 page_compression_level=9;
-show warnings;
-Level Code Message
-show create table innodb_page_compressed9;
-Table Create Table
-innodb_page_compressed9 CREATE TABLE `innodb_page_compressed9` (
- `c1` int(11) DEFAULT NULL,
- `b` char(200) DEFAULT NULL
-) ENGINE=InnoDB DEFAULT CHARSET=latin1 `page_compressed`=1 `page_compression_level`=9
-create procedure innodb_insert_proc (repeat_count int)
-begin
-declare current_num int;
-set current_num = 0;
-while current_num < repeat_count do
-insert into innodb_normal values(current_num,'aaaaaaaaaaaaaaaaaaabbbbbbbbbbbbbbbbbbbbbbbccccccccccccccccccccccc');
-set current_num = current_num + 1;
-end while;
-end//
-commit;
-set autocommit=0;
-call innodb_insert_proc(5000);
-commit;
-set autocommit=1;
-select count(*) from innodb_normal;
-count(*)
-5000
-insert into innodb_compressed select * from innodb_normal;
-insert into innodb_page_compressed1 select * from innodb_normal;
-insert into innodb_page_compressed2 select * from innodb_normal;
-insert into innodb_page_compressed3 select * from innodb_normal;
-insert into innodb_page_compressed4 select * from innodb_normal;
-insert into innodb_page_compressed5 select * from innodb_normal;
-insert into innodb_page_compressed6 select * from innodb_normal;
-insert into innodb_page_compressed7 select * from innodb_normal;
-insert into innodb_page_compressed8 select * from innodb_normal;
-insert into innodb_page_compressed9 select * from innodb_normal;
-commit;
-select count(*) from innodb_compressed;
-count(*)
-5000
+call mtr.add_suppression("InnoDB: Compression failed for space [0-9]+ name test/innodb_page_compressed[0-9] len [0-9]+ err 2 write_size [0-9]+.");
+set global innodb_compression_algorithm = snappy;
+set global innodb_file_format = `Barracuda`;
+set global innodb_file_per_table = on;
+create table innodb_normal (c1 int not null auto_increment primary key, b char(200)) engine=innodb;
+create table innodb_page_compressed1 (c1 int not null auto_increment primary key, b char(200)) engine=innodb page_compressed=1 page_compression_level=1;
+create table innodb_page_compressed2 (c1 int not null auto_increment primary key, b char(200)) engine=innodb page_compressed=1 page_compression_level=2;
+create table innodb_page_compressed3 (c1 int not null auto_increment primary key, b char(200)) engine=innodb page_compressed=1 page_compression_level=3;
+create table innodb_page_compressed4 (c1 int not null auto_increment primary key, b char(200)) engine=innodb page_compressed=1 page_compression_level=4;
+create table innodb_page_compressed5 (c1 int not null auto_increment primary key, b char(200)) engine=innodb page_compressed=1 page_compression_level=5;
+create table innodb_page_compressed6 (c1 int not null auto_increment primary key, b char(200)) engine=innodb page_compressed=1 page_compression_level=6;
+create table innodb_page_compressed7 (c1 int not null auto_increment primary key, b char(200)) engine=innodb page_compressed=1 page_compression_level=7;
+create table innodb_page_compressed8 (c1 int not null auto_increment primary key, b char(200)) engine=innodb page_compressed=1 page_compression_level=8;
+create table innodb_page_compressed9 (c1 int not null auto_increment primary key, b char(200)) engine=innodb page_compressed=1 page_compression_level=9;
select count(*) from innodb_page_compressed1;
count(*)
-5000
-select count(*) from innodb_page_compressed1 where c1 < 500000;
-count(*)
-5000
-select count(*) from innodb_page_compressed2 where c1 < 500000;
-count(*)
-5000
-select count(*) from innodb_page_compressed3 where c1 < 500000;
-count(*)
-5000
-select count(*) from innodb_page_compressed4 where c1 < 500000;
-count(*)
-5000
-select count(*) from innodb_page_compressed5 where c1 < 500000;
-count(*)
-5000
-select count(*) from innodb_page_compressed6 where c1 < 500000;
-count(*)
-5000
-select count(*) from innodb_page_compressed7 where c1 < 500000;
-count(*)
-5000
-select count(*) from innodb_page_compressed8 where c1 < 500000;
-count(*)
-5000
-select count(*) from innodb_page_compressed9 where c1 < 500000;
-count(*)
-5000
-alter table innodb_normal page_compressed=1 page_compression_level=8;
-show warnings;
-Level Code Message
-show create table innodb_normal;
-Table Create Table
-innodb_normal CREATE TABLE `innodb_normal` (
- `c1` int(11) DEFAULT NULL,
- `b` char(200) DEFAULT NULL
-) ENGINE=InnoDB DEFAULT CHARSET=latin1 `page_compressed`=1 `page_compression_level`=8
-alter table innodb_compressed row_format=default page_compressed=1 page_compression_level=8 key_block_size=0;
-show warnings;
-Level Code Message
-show create table innodb_compressed;
-Table Create Table
-innodb_compressed CREATE TABLE `innodb_compressed` (
- `c1` int(11) DEFAULT NULL,
- `b` char(200) DEFAULT NULL
-) ENGINE=InnoDB DEFAULT CHARSET=latin1 `page_compressed`=1 `page_compression_level`=8
-update innodb_page_compressed1 set c1 = c1 + 1;
-update innodb_page_compressed2 set c1 = c1 + 1;
-update innodb_page_compressed3 set c1 = c1 + 1;
-update innodb_page_compressed4 set c1 = c1 + 1;
-update innodb_page_compressed5 set c1 = c1 + 1;
-update innodb_page_compressed6 set c1 = c1 + 1;
-update innodb_page_compressed7 set c1 = c1 + 1;
-update innodb_page_compressed8 set c1 = c1 + 1;
-update innodb_page_compressed9 set c1 = c1 + 1;
-select count(*) from innodb_compressed;
-count(*)
-5000
+10000
+select count(*) from innodb_page_compressed3;
+count(*)
+10000
+select count(*) from innodb_page_compressed4;
+count(*)
+10000
+select count(*) from innodb_page_compressed5;
+count(*)
+10000
+select count(*) from innodb_page_compressed6;
+count(*)
+10000
+select count(*) from innodb_page_compressed6;
+count(*)
+10000
+select count(*) from innodb_page_compressed7;
+count(*)
+10000
+select count(*) from innodb_page_compressed8;
+count(*)
+10000
+select count(*) from innodb_page_compressed9;
+count(*)
+10000
+# innodb_normal expected FOUND
+FOUND 24084 /AaAaAaAa/ in innodb_normal.ibd
+# innodb_page_compressed1 page compressed expected NOT FOUND
+NOT FOUND /AaAaAaAa/ in innodb_page_compressed1.ibd
+# innodb_page_compressed2 page compressed expected NOT FOUND
+NOT FOUND /AaAaAaAa/ in innodb_page_compressed2.ibd
+# innodb_page_compressed3 page compressed expected NOT FOUND
+NOT FOUND /AaAaAaAa/ in innodb_page_compressed3.ibd
+# innodb_page_compressed4 page compressed expected NOT FOUND
+NOT FOUND /AaAaAaAa/ in innodb_page_compressed4.ibd
+# innodb_page_compressed5 page compressed expected NOT FOUND
+NOT FOUND /AaAaAaAa/ in innodb_page_compressed5.ibd
+# innodb_page_compressed6 page compressed expected NOT FOUND
+NOT FOUND /AaAaAaAa/ in innodb_page_compressed6.ibd
+# innodb_page_compressed7 page compressed expected NOT FOUND
+NOT FOUND /AaAaAaAa/ in innodb_page_compressed7.ibd
+# innodb_page_compressed8 page compressed expected NOT FOUND
+NOT FOUND /AaAaAaAa/ in innodb_page_compressed8.ibd
+# innodb_page_compressed9 page compressed expected NOT FOUND
+NOT FOUND /AaAaAaAa/ in innodb_page_compressed9.ibd
select count(*) from innodb_page_compressed1;
count(*)
-5000
-select count(*) from innodb_page_compressed1 where c1 < 500000;
-count(*)
-5000
-select count(*) from innodb_page_compressed2 where c1 < 500000;
-count(*)
-5000
-select count(*) from innodb_page_compressed3 where c1 < 500000;
-count(*)
-5000
-select count(*) from innodb_page_compressed4 where c1 < 500000;
-count(*)
-5000
-select count(*) from innodb_page_compressed5 where c1 < 500000;
-count(*)
-5000
-select count(*) from innodb_page_compressed6 where c1 < 500000;
-count(*)
-5000
-select count(*) from innodb_page_compressed7 where c1 < 500000;
-count(*)
-5000
-select count(*) from innodb_page_compressed8 where c1 < 500000;
-count(*)
-5000
-select count(*) from innodb_page_compressed9 where c1 < 500000;
-count(*)
-5000
-update innodb_page_compressed1 set c1 = c1 + 1;
-update innodb_page_compressed2 set c1 = c1 + 1;
-update innodb_page_compressed3 set c1 = c1 + 1;
-update innodb_page_compressed4 set c1 = c1 + 1;
-update innodb_page_compressed5 set c1 = c1 + 1;
-update innodb_page_compressed6 set c1 = c1 + 1;
-update innodb_page_compressed7 set c1 = c1 + 1;
-update innodb_page_compressed8 set c1 = c1 + 1;
-update innodb_page_compressed9 set c1 = c1 + 1;
-select count(*) from innodb_compressed;
-count(*)
-5000
-select count(*) from innodb_page_compressed1;
-count(*)
-5000
-select count(*) from innodb_page_compressed1 where c1 < 500000;
-count(*)
-5000
-select count(*) from innodb_page_compressed2 where c1 < 500000;
-count(*)
-5000
-select count(*) from innodb_page_compressed3 where c1 < 500000;
-count(*)
-5000
-select count(*) from innodb_page_compressed4 where c1 < 500000;
-count(*)
-5000
-select count(*) from innodb_page_compressed5 where c1 < 500000;
-count(*)
-5000
-select count(*) from innodb_page_compressed6 where c1 < 500000;
-count(*)
-5000
-select count(*) from innodb_page_compressed7 where c1 < 500000;
-count(*)
-5000
-select count(*) from innodb_page_compressed8 where c1 < 500000;
-count(*)
-5000
-select count(*) from innodb_page_compressed9 where c1 < 500000;
-count(*)
-5000
-set global innodb_compression_algorithm = 1;
-update innodb_page_compressed1 set c1 = c1 + 1;
-update innodb_page_compressed2 set c1 = c1 + 1;
-update innodb_page_compressed3 set c1 = c1 + 1;
-update innodb_page_compressed4 set c1 = c1 + 1;
-update innodb_page_compressed5 set c1 = c1 + 1;
-update innodb_page_compressed6 set c1 = c1 + 1;
-update innodb_page_compressed7 set c1 = c1 + 1;
-update innodb_page_compressed8 set c1 = c1 + 1;
-update innodb_page_compressed9 set c1 = c1 + 1;
-commit;
-select count(*) from innodb_compressed;
-count(*)
-5000
-select count(*) from innodb_page_compressed1;
-count(*)
-5000
-select count(*) from innodb_page_compressed1 where c1 < 500000;
-count(*)
-5000
-select count(*) from innodb_page_compressed2 where c1 < 500000;
-count(*)
-5000
-select count(*) from innodb_page_compressed3 where c1 < 500000;
-count(*)
-5000
-select count(*) from innodb_page_compressed4 where c1 < 500000;
-count(*)
-5000
-select count(*) from innodb_page_compressed5 where c1 < 500000;
-count(*)
-5000
-select count(*) from innodb_page_compressed6 where c1 < 500000;
-count(*)
-5000
-select count(*) from innodb_page_compressed7 where c1 < 500000;
-count(*)
-5000
-select count(*) from innodb_page_compressed8 where c1 < 500000;
-count(*)
-5000
-select count(*) from innodb_page_compressed9 where c1 < 500000;
-count(*)
-5000
-update innodb_page_compressed1 set c1 = c1 + 1;
-update innodb_page_compressed2 set c1 = c1 + 1;
-update innodb_page_compressed3 set c1 = c1 + 1;
-update innodb_page_compressed4 set c1 = c1 + 1;
-update innodb_page_compressed5 set c1 = c1 + 1;
-update innodb_page_compressed6 set c1 = c1 + 1;
-update innodb_page_compressed7 set c1 = c1 + 1;
-update innodb_page_compressed8 set c1 = c1 + 1;
-update innodb_page_compressed9 set c1 = c1 + 1;
-select count(*) from innodb_compressed;
-count(*)
-5000
-select count(*) from innodb_page_compressed1;
-count(*)
-5000
-select count(*) from innodb_page_compressed1 where c1 < 500000;
-count(*)
-5000
-select count(*) from innodb_page_compressed2 where c1 < 500000;
-count(*)
-5000
-select count(*) from innodb_page_compressed3 where c1 < 500000;
-count(*)
-5000
-select count(*) from innodb_page_compressed4 where c1 < 500000;
-count(*)
-5000
-select count(*) from innodb_page_compressed5 where c1 < 500000;
-count(*)
-5000
-select count(*) from innodb_page_compressed6 where c1 < 500000;
-count(*)
-5000
-select count(*) from innodb_page_compressed7 where c1 < 500000;
-count(*)
-5000
-select count(*) from innodb_page_compressed8 where c1 < 500000;
-count(*)
-5000
-select count(*) from innodb_page_compressed9 where c1 < 500000;
-count(*)
-5000
-set global innodb_compression_algorithm = 0;
-update innodb_page_compressed1 set c1 = c1 + 1;
-update innodb_page_compressed2 set c1 = c1 + 1;
-update innodb_page_compressed3 set c1 = c1 + 1;
-update innodb_page_compressed4 set c1 = c1 + 1;
-update innodb_page_compressed5 set c1 = c1 + 1;
-update innodb_page_compressed6 set c1 = c1 + 1;
-update innodb_page_compressed7 set c1 = c1 + 1;
-update innodb_page_compressed8 set c1 = c1 + 1;
-update innodb_page_compressed9 set c1 = c1 + 1;
-commit;
-select count(*) from innodb_compressed;
-count(*)
-5000
-select count(*) from innodb_page_compressed1;
-count(*)
-5000
-select count(*) from innodb_page_compressed1 where c1 < 500000;
-count(*)
-5000
-select count(*) from innodb_page_compressed2 where c1 < 500000;
-count(*)
-5000
-select count(*) from innodb_page_compressed3 where c1 < 500000;
-count(*)
-5000
-select count(*) from innodb_page_compressed4 where c1 < 500000;
-count(*)
-5000
-select count(*) from innodb_page_compressed5 where c1 < 500000;
-count(*)
-5000
-select count(*) from innodb_page_compressed6 where c1 < 500000;
-count(*)
-5000
-select count(*) from innodb_page_compressed7 where c1 < 500000;
-count(*)
-5000
-select count(*) from innodb_page_compressed8 where c1 < 500000;
-count(*)
-5000
-select count(*) from innodb_page_compressed9 where c1 < 500000;
-count(*)
-5000
-update innodb_page_compressed1 set c1 = c1 + 1;
-update innodb_page_compressed2 set c1 = c1 + 1;
-update innodb_page_compressed3 set c1 = c1 + 1;
-update innodb_page_compressed4 set c1 = c1 + 1;
-update innodb_page_compressed5 set c1 = c1 + 1;
-update innodb_page_compressed6 set c1 = c1 + 1;
-update innodb_page_compressed7 set c1 = c1 + 1;
-update innodb_page_compressed8 set c1 = c1 + 1;
-update innodb_page_compressed9 set c1 = c1 + 1;
-select count(*) from innodb_compressed;
-count(*)
-5000
-select count(*) from innodb_page_compressed1;
-count(*)
-5000
-select count(*) from innodb_page_compressed1 where c1 < 500000;
-count(*)
-5000
-select count(*) from innodb_page_compressed2 where c1 < 500000;
+10000
+select count(*) from innodb_page_compressed3;
count(*)
-5000
-select count(*) from innodb_page_compressed3 where c1 < 500000;
+10000
+select count(*) from innodb_page_compressed4;
count(*)
-5000
-select count(*) from innodb_page_compressed4 where c1 < 500000;
+10000
+select count(*) from innodb_page_compressed5;
count(*)
-5000
-select count(*) from innodb_page_compressed5 where c1 < 500000;
+10000
+select count(*) from innodb_page_compressed6;
count(*)
-5000
-select count(*) from innodb_page_compressed6 where c1 < 500000;
+10000
+select count(*) from innodb_page_compressed6;
count(*)
-5000
-select count(*) from innodb_page_compressed7 where c1 < 500000;
+10000
+select count(*) from innodb_page_compressed7;
count(*)
-5000
-select count(*) from innodb_page_compressed8 where c1 < 500000;
+10000
+select count(*) from innodb_page_compressed8;
count(*)
-5000
-select count(*) from innodb_page_compressed9 where c1 < 500000;
+10000
+select count(*) from innodb_page_compressed9;
count(*)
-5000
-drop procedure innodb_insert_proc;
+10000
drop table innodb_normal;
-drop table innodb_compressed;
drop table innodb_page_compressed1;
drop table innodb_page_compressed2;
drop table innodb_page_compressed3;
@@ -434,3 +96,4 @@ drop table innodb_page_compressed6;
drop table innodb_page_compressed7;
drop table innodb_page_compressed8;
drop table innodb_page_compressed9;
+#done
diff --git a/mysql-test/suite/innodb/r/innodb.result b/mysql-test/suite/innodb/r/innodb.result
index 2ce73304281..8e0e6d5b512 100644
--- a/mysql-test/suite/innodb/r/innodb.result
+++ b/mysql-test/suite/innodb/r/innodb.result
@@ -1917,6 +1917,9 @@ id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 ref v v 13 const # Using where; Using index
alter table t1 add unique(v);
ERROR 23000: Duplicate entry '{ ' for key 'v_2'
+show warnings;
+Level Code Message
+Error 1062 Duplicate entry 'v' for key 'v_2'
alter table t1 add key(v);
Warnings:
Note 1831 Duplicate index `v_2`. This is deprecated and will be disallowed in a future release
diff --git a/mysql-test/suite/innodb/r/innodb_stats_del_mark.result b/mysql-test/suite/innodb/r/innodb_stats_del_mark.result
deleted file mode 100644
index 67538bf8eb3..00000000000
--- a/mysql-test/suite/innodb/r/innodb_stats_del_mark.result
+++ /dev/null
@@ -1,91 +0,0 @@
-#
-# Bug 23333990 PERSISTENT INDEX STATISTICS UPDATE BEFORE
-# TRANSACTION IS COMMITTED
-#
-"Test 1:- Uncommited delete test"
-CREATE TABLE t1 (id INT UNSIGNED NOT NULL AUTO_INCREMENT PRIMARY KEY,
-val INT UNSIGNED NOT NULL,
-INDEX (val)) ENGINE=INNODB
-STATS_PERSISTENT=1,STATS_AUTO_RECALC=1;
-INSERT INTO t1 (val) VALUES (CEIL(RAND()*20));
-INSERT INTO t1 (val) SELECT CEIL(RAND()*20) FROM t1;
-INSERT INTO t1 (val) SELECT CEIL(RAND()*20) FROM t1;
-INSERT INTO t1 (val) SELECT CEIL(RAND()*20) FROM t1;
-INSERT INTO t1 (val) SELECT CEIL(RAND()*20) FROM t1;
-INSERT INTO t1 (val) SELECT CEIL(RAND()*20) FROM t1;
-INSERT INTO t1 (val) SELECT CEIL(RAND()*20) FROM t1;
-INSERT INTO t1 (val) SELECT CEIL(RAND()*20) FROM t1;
-INSERT INTO t1 (val) SELECT CEIL(RAND()*20) FROM t1;
-INSERT INTO t1 (val) SELECT CEIL(RAND()*20) FROM t1;
-INSERT INTO t1 (val) SELECT CEIL(RAND()*20) FROM t1;
-INSERT INTO t1 (val) SELECT CEIL(RAND()*20) FROM t1;
-INSERT INTO t1 (val) SELECT CEIL(RAND()*20) FROM t1;
-INSERT INTO t1 (val) SELECT CEIL(RAND()*20) FROM t1;
-INSERT INTO t1 (val) SELECT CEIL(RAND()*20) FROM t1;
-INSERT INTO t1 (val) SELECT CEIL(RAND()*20) FROM t1;
-INSERT INTO t1 (val) SELECT CEIL(RAND()*20) FROM t1;
-INSERT INTO t1 (val) SELECT CEIL(RAND()*20) FROM t1;
-INSERT INTO t1 (val) SELECT CEIL(RAND()*20) FROM t1;
-SELECT COUNT(*) FROM t1;
-COUNT(*)
-262144
-ANALYZE TABLE t1;
-Table Op Msg_type Msg_text
-test.t1 analyze status OK
-connect con1, localhost, root,,;
-START TRANSACTION;
-DELETE FROM t1;
-SELECT COUNT(*) FROM t1;
-connection default;
-Test correctly estimates the number of rows as > 20000
-even when in other uncommmited transaction
-all rows have been deleted.
-connection con1;
-COUNT(*)
-0
-commit;
-connection default;
-Test 2:- Insert and rollback test
-CREATE TABLE t2 (id INT UNSIGNED NOT NULL AUTO_INCREMENT PRIMARY KEY,
-val INT UNSIGNED NOT NULL,
-INDEX (val)) ENGINE=INNODB
-STATS_PERSISTENT=1,STATS_AUTO_RECALC=1;
-connection con1;
-START TRANSACTION;
-INSERT INTO t2 (val) VALUES (CEIL(RAND()*20));
-INSERT INTO t2 (val) SELECT CEIL(RAND()*20) FROM t2;
-INSERT INTO t2 (val) SELECT CEIL(RAND()*20) FROM t2;
-INSERT INTO t2 (val) SELECT CEIL(RAND()*20) FROM t2;
-INSERT INTO t2 (val) SELECT CEIL(RAND()*20) FROM t2;
-INSERT INTO t2 (val) SELECT CEIL(RAND()*20) FROM t2;
-INSERT INTO t2 (val) SELECT CEIL(RAND()*20) FROM t2;
-INSERT INTO t2 (val) SELECT CEIL(RAND()*20) FROM t2;
-INSERT INTO t2 (val) SELECT CEIL(RAND()*20) FROM t2;
-INSERT INTO t2 (val) SELECT CEIL(RAND()*20) FROM t2;
-INSERT INTO t2 (val) SELECT CEIL(RAND()*20) FROM t2;
-INSERT INTO t2 (val) SELECT CEIL(RAND()*20) FROM t2;
-INSERT INTO t2 (val) SELECT CEIL(RAND()*20) FROM t2;
-INSERT INTO t2 (val) SELECT CEIL(RAND()*20) FROM t2;
-INSERT INTO t2 (val) SELECT CEIL(RAND()*20) FROM t2;
-INSERT INTO t2 (val) SELECT CEIL(RAND()*20) FROM t2;
-INSERT INTO t2 (val) SELECT CEIL(RAND()*20) FROM t2;
-INSERT INTO t2 (val) SELECT CEIL(RAND()*20) FROM t2;
-INSERT INTO t2 (val) SELECT CEIL(RAND()*20) FROM t2;
-SELECT COUNT(*) FROM t2;
-connection default;
-select count(*) from t2;
-count(*)
-0
-Test correctly estimates the number of rows as > 20000
-even when in other uncommited transaction
-many rows are inserted.
-connection con1;
-COUNT(*)
-262144
-Rollback the insert
-rollback;
-disconnect con1;
-connection default;
-Test correctly estimates the number of rows as 1
-after rollback.
-DROP TABLE t1,t2;
diff --git a/mysql-test/suite/innodb/r/innodb_stats_persistent.result b/mysql-test/suite/innodb/r/innodb_stats_persistent.result
new file mode 100644
index 00000000000..f4de4b6b82e
--- /dev/null
+++ b/mysql-test/suite/innodb/r/innodb_stats_persistent.result
@@ -0,0 +1,116 @@
+SET @saved_include_delete_marked = @@GLOBAL.innodb_stats_include_delete_marked;
+SET GLOBAL innodb_stats_include_delete_marked = ON;
+SET @saved_traditional = @@GLOBAL.innodb_stats_traditional;
+SET GLOBAL innodb_stats_traditional=false;
+SET @saved_modified_counter = @@GLOBAL.innodb_stats_modified_counter;
+SET GLOBAL innodb_stats_modified_counter=1;
+CREATE TABLE t0 (id SERIAL, val INT UNSIGNED NOT NULL, KEY(val))
+ENGINE=INNODB STATS_PERSISTENT=1,STATS_AUTO_RECALC=1;
+CREATE TABLE t1 LIKE t0;
+CREATE TABLE t2 LIKE t0;
+INSERT INTO t0 (val) VALUES (4);
+INSERT INTO t0 (val) SELECT 4 FROM t0;
+INSERT INTO t0 (val) SELECT 4 FROM t0;
+INSERT INTO t0 (val) SELECT 4 FROM t0;
+INSERT INTO t0 (val) SELECT 4 FROM t0;
+INSERT INTO t1 SELECT * FROM t0;
+SELECT COUNT(*) FROM t1;
+COUNT(*)
+16
+ANALYZE TABLE t1;
+Table Op Msg_type Msg_text
+test.t1 analyze status OK
+connect con1, localhost, root,,;
+START TRANSACTION;
+DELETE FROM t1;
+SELECT COUNT(*) FROM t1;
+connection default;
+# With innodb_stats_include_delete_marked=ON,
+# DELETE must not affect statistics before COMMIT.
+EXPLAIN SELECT * FROM t1 WHERE val=4;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 ref val val 4 const 16 Using index
+connection con1;
+COUNT(*)
+0
+ROLLBACK;
+SELECT COUNT(*) FROM t1;
+COUNT(*)
+16
+EXPLAIN SELECT * FROM t1 WHERE val=4;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 ref val val 4 const 16 Using index
+BEGIN;
+DELETE FROM t1;
+COMMIT;
+SELECT COUNT(*) FROM t1;
+COUNT(*)
+0
+connection default;
+BEGIN;
+INSERT INTO t2 SELECT * FROM t0;
+# The INSERT will show up before COMMIT.
+EXPLAIN SELECT * FROM t2 WHERE val=4;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t2 ref val val 4 const 16 Using index
+SELECT COUNT(*) FROM t2;
+COUNT(*)
+16
+# The ROLLBACK of the INSERT must affect the statistics.
+ROLLBACK;
+SELECT COUNT(*) FROM t2;
+COUNT(*)
+0
+connection con1;
+EXPLAIN SELECT * FROM t2 WHERE val=4;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t2 ref val val 4 const 1 Using index
+SET @saved_frequency = @@GLOBAL.innodb_purge_rseg_truncate_frequency;
+SET GLOBAL innodb_purge_rseg_truncate_frequency = 1;
+InnoDB 0 transactions not purged
+SET GLOBAL innodb_purge_rseg_truncate_frequency = @saved_frequency;
+# After COMMIT and purge, the DELETE must show up.
+EXPLAIN SELECT * FROM t1 WHERE val=4;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 ref val val 4 const 1 Using index
+SET GLOBAL innodb_stats_include_delete_marked = OFF;
+BEGIN;
+INSERT INTO t1 SELECT * FROM t0;
+EXPLAIN SELECT * FROM t1 WHERE val=4;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 ref val val 4 const 16 Using index
+ROLLBACK;
+EXPLAIN SELECT * FROM t1 WHERE val=4;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 ref val val 4 const 1 Using index
+BEGIN;
+INSERT INTO t1 SELECT * FROM t0;
+COMMIT;
+EXPLAIN SELECT * FROM t1 WHERE val=4;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 ref val val 4 const 16 Using index
+BEGIN;
+DELETE FROM t1;
+SELECT COUNT(*) FROM t1;
+COUNT(*)
+0
+# With innodb_stats_include_delete_marked=OFF,
+# DELETE must affect statistics even before COMMIT.
+# However, if there was a WHERE condition,
+# ha_innobase::records_in_range() would count the delete-marked records.
+EXPLAIN SELECT * FROM t1;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 index NULL val 4 NULL 1 Using index
+ROLLBACK;
+EXPLAIN SELECT * FROM t1;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 index NULL val 4 NULL 16 Using index
+SELECT COUNT(*) FROM t1;
+COUNT(*)
+16
+disconnect con1;
+connection default;
+DROP TABLE t0,t1,t2;
+SET GLOBAL innodb_stats_include_delete_marked = @saved_include_delete_marked;
+SET GLOBAL innodb_stats_traditional = @saved_traditional;
+SET GLOBAL innodb_stats_modified_counter = @saved_modified_counter;
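
The new result file above records the behaviour of innodb_stats_include_delete_marked with persistent statistics. A minimal standalone sketch of the same effect, runnable outside the test harness (the table and column names here are illustrative, not taken from the patch):

SET GLOBAL innodb_stats_include_delete_marked = ON;
CREATE TABLE stats_demo (id SERIAL, val INT UNSIGNED NOT NULL, KEY(val))
ENGINE=InnoDB STATS_PERSISTENT=1, STATS_AUTO_RECALC=1;
INSERT INTO stats_demo (val) VALUES (4),(4),(4),(4);
ANALYZE TABLE stats_demo;
START TRANSACTION;
DELETE FROM stats_demo;
-- In a second session, EXPLAIN SELECT * FROM stats_demo WHERE val = 4
-- should still show the pre-DELETE row estimate: delete-marked records
-- keep contributing to the statistics until the DELETE is committed and purged.
ROLLBACK;
DROP TABLE stats_demo;
SET GLOBAL innodb_stats_include_delete_marked = OFF;
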
diff --git a/mysql-test/suite/innodb/r/row_format_redundant.result b/mysql-test/suite/innodb/r/row_format_redundant.result
new file mode 100644
index 00000000000..45dfdf01218
--- /dev/null
+++ b/mysql-test/suite/innodb/r/row_format_redundant.result
@@ -0,0 +1,79 @@
+SET GLOBAL innodb_file_per_table=1;
+#
+# Bug#21644827 - FTS, ASSERT !SRV_READ_ONLY_MODE || M_IMPL.M_LOG_MODE ==
+# MTR_LOG_NO_REDO
+#
+SET GLOBAL innodb_file_per_table=ON;
+create table t1 (a int not null, d varchar(15) not null, b
+varchar(198) not null, c char(156),
+fulltext ftsic(c)) engine=InnoDB
+row_format=redundant;
+insert into t1 values(123, 'abcdef', 'jghikl', 'mnop');
+insert into t1 values(456, 'abcdef', 'jghikl', 'mnop');
+insert into t1 values(789, 'abcdef', 'jghikl', 'mnop');
+insert into t1 values(134, 'kasdfsdsadf', 'adfjlasdkfjasd', 'adfsadflkasdasdfljasdf');
+insert into t1 select * from t1;
+insert into t1 select * from t1;
+insert into t1 select * from t1;
+insert into t1 select * from t1;
+insert into t1 select * from t1;
+insert into t1 select * from t1;
+insert into t1 select * from t1;
+insert into t1 select * from t1;
+insert into t1 select * from t1;
+insert into t1 select * from t1;
+SET GLOBAL innodb_file_per_table=OFF;
+create table t2 (a int not null, d varchar(15) not null, b
+varchar(198) not null, c char(156), fulltext ftsic(c)) engine=InnoDB
+row_format=redundant;
+insert into t2 select * from t1;
+create table t3 (a int not null, d varchar(15) not null, b varchar(198),
+c varchar(150), index k1(c(99), b(56)), index k2(b(5), c(10))) engine=InnoDB
+row_format=redundant;
+insert into t3 values(444, 'dddd', 'bbbbb', 'aaaaa');
+insert into t3 values(555, 'eeee', 'ccccc', 'aaaaa');
+SET GLOBAL innodb_fast_shutdown=0;
+SELECT COUNT(*) FROM t1;
+COUNT(*)
+4096
+SELECT COUNT(*) FROM t2;
+COUNT(*)
+4096
+SELECT COUNT(*) FROM t3;
+COUNT(*)
+2
+TRUNCATE TABLE t1;
+ERROR HY000: Table 't1' is read only
+TRUNCATE TABLE t2;
+ERROR HY000: Table 't2' is read only
+TRUNCATE TABLE t3;
+ERROR HY000: Table 't3' is read only
+TRUNCATE TABLE t1;
+TRUNCATE TABLE t2;
+TRUNCATE TABLE t3;
+corrupted SYS_TABLES.MIX_LEN for test/t1
+corrupted SYS_TABLES.MIX_LEN for test/t2
+corrupted SYS_TABLES.MIX_LEN for test/t3
+TRUNCATE TABLE t1;
+ERROR 42S02: Table 'test.t1' doesn't exist in engine
+TRUNCATE TABLE t2;
+TRUNCATE TABLE t3;
+SELECT COUNT(*) FROM t1;
+ERROR 42S02: Table 'test.t1' doesn't exist in engine
+SELECT COUNT(*) FROM t2;
+COUNT(*)
+0
+SELECT COUNT(*) FROM t3;
+COUNT(*)
+0
+RENAME TABLE t1 TO tee_one;
+ERROR HY000: Error on rename of './test/t1' to './test/tee_one' (errno: 155 "The table does not exist in engine")
+DROP TABLE t1;
+Warnings:
+Warning 1932 Table 'test.t1' doesn't exist in engine
+DROP TABLE t2,t3;
+FOUND 49 /\[ERROR\] InnoDB: Table `test`\.`t1` in InnoDB data dictionary contains invalid flags\. SYS_TABLES.MIX_LEN=255\b/ in mysqld.1.err
+ib_buffer_pool
+ib_logfile0
+ib_logfile1
+ibdata1
diff --git a/mysql-test/suite/innodb/r/temporary_table.result b/mysql-test/suite/innodb/r/temporary_table.result
index da5ff361eeb..074dd413947 100644
--- a/mysql-test/suite/innodb/r/temporary_table.result
+++ b/mysql-test/suite/innodb/r/temporary_table.result
@@ -149,10 +149,6 @@ FOUND 2 /support raw device/ in mysqld.1.err
SELECT * FROM INFORMATION_SCHEMA.ENGINES WHERE engine = 'innodb'
AND support IN ('YES', 'DEFAULT', 'ENABLED');
ENGINE SUPPORT COMMENT TRANSACTIONS XA SAVEPOINTS
-FOUND 1 /The innodb_temporary data file 'ibtmp1' must be at least/ in mysqld.1.err
-SELECT * FROM INFORMATION_SCHEMA.ENGINES WHERE engine = 'innodb'
-AND support IN ('YES', 'DEFAULT', 'ENABLED');
-ENGINE SUPPORT COMMENT TRANSACTIONS XA SAVEPOINTS
FOUND 1 /InnoDB: syntax error in file path/ in mysqld.1.err
SELECT * FROM INFORMATION_SCHEMA.ENGINES WHERE engine = 'innodb'
AND support IN ('YES', 'DEFAULT', 'ENABLED');
diff --git a/mysql-test/suite/innodb/t/101_compatibility.test b/mysql-test/suite/innodb/t/101_compatibility.test
index 83ab7914a69..78dee53e52f 100644
--- a/mysql-test/suite/innodb/t/101_compatibility.test
+++ b/mysql-test/suite/innodb/t/101_compatibility.test
@@ -1,4 +1,4 @@
---source include/have_innodb.inc
+--source include/innodb_page_size.inc
--source include/not_embedded.inc
-- echo #
diff --git a/mysql-test/suite/innodb/t/alter_missing_tablespace.test b/mysql-test/suite/innodb/t/alter_missing_tablespace.test
index 643b7a4833d..d877b8f3b5d 100644
--- a/mysql-test/suite/innodb/t/alter_missing_tablespace.test
+++ b/mysql-test/suite/innodb/t/alter_missing_tablespace.test
@@ -1,5 +1,5 @@
--source include/not_embedded.inc
---source include/have_innodb.inc
+--source include/innodb_page_size.inc
--echo #
--echo # Bug#13955083 ALLOW IN-PLACE DDL OPERATIONS ON MISSING
diff --git a/mysql-test/suite/innodb/t/doublewrite.test b/mysql-test/suite/innodb/t/doublewrite.test
index a153ad66b19..bdeaba2dcec 100644
--- a/mysql-test/suite/innodb/t/doublewrite.test
+++ b/mysql-test/suite/innodb/t/doublewrite.test
@@ -4,22 +4,27 @@
--echo # PAGE OF SYSTEM TABLESPACE
--echo #
---source include/have_innodb.inc
+--source include/innodb_page_size.inc
--source include/have_debug.inc
--source include/not_embedded.inc
# Slow shutdown and restart to make sure ibuf merge is finished
SET GLOBAL innodb_fast_shutdown = 0;
--disable_query_log
-call mtr.add_suppression("Header page consists of zero bytes");
-call mtr.add_suppression("Checksum mismatch in datafile");
-call mtr.add_suppression("but the innodb_page_size start-up parameter is");
-call mtr.add_suppression("adjusting FSP_SPACE_FLAGS");
+call mtr.add_suppression("InnoDB: Header page consists of zero bytes");
+call mtr.add_suppression("InnoDB: Checksum mismatch in datafile: .*, Space ID:0, Flags: 0");
+call mtr.add_suppression("InnoDB: Data file .* uses page size .* but the innodb_page_size start-up parameter is");
+call mtr.add_suppression("InnoDB: adjusting FSP_SPACE_FLAGS");
+call mtr.add_suppression("InnoDB: New log files created");
+call mtr.add_suppression("InnoDB: Cannot create doublewrite buffer: the first file in innodb_data_file_path must be at least (3|6|12)M\\.");
+call mtr.add_suppression("InnoDB: Database creation was aborted");
+call mtr.add_suppression("Plugin 'InnoDB' (init function returned error|registration as a STORAGE ENGINE failed)");
--enable_query_log
--source include/restart_mysqld.inc
let INNODB_PAGE_SIZE=`select @@innodb_page_size`;
let MYSQLD_DATADIR=`select @@datadir`;
+let SEARCH_FILE= $MYSQLTEST_VARDIR/log/mysqld.1.err;
show variables like 'innodb_doublewrite';
show variables like 'innodb_fil_make_page_dirty_debug';
@@ -392,9 +397,38 @@ EOF
--source include/start_mysqld.inc
check table t1;
-select f1, f2 from t1;
+--let SEARCH_PATTERN= \[ERROR\] InnoDB: .*test.t1\\.ibd.*
+--source include/search_pattern_in_file.inc
---echo # Test End
---echo # ---------------------------------------------------------------
+select f1, f2 from t1;
drop table t1;
+
+--echo #
+--echo # MDEV-12600 crash during install_db with innodb_page_size=32K
+--echo # and ibdata1=3M
+--echo #
+let bugdir= $MYSQLTEST_VARDIR/tmp/doublewrite;
+--mkdir $bugdir
+
+let $check_no_innodb=SELECT * FROM INFORMATION_SCHEMA.ENGINES
+WHERE engine = 'innodb'
+AND support IN ('YES', 'DEFAULT', 'ENABLED');
+
+--let $ibp=--innodb-log-group-home-dir=$bugdir --innodb-data-home-dir=$bugdir
+--let $ibd=$ibp --innodb-undo-tablespaces=0 --innodb-log-files-in-group=2
+--let $ibp=$ibp --innodb-data-file-path=ibdata1:1M;ibdata2:1M:autoextend
+
+--let $restart_parameters= $ibp
+--source include/restart_mysqld.inc
+eval $check_no_innodb;
+--let SEARCH_PATTERN= \[ERROR\] InnoDB: Cannot create doublewrite buffer
+--source include/search_pattern_in_file.inc
+--let $restart_parameters=
+--source include/restart_mysqld.inc
+
+--remove_file $bugdir/ibdata1
+--remove_file $bugdir/ibdata2
+--remove_file $bugdir/ib_logfile0
+--remove_file $bugdir/ib_logfile1
+--rmdir $bugdir
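
The MDEV-12600 case added above restarts the server with a first system tablespace file of only 1M (ibdata1:1M;ibdata2:1M:autoextend), which is too small for the doublewrite buffer, and then greps the error log for the expected failure. On a normally configured server the doublewrite setup can be checked with standard variables and status counters; a sketch, not part of the patch:

SHOW VARIABLES LIKE 'innodb_doublewrite';
SHOW VARIABLES LIKE 'innodb_data_file_path';
-- pages written through the doublewrite buffer so far
SHOW GLOBAL STATUS LIKE 'Innodb_dblwr_pages_written';
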
diff --git a/mysql-test/suite/innodb/t/drop_table_background.test b/mysql-test/suite/innodb/t/drop_table_background.test
new file mode 100644
index 00000000000..0f596dec574
--- /dev/null
+++ b/mysql-test/suite/innodb/t/drop_table_background.test
@@ -0,0 +1,30 @@
+--source include/have_innodb.inc
+--source include/have_debug.inc
+# Embedded server does not support restarting
+--source include/not_embedded.inc
+
+CREATE TABLE t(c0 SERIAL, c1 INT, c2 INT, c3 INT, c4 INT,
+KEY(c1), KEY(c2), KEY(c2,c1),
+KEY(c3), KEY(c3,c1), KEY(c3,c2), KEY(c3,c2,c1),
+KEY(c4), KEY(c4,c1), KEY(c4,c2), KEY(c4,c2,c1),
+KEY(c4,c3), KEY(c4,c3,c1), KEY(c4,c3,c2), KEY(c4,c3,c2,c1)) ENGINE=InnoDB;
+
+let $n= 10;
+
+SET DEBUG_DBUG='+d,row_drop_table_add_to_background';
+--disable_query_log
+let $i= $n;
+while ($i) {
+ eval CREATE TABLE t$i LIKE t;
+ dec $i;
+}
+let $i= $n;
+while ($i) {
+ eval DROP TABLE t$i;
+ dec $i;
+}
+--enable_query_log
+DROP TABLE t;
+--source include/restart_mysqld.inc
+CREATE TABLE t (a INT) ENGINE=InnoDB;
+DROP TABLE t;
diff --git a/mysql-test/suite/innodb/t/innodb-alter-debug.test b/mysql-test/suite/innodb/t/innodb-alter-debug.test
index 70017ffba35..f4996916e9f 100644
--- a/mysql-test/suite/innodb/t/innodb-alter-debug.test
+++ b/mysql-test/suite/innodb/t/innodb-alter-debug.test
@@ -1,4 +1,4 @@
---source include/have_innodb.inc
+--source include/innodb_page_size.inc
--source include/have_debug.inc
--source include/have_debug_sync.inc
diff --git a/mysql-test/suite/innodb/t/innodb-alter-nullable.test b/mysql-test/suite/innodb/t/innodb-alter-nullable.test
index 3f1e82b3183..bb5cdee000a 100644
--- a/mysql-test/suite/innodb/t/innodb-alter-nullable.test
+++ b/mysql-test/suite/innodb/t/innodb-alter-nullable.test
@@ -1,4 +1,4 @@
---source include/have_innodb.inc
+--source include/innodb_page_size.inc
# Save the initial number of concurrent sessions.
--source include/count_sessions.inc
diff --git a/mysql-test/suite/innodb/t/innodb-alter-table.test b/mysql-test/suite/innodb/t/innodb-alter-table.test
index 45342b4a218..97f0075f344 100644
--- a/mysql-test/suite/innodb/t/innodb-alter-table.test
+++ b/mysql-test/suite/innodb/t/innodb-alter-table.test
@@ -1,4 +1,4 @@
---source include/have_innodb.inc
+--source include/innodb_page_size.inc
#
# MMDEV-8386: MariaDB creates very big tmp file and hangs on xtradb
diff --git a/mysql-test/suite/innodb/t/innodb-alter-tempfile.test b/mysql-test/suite/innodb/t/innodb-alter-tempfile.test
index ec1ea35f1cf..d3f34b12ea6 100644
--- a/mysql-test/suite/innodb/t/innodb-alter-tempfile.test
+++ b/mysql-test/suite/innodb/t/innodb-alter-tempfile.test
@@ -10,8 +10,7 @@
# Avoid CrashReporter popup on Mac
--source include/not_crashrep.inc
-# InnoDB is required
---source include/have_innodb.inc
+--source include/innodb_page_size.inc
--echo #
--echo # Bug #18734396 INNODB IN-PLACE ALTER FAILURES BLOCK FUTURE ALTERS
diff --git a/mysql-test/suite/innodb/t/innodb-page_compression_default.test b/mysql-test/suite/innodb/t/innodb-page_compression_default.test
index 28f184c278c..1cc6c917548 100644
--- a/mysql-test/suite/innodb/t/innodb-page_compression_default.test
+++ b/mysql-test/suite/innodb/t/innodb-page_compression_default.test
@@ -1,51 +1,10 @@
--source include/have_innodb.inc
+--source include/not_embedded.inc
---disable_query_log
-let $innodb_compression_algorithm_orig=`SELECT @@innodb_compression_algorithm`;
-let $innodb_file_format_orig = `SELECT @@innodb_file_format`;
-let $innodb_file_per_table_orig = `SELECT @@innodb_file_per_table`;
---enable_query_log
+call mtr.add_suppression("InnoDB: Compression failed for space [0-9]+ name test/innodb_page_compressed[0-9] len [0-9]+ err 2 write_size [0-9]+.");
---disable_warnings
-SET GLOBAL innodb_file_format = `Barracuda`;
-SET GLOBAL innodb_file_per_table = ON;
---enable_warnings
+# All page compression test use the same
+--source include/innodb-page-compression.inc
-create table t1 (c1 int not null primary key auto_increment, b char(200)) engine=innodb page_compressed=1;
-insert into t1 values(NULL,'compressed_text_aaaaaaaaabbbbbbbbbbbbbccccccccccccc');
-insert into t1(b) select b from t1;
-insert into t1(b) select b from t1;
-insert into t1(b) select b from t1;
-insert into t1(b) select b from t1;
-insert into t1(b) select b from t1;
-insert into t1(b) select b from t1;
-insert into t1(b) select b from t1;
-insert into t1(b) select b from t1;
-insert into t1(b) select b from t1;
-insert into t1(b) select b from t1;
-insert into t1(b) select b from t1;
-insert into t1(b) select b from t1;
-insert into t1(b) select b from t1;
+-- echo #done
-let $wait_condition= select variable_value > 0 from information_schema.global_status where variable_name = 'INNODB_NUM_PAGES_PAGE_COMPRESSED';
---source include/wait_condition.inc
-
---let $MYSQLD_DATADIR=`select @@datadir`
---let t1_IBD = $MYSQLD_DATADIR/test/t1.ibd
---let SEARCH_RANGE = 10000000
---let SEARCH_PATTERN=compressed_text
-
---echo # t1 compressed expected NOT FOUND
--- let SEARCH_FILE=$t1_IBD
--- source include/search_pattern_in_file.inc
-
-drop table t1;
-
-# reset system
---disable_query_log
---disable_warnings
-EVAL SET GLOBAL innodb_compression_algorithm = $innodb_compression_algorithm_orig;
-EVAL SET GLOBAL innodb_file_per_table = $innodb_file_per_table_orig;
-EVAL SET GLOBAL innodb_file_format = $innodb_file_format_orig;
---enable_warnings
---enable_query_log
diff --git a/mysql-test/suite/innodb/t/innodb-page_compression_snappy.test b/mysql-test/suite/innodb/t/innodb-page_compression_snappy.test
index b90d15f1fa3..532ec294d28 100644
--- a/mysql-test/suite/innodb/t/innodb-page_compression_snappy.test
+++ b/mysql-test/suite/innodb/t/innodb-page_compression_snappy.test
@@ -1,244 +1,13 @@
-- source include/have_innodb.inc
-- source include/have_innodb_snappy.inc
+--source include/not_embedded.inc
-call mtr.add_suppression("InnoDB: Compression failed for space.*");
-
-let $innodb_compression_algorithm_orig=`select @@innodb_compression_algorithm`;
+call mtr.add_suppression("InnoDB: Compression failed for space [0-9]+ name test/innodb_page_compressed[0-9] len [0-9]+ err 2 write_size [0-9]+.");
# snappy
-set global innodb_compression_algorithm = 6;
-
-create table innodb_compressed(c1 int, b char(200)) engine=innodb row_format=compressed key_block_size=8;
-show warnings;
-create table innodb_normal (c1 int, b char(200)) engine=innodb;
-show warnings;
-create table innodb_page_compressed1 (c1 int, b char(200)) engine=innodb page_compressed=1 page_compression_level=1;
-show warnings;
-show create table innodb_page_compressed1;
-create table innodb_page_compressed2 (c1 int, b char(200)) engine=innodb page_compressed=1 page_compression_level=2;
-show warnings;
-show create table innodb_page_compressed2;
-create table innodb_page_compressed3 (c1 int, b char(200)) engine=innodb page_compressed=1 page_compression_level=3;
-show warnings;
-show create table innodb_page_compressed3;
-create table innodb_page_compressed4 (c1 int, b char(200)) engine=innodb page_compressed=1 page_compression_level=4;
-show warnings;
-show create table innodb_page_compressed4;
-create table innodb_page_compressed5 (c1 int, b char(200)) engine=innodb page_compressed=1 page_compression_level=5;
-show warnings;
-show create table innodb_page_compressed5;
-create table innodb_page_compressed6 (c1 int, b char(200)) engine=innodb page_compressed=1 page_compression_level=6;
-show warnings;
-show create table innodb_page_compressed6;
-create table innodb_page_compressed7 (c1 int, b char(200)) engine=innodb page_compressed=1 page_compression_level=7;
-show warnings;
-show create table innodb_page_compressed7;
-create table innodb_page_compressed8 (c1 int, b char(200)) engine=innodb page_compressed=1 page_compression_level=8;
-show warnings;
-show create table innodb_page_compressed8;
-create table innodb_page_compressed9 (c1 int, b char(200)) engine=innodb page_compressed=1 page_compression_level=9;
-show warnings;
-show create table innodb_page_compressed9;
-delimiter //;
-create procedure innodb_insert_proc (repeat_count int)
-begin
- declare current_num int;
- set current_num = 0;
- while current_num < repeat_count do
- insert into innodb_normal values(current_num,'aaaaaaaaaaaaaaaaaaabbbbbbbbbbbbbbbbbbbbbbbccccccccccccccccccccccc');
- set current_num = current_num + 1;
- end while;
-end//
-delimiter ;//
-commit;
-
-set autocommit=0;
-call innodb_insert_proc(5000);
-commit;
-set autocommit=1;
-select count(*) from innodb_normal;
-insert into innodb_compressed select * from innodb_normal;
-insert into innodb_page_compressed1 select * from innodb_normal;
-insert into innodb_page_compressed2 select * from innodb_normal;
-insert into innodb_page_compressed3 select * from innodb_normal;
-insert into innodb_page_compressed4 select * from innodb_normal;
-insert into innodb_page_compressed5 select * from innodb_normal;
-insert into innodb_page_compressed6 select * from innodb_normal;
-insert into innodb_page_compressed7 select * from innodb_normal;
-insert into innodb_page_compressed8 select * from innodb_normal;
-insert into innodb_page_compressed9 select * from innodb_normal;
-commit;
-select count(*) from innodb_compressed;
-select count(*) from innodb_page_compressed1;
-select count(*) from innodb_page_compressed1 where c1 < 500000;
-select count(*) from innodb_page_compressed2 where c1 < 500000;
-select count(*) from innodb_page_compressed3 where c1 < 500000;
-select count(*) from innodb_page_compressed4 where c1 < 500000;
-select count(*) from innodb_page_compressed5 where c1 < 500000;
-select count(*) from innodb_page_compressed6 where c1 < 500000;
-select count(*) from innodb_page_compressed7 where c1 < 500000;
-select count(*) from innodb_page_compressed8 where c1 < 500000;
-select count(*) from innodb_page_compressed9 where c1 < 500000;
-
-alter table innodb_normal page_compressed=1 page_compression_level=8;
-show warnings;
-show create table innodb_normal;
-alter table innodb_compressed row_format=default page_compressed=1 page_compression_level=8 key_block_size=0;
-show warnings;
-show create table innodb_compressed;
-
-update innodb_page_compressed1 set c1 = c1 + 1;
-update innodb_page_compressed2 set c1 = c1 + 1;
-update innodb_page_compressed3 set c1 = c1 + 1;
-update innodb_page_compressed4 set c1 = c1 + 1;
-update innodb_page_compressed5 set c1 = c1 + 1;
-update innodb_page_compressed6 set c1 = c1 + 1;
-update innodb_page_compressed7 set c1 = c1 + 1;
-update innodb_page_compressed8 set c1 = c1 + 1;
-update innodb_page_compressed9 set c1 = c1 + 1;
-select count(*) from innodb_compressed;
-select count(*) from innodb_page_compressed1;
-select count(*) from innodb_page_compressed1 where c1 < 500000;
-select count(*) from innodb_page_compressed2 where c1 < 500000;
-select count(*) from innodb_page_compressed3 where c1 < 500000;
-select count(*) from innodb_page_compressed4 where c1 < 500000;
-select count(*) from innodb_page_compressed5 where c1 < 500000;
-select count(*) from innodb_page_compressed6 where c1 < 500000;
-select count(*) from innodb_page_compressed7 where c1 < 500000;
-select count(*) from innodb_page_compressed8 where c1 < 500000;
-select count(*) from innodb_page_compressed9 where c1 < 500000;
-
---source include/restart_mysqld.inc
-
-update innodb_page_compressed1 set c1 = c1 + 1;
-update innodb_page_compressed2 set c1 = c1 + 1;
-update innodb_page_compressed3 set c1 = c1 + 1;
-update innodb_page_compressed4 set c1 = c1 + 1;
-update innodb_page_compressed5 set c1 = c1 + 1;
-update innodb_page_compressed6 set c1 = c1 + 1;
-update innodb_page_compressed7 set c1 = c1 + 1;
-update innodb_page_compressed8 set c1 = c1 + 1;
-update innodb_page_compressed9 set c1 = c1 + 1;
-select count(*) from innodb_compressed;
-select count(*) from innodb_page_compressed1;
-select count(*) from innodb_page_compressed1 where c1 < 500000;
-select count(*) from innodb_page_compressed2 where c1 < 500000;
-select count(*) from innodb_page_compressed3 where c1 < 500000;
-select count(*) from innodb_page_compressed4 where c1 < 500000;
-select count(*) from innodb_page_compressed5 where c1 < 500000;
-select count(*) from innodb_page_compressed6 where c1 < 500000;
-select count(*) from innodb_page_compressed7 where c1 < 500000;
-select count(*) from innodb_page_compressed8 where c1 < 500000;
-select count(*) from innodb_page_compressed9 where c1 < 500000;
-
-# zlib
-set global innodb_compression_algorithm = 1;
-update innodb_page_compressed1 set c1 = c1 + 1;
-update innodb_page_compressed2 set c1 = c1 + 1;
-update innodb_page_compressed3 set c1 = c1 + 1;
-update innodb_page_compressed4 set c1 = c1 + 1;
-update innodb_page_compressed5 set c1 = c1 + 1;
-update innodb_page_compressed6 set c1 = c1 + 1;
-update innodb_page_compressed7 set c1 = c1 + 1;
-update innodb_page_compressed8 set c1 = c1 + 1;
-update innodb_page_compressed9 set c1 = c1 + 1;
-commit;
-select count(*) from innodb_compressed;
-select count(*) from innodb_page_compressed1;
-select count(*) from innodb_page_compressed1 where c1 < 500000;
-select count(*) from innodb_page_compressed2 where c1 < 500000;
-select count(*) from innodb_page_compressed3 where c1 < 500000;
-select count(*) from innodb_page_compressed4 where c1 < 500000;
-select count(*) from innodb_page_compressed5 where c1 < 500000;
-select count(*) from innodb_page_compressed6 where c1 < 500000;
-select count(*) from innodb_page_compressed7 where c1 < 500000;
-select count(*) from innodb_page_compressed8 where c1 < 500000;
-select count(*) from innodb_page_compressed9 where c1 < 500000;
-
---source include/restart_mysqld.inc
-
-update innodb_page_compressed1 set c1 = c1 + 1;
-update innodb_page_compressed2 set c1 = c1 + 1;
-update innodb_page_compressed3 set c1 = c1 + 1;
-update innodb_page_compressed4 set c1 = c1 + 1;
-update innodb_page_compressed5 set c1 = c1 + 1;
-update innodb_page_compressed6 set c1 = c1 + 1;
-update innodb_page_compressed7 set c1 = c1 + 1;
-update innodb_page_compressed8 set c1 = c1 + 1;
-update innodb_page_compressed9 set c1 = c1 + 1;
-select count(*) from innodb_compressed;
-select count(*) from innodb_page_compressed1;
-select count(*) from innodb_page_compressed1 where c1 < 500000;
-select count(*) from innodb_page_compressed2 where c1 < 500000;
-select count(*) from innodb_page_compressed3 where c1 < 500000;
-select count(*) from innodb_page_compressed4 where c1 < 500000;
-select count(*) from innodb_page_compressed5 where c1 < 500000;
-select count(*) from innodb_page_compressed6 where c1 < 500000;
-select count(*) from innodb_page_compressed7 where c1 < 500000;
-select count(*) from innodb_page_compressed8 where c1 < 500000;
-select count(*) from innodb_page_compressed9 where c1 < 500000;
-
-# none
-set global innodb_compression_algorithm = 0;
-update innodb_page_compressed1 set c1 = c1 + 1;
-update innodb_page_compressed2 set c1 = c1 + 1;
-update innodb_page_compressed3 set c1 = c1 + 1;
-update innodb_page_compressed4 set c1 = c1 + 1;
-update innodb_page_compressed5 set c1 = c1 + 1;
-update innodb_page_compressed6 set c1 = c1 + 1;
-update innodb_page_compressed7 set c1 = c1 + 1;
-update innodb_page_compressed8 set c1 = c1 + 1;
-update innodb_page_compressed9 set c1 = c1 + 1;
-commit;
-select count(*) from innodb_compressed;
-select count(*) from innodb_page_compressed1;
-select count(*) from innodb_page_compressed1 where c1 < 500000;
-select count(*) from innodb_page_compressed2 where c1 < 500000;
-select count(*) from innodb_page_compressed3 where c1 < 500000;
-select count(*) from innodb_page_compressed4 where c1 < 500000;
-select count(*) from innodb_page_compressed5 where c1 < 500000;
-select count(*) from innodb_page_compressed6 where c1 < 500000;
-select count(*) from innodb_page_compressed7 where c1 < 500000;
-select count(*) from innodb_page_compressed8 where c1 < 500000;
-select count(*) from innodb_page_compressed9 where c1 < 500000;
-
---source include/restart_mysqld.inc
-
-update innodb_page_compressed1 set c1 = c1 + 1;
-update innodb_page_compressed2 set c1 = c1 + 1;
-update innodb_page_compressed3 set c1 = c1 + 1;
-update innodb_page_compressed4 set c1 = c1 + 1;
-update innodb_page_compressed5 set c1 = c1 + 1;
-update innodb_page_compressed6 set c1 = c1 + 1;
-update innodb_page_compressed7 set c1 = c1 + 1;
-update innodb_page_compressed8 set c1 = c1 + 1;
-update innodb_page_compressed9 set c1 = c1 + 1;
-select count(*) from innodb_compressed;
-select count(*) from innodb_page_compressed1;
-select count(*) from innodb_page_compressed1 where c1 < 500000;
-select count(*) from innodb_page_compressed2 where c1 < 500000;
-select count(*) from innodb_page_compressed3 where c1 < 500000;
-select count(*) from innodb_page_compressed4 where c1 < 500000;
-select count(*) from innodb_page_compressed5 where c1 < 500000;
-select count(*) from innodb_page_compressed6 where c1 < 500000;
-select count(*) from innodb_page_compressed7 where c1 < 500000;
-select count(*) from innodb_page_compressed8 where c1 < 500000;
-select count(*) from innodb_page_compressed9 where c1 < 500000;
+set global innodb_compression_algorithm = snappy;
-drop procedure innodb_insert_proc;
-drop table innodb_normal;
-drop table innodb_compressed;
-drop table innodb_page_compressed1;
-drop table innodb_page_compressed2;
-drop table innodb_page_compressed3;
-drop table innodb_page_compressed4;
-drop table innodb_page_compressed5;
-drop table innodb_page_compressed6;
-drop table innodb_page_compressed7;
-drop table innodb_page_compressed8;
-drop table innodb_page_compressed9;
+# All page compression test use the same
+--source include/innodb-page-compression.inc
-# reset system
---disable_query_log
-EVAL SET GLOBAL innodb_compression_algorithm = $innodb_compression_algorithm_orig;
---enable_query_log
+-- echo #done
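
The removed tests above selected the algorithm by number (the deleted comments mark 0 = none, 1 = zlib, 6 = snappy), whereas the rewritten test sets it by name before sourcing the shared include file. Both spellings refer to the same global variable; a quick check, assuming a server built with snappy support as have_innodb_snappy.inc requires:

set global innodb_compression_algorithm = 6;      -- numeric form used by the old test
select @@global.innodb_compression_algorithm;     -- reported back as a name, e.g. snappy
set global innodb_compression_algorithm = snappy; -- symbolic form used by the new test
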
diff --git a/mysql-test/suite/innodb/t/innodb_stats_del_mark-master.opt b/mysql-test/suite/innodb/t/innodb_stats_del_mark-master.opt
deleted file mode 100644
index 145ee2b4264..00000000000
--- a/mysql-test/suite/innodb/t/innodb_stats_del_mark-master.opt
+++ /dev/null
@@ -1 +0,0 @@
---innodb_stats_include_delete_marked=on
diff --git a/mysql-test/suite/innodb/t/innodb_stats_del_mark.test b/mysql-test/suite/innodb/t/innodb_stats_del_mark.test
deleted file mode 100644
index 36f7a2ea099..00000000000
--- a/mysql-test/suite/innodb/t/innodb_stats_del_mark.test
+++ /dev/null
@@ -1,113 +0,0 @@
---source include/have_innodb.inc
---source include/big_test.inc
-
---echo #
---echo # Bug 23333990 PERSISTENT INDEX STATISTICS UPDATE BEFORE
---echo # TRANSACTION IS COMMITTED
---echo #
-
---echo "Test 1:- Uncommited delete test"
-CREATE TABLE t1 (id INT UNSIGNED NOT NULL AUTO_INCREMENT PRIMARY KEY,
- val INT UNSIGNED NOT NULL,
- INDEX (val)) ENGINE=INNODB
- STATS_PERSISTENT=1,STATS_AUTO_RECALC=1;
-
-
-INSERT INTO t1 (val) VALUES (CEIL(RAND()*20));
-INSERT INTO t1 (val) SELECT CEIL(RAND()*20) FROM t1;
-INSERT INTO t1 (val) SELECT CEIL(RAND()*20) FROM t1;
-INSERT INTO t1 (val) SELECT CEIL(RAND()*20) FROM t1;
-INSERT INTO t1 (val) SELECT CEIL(RAND()*20) FROM t1;
-INSERT INTO t1 (val) SELECT CEIL(RAND()*20) FROM t1;
-INSERT INTO t1 (val) SELECT CEIL(RAND()*20) FROM t1;
-INSERT INTO t1 (val) SELECT CEIL(RAND()*20) FROM t1;
-INSERT INTO t1 (val) SELECT CEIL(RAND()*20) FROM t1;
-INSERT INTO t1 (val) SELECT CEIL(RAND()*20) FROM t1;
-INSERT INTO t1 (val) SELECT CEIL(RAND()*20) FROM t1;
-INSERT INTO t1 (val) SELECT CEIL(RAND()*20) FROM t1;
-INSERT INTO t1 (val) SELECT CEIL(RAND()*20) FROM t1;
-INSERT INTO t1 (val) SELECT CEIL(RAND()*20) FROM t1;
-INSERT INTO t1 (val) SELECT CEIL(RAND()*20) FROM t1;
-INSERT INTO t1 (val) SELECT CEIL(RAND()*20) FROM t1;
-INSERT INTO t1 (val) SELECT CEIL(RAND()*20) FROM t1;
-INSERT INTO t1 (val) SELECT CEIL(RAND()*20) FROM t1;
-INSERT INTO t1 (val) SELECT CEIL(RAND()*20) FROM t1;
-
-SELECT COUNT(*) FROM t1;
-ANALYZE TABLE t1;
-
-connect(con1, localhost, root,,);
-START TRANSACTION;
-DELETE FROM t1;
-send SELECT COUNT(*) FROM t1;
-
-connection default;
-let $row_count= query_get_value(EXPLAIN SELECT * FROM t1 WHERE val=4, rows,1);
-if ($row_count > 20000)
-{
---echo Test correctly estimates the number of rows as > 20000
---echo even when in other uncommmited transaction
---echo all rows have been deleted.
-}
-
-connection con1;
-reap;
-commit;
-
-connection default;
-
---echo Test 2:- Insert and rollback test
-CREATE TABLE t2 (id INT UNSIGNED NOT NULL AUTO_INCREMENT PRIMARY KEY,
- val INT UNSIGNED NOT NULL,
- INDEX (val)) ENGINE=INNODB
- STATS_PERSISTENT=1,STATS_AUTO_RECALC=1;
-
-connection con1;
-
-START TRANSACTION;
-INSERT INTO t2 (val) VALUES (CEIL(RAND()*20));
-INSERT INTO t2 (val) SELECT CEIL(RAND()*20) FROM t2;
-INSERT INTO t2 (val) SELECT CEIL(RAND()*20) FROM t2;
-INSERT INTO t2 (val) SELECT CEIL(RAND()*20) FROM t2;
-INSERT INTO t2 (val) SELECT CEIL(RAND()*20) FROM t2;
-INSERT INTO t2 (val) SELECT CEIL(RAND()*20) FROM t2;
-INSERT INTO t2 (val) SELECT CEIL(RAND()*20) FROM t2;
-INSERT INTO t2 (val) SELECT CEIL(RAND()*20) FROM t2;
-INSERT INTO t2 (val) SELECT CEIL(RAND()*20) FROM t2;
-INSERT INTO t2 (val) SELECT CEIL(RAND()*20) FROM t2;
-INSERT INTO t2 (val) SELECT CEIL(RAND()*20) FROM t2;
-INSERT INTO t2 (val) SELECT CEIL(RAND()*20) FROM t2;
-INSERT INTO t2 (val) SELECT CEIL(RAND()*20) FROM t2;
-INSERT INTO t2 (val) SELECT CEIL(RAND()*20) FROM t2;
-INSERT INTO t2 (val) SELECT CEIL(RAND()*20) FROM t2;
-INSERT INTO t2 (val) SELECT CEIL(RAND()*20) FROM t2;
-INSERT INTO t2 (val) SELECT CEIL(RAND()*20) FROM t2;
-INSERT INTO t2 (val) SELECT CEIL(RAND()*20) FROM t2;
-INSERT INTO t2 (val) SELECT CEIL(RAND()*20) FROM t2;
-send SELECT COUNT(*) FROM t2;
-
-connection default;
-select count(*) from t2;
-let $row_count= query_get_value(EXPLAIN SELECT * FROM t2 WHERE val=4, rows,1);
-if ($row_count > 20000)
-{
---echo Test correctly estimates the number of rows as > 20000
---echo even when in other uncommited transaction
---echo many rows are inserted.
-}
-
-connection con1;
-reap;
---echo Rollback the insert
-rollback;
-disconnect con1;
-
-connection default;
-let $row_count= query_get_value(EXPLAIN SELECT * FROM t2 WHERE val=4, rows,1);
-if ($row_count <= 1)
-{
---echo Test correctly estimates the number of rows as $row_count
---echo after rollback.
-}
-
-DROP TABLE t1,t2;
diff --git a/mysql-test/suite/innodb/t/innodb_stats_persistent.test b/mysql-test/suite/innodb/t/innodb_stats_persistent.test
new file mode 100644
index 00000000000..652b201c4b4
--- /dev/null
+++ b/mysql-test/suite/innodb/t/innodb_stats_persistent.test
@@ -0,0 +1,95 @@
+--source include/have_innodb.inc
+--source include/big_test.inc
+
+SET @saved_include_delete_marked = @@GLOBAL.innodb_stats_include_delete_marked;
+SET GLOBAL innodb_stats_include_delete_marked = ON;
+SET @saved_traditional = @@GLOBAL.innodb_stats_traditional;
+SET GLOBAL innodb_stats_traditional=false;
+SET @saved_modified_counter = @@GLOBAL.innodb_stats_modified_counter;
+SET GLOBAL innodb_stats_modified_counter=1;
+
+CREATE TABLE t0 (id SERIAL, val INT UNSIGNED NOT NULL, KEY(val))
+ENGINE=INNODB STATS_PERSISTENT=1,STATS_AUTO_RECALC=1;
+CREATE TABLE t1 LIKE t0;
+CREATE TABLE t2 LIKE t0;
+
+INSERT INTO t0 (val) VALUES (4);
+INSERT INTO t0 (val) SELECT 4 FROM t0;
+INSERT INTO t0 (val) SELECT 4 FROM t0;
+INSERT INTO t0 (val) SELECT 4 FROM t0;
+INSERT INTO t0 (val) SELECT 4 FROM t0;
+
+INSERT INTO t1 SELECT * FROM t0;
+SELECT COUNT(*) FROM t1;
+ANALYZE TABLE t1;
+
+connect(con1, localhost, root,,);
+START TRANSACTION;
+DELETE FROM t1;
+send SELECT COUNT(*) FROM t1;
+
+connection default;
+--echo # With innodb_stats_include_delete_marked=ON,
+--echo # DELETE must not affect statistics before COMMIT.
+EXPLAIN SELECT * FROM t1 WHERE val=4;
+
+connection con1;
+reap;
+ROLLBACK;
+SELECT COUNT(*) FROM t1;
+EXPLAIN SELECT * FROM t1 WHERE val=4;
+
+BEGIN;
+DELETE FROM t1;
+COMMIT;
+SELECT COUNT(*) FROM t1;
+
+connection default;
+BEGIN;
+INSERT INTO t2 SELECT * FROM t0;
+
+--echo # The INSERT will show up before COMMIT.
+EXPLAIN SELECT * FROM t2 WHERE val=4;
+SELECT COUNT(*) FROM t2;
+--echo # The ROLLBACK of the INSERT must affect the statistics.
+ROLLBACK;
+SELECT COUNT(*) FROM t2;
+
+connection con1;
+EXPLAIN SELECT * FROM t2 WHERE val=4;
+SET @saved_frequency = @@GLOBAL.innodb_purge_rseg_truncate_frequency;
+SET GLOBAL innodb_purge_rseg_truncate_frequency = 1;
+--source include/wait_all_purged.inc
+SET GLOBAL innodb_purge_rseg_truncate_frequency = @saved_frequency;
+--echo # After COMMIT and purge, the DELETE must show up.
+EXPLAIN SELECT * FROM t1 WHERE val=4;
+
+SET GLOBAL innodb_stats_include_delete_marked = OFF;
+BEGIN;
+INSERT INTO t1 SELECT * FROM t0;
+EXPLAIN SELECT * FROM t1 WHERE val=4;
+ROLLBACK;
+EXPLAIN SELECT * FROM t1 WHERE val=4;
+BEGIN;
+INSERT INTO t1 SELECT * FROM t0;
+COMMIT;
+EXPLAIN SELECT * FROM t1 WHERE val=4;
+BEGIN;
+DELETE FROM t1;
+SELECT COUNT(*) FROM t1;
+--echo # With innodb_stats_include_delete_marked=OFF,
+--echo # DELETE must affect statistics even before COMMIT.
+--echo # However, if there was a WHERE condition,
+--echo # ha_innobase::records_in_range() would count the delete-marked records.
+EXPLAIN SELECT * FROM t1;
+ROLLBACK;
+EXPLAIN SELECT * FROM t1;
+SELECT COUNT(*) FROM t1;
+disconnect con1;
+
+connection default;
+
+DROP TABLE t0,t1,t2;
+SET GLOBAL innodb_stats_include_delete_marked = @saved_include_delete_marked;
+SET GLOBAL innodb_stats_traditional = @saved_traditional;
+SET GLOBAL innodb_stats_modified_counter = @saved_modified_counter;
diff --git a/mysql-test/suite/innodb/t/log_data_file_size.test b/mysql-test/suite/innodb/t/log_data_file_size.test
index 56a9d35bd92..8a0e88efc76 100644
--- a/mysql-test/suite/innodb/t/log_data_file_size.test
+++ b/mysql-test/suite/innodb/t/log_data_file_size.test
@@ -1,4 +1,4 @@
---source include/have_innodb.inc
+--source include/innodb_page_size.inc
--source include/not_embedded.inc
let INNODB_PAGE_SIZE=`select @@innodb_page_size`;
diff --git a/mysql-test/suite/innodb/t/row_format_redundant.test b/mysql-test/suite/innodb/t/row_format_redundant.test
new file mode 100644
index 00000000000..f8e25b373de
--- /dev/null
+++ b/mysql-test/suite/innodb/t/row_format_redundant.test
@@ -0,0 +1,157 @@
+--source include/have_innodb.inc
+# Embedded mode doesn't allow restarting
+--source include/not_embedded.inc
+
+--disable_query_log
+call mtr.add_suppression("InnoDB: Table `mysql`\\.`innodb_table_stats` not found");
+call mtr.add_suppression("InnoDB: Table `test`.`t1` in InnoDB data dictionary contains invalid flags. SYS_TABLES.MIX_LEN=255");
+call mtr.add_suppression("InnoDB: Parent table of FTS auxiliary table test/FTS_.* not found");
+call mtr.add_suppression("InnoDB: Cannot open table test/t1 from the internal data dictionary");
+call mtr.add_suppression("InnoDB: Table `test`.`t1` does not exist in the InnoDB internal data dictionary though MariaDB is trying to (rename|drop)");
+FLUSH TABLES;
+--enable_query_log
+
+let INNODB_PAGE_SIZE=`select @@innodb_page_size`;
+
+let bugdir= $MYSQLTEST_VARDIR/tmp/row_format_redundant;
+--mkdir $bugdir
+--let SEARCH_FILE = $MYSQLTEST_VARDIR/log/mysqld.1.err
+
+--let $d=--innodb-data-home-dir=$bugdir --innodb-log-group-home-dir=$bugdir
+--let $d=$d --innodb-data-file-path=ibdata1:1M:autoextend
+--let $d=$d --innodb-undo-tablespaces=0 --innodb-stats-persistent=0
+--let $restart_parameters= $d
+--source include/restart_mysqld.inc
+
+SET GLOBAL innodb_file_per_table=1;
+
+--echo #
+--echo # Bug#21644827 - FTS, ASSERT !SRV_READ_ONLY_MODE || M_IMPL.M_LOG_MODE ==
+--echo # MTR_LOG_NO_REDO
+--echo #
+
+SET GLOBAL innodb_file_per_table=ON;
+create table t1 (a int not null, d varchar(15) not null, b
+varchar(198) not null, c char(156),
+fulltext ftsic(c)) engine=InnoDB
+row_format=redundant;
+
+insert into t1 values(123, 'abcdef', 'jghikl', 'mnop');
+insert into t1 values(456, 'abcdef', 'jghikl', 'mnop');
+insert into t1 values(789, 'abcdef', 'jghikl', 'mnop');
+insert into t1 values(134, 'kasdfsdsadf', 'adfjlasdkfjasd', 'adfsadflkasdasdfljasdf');
+insert into t1 select * from t1;
+insert into t1 select * from t1;
+insert into t1 select * from t1;
+insert into t1 select * from t1;
+insert into t1 select * from t1;
+insert into t1 select * from t1;
+insert into t1 select * from t1;
+insert into t1 select * from t1;
+insert into t1 select * from t1;
+insert into t1 select * from t1;
+
+SET GLOBAL innodb_file_per_table=OFF;
+create table t2 (a int not null, d varchar(15) not null, b
+varchar(198) not null, c char(156), fulltext ftsic(c)) engine=InnoDB
+row_format=redundant;
+
+insert into t2 select * from t1;
+
+create table t3 (a int not null, d varchar(15) not null, b varchar(198),
+c varchar(150), index k1(c(99), b(56)), index k2(b(5), c(10))) engine=InnoDB
+row_format=redundant;
+
+insert into t3 values(444, 'dddd', 'bbbbb', 'aaaaa');
+insert into t3 values(555, 'eeee', 'ccccc', 'aaaaa');
+
+# read-only restart requires the change buffer to be empty; therefore we
+# do a slow shutdown.
+SET GLOBAL innodb_fast_shutdown=0;
+--let $restart_parameters= $d --innodb-read-only
+--source include/restart_mysqld.inc
+
+SELECT COUNT(*) FROM t1;
+SELECT COUNT(*) FROM t2;
+SELECT COUNT(*) FROM t3;
+
+--error ER_OPEN_AS_READONLY
+TRUNCATE TABLE t1;
+--error ER_OPEN_AS_READONLY
+TRUNCATE TABLE t2;
+--error ER_OPEN_AS_READONLY
+TRUNCATE TABLE t3;
+
+--let $restart_parameters= $d
+--source include/restart_mysqld.inc
+
+TRUNCATE TABLE t1;
+TRUNCATE TABLE t2;
+TRUNCATE TABLE t3;
+
+--source include/shutdown_mysqld.inc
+--perl
+use strict;
+my $ps= $ENV{INNODB_PAGE_SIZE};
+my $file= "$ENV{bugdir}/ibdata1";
+open(FILE, "+<", $file) || die "Unable to open $file\n";
+# Read DICT_HDR_TABLES, the root page number of CLUST_IND (SYS_TABLES.NAME).
+sysseek(FILE, 7*$ps+38+32, 0) || die "Unable to seek $file";
+die "Unable to read $file" unless sysread(FILE, $_, 4) == 4;
+my $sys_tables_root = unpack("N", $_);
+my $page;
+sysseek(FILE, $sys_tables_root*$ps, 0) || die "Unable to seek $file";
+die "Unable to read $file" unless sysread(FILE, $page, $ps) == $ps;
+for (my $offset= 0x65; $offset;
+ $offset= unpack("n", substr($page,$offset-2,2)))
+{
+ my $n_fields= unpack("n", substr($page,$offset-4,2)) >> 1 & 0x3ff;
+ my $start= 0;
+ my $end= unpack("C", substr($page, $offset-7, 1));
+ my $name= substr($page,$offset+$start,$end-$start);
+ for (my $i= 0; $i < $n_fields; $i++) {
+ my $end= unpack("C", substr($page, $offset-7-$i, 1));
+ # Corrupt SYS_TABLES.MIX_LEN (ignored for ROW_FORMAT=REDUNDANT)
+ if ($i == 7 && $name =~ '^test/t[123]')
+ {
+ print "corrupted SYS_TABLES.MIX_LEN for $name\n";
+ substr($page,$offset+$start,$end-$start)= pack("N", 255);
+ }
+ $start= $end & 0x7f;
+ }
+}
+substr($page,0,4)=pack("N",0xdeadbeef);
+substr($page,$ps-8,4)=pack("N",0xdeadbeef);
+sysseek(FILE, $sys_tables_root*$ps, 0) || die "Unable to seek $file";
+syswrite(FILE, $page, $ps)==$ps || die "Unable to write $file\n";
+close(FILE) || die "Unable to close $file\n";
+EOF
+
+--source include/start_mysqld.inc
+--error ER_NO_SUCH_TABLE_IN_ENGINE
+TRUNCATE TABLE t1;
+TRUNCATE TABLE t2;
+TRUNCATE TABLE t3;
+--error ER_NO_SUCH_TABLE_IN_ENGINE
+SELECT COUNT(*) FROM t1;
+SELECT COUNT(*) FROM t2;
+SELECT COUNT(*) FROM t3;
+--error ER_ERROR_ON_RENAME
+RENAME TABLE t1 TO tee_one;
+DROP TABLE t1;
+DROP TABLE t2,t3;
+
+--let SEARCH_PATTERN= \[ERROR\] InnoDB: Table `test`\.`t1` in InnoDB data dictionary contains invalid flags\. SYS_TABLES.MIX_LEN=255\b
+--source include/search_pattern_in_file.inc
+
+--let $restart_parameters=
+--source include/restart_mysqld.inc
+
+--list_files $bugdir
+--remove_files_wildcard $bugdir
+--rmdir $bugdir
+
+# Remove the data file, because DROP TABLE skipped it for the "corrupted" table
+--let MYSQLD_DATADIR=`select @@datadir`
+--remove_file $MYSQLD_DATADIR/test/t1.ibd
+--list_files $MYSQLD_DATADIR/test
diff --git a/mysql-test/suite/innodb/t/temporary_table.test b/mysql-test/suite/innodb/t/temporary_table.test
index f841acff1c0..1148f2ee1a0 100644
--- a/mysql-test/suite/innodb/t/temporary_table.test
+++ b/mysql-test/suite/innodb/t/temporary_table.test
@@ -15,7 +15,6 @@ call mtr.add_suppression("The table 't1' is full");
call mtr.add_suppression("Plugin 'InnoDB' init function returned error");
call mtr.add_suppression("Plugin 'InnoDB' registration as a STORAGE ENGINE failed");
call mtr.add_suppression("InnoDB: Tablespace doesn't support raw devices");
-call mtr.add_suppression("InnoDB: The innodb_temporary data file 'ibtmp1' must be at least");
call mtr.add_suppression("InnoDB: Plugin initialization aborted");
call mtr.add_suppression("innodb_temporary and innodb_system file names seem to be the same");
call mtr.add_suppression("Could not create the shared innodb_temporary");
@@ -143,12 +142,6 @@ eval $check_no_innodb;
--source include/search_pattern_in_file.inc
eval $check_no_innodb;
---let $restart_parameters= --innodb_temp_data_file_path=ibtmp1:2M:autoextend
---source include/restart_mysqld.inc
---let SEARCH_PATTERN = The innodb_temporary data file 'ibtmp1' must be at least
---source include/search_pattern_in_file.inc
-eval $check_no_innodb;
-
--let $restart_parameters= --innodb_temp_data_file_path=
--source include/restart_mysqld.inc
--let SEARCH_PATTERN = InnoDB: syntax error in file path
diff --git a/mysql-test/suite/innodb/t/truncate_purge_debug.test b/mysql-test/suite/innodb/t/truncate_purge_debug.test
index 513c59e12c8..e8f5768f557 100644
--- a/mysql-test/suite/innodb/t/truncate_purge_debug.test
+++ b/mysql-test/suite/innodb/t/truncate_purge_debug.test
@@ -33,25 +33,8 @@ COMMIT;
disconnect con2;
connection default;
+--source include/wait_all_purged.inc
-# Wait for everything to be purged.
-
-let $wait_counter= 300;
-while ($wait_counter)
-{
- --replace_regex /.*History list length ([0-9]+).*/\1/
- let $remaining= `SHOW ENGINE INNODB STATUS`;
- if ($remaining == 'InnoDB 0')
- {
- let $wait_counter= 0;
- }
- if ($wait_counter)
- {
- real_sleep 0.1;
- dec $wait_counter;
- }
-}
-echo $remaining transactions not purged;
SET GLOBAL innodb_purge_rseg_truncate_frequency = @saved_frequency;
SET DEBUG_SYNC = 'now SIGNAL finish_scan';
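
The loop deleted above polled SHOW ENGINE INNODB STATUS until the history list length reached zero; the patch replaces it with the shared include/wait_all_purged.inc. The same figure can also be read directly; a sketch, assuming the trx_rseg_history_len counter (enabled by default) in INNODB_METRICS:

SHOW ENGINE INNODB STATUS;  -- look for 'History list length N' under TRANSACTIONS
SELECT name, count FROM information_schema.innodb_metrics
WHERE name = 'trx_rseg_history_len';
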
diff --git a/mysql-test/suite/innodb_fts/r/innodb_fts_multiple_index.result b/mysql-test/suite/innodb_fts/r/innodb_fts_multiple_index.result
index 0b011639e2e..b0f7d7727d6 100644
--- a/mysql-test/suite/innodb_fts/r/innodb_fts_multiple_index.result
+++ b/mysql-test/suite/innodb_fts/r/innodb_fts_multiple_index.result
@@ -46,9 +46,9 @@ id a b
1 MySQL Tutorial DBMS stands for DataBase ...
select *, MATCH(a) AGAINST("Optimizing MySQL" IN BOOLEAN MODE) as x from t1;
id a b x
-1 MySQL Tutorial DBMS stands for DataBase ... 0.0906190574169159
-2 How To Use MySQL Well After you went through a ... 0.0906190574169159
-3 Optimizing MySQL In this tutorial we will show ... 0.6961383819580078
+1 MySQL Tutorial DBMS stands for DataBase ... 0.000000001885928302414186
+2 How To Use MySQL Well After you went through a ... 0.000000001885928302414186
+3 Optimizing MySQL In this tutorial we will show ... 0.22764469683170319
select *, MATCH(b) AGAINST("collections support" IN BOOLEAN MODE) as x from t1;
id a b x
1 MySQL Tutorial DBMS stands for DataBase ... 0
@@ -90,9 +90,9 @@ id a b
1 MySQL Tutorial DBMS stands for DataBase ...
select *, MATCH(a) AGAINST("Optimizing MySQL" IN BOOLEAN MODE) as x from t1;
id a b x
-1 MySQL Tutorial DBMS stands for DataBase ... 0.0906190574169159
-2 How To Use MySQL Well After you went through a ... 0.0906190574169159
-3 Optimizing MySQL In this tutorial we will show ... 0.6961383819580078
+1 MySQL Tutorial DBMS stands for DataBase ... 0.000000001885928302414186
+2 How To Use MySQL Well After you went through a ... 0.000000001885928302414186
+3 Optimizing MySQL In this tutorial we will show ... 0.22764469683170319
select *, MATCH(b) AGAINST("collections support" IN BOOLEAN MODE) as x from t1;
id a b x
1 MySQL Tutorial DBMS stands for DataBase ... 0
diff --git a/mysql-test/suite/innodb_zip/include/have_innodb_zip.inc b/mysql-test/suite/innodb_zip/include/have_innodb_zip.inc
deleted file mode 100644
index 6af83d51304..00000000000
--- a/mysql-test/suite/innodb_zip/include/have_innodb_zip.inc
+++ /dev/null
@@ -1,4 +0,0 @@
-if (`SELECT COUNT(*) = 0 FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE LOWER(variable_name) = 'innodb_page_size' AND variable_value <= 16384`)
-{
- --skip Test with InnoDB zip requires page size not greater than 16k.
-}
diff --git a/mysql-test/suite/innodb_zip/r/wl6344_compress_level.result b/mysql-test/suite/innodb_zip/r/wl6344_compress_level.result
index 5cdfe162b6a..b33d9c0c8de 100644
--- a/mysql-test/suite/innodb_zip/r/wl6344_compress_level.result
+++ b/mysql-test/suite/innodb_zip/r/wl6344_compress_level.result
@@ -1,28 +1,13 @@
-USE test;
-DROP TABLE IF EXISTS tab5;
-Warnings:
-Note 1051 Unknown table 'test.tab5'
-DROP TABLE IF EXISTS tab6;
-Warnings:
-Note 1051 Unknown table 'test.tab6'
-#set the other madatory flags before test starts
SET GLOBAL Innodb_file_per_table=on;
-#set the compression level=0 (No compress)
+SET @save_innodb_compression_level = @@global.innodb_compression_level;
SET global innodb_compression_level=0;
-#check the compression level and the compressed_pages is default
-SELECT @@innodb_compression_level;
-@@innodb_compression_level
-0
-SELECT @@Innodb_file_per_table;
-@@Innodb_file_per_table
-1
#create table with 1K block size
CREATE TABLE tab5 (col_1 CHAR (255) ,
col_2 VARCHAR (255), col_3 longtext,
col_4 longtext,col_5 longtext,
col_6 longtext , col_7 longtext ,
col_8 longtext ,col_9 longtext ,
-col_10 longtext ,col_11 int auto_increment primary key)
+col_10 longtext ,col_11 int auto_increment primary key)
ENGINE = innodb ROW_FORMAT=compressed key_block_size=1;
#create indexes
CREATE INDEX idx1 ON tab5(col_4(10));
@@ -42,7 +27,8 @@ SET @col_7 = repeat('g', 100);
SET @col_8 = repeat('h', 100);
SET @col_9 = repeat('i', 100);
SET @col_10 = repeat('j', 100);
-#insert 10 records
+#insert 10 records
+BEGIN;
INSERT INTO tab5(col_1,col_2,col_3,col_4,col_5,col_6,col_7,col_8,col_9,col_10)
VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9,@col_10);
INSERT INTO tab5(col_1,col_2,col_3,col_4,col_5,col_6,col_7,col_8,col_9,col_10)
@@ -63,7 +49,8 @@ INSERT INTO tab5(col_1,col_2,col_3,col_4,col_5,col_6,col_7,col_8,col_9,col_10)
VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9,@col_10);
INSERT INTO tab5(col_1,col_2,col_3,col_4,col_5,col_6,col_7,col_8,col_9,col_10)
VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9,@col_10);
-#set the compression level=9 (High compress)
+COMMIT;
+#set the compression level=9 (High compress)
SET global innodb_compression_level=9;
#create table with 1K block size
CREATE TABLE tab6 (col_1 CHAR (255) ,
@@ -80,18 +67,8 @@ CREATE INDEX idx3 ON tab6(col_6(10));
CREATE INDEX idx4 ON tab6(col_7(10));
CREATE INDEX idx5 ON tab6(col_8(10));
CREATE INDEX idx6 ON tab6(col_11);
-#load the with repeat function
-SET @col_1 = repeat('a', 100);
-SET @col_2 = repeat('b', 100);
-SET @col_3 = repeat('c', 100);
-SET @col_4 = repeat('d', 100);
-SET @col_5 = repeat('e', 100);
-SET @col_6 = repeat('f', 100);
-SET @col_7 = repeat('g', 100);
-SET @col_8 = repeat('h', 100);
-SET @col_9 = repeat('i', 100);
-SET @col_10 = repeat('j', 100);
-#insert 10 records
+#insert 10 records
+BEGIN;
INSERT INTO tab6(col_1,col_2,col_3,col_4,col_5,col_6,col_7,col_8,col_9,col_10)
VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9,@col_10);
INSERT INTO tab6(col_1,col_2,col_3,col_4,col_5,col_6,col_7,col_8,col_9,col_10)
@@ -112,24 +89,21 @@ INSERT INTO tab6(col_1,col_2,col_3,col_4,col_5,col_6,col_7,col_8,col_9,col_10)
VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9,@col_10);
INSERT INTO tab6(col_1,col_2,col_3,col_4,col_5,col_6,col_7,col_8,col_9,col_10)
VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9,@col_10);
+COMMIT;
#diff the sizes of the No compressed table and high compressed table
-SET @size=(SELECT
-(SELECT (SUM(DATA_LENGTH+INDEX_LENGTH)/1024/1024)
-FROM INFORMATION_SCHEMA.TABLES
+SET @size=(SELECT
+(SELECT (SUM(DATA_LENGTH+INDEX_LENGTH)/1024/1024)
+FROM INFORMATION_SCHEMA.TABLES
WHERE table_name='tab5' AND ENGINE='InnoDB' AND table_schema='test')
-
-(SELECT SUM(DATA_LENGTH+INDEX_LENGTH)/1024/1024
-FROM INFORMATION_SCHEMA.TABLES
+(SELECT SUM(DATA_LENGTH+INDEX_LENGTH)/1024/1024
+FROM INFORMATION_SCHEMA.TABLES
WHERE table_name='tab6' AND ENGINE='InnoDB' AND table_schema='test')
FROM DUAL);
-#check the size of the table, it should not be Negative value
+#check the size of the table, it should not be Negative value
#The results of this query Test pass = 1 and fail=0
SELECT @size >= 0;
@size >= 0
1
-#
-# Cleanup
-#
-DROP TABLE tab5;
-DROP TABLE tab6;
-#reset back the compression_level to default.
+DROP TABLE tab5, tab6;
+SET GLOBAL innodb_compression_level = @save_innodb_compression_level;
diff --git a/mysql-test/suite/innodb_zip/t/bug36169.test b/mysql-test/suite/innodb_zip/t/bug36169.test
index 5452c929b92..07566b204bd 100644
--- a/mysql-test/suite/innodb_zip/t/bug36169.test
+++ b/mysql-test/suite/innodb_zip/t/bug36169.test
@@ -3,8 +3,7 @@
# http://bugs.mysql.com/36169
#
--- source include/have_innodb.inc
--- source include/have_innodb_zip.inc
+-- source include/innodb_page_size_small.inc
let $file_per_table=`select @@innodb_file_per_table`;
SET GLOBAL innodb_file_per_table=ON;
diff --git a/mysql-test/suite/innodb_zip/t/bug52745.test b/mysql-test/suite/innodb_zip/t/bug52745.test
index a3de7323efe..be4b5c2bfcb 100644
--- a/mysql-test/suite/innodb_zip/t/bug52745.test
+++ b/mysql-test/suite/innodb_zip/t/bug52745.test
@@ -1,5 +1,4 @@
--- source include/have_innodb.inc
--- source include/have_innodb_zip.inc
+-- source include/innodb_page_size_small.inc
let $file_per_table=`select @@innodb_file_per_table`;
SET GLOBAL innodb_file_per_table=on;
diff --git a/mysql-test/suite/innodb_zip/t/bug53591.test b/mysql-test/suite/innodb_zip/t/bug53591.test
index 1943c59fe17..67223027bad 100644
--- a/mysql-test/suite/innodb_zip/t/bug53591.test
+++ b/mysql-test/suite/innodb_zip/t/bug53591.test
@@ -1,5 +1,4 @@
--- source include/have_innodb.inc
--- source include/have_innodb_zip.inc
+-- source include/innodb_page_size_small.inc
let $file_per_table=`select @@innodb_file_per_table`;
diff --git a/mysql-test/suite/innodb_zip/t/bug56680.test b/mysql-test/suite/innodb_zip/t/bug56680.test
index 694c5ffac59..da37f6a28b4 100644
--- a/mysql-test/suite/innodb_zip/t/bug56680.test
+++ b/mysql-test/suite/innodb_zip/t/bug56680.test
@@ -3,8 +3,7 @@
#
# Bug #56680 InnoDB may return wrong results from a case-insensitive index
#
--- source include/have_innodb.inc
--- source include/have_innodb_zip.inc
+-- source include/innodb_page_size_small.inc
-- disable_query_log
SET @tx_isolation_orig = @@tx_isolation;
diff --git a/mysql-test/suite/innodb_zip/t/create_options.test b/mysql-test/suite/innodb_zip/t/create_options.test
index 86ef141cae8..e7303dee8f3 100644
--- a/mysql-test/suite/innodb_zip/t/create_options.test
+++ b/mysql-test/suite/innodb_zip/t/create_options.test
@@ -57,8 +57,7 @@
# since they are rejected for InnoDB page sizes of 8k and 16k.
# See innodb_16k and innodb_8k for those tests.
--- source include/have_innodb.inc
--- source include/have_innodb_zip.inc
+--source include/innodb_page_size_small.inc
SET default_storage_engine=InnoDB;
--disable_query_log
diff --git a/mysql-test/suite/innodb_zip/t/innochecksum.test b/mysql-test/suite/innodb_zip/t/innochecksum.test
index 0403251bf64..63a4b418677 100644
--- a/mysql-test/suite/innodb_zip/t/innochecksum.test
+++ b/mysql-test/suite/innodb_zip/t/innochecksum.test
@@ -1,8 +1,7 @@
#************************************************************
# WL6045:Improve Innochecksum
#************************************************************
---source include/have_innodb.inc
---source include/have_innodb_zip.inc
+--source include/innodb_page_size_small.inc
--source include/no_valgrind_without_big.inc
# Embedded server does not support crashing.
--source include/not_embedded.inc
diff --git a/mysql-test/suite/innodb_zip/t/innochecksum_2.test b/mysql-test/suite/innodb_zip/t/innochecksum_2.test
index 7f4ad336805..330bb81ba75 100644
--- a/mysql-test/suite/innodb_zip/t/innochecksum_2.test
+++ b/mysql-test/suite/innodb_zip/t/innochecksum_2.test
@@ -1,8 +1,7 @@
#************************************************************
# WL6045:Improve Innochecksum
#************************************************************
---source include/have_innodb.inc
---source include/have_innodb_zip.inc
+--source include/innodb_page_size_small.inc
--source include/have_debug.inc
--source include/no_valgrind_without_big.inc
# Avoid CrashReporter popup on Mac.
diff --git a/mysql-test/suite/innodb_zip/t/innochecksum_3.test b/mysql-test/suite/innodb_zip/t/innochecksum_3.test
index 7895510b335..54c67ff1a9b 100644
--- a/mysql-test/suite/innodb_zip/t/innochecksum_3.test
+++ b/mysql-test/suite/innodb_zip/t/innochecksum_3.test
@@ -1,8 +1,7 @@
#************************************************************
# WL6045:Improve Innochecksum
#************************************************************
---source include/have_innodb.inc
---source include/have_innodb_zip.inc
+--source include/innodb_page_size_small.inc
--source include/no_valgrind_without_big.inc
diff --git a/mysql-test/suite/innodb_zip/t/innodb-zip.test b/mysql-test/suite/innodb_zip/t/innodb-zip.test
index 8a201cb37ae..a2b55144c61 100644
--- a/mysql-test/suite/innodb_zip/t/innodb-zip.test
+++ b/mysql-test/suite/innodb_zip/t/innodb-zip.test
@@ -1,4 +1,4 @@
--- source include/have_innodb.inc
+--source include/innodb_page_size_small.inc
CREATE DATABASE mysqltest_innodb_zip;
USE mysqltest_innodb_zip;
@@ -8,7 +8,6 @@ SELECT table_name, row_format, data_length, index_length
let $per_table=`select @@innodb_file_per_table`;
let $format=`select @@innodb_file_format`;
-let $innodb_strict_mode_orig=`select @@session.innodb_strict_mode`;
let $innodb_file_format_orig=`select @@innodb_file_format`;
let $innodb_file_format_max_orig=`select @@innodb_file_format_max`;
SET @save_innodb_stats_on_metadata=@@global.innodb_stats_on_metadata;
@@ -354,7 +353,6 @@ drop table normal_table, zip_table;
-- disable_query_log
eval set global innodb_file_format=$format;
eval set global innodb_file_per_table=$per_table;
-eval set session innodb_strict_mode=$innodb_strict_mode_orig;
eval SET GLOBAL innodb_file_format=$innodb_file_format_orig;
eval SET GLOBAL innodb_file_format_max=$innodb_file_format_max_orig;
SET @@global.innodb_stats_on_metadata=@save_innodb_stats_on_metadata;
diff --git a/mysql-test/suite/innodb_zip/t/innodb_bug36169.test b/mysql-test/suite/innodb_zip/t/innodb_bug36169.test
index 93051d56639..6a9b2099fa8 100644
--- a/mysql-test/suite/innodb_zip/t/innodb_bug36169.test
+++ b/mysql-test/suite/innodb_zip/t/innodb_bug36169.test
@@ -1,4 +1,4 @@
---source include/have_innodb.inc
+--source include/innodb_page_size_small.inc
#
# Bug#36169 create innodb compressed table with too large row size crashed
# http://bugs.mysql.com/36169
diff --git a/mysql-test/suite/innodb_zip/t/innodb_bug36172.test b/mysql-test/suite/innodb_zip/t/innodb_bug36172.test
index 622e0c56b15..6d21d656fd8 100644
--- a/mysql-test/suite/innodb_zip/t/innodb_bug36172.test
+++ b/mysql-test/suite/innodb_zip/t/innodb_bug36172.test
@@ -1,4 +1,4 @@
---source include/have_innodb.inc
+--source include/innodb_page_size_small.inc
#
# Test case for bug 36172
#
diff --git a/mysql-test/suite/innodb_zip/t/innodb_bug52745.test b/mysql-test/suite/innodb_zip/t/innodb_bug52745.test
index 5882278ab9b..b7efd0692ca 100644
--- a/mysql-test/suite/innodb_zip/t/innodb_bug52745.test
+++ b/mysql-test/suite/innodb_zip/t/innodb_bug52745.test
@@ -1,4 +1,4 @@
--- source include/have_innodb.inc
+--source include/innodb_page_size_small.inc
SET STATEMENT sql_mode = 'NO_ENGINE_SUBSTITUTION' FOR
CREATE TABLE bug52745 (
diff --git a/mysql-test/suite/innodb_zip/t/innodb_bug53591.test b/mysql-test/suite/innodb_zip/t/innodb_bug53591.test
index 1366e0e43ac..0bce587f106 100644
--- a/mysql-test/suite/innodb_zip/t/innodb_bug53591.test
+++ b/mysql-test/suite/innodb_zip/t/innodb_bug53591.test
@@ -1,4 +1,4 @@
--- source include/have_innodb.inc
+--source include/innodb_page_size_small.inc
SET GLOBAL innodb_strict_mode=on;
set old_alter_table=0;
diff --git a/mysql-test/suite/innodb_zip/t/innodb_bug56680.test b/mysql-test/suite/innodb_zip/t/innodb_bug56680.test
index 4e73b1fce0f..0cb1c897f2e 100644
--- a/mysql-test/suite/innodb_zip/t/innodb_bug56680.test
+++ b/mysql-test/suite/innodb_zip/t/innodb_bug56680.test
@@ -1,4 +1,4 @@
---source include/have_innodb.inc
+--source include/innodb_page_size_small.inc
#
# Bug #56680 InnoDB may return wrong results from a case-insensitive index
#
diff --git a/mysql-test/suite/innodb_zip/t/recover.test b/mysql-test/suite/innodb_zip/t/recover.test
index 3969cd2f44e..0cfc2b672fb 100644
--- a/mysql-test/suite/innodb_zip/t/recover.test
+++ b/mysql-test/suite/innodb_zip/t/recover.test
@@ -2,6 +2,13 @@
--source include/have_innodb_max_16k.inc
--source include/not_embedded.inc
+--disable_query_log
+# This test kills the server, which could corrupt some mysql.* tables
+# that are not created with ENGINE=InnoDB.
+# Flush any non-InnoDB tables to prevent that from happening.
+FLUSH TABLES;
+--enable_query_log
+
--echo #
--echo # MDEV-12720 recovery fails with "Generic error"
--echo # for ROW_FORMAT=compressed
diff --git a/mysql-test/suite/innodb_zip/t/restart.test b/mysql-test/suite/innodb_zip/t/restart.test
index a2dacdb4a05..05ac8274278 100644
--- a/mysql-test/suite/innodb_zip/t/restart.test
+++ b/mysql-test/suite/innodb_zip/t/restart.test
@@ -1,9 +1,7 @@
#
# These test make sure that tables are visible after rebooting
#
-
---source include/have_innodb.inc
---source include/have_innodb_zip.inc
+--source include/innodb_page_size_small.inc
--source include/have_partition.inc
--source include/not_embedded.inc
SET default_storage_engine=InnoDB;
diff --git a/mysql-test/suite/innodb_zip/t/wl5522_debug_zip.test b/mysql-test/suite/innodb_zip/t/wl5522_debug_zip.test
index 392a3ed2879..0898a32fbf5 100644
--- a/mysql-test/suite/innodb_zip/t/wl5522_debug_zip.test
+++ b/mysql-test/suite/innodb_zip/t/wl5522_debug_zip.test
@@ -10,8 +10,7 @@
# Avoid CrashReporter popup on Mac
--source include/not_crashrep.inc
--- source include/have_innodb.inc
--- source include/have_innodb_zip.inc
+-- source include/innodb_page_size_small.inc
call mtr.add_suppression("InnoDB: Tablespace for table .* is set as discarded.");
call mtr.add_suppression("InnoDB: Cannot calculate statistics for table .* because the .ibd file is missing. Please refer to .* for how to resolve the issue.");
diff --git a/mysql-test/suite/innodb_zip/t/wl5522_zip.test b/mysql-test/suite/innodb_zip/t/wl5522_zip.test
index 7863c2b3ffb..d92093262ac 100644
--- a/mysql-test/suite/innodb_zip/t/wl5522_zip.test
+++ b/mysql-test/suite/innodb_zip/t/wl5522_zip.test
@@ -1,8 +1,7 @@
# Not supported in embedded
--source include/not_embedded.inc
--- source include/have_innodb.inc
--- source include/have_innodb_zip.inc
+-- source include/innodb_page_size_small.inc
call mtr.add_suppression("InnoDB: Unable to import tablespace .* because it already exists. Please DISCARD the tablespace before IMPORT.");
diff --git a/mysql-test/suite/innodb_zip/t/wl6344_compress_level.test b/mysql-test/suite/innodb_zip/t/wl6344_compress_level.test
index df4e66967f7..690efffed56 100644
--- a/mysql-test/suite/innodb_zip/t/wl6344_compress_level.test
+++ b/mysql-test/suite/innodb_zip/t/wl6344_compress_level.test
@@ -9,31 +9,20 @@
# greater than the
# the size of the table when compression level=9
#*******************************************************************
---source include/have_innodb.inc
---source include/have_innodb_zip.inc
+--source include/innodb_page_size_small.inc
-USE test;
-DROP TABLE IF EXISTS tab5;
-DROP TABLE IF EXISTS tab6;
-
---echo #set the other madatory flags before test starts
SET GLOBAL Innodb_file_per_table=on;
-let $innodb_compression_level = `SELECT @@global.innodb_compression_level`;
+SET @save_innodb_compression_level = @@global.innodb_compression_level;
---echo #set the compression level=0 (No compress)
SET global innodb_compression_level=0;
--- echo #check the compression level and the compressed_pages is default
-SELECT @@innodb_compression_level;
-SELECT @@Innodb_file_per_table;
-
-- echo #create table with 1K block size
CREATE TABLE tab5 (col_1 CHAR (255) ,
col_2 VARCHAR (255), col_3 longtext,
col_4 longtext,col_5 longtext,
col_6 longtext , col_7 longtext ,
col_8 longtext ,col_9 longtext ,
-col_10 longtext ,col_11 int auto_increment primary key)
+col_10 longtext ,col_11 int auto_increment primary key)
ENGINE = innodb ROW_FORMAT=compressed key_block_size=1;
-- echo #create indexes
@@ -56,7 +45,8 @@ SET @col_8 = repeat('h', 100);
SET @col_9 = repeat('i', 100);
SET @col_10 = repeat('j', 100);
---echo #insert 10 records
+--echo #insert 10 records
+BEGIN;
let $i = 10;
while ($i) {
@@ -65,8 +55,9 @@ VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9,@col_10);
dec $i;
}
+COMMIT;
---echo #set the compression level=9 (High compress)
+--echo #set the compression level=9 (High compress)
SET global innodb_compression_level=9;
-- echo #create table with 1K block size
@@ -86,19 +77,8 @@ CREATE INDEX idx4 ON tab6(col_7(10));
CREATE INDEX idx5 ON tab6(col_8(10));
CREATE INDEX idx6 ON tab6(col_11);
---echo #load the with repeat function
-SET @col_1 = repeat('a', 100);
-SET @col_2 = repeat('b', 100);
-SET @col_3 = repeat('c', 100);
-SET @col_4 = repeat('d', 100);
-SET @col_5 = repeat('e', 100);
-SET @col_6 = repeat('f', 100);
-SET @col_7 = repeat('g', 100);
-SET @col_8 = repeat('h', 100);
-SET @col_9 = repeat('i', 100);
-SET @col_10 = repeat('j', 100);
-
---echo #insert 10 records
+--echo #insert 10 records
+BEGIN;
let $i = 10;
while ($i) {
@@ -106,30 +86,23 @@ eval INSERT INTO tab6(col_1,col_2,col_3,col_4,col_5,col_6,col_7,col_8,col_9,col_
VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9,@col_10);
dec $i;
}
+COMMIT;
-- echo #diff the sizes of the No compressed table and high compressed table
-SET @size=(SELECT
-(SELECT (SUM(DATA_LENGTH+INDEX_LENGTH)/1024/1024)
-FROM INFORMATION_SCHEMA.TABLES
+SET @size=(SELECT
+(SELECT (SUM(DATA_LENGTH+INDEX_LENGTH)/1024/1024)
+FROM INFORMATION_SCHEMA.TABLES
WHERE table_name='tab5' AND ENGINE='InnoDB' AND table_schema='test')
-
-(SELECT SUM(DATA_LENGTH+INDEX_LENGTH)/1024/1024
-FROM INFORMATION_SCHEMA.TABLES
+(SELECT SUM(DATA_LENGTH+INDEX_LENGTH)/1024/1024
+FROM INFORMATION_SCHEMA.TABLES
WHERE table_name='tab6' AND ENGINE='InnoDB' AND table_schema='test')
FROM DUAL);
---echo #check the size of the table, it should not be Negative value
+--echo #check the size of the table, it should not be Negative value
--echo #The results of this query Test pass = 1 and fail=0
SELECT @size >= 0;
+DROP TABLE tab5, tab6;
---echo #
---echo # Cleanup
---echo #
-DROP TABLE tab5;
-DROP TABLE tab6;
-
---echo #reset back the compression_level to default.
---disable_query_log
-eval SET GLOBAL innodb_compression_level=$innodb_compression_level;
---enable_query_log
+SET GLOBAL innodb_compression_level = @save_innodb_compression_level;
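The hunks above convert wl6344_compress_level to the plain SQL save-and-restore idiom for the global it tweaks, and batch the ten inserts per table into a single transaction. As a standalone sketch of that idiom (table contents elided), it amounts to:

  -- save, change, use, then restore a global server variable
  SET @save_innodb_compression_level = @@GLOBAL.innodb_compression_level;
  SET GLOBAL innodb_compression_level = 0;   -- no compression while tab5 is loaded
  BEGIN;
  -- ... ten INSERTs into tab5 ...
  COMMIT;
  SET GLOBAL innodb_compression_level = 9;   -- maximum compression while tab6 is loaded
  BEGIN;
  -- ... ten INSERTs into tab6 ...
  COMMIT;
  SET GLOBAL innodb_compression_level = @save_innodb_compression_level;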
diff --git a/mysql-test/suite/maria/maria.result b/mysql-test/suite/maria/maria.result
index 04b042059fb..88f39827f27 100644
--- a/mysql-test/suite/maria/maria.result
+++ b/mysql-test/suite/maria/maria.result
@@ -1145,6 +1145,9 @@ id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 ref v v 13 const # Using where; Using index
alter table t1 add unique(v);
ERROR 23000: Duplicate entry '{ ' for key 'v_2'
+show warnings;
+Level Code Message
+Error 1062 Duplicate entry 'a' for key 'v_2'
alter table t1 add key(v);
Warnings:
Note 1831 Duplicate index `v_2`. This is deprecated and will be disallowed in a future release
diff --git a/mysql-test/suite/mariabackup/include/restart_and_restore.inc b/mysql-test/suite/mariabackup/include/restart_and_restore.inc
index 39616cc6f15..7ee4a660b78 100644
--- a/mysql-test/suite/mariabackup/include/restart_and_restore.inc
+++ b/mysql-test/suite/mariabackup/include/restart_and_restore.inc
@@ -7,7 +7,7 @@ shutdown_server;
echo # remove datadir;
rmdir $_datadir;
echo # xtrabackup move back;
-exec $XTRABACKUP --defaults-file=$MYSQLTEST_VARDIR/my.cnf --copy-back --datadir=$_datadir --target-dir=$targetdir --parallel=2;
+exec $XTRABACKUP --defaults-file=$MYSQLTEST_VARDIR/my.cnf --copy-back --datadir=$_datadir --target-dir=$targetdir --parallel=2 --throttle=1;
echo # restart server;
exec echo "restart" > $_expect_file_name;
enable_reconnect;
diff --git a/mysql-test/suite/mariabackup/suite.pm b/mysql-test/suite/mariabackup/suite.pm
index 8eecd4e8018..26d5c06cdad 100644
--- a/mysql-test/suite/mariabackup/suite.pm
+++ b/mysql-test/suite/mariabackup/suite.pm
@@ -21,11 +21,6 @@ $ENV{XBSTREAM}= ::mtr_exe_maybe_exists(
"$::bindir/extra/mariabackup/$::opt_vs_config/mbstream",
"$::path_client_bindir/mbstream");
-my $tar_version = `tar --version 2>&1`;
-$ENV{HAVE_TAR} = $! ? 0: 1;
-my $mariabackup_help=`$mariabackup_exe --help 2>&1`;
-$ENV{HAVE_XTRABACKUP_TAR_SUPPORT} = (index($mariabackup_help,"'tar'") == -1) ? 0 : 1;
-
$ENV{INNOBACKUPEX}= "$mariabackup_exe --innobackupex";
sub skip_combinations {
diff --git a/mysql-test/suite/mariabackup/tar.result b/mysql-test/suite/mariabackup/tar.result
deleted file mode 100644
index bbb546d7add..00000000000
--- a/mysql-test/suite/mariabackup/tar.result
+++ /dev/null
@@ -1,12 +0,0 @@
-CREATE TABLE t(i INT) ENGINE INNODB;
-INSERT INTO t VALUES(1);
-# xtrabackup backup
-# xtrabackup prepare
-# shutdown server
-# remove datadir
-# xtrabackup move back
-# restart server
-SELECT * FROM t;
-i
-1
-DROP TABLE t;
diff --git a/mysql-test/suite/mariabackup/tar.test b/mysql-test/suite/mariabackup/tar.test
deleted file mode 100644
index 3938d597e05..00000000000
--- a/mysql-test/suite/mariabackup/tar.test
+++ /dev/null
@@ -1,30 +0,0 @@
-if (`select $HAVE_TAR = 0`)
-{
- --skip No tar
-}
-if (`select $HAVE_XTRABACKUP_TAR_SUPPORT = 0`)
-{
- --skip Compiled without libarchive
-}
-
-
-CREATE TABLE t(i INT) ENGINE INNODB;
-INSERT INTO t VALUES(1);
-
-echo # xtrabackup backup;
-let $targetdir=$MYSQLTEST_VARDIR/tmp/backup;
-let $streamfile=$MYSQLTEST_VARDIR/tmp/backup.tar;
-mkdir $targetdir;
-
-
-exec $XTRABACKUP "--defaults-file=$MYSQLTEST_VARDIR/my.cnf" --backup --stream=tar > $streamfile 2>$targetdir/backup_stream.log;
---disable_result_log
-exec tar -C $targetdir -x < $streamfile;
-echo # xtrabackup prepare;
-exec $XTRABACKUP --prepare --target-dir=$targetdir;
-
--- source include/restart_and_restore.inc
---enable_result_log
-SELECT * FROM t;
-DROP TABLE t;
-rmdir $targetdir;
diff --git a/mysql-test/suite/mariabackup/xbstream.test b/mysql-test/suite/mariabackup/xbstream.test
index 06e5685276c..f2b4704a87e 100644
--- a/mysql-test/suite/mariabackup/xbstream.test
+++ b/mysql-test/suite/mariabackup/xbstream.test
@@ -9,7 +9,7 @@ echo # xtrabackup backup to stream;
exec $XTRABACKUP --defaults-file=$MYSQLTEST_VARDIR/my.cnf --backup --stream=xbstream > $streamfile 2>$targetdir/backup_stream.log;
echo # xbstream extract;
--disable_result_log
-exec $XBSTREAM -x -C $targetdir --parallel=16 < $streamfile;
+exec $XBSTREAM -x -C $targetdir < $streamfile;
echo # xtrabackup prepare;
exec $XTRABACKUP --prepare --target-dir=$targetdir;
diff --git a/mysql-test/suite/multi_source/mdev-9544.cnf b/mysql-test/suite/multi_source/mdev-9544.cnf
new file mode 100644
index 00000000000..b43aca5370d
--- /dev/null
+++ b/mysql-test/suite/multi_source/mdev-9544.cnf
@@ -0,0 +1,22 @@
+!include my.cnf
+
+[mysqld.1]
+log-bin
+log-slave-updates
+
+[mysqld.2]
+log-bin
+log-slave-updates
+
+[mysqld.3]
+log-bin
+log-slave-updates
+
+[mysqld.4]
+server-id=4
+log-bin=server4-bin
+log-slave-updates
+
+[ENV]
+SERVER_MYPORT_4= @mysqld.4.port
+SERVER_MYSOCK_4= @mysqld.4.socket
diff --git a/mysql-test/suite/multi_source/mdev-9544.result b/mysql-test/suite/multi_source/mdev-9544.result
new file mode 100644
index 00000000000..42e6b6a9f56
--- /dev/null
+++ b/mysql-test/suite/multi_source/mdev-9544.result
@@ -0,0 +1,90 @@
+connect server_1,127.0.0.1,root,,,$SERVER_MYPORT_1;
+connect server_2,127.0.0.1,root,,,$SERVER_MYPORT_2;
+connect server_3,127.0.0.1,root,,,$SERVER_MYPORT_3;
+connect server_4,127.0.0.1,root,,,$SERVER_MYPORT_4;
+connection server_1;
+create database a;
+use a;
+create table t1(a int);
+insert into t1 values(1);
+create table t2(a int);
+insert into t2 values(1);
+connection server_2;
+create database b;
+use b;
+create table t1(a int);
+insert into t1 values(1);
+create table t2(a int);
+insert into t2 values(1);
+connection server_3;
+create database c;
+use c;
+create table t1(a int);
+insert into t1 values(1);
+create table t2(a int);
+insert into t2 values(1);
+connection server_4;
+change master 'm1' to master_port=MYPORT_1 , master_host='127.0.0.1', master_user='root';
+change master 'm2' to master_port=MYPORT_2 , master_host='127.0.0.1', master_user='root';
+change master to master_port=MYPORT_3 , master_host='127.0.0.1', master_user='root';
+start all slaves;
+set default_master_connection = 'm1';
+include/wait_for_slave_to_start.inc
+set default_master_connection = 'm2';
+include/wait_for_slave_to_start.inc
+set default_master_connection = '';
+include/wait_for_slave_to_start.inc
+use a;
+show tables;
+Tables_in_a
+t1
+t2
+use b;
+show tables;
+Tables_in_b
+t1
+t2
+use c;
+show tables;
+Tables_in_c
+t1
+t2
+#TEST
+SET default_master_connection = "m1";
+SET default_master_connection = "m2";
+SET default_master_connection = "";
+flush logs;
+SET default_master_connection = "m1";
+#log Rotated
+mysqld-relay-bin.000002
+mysqld-relay-bin.000003
+mysqld-relay-bin.index
+SET default_master_connection = "m2";
+#log Rotated
+mysqld-relay-bin-m1.000002
+mysqld-relay-bin-m1.000003
+mysqld-relay-bin-m1.index
+SET default_master_connection = "";
+#log Rotated
+mysqld-relay-bin-m2.000002
+mysqld-relay-bin-m2.000003
+mysqld-relay-bin-m2.index
+#CleanUp
+connection server_1;
+drop database a;
+connection server_2;
+drop database b;
+connection server_3;
+drop database c;
+connection server_4;
+stop all slaves;
+Warnings:
+Note 1938 SLAVE 'm2' stopped
+Note 1938 SLAVE '' stopped
+Note 1938 SLAVE 'm1' stopped
+SET default_master_connection = "m1";
+include/wait_for_slave_to_stop.inc
+SET default_master_connection = "m2";
+include/wait_for_slave_to_stop.inc
+SET default_master_connection = "";
+include/wait_for_slave_to_stop.inc
diff --git a/mysql-test/suite/multi_source/mdev-9544.test b/mysql-test/suite/multi_source/mdev-9544.test
new file mode 100644
index 00000000000..f532a63a585
--- /dev/null
+++ b/mysql-test/suite/multi_source/mdev-9544.test
@@ -0,0 +1,116 @@
+--source include/not_embedded.inc
+--source include/have_innodb.inc
+--source include/have_debug.inc
+
+--connect (server_1,127.0.0.1,root,,,$SERVER_MYPORT_1)
+--connect (server_2,127.0.0.1,root,,,$SERVER_MYPORT_2)
+--connect (server_3,127.0.0.1,root,,,$SERVER_MYPORT_3)
+--connect (server_4,127.0.0.1,root,,,$SERVER_MYPORT_4)
+
+--connection server_1
+create database a;
+use a;
+create table t1(a int);
+insert into t1 values(1);
+create table t2(a int);
+insert into t2 values(1);
+--save_master_pos
+
+--connection server_2
+create database b;
+use b;
+create table t1(a int);
+insert into t1 values(1);
+create table t2(a int);
+insert into t2 values(1);
+--save_master_pos
+
+--connection server_3
+create database c;
+use c;
+create table t1(a int);
+insert into t1 values(1);
+create table t2(a int);
+insert into t2 values(1);
+--save_master_pos
+
+--connection server_4
+--disable_warnings
+--replace_result $SERVER_MYPORT_1 MYPORT_1
+eval change master 'm1' to master_port=$SERVER_MYPORT_1 , master_host='127.0.0.1', master_user='root';
+--replace_result $SERVER_MYPORT_2 MYPORT_2
+eval change master 'm2' to master_port=$SERVER_MYPORT_2 , master_host='127.0.0.1', master_user='root';
+--replace_result $SERVER_MYPORT_3 MYPORT_3
+eval change master to master_port=$SERVER_MYPORT_3 , master_host='127.0.0.1', master_user='root';
+start all slaves;
+set default_master_connection = 'm1';
+--source include/wait_for_slave_to_start.inc
+set default_master_connection = 'm2';
+--source include/wait_for_slave_to_start.inc
+set default_master_connection = '';
+--source include/wait_for_slave_to_start.inc
+
+--enable_warnings
+--sync_with_master 0,'m1'
+--sync_with_master 0,'m2'
+--sync_with_master 0,''
+use a;
+show tables;
+use b;
+show tables;
+use c;
+show tables;
+--echo #TEST
+SET default_master_connection = "m1";
+--let $old_m1 = query_get_value("show relaylog events;", "Log_name",1)
+SET default_master_connection = "m2";
+--let $old_m2 = query_get_value("show relaylog events;", "Log_name",1)
+SET default_master_connection = "";
+--let $old__ = query_get_value("show relaylog events;", "Log_name",1)
+flush logs;
+--sleep 2
+--let $MYSQLD_DATADIR = `select @@datadir`
+SET default_master_connection = "m1";
+--let $new_m1 = query_get_value("show relaylog events;", "Log_name",1)
+--if ($new_m1 != $old_m1) {
+ --echo #log Rotated
+ --exec ls $MYSQLD_DATADIR | grep "mysqld-relay-bin\."
+}
+SET default_master_connection = "m2";
+--let $new_m2 = query_get_value("show relaylog events;", "Log_name",1)
+--if ($new_m2 != $old_m2) {
+ --echo #log Rotated
+ --exec ls $MYSQLD_DATADIR | grep "mysqld-relay-bin-m1"
+}
+SET default_master_connection = "";
+--let $new__ = query_get_value("show relaylog events;", "Log_name",1)
+--if ($new__ != $old__) {
+ --echo #log Rotated
+ --exec ls $MYSQLD_DATADIR | grep "mysqld-relay-bin-m2"
+}
+
+--echo #CleanUp
+--connection server_1
+drop database a;
+--save_master_pos
+
+--connection server_2
+drop database b;
+--save_master_pos
+
+--connection server_3
+drop database c;
+--save_master_pos
+
+--connection server_4
+--sync_with_master 0,'m1'
+--sync_with_master 0,'m2'
+--sync_with_master 0,''
+stop all slaves;
+SET default_master_connection = "m1";
+--source include/wait_for_slave_to_stop.inc
+SET default_master_connection = "m2";
+--source include/wait_for_slave_to_stop.inc
+SET default_master_connection = "";
+--source include/wait_for_slave_to_stop.inc
+
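The new mdev-9544 test drives one slave replicating from three masters, two over named connections and one over the default connection, and checks that FLUSH LOGS rotates each connection's relay log. A rough standalone sketch of that setup (host and ports are placeholders) is:

  CHANGE MASTER 'm1' TO master_host='127.0.0.1', master_port=3306, master_user='root';
  CHANGE MASTER 'm2' TO master_host='127.0.0.1', master_port=3307, master_user='root';
  CHANGE MASTER      TO master_host='127.0.0.1', master_port=3308, master_user='root';
  START ALL SLAVES;
  SET default_master_connection = 'm1';  -- later SHOW/STOP statements act on connection 'm1'
  SHOW RELAYLOG EVENTS;
  STOP ALL SLAVES;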
diff --git a/mysql-test/suite/perfschema/r/start_server_1_digest.result b/mysql-test/suite/perfschema/r/start_server_1_digest.result
new file mode 100644
index 00000000000..cf07022d344
--- /dev/null
+++ b/mysql-test/suite/perfschema/r/start_server_1_digest.result
@@ -0,0 +1,7 @@
+SELECT "Digest table has a size 1 and is full already." as use_case;
+use_case
+Digest table has a size 1 and is full already.
+select SCHEMA_NAME, DIGEST, DIGEST_TEXT
+from performance_schema.events_statements_summary_by_digest;
+SCHEMA_NAME DIGEST DIGEST_TEXT
+NULL NULL NULL
diff --git a/mysql-test/suite/perfschema/t/start_server_1_digest-master.opt b/mysql-test/suite/perfschema/t/start_server_1_digest-master.opt
new file mode 100644
index 00000000000..c3a6012fbac
--- /dev/null
+++ b/mysql-test/suite/perfschema/t/start_server_1_digest-master.opt
@@ -0,0 +1 @@
+--loose-performance-schema-digests-size=1
diff --git a/mysql-test/suite/perfschema/t/start_server_1_digest.test b/mysql-test/suite/perfschema/t/start_server_1_digest.test
new file mode 100644
index 00000000000..998d9a5eebe
--- /dev/null
+++ b/mysql-test/suite/perfschema/t/start_server_1_digest.test
@@ -0,0 +1,15 @@
+# -----------------------------------------------------------------------
+# Tests for the performance schema statement Digests.
+# -----------------------------------------------------------------------
+
+--source include/not_embedded.inc
+--source include/have_perfschema.inc
+--source include/no_protocol.inc
+
+SELECT "Digest table has a size 1 and is full already." as use_case;
+
+select SCHEMA_NAME, DIGEST, DIGEST_TEXT
+ from performance_schema.events_statements_summary_by_digest;
+
+
+
diff --git a/mysql-test/suite/rpl/r/rpl_mdev-11092.result b/mysql-test/suite/rpl/r/rpl_mdev-11092.result
new file mode 100644
index 00000000000..90b809477b2
--- /dev/null
+++ b/mysql-test/suite/rpl/r/rpl_mdev-11092.result
@@ -0,0 +1,21 @@
+include/master-slave.inc
+[connection master]
+call mtr.add_suppression("Unsafe statement written to the binary log using statement format since BINLOG_FORMAT = STATEMENT");
+call mtr.add_suppression("Slave SQL: The incident LOST_EVENTS occured on the master. .*");
+SET GLOBAL max_binlog_cache_size = 4096;
+SET GLOBAL binlog_cache_size = 4096;
+SET GLOBAL max_binlog_stmt_cache_size = 4096;
+SET GLOBAL binlog_stmt_cache_size = 4096;
+disconnect master;
+connect master,127.0.0.1,root,,test,$MASTER_MYPORT,;
+CREATE TABLE t1(a INT PRIMARY KEY, data VARCHAR(30000)) ENGINE=MYISAM;
+connection master;
+ERROR HY000: Writing one row to the row-based binary log failed
+include/wait_for_slave_sql_error_and_skip.inc [errno=1590]
+connection master;
+SET GLOBAL max_binlog_cache_size= ORIGINAL_VALUE;
+SET GLOBAL binlog_cache_size= ORIGINAL_VALUE;
+SET GLOBAL max_binlog_stmt_cache_size= ORIGINAL_VALUE;
+SET GLOBAL binlog_stmt_cache_size= ORIGINAL_VALUE;
+DROP TABLE t1;
+include/rpl_end.inc
diff --git a/mysql-test/suite/rpl/t/rpl_mdev-11092.opt b/mysql-test/suite/rpl/t/rpl_mdev-11092.opt
new file mode 100644
index 00000000000..7f1d270d29f
--- /dev/null
+++ b/mysql-test/suite/rpl/t/rpl_mdev-11092.opt
@@ -0,0 +1 @@
+--binlog_checksum=1 --binlog-annotate-row-events=1
diff --git a/mysql-test/suite/rpl/t/rpl_mdev-11092.test b/mysql-test/suite/rpl/t/rpl_mdev-11092.test
new file mode 100644
index 00000000000..c8b2b7f2ad1
--- /dev/null
+++ b/mysql-test/suite/rpl/t/rpl_mdev-11092.test
@@ -0,0 +1,53 @@
+--source include/have_innodb.inc
+--source include/master-slave.inc
+--source include/not_embedded.inc
+--source include/not_windows.inc
+--source include/have_binlog_format_row.inc
+
+########################################################################################
+call mtr.add_suppression("Unsafe statement written to the binary log using statement format since BINLOG_FORMAT = STATEMENT");
+call mtr.add_suppression("Slave SQL: The incident LOST_EVENTS occured on the master. .*");
+
+let $old_max_binlog_cache_size= query_get_value(SHOW VARIABLES LIKE "max_binlog_cache_size", Value, 1);
+let $old_binlog_cache_size= query_get_value(SHOW VARIABLES LIKE "binlog_cache_size", Value, 1);
+let $old_max_binlog_stmt_cache_size= query_get_value(SHOW VARIABLES LIKE "max_binlog_stmt_cache_size", Value, 1);
+let $old_binlog_stmt_cache_size= query_get_value(SHOW VARIABLES LIKE "binlog_stmt_cache_size", Value, 1);
+
+SET GLOBAL max_binlog_cache_size = 4096;
+SET GLOBAL binlog_cache_size = 4096;
+SET GLOBAL max_binlog_stmt_cache_size = 4096;
+SET GLOBAL binlog_stmt_cache_size = 4096;
+disconnect master;
+connect (master,127.0.0.1,root,,test,$MASTER_MYPORT,);
+
+CREATE TABLE t1(a INT PRIMARY KEY, data VARCHAR(30000)) ENGINE=MYISAM;
+
+let $data = `select concat('"', repeat('a',2000), '"')`;
+
+connection master;
+
+--disable_query_log
+--error ER_BINLOG_ROW_LOGGING_FAILED
+eval INSERT INTO t1 (a, data) VALUES (2,
+ CONCAT($data, $data, $data, $data, $data, $data));
+--enable_query_log
+
+# Incident event
+# 1590=ER_SLAVE_INCIDENT
+--let $slave_sql_errno= 1590
+--source include/wait_for_slave_sql_error_and_skip.inc
+
+connection master;
+
+--replace_result $old_max_binlog_cache_size ORIGINAL_VALUE
+--eval SET GLOBAL max_binlog_cache_size= $old_max_binlog_cache_size
+--replace_result $old_binlog_cache_size ORIGINAL_VALUE
+--eval SET GLOBAL binlog_cache_size= $old_binlog_cache_size
+--replace_result $old_max_binlog_stmt_cache_size ORIGINAL_VALUE
+--eval SET GLOBAL max_binlog_stmt_cache_size= $old_max_binlog_stmt_cache_size
+--replace_result $old_binlog_stmt_cache_size ORIGINAL_VALUE
+--eval SET GLOBAL binlog_stmt_cache_size= $old_binlog_stmt_cache_size
+
+DROP TABLE t1;
+
+--source include/rpl_end.inc
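rpl_mdev-11092 caps the binlog cache size variables at 4 KB on the master and then writes a row several kilobytes long, so row-based logging of the statement fails with ER_BINLOG_ROW_LOGGING_FAILED and the slave has to skip the resulting LOST_EVENTS incident. Condensed into plain SQL (table name is hypothetical; the real test reconnects so the new limits apply to a fresh session):

  SET GLOBAL max_binlog_cache_size = 4096;
  SET GLOBAL max_binlog_stmt_cache_size = 4096;
  CREATE TABLE t_big (a INT PRIMARY KEY, data VARCHAR(30000)) ENGINE=MyISAM;
  -- a ~12 KB row event cannot fit into a 4 KB cache, so the insert is rejected
  INSERT INTO t_big VALUES (1, REPEAT('a', 12000));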
diff --git a/mysql-test/suite/storage_engine/alter_table.result b/mysql-test/suite/storage_engine/alter_table.result
index 09696e0e6b6..85db1514421 100644
--- a/mysql-test/suite/storage_engine/alter_table.result
+++ b/mysql-test/suite/storage_engine/alter_table.result
@@ -13,7 +13,7 @@ ALTER TABLE t1 ALTER COLUMN a SET DEFAULT '0';
SHOW CREATE TABLE t1;
Table Create Table
t1 CREATE TABLE `t1` (
- `a` int(11) DEFAULT '0',
+ `a` int(11) DEFAULT 0,
`c` char(8) DEFAULT NULL,
`b` int(11) DEFAULT NULL
) ENGINE=<STORAGE_ENGINE> DEFAULT CHARSET=latin1
diff --git a/mysql-test/suite/storage_engine/alter_tablespace.result b/mysql-test/suite/storage_engine/alter_tablespace.result
index b54d7c99e9e..ff3da9f68bb 100644
--- a/mysql-test/suite/storage_engine/alter_tablespace.result
+++ b/mysql-test/suite/storage_engine/alter_tablespace.result
@@ -10,7 +10,7 @@ a
2
ALTER TABLE t1 DISCARD TABLESPACE;
SELECT a FROM t1;
-ERROR HY000: Tablespace has been discarded for table 't1'
+ERROR HY000: Tablespace has been discarded for table `t1`
ALTER TABLE t1 IMPORT TABLESPACE;
Warnings:
Warning 1810 IO Read error: (2, No such file or directory) Error opening './test/t1.cfg', will attempt to import without schema verification
diff --git a/mysql-test/suite/storage_engine/create_table.result b/mysql-test/suite/storage_engine/create_table.result
index 87bf37046ba..6936854eb65 100644
--- a/mysql-test/suite/storage_engine/create_table.result
+++ b/mysql-test/suite/storage_engine/create_table.result
@@ -27,7 +27,7 @@ CREATE TABLE t1 ENGINE=<STORAGE_ENGINE> <CUSTOM_TABLE_OPTIONS> AS SELECT 1 UNION
SHOW CREATE TABLE t1;
Table Create Table
t1 CREATE TABLE `t1` (
- `1` bigint(20) NOT NULL DEFAULT '0'
+ `1` bigint(20) NOT NULL DEFAULT 0
) ENGINE=<STORAGE_ENGINE> DEFAULT CHARSET=latin1
SELECT * FROM t1;
1
diff --git a/mysql-test/suite/storage_engine/disabled.def b/mysql-test/suite/storage_engine/disabled.def
index e69de29bb2d..658d0c240db 100644
--- a/mysql-test/suite/storage_engine/disabled.def
+++ b/mysql-test/suite/storage_engine/disabled.def
@@ -0,0 +1,12 @@
+alter_table_online : MDEV-9705 - Field type conversion warnings have changed to worse
+col_opt_null : MDEV-10252 - Row count in 'out of range' warnings is off
+col_opt_not_null : MDEV-10252 - Row count in 'out of range' warnings is off
+col_opt_unsigned : MDEV-10252 - Row count in 'out of range' warnings is off
+col_opt_zerofill : MDEV-10252 - Row count in 'out of range' warnings is off
+type_binary : MDEV-10252 - Row count in 'out of range' warnings is off
+type_char : MDEV-10252 - Row count in 'out of range' warnings is off
+type_fixed : MDEV-10252 - Row count in 'out of range' warnings is off
+type_float : MDEV-10252 - Row count in 'out of range' warnings is off
+type_int : MDEV-10252 - Row count in 'out of range' warnings is off
+type_varbinary : MDEV-10252 - Row count in 'out of range' warnings is off
+type_varchar : MDEV-10252 - Row count in 'out of range' warnings is off
diff --git a/mysql-test/suite/storage_engine/insert_delayed.test b/mysql-test/suite/storage_engine/insert_delayed.test
index 3ded1686714..ece0bf3cf68 100644
--- a/mysql-test/suite/storage_engine/insert_delayed.test
+++ b/mysql-test/suite/storage_engine/insert_delayed.test
@@ -1,4 +1,4 @@
-#
+#
# INSERT DELAYED
#
diff --git a/mysql-test/suite/storage_engine/repair_table.inc b/mysql-test/suite/storage_engine/repair_table.inc
index aa3b4e6304d..a295b4c19f2 100644
--- a/mysql-test/suite/storage_engine/repair_table.inc
+++ b/mysql-test/suite/storage_engine/repair_table.inc
@@ -91,6 +91,9 @@ if ($have_default_index)
call mtr.add_suppression(" '\..test.t1'");
call mtr.add_suppression("Couldn't repair table: test.t1");
+ # In 10.2 with log_warnings=2 the error message is printed to the error log
+ call mtr.add_suppression("Table 't1' is marked as crashed.*");
+
--let $create_definition = a $int_indexed_col, b $char_col, $default_index (a)
--source create_table.inc
REPAIR TABLE t1;
diff --git a/mysql-test/suite/storage_engine/repair_table.result b/mysql-test/suite/storage_engine/repair_table.result
index a20b9be3ba1..d518e47756f 100644
--- a/mysql-test/suite/storage_engine/repair_table.result
+++ b/mysql-test/suite/storage_engine/repair_table.result
@@ -59,6 +59,7 @@ call mtr.add_suppression("Got an error from thread_id=.*");
call mtr.add_suppression("MySQL thread id .*, query id .* localhost.*root Checking table");
call mtr.add_suppression(" '\..test.t1'");
call mtr.add_suppression("Couldn't repair table: test.t1");
+call mtr.add_suppression("Table 't1' is marked as crashed.*");
CREATE TABLE t1 (a <INT_COLUMN>, b <CHAR_COLUMN>, <CUSTOM_INDEX> (a)) ENGINE=<STORAGE_ENGINE> <CUSTOM_TABLE_OPTIONS>;
REPAIR TABLE t1;
Table Op Msg_type Msg_text
@@ -85,7 +86,7 @@ Table Op Msg_type Msg_text
test.t1 check error Size of datafile is: 39 Should be: 65
test.t1 check error Corrupt
SELECT a,b FROM t1;
-ERROR HY000: Incorrect key file for table 't1'; try to repair it
+ERROR HY000: Index for table 't1' is corrupt; try to repair it
# Statement ended with one of expected results (0,ER_NOT_KEYFILE,144).
# If you got a difference in error message, just add it to rdiff file
INSERT INTO t1 (a,b) VALUES (14,'n'),(15,'o');
diff --git a/mysql-test/suite/storage_engine/tbl_opt_data_index_dir.result b/mysql-test/suite/storage_engine/tbl_opt_data_dir.result
index b8c5e75c88c..19b7d539b45 100644
--- a/mysql-test/suite/storage_engine/tbl_opt_data_index_dir.result
+++ b/mysql-test/suite/storage_engine/tbl_opt_data_dir.result
@@ -1,16 +1,19 @@
DROP TABLE IF EXISTS t1;
+# Running CREATE TABLE .. DATA DIRECTORY = <>
SHOW CREATE TABLE t1;
Table Create Table
t1 CREATE TABLE `t1` (
`a` int(11) DEFAULT NULL,
`b` char(8) DEFAULT NULL
-) ENGINE=<STORAGE_ENGINE> DEFAULT CHARSET=latin1 DATA DIRECTORY='<DATA_DIR>' INDEX DIRECTORY='<INDEX_DIR>'
+) ENGINE=<STORAGE_ENGINE> DEFAULT CHARSET=latin1 DATA DIRECTORY='<DATA_DIR_1>'
+# For ALTER TABLE the option is ignored
+# Running ALTER TABLE .. DATA DIRECTORY = <>
Warnings:
-Warning 1618 <INDEX DIRECTORY> option ignored
+Warning 1618 <DATA DIRECTORY> option ignored
SHOW CREATE TABLE t1;
Table Create Table
t1 CREATE TABLE `t1` (
`a` int(11) DEFAULT NULL,
`b` char(8) DEFAULT NULL
-) ENGINE=<STORAGE_ENGINE> DEFAULT CHARSET=latin1 DATA DIRECTORY='<DATA_DIR>' INDEX DIRECTORY='<INDEX_DIR>'
+) ENGINE=<STORAGE_ENGINE> DEFAULT CHARSET=latin1 DATA DIRECTORY='<DATA_DIR_1>'
DROP TABLE t1;
diff --git a/mysql-test/suite/storage_engine/tbl_opt_data_dir.test b/mysql-test/suite/storage_engine/tbl_opt_data_dir.test
new file mode 100644
index 00000000000..434ca449125
--- /dev/null
+++ b/mysql-test/suite/storage_engine/tbl_opt_data_dir.test
@@ -0,0 +1,52 @@
+#
+# Check whether DATA DIRECTORY is supported in CREATE and ALTER TABLE
+#
+# Note: the test does not check whether the option
+# has any real effect on the table, only that it is accepted
+#
+
+--source include/have_symlink.inc
+--source have_engine.inc
+
+--let $data_dir1 = $MYSQLTEST_VARDIR/storage_engine_data_dir1/
+--let $data_dir2 = $MYSQLTEST_VARDIR/storage_engine_data_dir2/
+--mkdir $data_dir1
+--mkdir $data_dir2
+
+--disable_warnings
+DROP TABLE IF EXISTS t1;
+--enable_warnings
+
+--let $table_options = DATA DIRECTORY = '$data_dir1'
+# We cannot mask the folder name here and further, but we can switch off query logging
+--let $disable_query_log = 1
+--echo # Running CREATE TABLE .. DATA DIRECTORY = <>
+--source create_table.inc
+
+--source mask_engine.inc
+--replace_result $data_dir1 <DATA_DIR_1>
+SHOW CREATE TABLE t1;
+
+--echo # For ALTER TABLE the option is ignored
+
+--let $alter_definition = DATA DIRECTORY = '$data_dir2'
+--disable_query_log
+--echo # Running ALTER TABLE .. DATA DIRECTORY = <>
+--source alter_table.inc
+if ($mysql_errname)
+{
+ --let $my_last_stmt = $alter_statement
+ --let $functionality = ALTER TABLE
+ --source unexpected_result.inc
+}
+--enable_query_log
+--source mask_engine.inc
+--replace_result $data_dir1 <DATA_DIR_1>
+SHOW CREATE TABLE t1;
+
+DROP TABLE t1;
+
+--source cleanup_engine.inc
+
+--rmdir $data_dir1
+--rmdir $data_dir2
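The replacement tbl_opt_data_dir test (and its INDEX DIRECTORY twin below) only verifies that the option is parsed, not that it takes effect. Boiled down to plain SQL with placeholder paths and engine, the behaviour it records is:

  CREATE TABLE t_demo (a INT, b CHAR(8))
    ENGINE=MyISAM DATA DIRECTORY='/some/other/disk/';   -- accepted at CREATE time
  ALTER TABLE t_demo DATA DIRECTORY='/yet/another/disk/';
  -- parsed but ignored for ALTER, producing: Warning 1618 <DATA DIRECTORY> option ignored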
diff --git a/mysql-test/suite/storage_engine/tbl_opt_data_index_dir.test b/mysql-test/suite/storage_engine/tbl_opt_data_index_dir.test
deleted file mode 100644
index 4a83cc46b84..00000000000
--- a/mysql-test/suite/storage_engine/tbl_opt_data_index_dir.test
+++ /dev/null
@@ -1,52 +0,0 @@
-#
-# Check whether DATA DIRECTORY and INDEX DIRECTORY
-# are supported in CREATE and ALTER TABLE
-#
-# Note: the test does not check whether the options
-# have any real effect on the table, only
-# that they are accepted
-#
-
---source include/have_symlink.inc
---source have_engine.inc
-
---let $data_dir = $MYSQLTEST_VARDIR/storage_engine_data_dir/
---let $index_dir = $MYSQLTEST_VARDIR/storage_engine_index_dir/
---mkdir $data_dir
---mkdir $index_dir
-
---disable_warnings
-DROP TABLE IF EXISTS t1;
---enable_warnings
-
---let $table_options = DATA DIRECTORY = '$data_dir' INDEX DIRECTORY = '$index_dir'
-# We cannot mask the folders name here, but we can switch off query logging
---let $disable_query_log = 1
---source create_table.inc
-
---source mask_engine.inc
---replace_result $data_dir <DATA_DIR> $index_dir <INDEX_DIR>
-SHOW CREATE TABLE t1;
-
---let $alter_definition = INDEX DIRECTORY = '$data_dir'
---disable_query_log
---source alter_table.inc
-if ($mysql_errname)
-{
- --let $my_last_stmt = $alter_statement
- --let $functionality = ALTER TABLE
- --source unexpected_result.inc
-}
---enable_query_log
---source mask_engine.inc
---replace_result $data_dir <DATA_DIR> $index_dir <INDEX_DIR>
-SHOW CREATE TABLE t1;
-
-DROP TABLE t1;
-
---source cleanup_engine.inc
-
---rmdir $data_dir
---rmdir $index_dir
-
-
diff --git a/mysql-test/suite/storage_engine/tbl_opt_index_dir.result b/mysql-test/suite/storage_engine/tbl_opt_index_dir.result
new file mode 100644
index 00000000000..c7368e9a84a
--- /dev/null
+++ b/mysql-test/suite/storage_engine/tbl_opt_index_dir.result
@@ -0,0 +1,19 @@
+DROP TABLE IF EXISTS t1;
+# Running CREATE TABLE .. INDEX DIRECTORY = <>
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `a` int(11) DEFAULT NULL,
+ `b` char(8) DEFAULT NULL
+) ENGINE=<STORAGE_ENGINE> DEFAULT CHARSET=latin1 INDEX DIRECTORY='<INDEX_DIR_1>'
+# For ALTER TABLE the option is ignored
+# Running ALTER TABLE .. INDEX DIRECTORY = <>
+Warnings:
+Warning 1618 <INDEX DIRECTORY> option ignored
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `a` int(11) DEFAULT NULL,
+ `b` char(8) DEFAULT NULL
+) ENGINE=<STORAGE_ENGINE> DEFAULT CHARSET=latin1 INDEX DIRECTORY='<INDEX_DIR_1>'
+DROP TABLE t1;
diff --git a/mysql-test/suite/storage_engine/tbl_opt_index_dir.test b/mysql-test/suite/storage_engine/tbl_opt_index_dir.test
new file mode 100644
index 00000000000..03d2ef2bf82
--- /dev/null
+++ b/mysql-test/suite/storage_engine/tbl_opt_index_dir.test
@@ -0,0 +1,52 @@
+#
+# Check whether INDEX DIRECTORY is supported in CREATE and ALTER TABLE
+#
+# Note: the test does not check whether the option
+# has any real effect on the table, only that it is accepted
+#
+
+--source include/have_symlink.inc
+--source have_engine.inc
+
+--let $index_dir1 = $MYSQLTEST_VARDIR/storage_engine_index_dir1/
+--let $index_dir2 = $MYSQLTEST_VARDIR/storage_engine_index_dir2/
+--mkdir $index_dir1
+--mkdir $index_dir2
+
+--disable_warnings
+DROP TABLE IF EXISTS t1;
+--enable_warnings
+
+--let $table_options = INDEX DIRECTORY = '$index_dir1'
+# We cannot mask the folder name here and further, but we can switch off query logging
+--let $disable_query_log = 1
+--echo # Running CREATE TABLE .. INDEX DIRECTORY = <>
+--source create_table.inc
+
+--source mask_engine.inc
+--replace_result $index_dir1 <INDEX_DIR_1>
+SHOW CREATE TABLE t1;
+
+--echo # For ALTER TABLE the option is ignored
+
+--let $alter_definition = INDEX DIRECTORY = '$index_dir2'
+--disable_query_log
+--echo # Running ALTER TABLE .. INDEX DIRECTORY = <>
+--source alter_table.inc
+if ($mysql_errname)
+{
+ --let $my_last_stmt = $alter_statement
+ --let $functionality = ALTER TABLE
+ --source unexpected_result.inc
+}
+--enable_query_log
+--source mask_engine.inc
+--replace_result $index_dir1 <INDEX_DIR_1>
+SHOW CREATE TABLE t1;
+
+DROP TABLE t1;
+
+--source cleanup_engine.inc
+
+--rmdir $index_dir1
+--rmdir $index_dir2
diff --git a/mysql-test/suite/storage_engine/tbl_opt_row_format.result b/mysql-test/suite/storage_engine/tbl_opt_row_format.result
index 5f1cf713bb6..57000e295f7 100644
--- a/mysql-test/suite/storage_engine/tbl_opt_row_format.result
+++ b/mysql-test/suite/storage_engine/tbl_opt_row_format.result
@@ -1,16 +1,30 @@
DROP TABLE IF EXISTS t1;
-CREATE TABLE t1 (a <INT_COLUMN>, b <CHAR_COLUMN>) ENGINE=<STORAGE_ENGINE> <CUSTOM_TABLE_OPTIONS> ROW_FORMAT=FIXED;
+CREATE TABLE t1 (a <INT_COLUMN>, b <CHAR_COLUMN>) ENGINE=<STORAGE_ENGINE> <CUSTOM_TABLE_OPTIONS> ROW_FORMAT=DYNAMIC;
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `a` int(11) DEFAULT NULL,
+ `b` char(8) DEFAULT NULL
+) ENGINE=<STORAGE_ENGINE> DEFAULT CHARSET=latin1 ROW_FORMAT=DYNAMIC
+ALTER TABLE t1 ROW_FORMAT=FIXED;
SHOW CREATE TABLE t1;
Table Create Table
t1 CREATE TABLE `t1` (
`a` int(11) DEFAULT NULL,
`b` char(8) DEFAULT NULL
) ENGINE=<STORAGE_ENGINE> DEFAULT CHARSET=latin1 ROW_FORMAT=FIXED
-ALTER TABLE t1 ROW_FORMAT=DYNAMIC;
+ALTER TABLE t1 ROW_FORMAT=PAGE;
SHOW CREATE TABLE t1;
Table Create Table
t1 CREATE TABLE `t1` (
`a` int(11) DEFAULT NULL,
`b` char(8) DEFAULT NULL
-) ENGINE=<STORAGE_ENGINE> DEFAULT CHARSET=latin1 ROW_FORMAT=DYNAMIC
+) ENGINE=<STORAGE_ENGINE> DEFAULT CHARSET=latin1 ROW_FORMAT=PAGE
+ALTER TABLE t1 ROW_FORMAT=COMPACT;
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `a` int(11) DEFAULT NULL,
+ `b` char(8) DEFAULT NULL
+) ENGINE=<STORAGE_ENGINE> DEFAULT CHARSET=latin1 ROW_FORMAT=COMPACT
DROP TABLE t1;
diff --git a/mysql-test/suite/storage_engine/tbl_opt_row_format.test b/mysql-test/suite/storage_engine/tbl_opt_row_format.test
index d6347551da0..17fe2f36429 100644
--- a/mysql-test/suite/storage_engine/tbl_opt_row_format.test
+++ b/mysql-test/suite/storage_engine/tbl_opt_row_format.test
@@ -13,12 +13,12 @@
DROP TABLE IF EXISTS t1;
--enable_warnings
---let $table_options = ROW_FORMAT=FIXED
+--let $table_options = ROW_FORMAT=DYNAMIC
--source create_table.inc
--source mask_engine.inc
SHOW CREATE TABLE t1;
---let $alter_definition = ROW_FORMAT=DYNAMIC
+--let $alter_definition = ROW_FORMAT=FIXED
--source alter_table.inc
if ($mysql_errname)
{
@@ -29,6 +29,30 @@ if ($mysql_errname)
--source mask_engine.inc
SHOW CREATE TABLE t1;
+--let $alter_definition = ROW_FORMAT=PAGE
+--source alter_table.inc
+if ($mysql_errname)
+{
+ --let $my_last_stmt = $alter_statement
+ --let $functionality = ALTER TABLE
+ --source unexpected_result.inc
+}
+--source mask_engine.inc
+SHOW CREATE TABLE t1;
+
+
+--let $alter_definition = ROW_FORMAT=COMPACT
+--source alter_table.inc
+if ($mysql_errname)
+{
+ --let $my_last_stmt = $alter_statement
+ --let $functionality = ALTER TABLE
+ --source unexpected_result.inc
+}
+--source mask_engine.inc
+SHOW CREATE TABLE t1;
+
+
DROP TABLE t1;
--source cleanup_engine.inc
diff --git a/mysql-test/suite/storage_engine/type_char_indexes.result b/mysql-test/suite/storage_engine/type_char_indexes.result
index 5be07bdf53f..73c076863ab 100644
--- a/mysql-test/suite/storage_engine/type_char_indexes.result
+++ b/mysql-test/suite/storage_engine/type_char_indexes.result
@@ -97,7 +97,7 @@ varchar2b 1
varchar3b 1
SET SESSION optimizer_switch = 'engine_condition_pushdown=on';
Warnings:
-Warning 1681 'engine_condition_pushdown=on' is deprecated and will be removed in a future release.
+Warning 1681 'engine_condition_pushdown=on' is deprecated and will be removed in a future release
EXPLAIN SELECT c,c20,v16,v128 FROM t1 WHERE c > 'a';
id select_type table type possible_keys key key_len ref rows Extra
# # # range c_v c_v # # # Using index condition
diff --git a/mysql-test/suite/storage_engine/type_date_time.result b/mysql-test/suite/storage_engine/type_date_time.result
index 473a7e67494..f136dd93f34 100644
--- a/mysql-test/suite/storage_engine/type_date_time.result
+++ b/mysql-test/suite/storage_engine/type_date_time.result
@@ -13,7 +13,7 @@ SHOW COLUMNS IN t1;
Field Type Null Key Default Extra
d date # # #
dt datetime # # #
-ts timestamp # # # on update CURRENT_TIMESTAMP
+ts timestamp # # # on update current_timestamp()
t time # # #
y year(4) # # #
y4 year(4) # # #
diff --git a/mysql-test/suite/storage_engine/vcol.result b/mysql-test/suite/storage_engine/vcol.result
index d51ab038576..e56a1b0543d 100644
--- a/mysql-test/suite/storage_engine/vcol.result
+++ b/mysql-test/suite/storage_engine/vcol.result
@@ -3,12 +3,12 @@ CREATE TABLE t1 (a <INT_COLUMN>, b <INT_COLUMN> GENERATED ALWAYS AS (a+1)) ENGIN
SHOW COLUMNS IN t1;
Field Type Null Key Default Extra
a int(11) # #
-b int(11) # # VIRTUAL
+b int(11) # # VIRTUAL GENERATED
INSERT INTO t1 (a) VALUES (1),(2);
INSERT INTO t1 (a,b) VALUES (3,3),(4,4);
Warnings:
-Warning 1906 The value specified for computed column 'b' in table 't1' ignored
-Warning 1906 The value specified for computed column 'b' in table 't1' ignored
+Warning 1906 The value specified for generated column 'b' in table 't1' ignored
+Warning 1906 The value specified for generated column 'b' in table 't1' ignored
SELECT a,b FROM t1;
a b
1 2
@@ -20,12 +20,12 @@ CREATE TABLE t1 (a <INT_COLUMN>, b <INT_COLUMN> GENERATED ALWAYS AS (a+1) PERSIS
SHOW COLUMNS IN t1;
Field Type Null Key Default Extra
a int(11) # #
-b int(11) # # PERSISTENT
+b int(11) # # STORED GENERATED
INSERT INTO t1 (a) VALUES (1),(2);
INSERT INTO t1 (a,b) VALUES (3,3),(4,4);
Warnings:
-Warning 1906 The value specified for computed column 'b' in table 't1' ignored
-Warning 1906 The value specified for computed column 'b' in table 't1' ignored
+Warning 1906 The value specified for generated column 'b' in table 't1' ignored
+Warning 1906 The value specified for generated column 'b' in table 't1' ignored
SELECT a,b FROM t1;
a b
1 2
@@ -37,12 +37,12 @@ CREATE TABLE t1 (a <INT_COLUMN>, b <INT_COLUMN> GENERATED ALWAYS AS (a+1) VIRTUA
SHOW COLUMNS IN t1;
Field Type Null Key Default Extra
a int(11) # #
-b int(11) # # VIRTUAL
+b int(11) # # VIRTUAL GENERATED
INSERT INTO t1 (a) VALUES (1),(2);
INSERT INTO t1 (a,b) VALUES (3,3),(4,4);
Warnings:
-Warning 1906 The value specified for computed column 'b' in table 't1' ignored
-Warning 1906 The value specified for computed column 'b' in table 't1' ignored
+Warning 1906 The value specified for generated column 'b' in table 't1' ignored
+Warning 1906 The value specified for generated column 'b' in table 't1' ignored
SELECT a,b FROM t1;
a b
1 2
@@ -54,12 +54,12 @@ CREATE TABLE t1 (a <INT_COLUMN>, b <INT_COLUMN> AS (a+1) PERSISTENT) ENGINE=<STO
SHOW COLUMNS IN t1;
Field Type Null Key Default Extra
a int(11) # #
-b int(11) # # PERSISTENT
+b int(11) # # STORED GENERATED
INSERT INTO t1 (a) VALUES (1),(2);
INSERT INTO t1 (a,b) VALUES (3,3),(4,4);
Warnings:
-Warning 1906 The value specified for computed column 'b' in table 't1' ignored
-Warning 1906 The value specified for computed column 'b' in table 't1' ignored
+Warning 1906 The value specified for generated column 'b' in table 't1' ignored
+Warning 1906 The value specified for generated column 'b' in table 't1' ignored
SELECT a,b FROM t1;
a b
1 2
diff --git a/mysql-test/suite/sys_vars/r/delay_key_write_func.result b/mysql-test/suite/sys_vars/r/delay_key_write_func.result
index 0fd1d492ef4..5cc4b2eaaad 100644
--- a/mysql-test/suite/sys_vars/r/delay_key_write_func.result
+++ b/mysql-test/suite/sys_vars/r/delay_key_write_func.result
@@ -1,24 +1,20 @@
'#--------------------FN_DYNVARS_023_01-------------------------#'
SET @start_value= @@global.delay_key_write;
-SET @@global.delay_key_write = ON;
-SELECT @@global.delay_key_write;
-@@global.delay_key_write
-ON
-connect user1,localhost,root,,,,;
-connection user1;
-SELECT @@global.delay_key_write AS res_is_ON;
-res_is_ON
-ON
-SET @@global.delay_key_write = ALL;
-disconnect user1;
-connect user1,localhost,root,,,,;
-connection user1;
-SELECT @@global.delay_key_write AS res_is_ALL;
-res_is_ALL
-ALL
'#--------------------FN_DYNVARS_023_02-------------------------#'
+CREATE PROCEDURE sp_addRecords (IN var1 INT,IN var2 INT)
+BEGIN
+WHILE (var1 < var2) DO
+INSERT INTO t1 VALUES(var1,REPEAT('MYSQL',10),100000.0/var1);
+SET var1=var1+1;
+END WHILE;
+END//
'---check when delay_key_write is OFF---'
SET @@global.delay_key_write = OFF;
+CREATE TABLE t1(
+a INT PRIMARY KEY,
+b VARCHAR(512),
+c DOUBLE
+) DELAY_KEY_WRITE = 1;
FLUSH STATUS;
CALL sp_addRecords(1,10);
SHOW STATUS LIKE 'Key_reads';
@@ -33,8 +29,14 @@ Key_write_requests 9
SELECT COUNT(*) FROM t1;
COUNT(*)
9
+DROP TABLE t1;
'----check when delay_key_write is ON---'
SET @@global.delay_key_write = ON;
+CREATE TABLE t1(
+a INT PRIMARY KEY,
+b VARCHAR(512),
+c DOUBLE
+) DELAY_KEY_WRITE = 1;
FLUSH STATUS;
CALL sp_addRecords(1,10);
SHOW STATUS LIKE 'Key_reads';
@@ -49,8 +51,14 @@ Key_write_requests 9
SELECT COUNT(*) FROM t1;
COUNT(*)
9
+DROP TABLE t1;
'----check when delay_key_write is ALL---'
SET @@global.delay_key_write = ALL;
+CREATE TABLE t1(
+a INT PRIMARY KEY,
+b VARCHAR(512),
+c DOUBLE
+) DELAY_KEY_WRITE = 0;
FLUSH STATUS;
CALL sp_addRecords(1,10);
SHOW STATUS LIKE 'Key_reads';
@@ -67,6 +75,4 @@ COUNT(*)
9
DROP PROCEDURE sp_addRecords;
DROP TABLE t1;
-disconnect user1;
-connection default;
SET @@global.delay_key_write= @start_value;
diff --git a/mysql-test/suite/sys_vars/r/innodb_sched_priority_cleaner_basic.result b/mysql-test/suite/sys_vars/r/innodb_sched_priority_cleaner_basic.result
index 1183fb27732..f2bfaf2ed61 100644
--- a/mysql-test/suite/sys_vars/r/innodb_sched_priority_cleaner_basic.result
+++ b/mysql-test/suite/sys_vars/r/innodb_sched_priority_cleaner_basic.result
@@ -1,4 +1,4 @@
-SET @start_value = @@GLOBAL.innodb_sched_priority_cleaner;
+SET GLOBAL innodb_sched_priority_cleaner=39;
SELECT @@GLOBAL.innodb_sched_priority_cleaner;
@@GLOBAL.innodb_sched_priority_cleaner
19
diff --git a/mysql-test/suite/sys_vars/r/sysvars_innodb,32bit,xtradb.rdiff-disabled b/mysql-test/suite/sys_vars/r/sysvars_innodb,32bit,xtradb.rdiff-disabled
index 858df585a7b..f5ce4d77c6b 100644
--- a/mysql-test/suite/sys_vars/r/sysvars_innodb,32bit,xtradb.rdiff-disabled
+++ b/mysql-test/suite/sys_vars/r/sysvars_innodb,32bit,xtradb.rdiff-disabled
@@ -1218,8 +1218,8 @@
COMMAND_LINE_ARGUMENT OPTIONAL
VARIABLE_NAME INNODB_VERSION
SESSION_VALUE NULL
--GLOBAL_VALUE 5.6.35
-+GLOBAL_VALUE 5.6.35-80.0
+-GLOBAL_VALUE 5.6.36
++GLOBAL_VALUE 5.6.36-82.0
GLOBAL_VALUE_ORIGIN COMPILE-TIME
DEFAULT_VALUE NULL
VARIABLE_SCOPE GLOBAL
diff --git a/mysql-test/suite/sys_vars/r/sysvars_innodb,xtradb.rdiff-disabled b/mysql-test/suite/sys_vars/r/sysvars_innodb,xtradb.rdiff-disabled
index d801270c6b6..9199653b460 100644
--- a/mysql-test/suite/sys_vars/r/sysvars_innodb,xtradb.rdiff-disabled
+++ b/mysql-test/suite/sys_vars/r/sysvars_innodb,xtradb.rdiff-disabled
@@ -661,8 +661,8 @@
COMMAND_LINE_ARGUMENT OPTIONAL
VARIABLE_NAME INNODB_VERSION
SESSION_VALUE NULL
--GLOBAL_VALUE 5.6.35
-+GLOBAL_VALUE 5.6.35-80.0
+-GLOBAL_VALUE 5.6.36
++GLOBAL_VALUE 5.6.36-82.0
GLOBAL_VALUE_ORIGIN COMPILE-TIME
DEFAULT_VALUE NULL
VARIABLE_SCOPE GLOBAL
diff --git a/mysql-test/suite/sys_vars/r/sysvars_innodb.result b/mysql-test/suite/sys_vars/r/sysvars_innodb.result
index 042f35f04a8..1cd91a3a61b 100644
--- a/mysql-test/suite/sys_vars/r/sysvars_innodb.result
+++ b/mysql-test/suite/sys_vars/r/sysvars_innodb.result
@@ -3074,7 +3074,7 @@ READ_ONLY NO
COMMAND_LINE_ARGUMENT OPTIONAL
VARIABLE_NAME INNODB_VERSION
SESSION_VALUE NULL
-GLOBAL_VALUE 5.7.14
+GLOBAL_VALUE 5.7.18
GLOBAL_VALUE_ORIGIN COMPILE-TIME
DEFAULT_VALUE NULL
VARIABLE_SCOPE GLOBAL
diff --git a/mysql-test/suite/sys_vars/t/delay_key_write_func.test b/mysql-test/suite/sys_vars/t/delay_key_write_func.test
index 89f40ba544b..e823e51954c 100644
--- a/mysql-test/suite/sys_vars/t/delay_key_write_func.test
+++ b/mysql-test/suite/sys_vars/t/delay_key_write_func.test
@@ -20,32 +20,14 @@
###############################################################################
--echo '#--------------------FN_DYNVARS_023_01-------------------------#'
-#######################################################################
-# Check if setting delay_key_write is changed in every new connection #
-#######################################################################
-
SET @start_value= @@global.delay_key_write;
-SET @@global.delay_key_write = ON;
-SELECT @@global.delay_key_write;
-
-connect (user1,localhost,root,,,,);
-connection user1;
-SELECT @@global.delay_key_write AS res_is_ON;
-SET @@global.delay_key_write = ALL;
-disconnect user1;
-
-connect (user1,localhost,root,,,,);
-connection user1;
-SELECT @@global.delay_key_write AS res_is_ALL;
-
--echo '#--------------------FN_DYNVARS_023_02-------------------------#'
######################################################
# Begin the functionality Testing of delay_key_write #
######################################################
# create procedure to add rows
---disable_query_log
DELIMITER //;
CREATE PROCEDURE sp_addRecords (IN var1 INT,IN var2 INT)
BEGIN
@@ -55,28 +37,19 @@ BEGIN
END WHILE;
END//
DELIMITER ;//
---enable_query_log
#==============================================================================
--echo '---check when delay_key_write is OFF---'
#==============================================================================
-
SET @@global.delay_key_write = OFF;
-
---disable_query_log
---disable_warnings
-DROP TABLE IF EXISTS t1;
---enable_warnings
# create a table with delay_key_write enabled
CREATE TABLE t1(
a INT PRIMARY KEY,
b VARCHAR(512),
c DOUBLE
) DELAY_KEY_WRITE = 1;
---enable_query_log
-
FLUSH STATUS;
@@ -86,6 +59,7 @@ SHOW STATUS LIKE 'Key_reads';
SHOW STATUS LIKE 'Key_writes';
SHOW STATUS LIKE 'Key_write_requests';
SELECT COUNT(*) FROM t1;
+DROP TABLE t1;
#==============================================================================
--echo '----check when delay_key_write is ON---'
@@ -93,17 +67,12 @@ SELECT COUNT(*) FROM t1;
SET @@global.delay_key_write = ON;
---disable_query_log
---disable_warnings
-DROP TABLE IF EXISTS t1;
---enable_warnings
# create a table with delay_key_write enabled
CREATE TABLE t1(
a INT PRIMARY KEY,
b VARCHAR(512),
c DOUBLE
) DELAY_KEY_WRITE = 1;
---enable_query_log
FLUSH STATUS;
CALL sp_addRecords(1,10);
@@ -112,23 +81,19 @@ SHOW STATUS LIKE 'Key_reads';
SHOW STATUS LIKE 'Key_writes';
SHOW STATUS LIKE 'Key_write_requests';
SELECT COUNT(*) FROM t1;
+DROP TABLE t1;
#==============================================================================
--echo '----check when delay_key_write is ALL---'
#==============================================================================
SET @@global.delay_key_write = ALL;
---disable_query_log
---disable_warnings
-DROP TABLE IF EXISTS t1;
---enable_warnings
# create a table with delay_key_write disabled
CREATE TABLE t1(
a INT PRIMARY KEY,
b VARCHAR(512),
c DOUBLE
) DELAY_KEY_WRITE = 0;
---enable_query_log
FLUSH STATUS;
CALL sp_addRecords(1,10);
@@ -140,12 +105,9 @@ SELECT COUNT(*) FROM t1;
DROP PROCEDURE sp_addRecords;
DROP TABLE t1;
-disconnect user1;
-connection default;
SET @@global.delay_key_write= @start_value;
####################################################
# End of functionality testing for delay_key_write #
####################################################
-
diff --git a/mysql-test/suite/sys_vars/t/innodb_sched_priority_cleaner_basic.test b/mysql-test/suite/sys_vars/t/innodb_sched_priority_cleaner_basic.test
index b2382fd7844..2c2037f167f 100644
--- a/mysql-test/suite/sys_vars/t/innodb_sched_priority_cleaner_basic.test
+++ b/mysql-test/suite/sys_vars/t/innodb_sched_priority_cleaner_basic.test
@@ -4,7 +4,15 @@
# A dynamic, global variable
-SET @start_value = @@GLOBAL.innodb_sched_priority_cleaner;
+# Test in read-only mode
+--let $restart_parameters= --innodb-read-only
+--source include/restart_mysqld.inc
+--let $restart_parameters=
+
+# This has no actual effect in innodb_read_only mode
+SET GLOBAL innodb_sched_priority_cleaner=39;
+
+--source include/restart_mysqld.inc
# Default value
SELECT @@GLOBAL.innodb_sched_priority_cleaner;
diff --git a/mysql-test/suite/wsrep/include/check_galera_version.inc b/mysql-test/suite/wsrep/include/check_galera_version.inc
index 38b4ada98b9..cb35269249b 100644
--- a/mysql-test/suite/wsrep/include/check_galera_version.inc
+++ b/mysql-test/suite/wsrep/include/check_galera_version.inc
@@ -12,11 +12,29 @@
--disable_query_log
-eval SET @GALERA_VERSION=(SELECT CONCAT('$galera_version', '%'));
+# Required Version
-if (!`SELECT COUNT(*) FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE
- VARIABLE_NAME LIKE 'wsrep_provider_version' AND
- VARIABLE_VALUE LIKE @GALERA_VERSION`)
+eval SET @GALERA_VERSION='$galera_version';
+SELECT CAST(REGEXP_REPLACE(@GALERA_VERSION,'^(\\d+)\\.(\\d+)\\.(\\d+).*','\\1') AS UNSIGNED) INTO @GALERA_MAJOR_VERSION;
+SELECT CAST(REGEXP_REPLACE(@GALERA_VERSION,'^(\\d+)\\.(\\d+)\\.(\\d+).*','\\2') AS UNSIGNED) INTO @GALERA_MID_VERSION;
+SELECT CAST(REGEXP_REPLACE(@GALERA_VERSION,'^(\\d+)\\.(\\d+)\\.(\\d+).*','\\3') AS UNSIGNED) INTO @GALERA_MINOR_VERSION;
+
+# Actual
+SELECT VARIABLE_VALUE INTO @ACTUAL_GALERA_VERSION FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME LIKE 'wsrep_provider_version';
+
+SELECT CAST(REGEXP_REPLACE(@ACTUAL_GALERA_VERSION,'^(\\d+)\\.(\\d+)\\.(\\d+).*','\\1') AS UNSIGNED) INTO @ACTUAL_GALERA_MAJOR_VERSION;
+SELECT CAST(REGEXP_REPLACE(@ACTUAL_GALERA_VERSION,'^(\\d+)\\.(\\d+)\\.(\\d+).*','\\2') AS UNSIGNED) INTO @ACTUAL_GALERA_MID_VERSION;
+SELECT CAST(REGEXP_REPLACE(@ACTUAL_GALERA_VERSION,'^(\\d+)\\.(\\d+)\\.(\\d+).*','\\3') AS UNSIGNED) INTO @ACTUAL_GALERA_MINOR_VERSION;
+
+# For testing
+#SELECT @GALERA_MAJOR_VERSION, @GALERA_MID_VERSION, @GALERA_MINOR_VERSION;
+#SELECT @ACTUAL_GALERA_VERSION;
+#SELECT @ACTUAL_GALERA_MAJOR_VERSION, @ACTUAL_GALERA_MID_VERSION, @ACTUAL_GALERA_MINOR_VERSION;
+
+if (!`SELECT (@ACTUAL_GALERA_MAJOR_VERSION > @GALERA_MAJOR_VERSION) OR
+ (@ACTUAL_GALERA_MAJOR_VERSION = @GALERA_MAJOR_VERSION AND @ACTUAL_GALERA_MID_VERSION > @GALERA_MID_VERSION) OR
+ (@ACTUAL_GALERA_MAJOR_VERSION = @GALERA_MAJOR_VERSION AND @ACTUAL_GALERA_MID_VERSION = @GALERA_MID_VERSION AND @ACTUAL_GALERA_MINOR_VERSION >= @GALERA_MINOR_VERSION)
+ `)
{
skip Test requires Galera library version $galera_version;
}
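
The rewritten check_galera_version.inc splits both the required and the actual wsrep_provider_version into major/mid/minor components and skips the test only when the actual version is strictly lower. A minimal C sketch of that greater-or-equal comparison over (major, mid, minor) triples, assuming a simple "X.Y.Z" parser; the function names are illustrative and not part of the patch:

    #include <stdio.h>
    #include <stdlib.h>

    /* Parse "X.Y.Z<anything>" into three integers; trailing text is ignored. */
    static void parse_version(const char *s, int v[3])
    {
        char *end;
        for (int i = 0; i < 3; i++)
        {
            v[i] = (int) strtol(s, &end, 10);
            s = (*end == '.') ? end + 1 : end;   /* step over the dot, if any */
        }
    }

    /* Return 1 when actual >= required: compare major, then mid, then minor. */
    static int version_at_least(const char *actual, const char *required)
    {
        int a[3], r[3];
        parse_version(actual, a);
        parse_version(required, r);
        if (a[0] != r[0]) return a[0] > r[0];
        if (a[1] != r[1]) return a[1] > r[1];
        return a[2] >= r[2];
    }

    int main(void)
    {
        printf("%d\n", version_at_least("25.3.20(rXXXX)", "25.3.17")); /* 1: run  */
        printf("%d\n", version_at_least("25.2.9",         "25.3.17")); /* 0: skip */
        return 0;
    }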
diff --git a/mysql-test/t/alter_table_online.test b/mysql-test/t/alter_table_online.test
index 22ebadd64f9..15df36e8009 100644
--- a/mysql-test/t/alter_table_online.test
+++ b/mysql-test/t/alter_table_online.test
@@ -285,6 +285,24 @@ CREATE TABLE t1 (a LONGTEXT COLLATE latin1_general_ci);
ALTER TABLE t1 MODIFY a LONGTEXT COLLATE latin1_swedish_ci, ALGORITHM=INPLACE;
DROP TABLE t1;
---echo #
---echo # End of MDEV-8948 ALTER ... INPLACE does work for BINARY, BLOB
---echo #
+#
+# MDEV-11335 Changing delay_key_write option for MyISAM table should not copy rows
+#
+select @@global.delay_key_write;
+create table t1 (a int, b int, key(b));
+flush tables;
+flush status;
+show status like 'Feature_delay_key_write';
+insert t1 values (1,2),(2,3),(3,4);
+show status like 'Feature_delay_key_write';
+alter online table t1 delay_key_write=1;
+show status like 'Feature_delay_key_write';
+flush tables;
+insert t1 values (1,2),(2,3),(3,4);
+show status like 'Feature_delay_key_write';
+alter online table t1 delay_key_write=0;
+show status like 'Feature_delay_key_write';
+flush tables;
+insert t1 values (1,2),(2,3),(3,4);
+show status like 'Feature_delay_key_write';
+drop table t1;
diff --git a/mysql-test/t/derived_view.test b/mysql-test/t/derived_view.test
index 3a18e9a086e..9b0cf9dca7d 100644
--- a/mysql-test/t/derived_view.test
+++ b/mysql-test/t/derived_view.test
@@ -1899,6 +1899,56 @@ deallocate prepare stmt1;
drop table t1,t2;
+--echo #
+--echo # Bug mdev-12670: mergeable derived / view with subqueries
+--echo # subject to semi-join optimizations
+--echo # (actually this is a 5.3 bug.)
+--echo #
+
+create table t1 (a int) engine=myisam;
+insert into t1 values (5),(3),(2),(7),(2),(5),(1);
+create table t2 (b int, index idx(b)) engine=myisam;
+insert into t2 values (2),(3),(2),(1),(3),(4);
+insert into t2 select b+10 from t2;
+insert into t2 select b+10 from t2;
+insert into t2 select b+10 from t2;
+insert into t2 select b+10 from t2;
+insert into t2 select b+10 from t2;
+insert into t2 select b+10 from t2;
+insert into t2 select b+10 from t2;
+insert into t2 select b+10 from t2;
+insert into t2 select b+10 from t2;
+insert into t2 select b+10 from t2;
+analyze table t1,t2;
+
+explain select a from t1 where a in (select b from t2);
+explain select * from (select a from t1 where a in (select b from t2)) t;
+create view v1 as select a from t1 where a in (select b from t2);
+explain select * from v1;
+
+drop view v1;
+drop table t1,t2;
+
+--echo #
+--echo # Bug mdev-12812: mergeable derived / view with subqueries
+--echo # NOT subject to semi-join optimizations
+--echo #
+
+CREATE TABLE t1 (c1 varchar(3)) ENGINE=MyISAM;
+INSERT INTO t1 VALUES ('foo'),('foo');
+
+CREATE TABLE t2 (c2 varchar(3)) ENGINE=MyISAM;
+INSERT INTO t2 VALUES ('bar'),('qux'),('foo');
+
+let $q=
+SELECT STRAIGHT_JOIN *
+ FROM ( SELECT * FROM t1 WHERE c1 IN ( SELECT c2 FROM t2 ) ) AS sq;
+
+eval $q;
+eval EXPLAIN EXTENDED $q;
+
+DROP TABLE t1, t2;
+
# The following command must be the last one the file
set optimizer_switch=@exit_optimizer_switch;
set join_cache_level=@exit_join_cache_level;
diff --git a/mysql-test/t/func_regexp_pcre.test b/mysql-test/t/func_regexp_pcre.test
index 26294ce2e24..4b2c18e0674 100644
--- a/mysql-test/t/func_regexp_pcre.test
+++ b/mysql-test/t/func_regexp_pcre.test
@@ -428,3 +428,14 @@ SELECT 0xE001 REGEXP @regCheck;
SET NAMES latin1;
SET @regCheck= '\\xE0\\x01';
SELECT CAST(0xE001 AS BINARY) REGEXP @regCheck;
+
+--echo # MDEV-12420: Testing recursion overflow
+--replace_regex /[0-9]+ exceeded/NUM exceeded/
+SELECT 1 FROM dual WHERE ('Alpha,Bravo,Charlie,Delta,Echo,Foxtrot,StrataCentral,Golf,Hotel,India,Juliet,Kilo,Lima,Mike,StrataL3,November,Oscar,StrataL2,Sand,P3,P4SwitchTest,Arsys,Poppa,ExtensionMgr,Arp,Quebec,Romeo,StrataApiV2,PtReyes,Sierra,SandAcl,Arrow,Artools,BridgeTest,Tango,SandT,PAlaska,Namespace,Agent,Qos,PatchPanel,ProjectReport,Ark,Gimp,Agent,SliceAgent,Arnet,Bgp,Ale,Tommy,Central,AsicPktTestLib,Hsc,SandL3,Abuild,Pca9555,Standby,ControllerDut,CalSys,SandLib,Sb820,PointV2,BfnLib,Evpn,BfnSdk,Sflow,ManagementActive,AutoTest,GatedTest,Bgp,Sand,xinetd,BfnAgentLib,bf-utils,Hello,BfnState,Eos,Artest,Qos,Scd,ThermoMgr,Uniform,EosUtils,Eb,FanController,Central,BfnL3,BfnL2,tcp_wrappers,Victor,Environment,Route,Failover,Whiskey,Xray,Gimp,BfnFixed,Strata,SoCal,XApi,Msrp,XpProfile,tcpdump,PatchPanel,ArosTest,FhTest,Arbus,XpAcl,MacConc,XpApi,telnet,QosTest,Alpha2,BfnVlan,Stp,VxlanControllerTest,MplsAgent,Bravo2,Lanz,BfnMbb,Intf,XCtrl,Unicast,SandTunnel,L3Unicast,Ipsec,MplsTest,Rsvp,EthIntf,StageMgr,Sol,MplsUtils,Nat,Ira,P4NamespaceDut,Counters,Charlie2,Aqlc,Mlag,Power,OpenFlow,Lag,RestApi,BfdTest,strongs,Sfa,CEosUtils,Adt746,MaintenanceMode,MlagDut,EosImage,IpEth,MultiProtocol,Launcher,Max3179,Snmp,Acl,IpEthTest,PhyEee,bf-syslibs,tacc,XpL2,p4-ar-switch,p4-bf-switch,LdpTest,BfnPhy,Mirroring,Phy6,Ptp' REGEXP '^((?!\b(Strata|StrataApi|StrataApiV2)\b).)*$');
+
+#
+# MDEV-12942 REGEXP_INSTR returns 1 when using brackets
+#
+SELECT REGEXP_INSTR('a_kollision', 'oll');
+SELECT REGEXP_INSTR('a_kollision', '(oll)');
+SELECT REGEXP_INSTR('a_kollision', 'o([lm])\\1');
diff --git a/mysql-test/t/innodb_ext_key.test b/mysql-test/t/innodb_ext_key.test
index bf94b7dd3d5..a721943e8bc 100644
--- a/mysql-test/t/innodb_ext_key.test
+++ b/mysql-test/t/innodb_ext_key.test
@@ -778,5 +778,46 @@ where index_date_updated= 10 and index_id < 800;
drop table t0,t1,t2;
-set optimizer_switch=@save_ext_key_optimizer_switch;
-SET SESSION STORAGE_ENGINE=DEFAULT;
+
+--echo #
+--echo # MDEV-11196: Error:Run-Time Check Failure #2 - Stack around the variable 'key_buff'
+--echo # was corrupted, server crashes in opt_sum_query
+
+CREATE TABLE t1 (
+ pk INT,
+ f1 VARCHAR(3),
+ f2 VARCHAR(1024),
+ PRIMARY KEY (pk),
+ KEY(f2)
+) ENGINE=InnoDB CHARSET utf8 ROW_FORMAT= DYNAMIC;
+
+INSERT INTO t1 VALUES (1,'foo','abc'),(2,'bar','def');
+SELECT MAX(t2.pk) FROM t1 t2 INNER JOIN t1 t3 ON t2.f1 = t3.f1 WHERE t2.pk <= 4;
+drop table t1;
+
+CREATE TABLE t1 (
+ pk1 INT,
+ pk2 INT,
+ f1 VARCHAR(3),
+ f2 VARCHAR(1021),
+ PRIMARY KEY (pk1,pk2),
+ KEY(f2)
+) ENGINE=InnoDB CHARSET utf8 ROW_FORMAT= DYNAMIC;
+
+INSERT INTO t1 VALUES (1,2,'2','abc'),(2,3,'3','def');
+explain format= json
+select * from t1 force index(f2) where pk1 <= 5 and pk2 <=5 and f2 = 'abc' and f1 <= '3';
+drop table t1;
+
+CREATE TABLE t1 (
+f2 INT,
+pk2 INT,
+f1 VARCHAR(3),
+pk1 VARCHAR(1000),
+PRIMARY KEY (pk1,pk2),
+KEY k1(pk1,f2)
+) ENGINE=InnoDB CHARSET utf8 ROW_FORMAT= DYNAMIC;
+INSERT INTO t1 VALUES (1,2,'2','abc'),(2,3,'3','def');
+explain format= json
+select * from t1 force index(k1) where f2 <= 5 and pk2 <=5 and pk1 = 'abc' and f1 <= '3';
+drop table t1;
diff --git a/mysql-test/t/join_outer.test b/mysql-test/t/join_outer.test
index a3a1278ef1e..88861511d79 100644
--- a/mysql-test/t/join_outer.test
+++ b/mysql-test/t/join_outer.test
@@ -1882,4 +1882,86 @@ WHERE v3 = 4;
drop table t1,t2,t3;
+--echo #
+--echo # MDEV-11958: LEFT JOIN with stored routine produces incorrect result
+--echo #
+
+CREATE TABLE t (x INT);
+INSERT INTO t VALUES(1),(NULL);
+CREATE FUNCTION f (val INT, ret INT) RETURNS INT DETERMINISTIC RETURN IFNULL(val, ret);
+
+SELECT t1.x, t2.x, IFNULL(t2.x,0), f(t2.x,0)
+ FROM t t1 LEFT JOIN t t2
+ ON t1.x = t2.x
+ WHERE IFNULL(t2.x,0)=0;
+explain extended
+SELECT t1.x, t2.x, IFNULL(t2.x,0), f(t2.x,0)
+ FROM t t1 LEFT JOIN t t2
+ ON t1.x = t2.x
+ WHERE IFNULL(t2.x,0)=0;
+SELECT t1.x, t2.x, IFNULL(t2.x,0), f(t2.x,0)
+ FROM t t1 LEFT JOIN t t2
+ ON t1.x = t2.x
+ WHERE f(t2.x,0)=0;
+explain extended
+SELECT t1.x, t2.x, IFNULL(t2.x,0), f(t2.x,0)
+ FROM t t1 LEFT JOIN t t2
+ ON t1.x = t2.x
+ WHERE f(t2.x,0)=0;
+
+drop function f;
+drop table t;
+CREATE TABLE t1 (
+ col1 DECIMAL(33,5) NULL DEFAULT NULL,
+ col2 DECIMAL(33,5) NULL DEFAULT NULL
+);
+
+CREATE TABLE t2 (
+ col1 DECIMAL(33,5) NULL DEFAULT NULL,
+ col2 DECIMAL(33,5) NULL DEFAULT NULL,
+ col3 DECIMAL(33,5) NULL DEFAULT NULL
+);
+
+INSERT INTO t1 VALUES (2, 1.1), (2, 2.1);
+INSERT INTO t2 VALUES (3, 3.1, 4), (1, 1, NULL);
+
+DELIMITER |;
+
+CREATE FUNCTION f1 ( p_num DECIMAL(45,15), p_return DECIMAL(45,15))
+RETURNS decimal(33,5)
+LANGUAGE SQL
+DETERMINISTIC
+CONTAINS SQL
+SQL SECURITY INVOKER
+BEGIN
+ IF p_num IS NULL THEN
+ RETURN p_return;
+ ELSE
+ RETURN p_num;
+ END IF;
+END |
+
+DELIMITER ;|
+
+let $q1=
+SELECT t1.col1, t2.col1, t2.col3
+FROM t1 LEFT OUTER JOIN t2 ON t1.col1 = t2.col2
+WHERE IFNULL(t2.col3,0) = 0;
+
+eval $q1;
+eval EXPLAIN EXTENDED $q1;
+
+let $q2=
+SELECT t1.col1, t2.col1, t2.col3
+FROM t1 LEFT OUTER JOIN t2 ON t1.col1 = t2.col2
+WHERE f1(t2.col3,0) = 0;
+eval $q2;
+eval EXPLAIN EXTENDED $q2;
+
+DROP FUNCTION f1;
+
+DROP TABLE t1,t2;
+
+--echo # end of 5.5 tests
+
SET optimizer_switch=@save_optimizer_switch;
diff --git a/mysql-test/t/log_tables-big-master.opt b/mysql-test/t/log_tables-big-master.opt
index b9bc885d0e4..0cdf12d284b 100644
--- a/mysql-test/t/log_tables-big-master.opt
+++ b/mysql-test/t/log_tables-big-master.opt
@@ -1 +1 @@
---log-output=table,file --slow-query-log --general-log --general-log-file="" --slow-query-log-file=""
+--slow-query-log --general-log --general-log-file="" --slow-query-log-file=""
diff --git a/mysql-test/t/myisam_debug.test b/mysql-test/t/myisam_debug.test
index b106ecdea5e..465ecd70895 100644
--- a/mysql-test/t/myisam_debug.test
+++ b/mysql-test/t/myisam_debug.test
@@ -57,3 +57,16 @@ KILL QUERY @thread_id;
CHECK TABLE t1;
DROP TABLE t1,t2;
DISCONNECT insertConn;
+
+#
+# MDEV-12761 Error return from external_lock make the server crash
+#
+call mtr.add_suppression("Index for table '.*test.t1\\.MYI' is corrupt; try to repair it");
+create table t1 (a int, index(a));
+lock tables t1 write;
+insert t1 values (1),(2),(1);
+set @old_dbug=@@debug_dbug;
+set debug_dbug='+d,mi_lock_database_failure';
+unlock tables;
+set debug_dbug=@old_dbug;
+drop table t1;
diff --git a/mysql-test/t/mysqld--help.test b/mysql-test/t/mysqld--help.test
index e6499957cd2..01f26f09543 100644
--- a/mysql-test/t/mysqld--help.test
+++ b/mysql-test/t/mysqld--help.test
@@ -3,6 +3,7 @@
#
--source include/not_embedded.inc
--source include/have_perfschema.inc
+--source include/have_profiling.inc
--source include/platform.inc
#
diff --git a/mysql-test/t/partition_alter.test b/mysql-test/t/partition_alter.test
index 592d8fdaeaa..9194e9a8222 100644
--- a/mysql-test/t/partition_alter.test
+++ b/mysql-test/t/partition_alter.test
@@ -1,3 +1,4 @@
+--source include/have_innodb.inc
--source include/have_partition.inc
CREATE TABLE `test_data` (
@@ -64,3 +65,41 @@ deallocate prepare stmt;
drop table test_data;
+#
+# MDEV-12389 ADD CHECK leaves an orphaned .par file
+#
+
+--let $datadir=`SELECT @@datadir`
+
+# InnoDB
+create table t1(id int, d date not null, b bool not null default 0, primary key(id,d))
+engine=innodb
+partition by range columns (d) (
+partition p1 values less than ('2016-10-18'),
+partition p2 values less than ('2020-10-19'));
+insert t1 values (0, '2000-01-02', 0);
+insert t1 values (1, '2020-01-02', 10);
+--replace_regex /#sql-[0-9a-f_]*/#sql-temporary/
+--error ER_CONSTRAINT_FAILED
+alter table t1 add check (b in (0, 1));
+alter table t1 add check (b in (0, 10));
+show create table t1;
+--error ER_CONSTRAINT_FAILED
+insert t1 values (2, '2020-01-03', 20);
+drop table t1;
+--list_files $datadir/test
+
+# MyISAM, different execution path
+create table t1(id int, d date not null, b bool not null default 0, primary key(id,d))
+partition by range columns (d) (
+partition p1 values less than ('2016-10-18'),
+partition p2 values less than ('2020-10-19'));
+insert t1 values (0, '2000-01-02', 0);
+insert t1 values (1, '2020-01-02', 10);
+# FIXME: MDEV-12923 MyISAM allows CHECK constraint violation in ALTER TABLE
+alter table t1 add check (b in (0, 1));
+show create table t1;
+--error ER_CONSTRAINT_FAILED
+insert t1 values (2, '2020-01-03', 20);
+drop table t1;
+--list_files $datadir/test
diff --git a/mysql-test/t/subselect_innodb.test b/mysql-test/t/subselect_innodb.test
index 2451bc60fee..544bcd994ed 100644
--- a/mysql-test/t/subselect_innodb.test
+++ b/mysql-test/t/subselect_innodb.test
@@ -576,3 +576,38 @@ from
t1;
drop table t1,t2;
+
+--echo #
+--echo # mdev-12931: semi-join in ON expression of STRAIGHT_JOIN
+--echo # joining a base table and a mergeable derived table
+--echo #
+
+CREATE TABLE t1 (f1 int) ENGINE=InnoDB;
+INSERT INTO t1 VALUES (3),(2);
+
+CREATE TABLE t2 (f2 int) ENGINE=InnoDB;
+INSERT INTO t2 VALUES (1),(4);
+
+CREATE TABLE t3 (f3 int) ENGINE=InnoDB;
+INSERT INTO t3 VALUES (5),(6);
+
+CREATE TABLE t4 (f4 int) ENGINE=InnoDB;
+INSERT INTO t4 VALUES (1),(8);
+
+SELECT *
+FROM t1
+ INNER JOIN
+ ( t2 STRAIGHT_JOIN ( SELECT * FROM t3 ) AS sq
+ ON ( 1 IN ( SELECT f4 FROM t4 ) ) )
+ ON ( f1 >= f2 );
+
+EXPLAIN EXTENDED
+SELECT *
+FROM t1
+ INNER JOIN
+ ( t2 STRAIGHT_JOIN ( SELECT * FROM t3 ) AS sq
+ ON ( 1 IN ( SELECT f4 FROM t4 ) ) )
+ ON ( f1 >= f2 );
+
+DROP TABLE t1,t2,t3,t4;
+
diff --git a/mysql-test/t/subselect_mat_cost_bugs.test b/mysql-test/t/subselect_mat_cost_bugs.test
index 316ac707bef..9e3ac603ec6 100644
--- a/mysql-test/t/subselect_mat_cost_bugs.test
+++ b/mysql-test/t/subselect_mat_cost_bugs.test
@@ -406,6 +406,8 @@ drop table t3, t4, t5;
--echo # LP BUG#858038 The result of a query with NOT IN subquery depends on the state of the optimizer switch
--echo #
+set @optimizer_switch_save= @@optimizer_switch;
+
create table t1 (c1 char(2) not null, c2 char(2));
create table t2 (c3 char(2), c4 char(2));
@@ -425,6 +427,8 @@ select * from t1 where c1 = 'a2' and (c1, c2) not in (select * from t2);
drop table t1, t2;
+set optimizer_switch= @optimizer_switch_save;
+
--echo #
--echo # MDEV-12673: cost-based choice between materialization and in-to-exists
--echo #
@@ -463,3 +467,43 @@ SELECT * FROM t1 WHERE i1 NOT IN (
);
DROP TABLE t1,t2,t3;
+
+--echo #
+--echo # MDEV-7599: in-to-exists chosen after min/max optimization
+--echo #
+
+set @optimizer_switch_save= @@optimizer_switch;
+
+CREATE TABLE t1 (a INT, KEY(a)) ENGINE=MyISAM;
+INSERT INTO t1 VALUES (1),(2);
+
+CREATE TABLE t2 (b INT, c INT) ENGINE=MyISAM;
+INSERT INTO t2 VALUES (1,6),(2,4), (8,9);
+
+let $q=
+SELECT * FROM t2 WHERE b != ALL (SELECT MIN(a) FROM t1, t2 WHERE t2.c = t2.b);
+
+eval $q;
+eval EXPLAIN EXTENDED $q;
+set optimizer_switch= 'materialization=off';
+eval $q;
+eval EXPLAIN EXTENDED $q;
+set optimizer_switch= @optimizer_switch_save;
+
+DROP TABLE t1,t2;
+
+CREATE TABLE t1 (f1 varchar(10)) ENGINE=MyISAM;
+INSERT INTO t1 VALUES ('foo'),('bar');
+
+CREATE TABLE t2 (f2 varchar(10), key(f2)) ENGINE=MyISAM;
+INSERT INTO t2 VALUES ('baz'),('qux');
+
+CREATE TABLE t3 (f3 varchar(10)) ENGINE=MyISAM;
+INSERT INTO t3 VALUES ('abc'),('def');
+
+SELECT * FROM t1
+ WHERE f1 = ALL( SELECT MAX(t2a.f2)
+ FROM t2 AS t2a INNER JOIN t2 t2b INNER JOIN t3
+ ON (f3 = t2b.f2) );
+
+DROP TABLE t1,t2,t3;
diff --git a/mysql-test/t/subselect_sj.test b/mysql-test/t/subselect_sj.test
index b6d6e0a5172..f90f1e2e927 100644
--- a/mysql-test/t/subselect_sj.test
+++ b/mysql-test/t/subselect_sj.test
@@ -2773,5 +2773,77 @@ WHERE ( SELECT z.country
drop table t1, t2, t3;
set optimizer_switch= @tmp_mdev6859;
+--echo #
+--echo # MDEV-12675: subquery subject to semi-join optimizations
+--echo # in ON expression of INNER JOIN
+--echo #
+
+set @tmp_mdev12675=@@optimizer_switch;
+set optimizer_switch=default;
+create table t1 (a int) engine=myisam;
+insert into t1 values (5),(3),(2),(7),(2),(5),(1);
+create table t2 (b int, index idx(b)) engine=myisam;
+insert into t2 values (2),(3),(2),(1),(3),(4);
+insert into t2 select b+10 from t2;
+insert into t2 select b+10 from t2;
+insert into t2 select b+10 from t2;
+insert into t2 select b+10 from t2;
+insert into t2 select b+10 from t2;
+insert into t2 select b+10 from t2;
+insert into t2 select b+10 from t2;
+insert into t2 select b+10 from t2;
+insert into t2 select b+10 from t2;
+insert into t2 select b+10 from t2;
+insert into t2 select b+10 from t2;
+analyze table t1,t2;
+
+explain
+select a from t1, t2 where b between 1 and 2 and a in (select b from t2);
+explain
+select a from t1 join t2 on b between 1 and 2 and a in (select b from t2);
+
+drop table t1,t2;
+set optimizer_switch= @tmp_mdev12675;
+
+--echo #
+--echo # MDEV-12817: subquery NOT subject to semi-join optimizations
+--echo # in ON expression of INNER JOIN
+--echo #
+
+CREATE TABLE t1 (c1 int) ENGINE=MyISAM;
+INSERT INTO t1 VALUES (1),(2);
+
+CREATE TABLE t2 (c2 int) ENGINE=MyISAM;
+INSERT INTO t2 VALUES (3),(4);
+
+CREATE TABLE t3 (c3 int) ENGINE=MyISAM;
+INSERT INTO t3 VALUES (5),(6);
+
+CREATE TABLE t4 (c4 int) ENGINE=MyISAM;
+INSERT INTO t4 VALUES (7),(8);
+
+let $q1=
+SELECT c1
+FROM t1
+LEFT JOIN
+( t2 INNER JOIN t3 ON ( 1 IN ( SELECT c4 FROM t4 ) ) )
+ON (c1 = c3);
+
+eval $q1;
+eval EXPLAIN EXTENDED $q1;
+
+let $q2=
+SELECT *
+FROM t1
+LEFT JOIN
+( ( SELECT * FROM t2 WHERE c2 IN ( SELECT c3 FROM t3 ) ) AS sq INNER JOIN t4 )
+ON (c1 = c2);
+
+--echo # mdev-12820
+eval $q2;
+eval EXPLAIN EXTENDED $q2;
+
+DROP TABLE t1,t2,t3,t4;
+
# The following command must be the last one the file
set optimizer_switch=@subselect_sj_tmp;
diff --git a/mysql-test/t/subselect_sj2_mat.test b/mysql-test/t/subselect_sj2_mat.test
index 61d9b09edff..0f2892ae2dc 100644
--- a/mysql-test/t/subselect_sj2_mat.test
+++ b/mysql-test/t/subselect_sj2_mat.test
@@ -263,3 +263,23 @@ DROP TABLE t1,t2,t3;
set join_cache_level= @save_join_cache_level;
set optimizer_switch=@save_optimizer_switch;
+--echo #
+--echo # mdev-7791: materialization of a semi-join subquery +
+--echo # RAND() in WHERE
+--echo # (materialized table is accessed last)
+--echo #
+
+set @save_optimizer_switch=@@optimizer_switch;
+set optimizer_switch='materialization=on';
+
+create table t1(i int);
+insert into t1 values (1), (2), (3), (7), (9), (10);
+create table t2(i int);
+insert into t2 values (1), (2), (3), (4), (5), (6), (7), (8), (9), (10);
+
+select * from t1 where (rand() < 0) and i in (select i from t2);
+explain extended
+select * from t1 where (rand() < 0) and i in (select i from t2);
+
+drop table t1,t2;
+set optimizer_switch=@save_optimizer_switch;
diff --git a/mysql-test/t/trigger.test b/mysql-test/t/trigger.test
index d6eca47c0d8..aeab884670d 100644
--- a/mysql-test/t/trigger.test
+++ b/mysql-test/t/trigger.test
@@ -2677,6 +2677,30 @@ select trigger_name, action_order, created from information_schema.triggers
drop table t1;
set time_zone= @@global.time_zone;
+--echo #
+--echo # MDEV-12992: Increasing memory consumption with each invocation of trigger
+--echo #
+
+--let $n= 20000
+
+CREATE TABLE t1 (a INT);
+INSERT INTO t1 VALUES (1);
+CREATE TABLE t2 (b INT);
+CREATE TRIGGER tr
+ AFTER UPDATE ON t1 FOR EACH ROW SELECT (SELECT b FROM t2) INTO @x;
+
+--disable_query_log
+--echo # Running $n queries
+while ($n)
+{
+ UPDATE t1 SET a = 2;
+ --dec $n
+}
+--enable_query_log
+
+DROP TABLE t1,t2;
+
+
--echo #
--echo # Start of 10.3 tests
--echo #
diff --git a/mysql-test/t/union.test b/mysql-test/t/union.test
index ce8b2bc9c2a..abea52fe584 100644
--- a/mysql-test/t/union.test
+++ b/mysql-test/t/union.test
@@ -1524,6 +1524,31 @@ SELECT * FROM t1 t1_1 LEFT JOIN t1 t1_2 ON ( t1_2.b = t1_1.a )
DROP TABLE t1;
+--echo # Bug mdev-12788: UNION ALL + impossible having for derived
+--echo # with IN subquery in WHERE
+--echo #
+
+CREATE TABLE t1 (i int) ENGINE=MyISAM;
+INSERT INTO t1 VALUES (1);
+
+CREATE TABLE t2 (pk int PRIMARY KEY) ENGINE=MyISAM;
+INSERT INTO t2 VALUES (1),(2);
+
+let $q=
+SELECT 1, 2
+UNION ALL
+SELECT i, COUNT(*) FROM (
+ SELECT * FROM t1 WHERE i IN ( SELECT pk FROM t2 )
+) AS sq
+GROUP BY i
+HAVING i = 10;
+
+eval $q;
+eval EXPLAIN EXTENDED $q;
+
+DROP TABLE t1,t2;
+
+
--echo #
--echo # Start of 10.3 tests
--echo #
diff --git a/mysys/lf_alloc-pin.c b/mysys/lf_alloc-pin.c
index 6c813333d09..e2073df1e4d 100644
--- a/mysys/lf_alloc-pin.c
+++ b/mysys/lf_alloc-pin.c
@@ -323,12 +323,6 @@ static int match_pins(LF_PINS *el, void *addr)
return 0;
}
-#if STACK_DIRECTION < 0
-#define available_stack_size(CUR,END) (long) ((char*)(CUR) - (char*)(END))
-#else
-#define available_stack_size(CUR,END) (long) ((char*)(END) - (char*)(CUR))
-#endif
-
#define next_node(P, X) (*((uchar * volatile *)(((uchar *)(X)) + (P)->free_ptr_offset)))
#define anext_node(X) next_node(&allocator->pinbox, (X))
diff --git a/mysys/lf_hash.c b/mysys/lf_hash.c
index 41174a66ced..430f1007f30 100644
--- a/mysys/lf_hash.c
+++ b/mysys/lf_hash.c
@@ -550,7 +550,10 @@ static int initialize_bucket(LF_HASH *hash, LF_SLIST * volatile *node,
return -1;
if (*el == NULL && bucket &&
unlikely(initialize_bucket(hash, el, parent, pins)))
+ {
+ my_free(dummy);
return -1;
+ }
dummy->hashnr= my_reverse_bits(bucket) | 0; /* dummy node */
dummy->key= dummy_key;
dummy->keylen= 0;
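
The lf_hash.c hunk above frees the freshly allocated dummy node when the recursive initialization of the parent bucket fails, where the old code returned early and leaked it. A hedged C sketch of the pattern — release whatever this call allocated before propagating a nested failure; the struct and the failing helper are stand-ins, not the real LF_HASH types:

    #include <stdlib.h>

    struct node { struct node *next; unsigned hashnr; };

    /* Stand-in for recursively initializing the parent bucket; may fail. */
    static int init_parent(struct node **parent) { (void) parent; return -1; }

    /* Allocate a dummy node for a bucket.  If setting up the parent bucket
       fails, free the node we just allocated before returning the error. */
    static int init_bucket(struct node **bucket, struct node **parent)
    {
        struct node *dummy = malloc(sizeof(*dummy));
        if (dummy == NULL)
            return -1;
        if (*parent == NULL && init_parent(parent) != 0)
        {
            free(dummy);              /* do not leak the node on the error path */
            return -1;
        }
        dummy->hashnr = 0;
        dummy->next   = NULL;
        *bucket = dummy;
        return 0;
    }

    int main(void)
    {
        struct node *bucket = NULL, *parent = NULL;
        return init_bucket(&bucket, &parent) == 0 ? 0 : 1;
    }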
diff --git a/mysys/ma_dyncol.c b/mysys/ma_dyncol.c
index 155a4367345..125b3a4632d 100644
--- a/mysys/ma_dyncol.c
+++ b/mysys/ma_dyncol.c
@@ -4038,6 +4038,8 @@ mariadb_dyncol_val_double(double *dbl, DYNAMIC_COLUMN_VALUE *val)
*dbl= strtod(str, &end);
if (*end != '\0')
rc= ER_DYNCOL_TRUNCATED;
+ free(str);
+ break;
}
case DYN_COL_DECIMAL:
if (decimal2double(&val->x.decimal.value, dbl) != E_DEC_OK)
diff --git a/mysys/waiting_threads.c b/mysys/waiting_threads.c
index 7d8aae032ea..2549bd8a587 100644
--- a/mysys/waiting_threads.c
+++ b/mysys/waiting_threads.c
@@ -556,7 +556,7 @@ my_bool wt_resource_id_memcmp(const void *a, const void *b)
{
/* we use the fact that there's no padding in the middle of WT_RESOURCE_ID */
compile_time_assert(offsetof(WT_RESOURCE_ID, type) == sizeof(ulonglong));
- return memcmp(a, b, sizeof_WT_RESOURCE_ID);
+ return MY_TEST(memcmp(a, b, sizeof_WT_RESOURCE_ID));
}
/**
diff --git a/plugin/auth_pam/auth_pam.c b/plugin/auth_pam/auth_pam.c
index 1f25163b371..ffc3d6f5537 100644
--- a/plugin/auth_pam/auth_pam.c
+++ b/plugin/auth_pam/auth_pam.c
@@ -162,10 +162,11 @@ static int pam_auth(MYSQL_PLUGIN_VIO *vio, MYSQL_SERVER_AUTH_INFO *info)
if (new_username && strcmp(new_username, info->user_name))
strncpy(info->authenticated_as, new_username,
sizeof(info->authenticated_as));
+ info->authenticated_as[sizeof(info->authenticated_as)-1]= 0;
end:
pam_end(pamh, status);
- PAM_DEBUG((stderr, "PAM: status = %d user = %s\n", status, new_username));
+ PAM_DEBUG((stderr, "PAM: status = %d user = %s\n", status, info->authenticated_as));
return status == PAM_SUCCESS ? CR_OK : CR_ERROR;
}
diff --git a/plugin/aws_key_management/aws_key_management_plugin.cc b/plugin/aws_key_management/aws_key_management_plugin.cc
index d7a948369f5..365c5caf198 100644
--- a/plugin/aws_key_management/aws_key_management_plugin.cc
+++ b/plugin/aws_key_management/aws_key_management_plugin.cc
@@ -621,6 +621,6 @@ maria_declare_plugin(aws_key_management)
NULL,
settings,
"1.0",
- MariaDB_PLUGIN_MATURITY_BETA
+ MariaDB_PLUGIN_MATURITY_STABLE
}
maria_declare_plugin_end;
diff --git a/plugin/cracklib_password_check/cracklib_password_check.c b/plugin/cracklib_password_check/cracklib_password_check.c
index 7861d5fd83e..94587a6d659 100644
--- a/plugin/cracklib_password_check/cracklib_password_check.c
+++ b/plugin/cracklib_password_check/cracklib_password_check.c
@@ -30,6 +30,7 @@ static int crackme(MYSQL_LEX_STRING *username, MYSQL_LEX_STRING *password)
const char *res;
memcpy(user, username->str, username->length);
+ user[username->length]= 0;
if ((host= strchr(user, '@')))
*host++= 0;
diff --git a/plugin/feedback/sender_thread.cc b/plugin/feedback/sender_thread.cc
index 66f47e7302a..4742d5f4920 100644
--- a/plugin/feedback/sender_thread.cc
+++ b/plugin/feedback/sender_thread.cc
@@ -204,7 +204,7 @@ static void send_report(const char *when)
/*
otherwise, prepare the THD and TABLE_LIST,
create and fill the temporary table with data just like
- SELECT * FROM INFORMATION_SCHEMA.feedback is doing,
+ SELECT * FROM INFORMATION_SCHEMA.FEEDBACK is doing,
read and concatenate table data into a String.
*/
if (!(thd= new THD(thd_thread_id)))
diff --git a/plugin/server_audit/server_audit.c b/plugin/server_audit/server_audit.c
index c0ec9aa6b8d..ee00c9e1385 100644
--- a/plugin/server_audit/server_audit.c
+++ b/plugin/server_audit/server_audit.c
@@ -1041,6 +1041,7 @@ static int start_logging()
error_header();
fprintf(stderr, "logging started to the file %s.\n", alt_fname);
strncpy(current_log_buf, alt_fname, sizeof(current_log_buf));
+ current_log_buf[sizeof(current_log_buf)-1]= 0;
}
else if (output_type == OUTPUT_SYSLOG)
{
@@ -2570,6 +2571,7 @@ static void update_file_path(MYSQL_THD thd,
}
strncpy(path_buffer, new_name, sizeof(path_buffer));
+ path_buffer[sizeof(path_buffer)-1]= 0;
file_path= path_buffer;
exit_func:
internal_stop_logging= 0;
@@ -2622,6 +2624,7 @@ static void update_incl_users(MYSQL_THD thd,
flogger_mutex_lock(&lock_operations);
mark_always_logged(thd);
strncpy(incl_user_buffer, new_users, sizeof(incl_user_buffer));
+ incl_user_buffer[sizeof(incl_user_buffer)-1]= 0;
incl_users= incl_user_buffer;
user_coll_fill(&incl_user_coll, incl_users, &excl_user_coll, 1);
error_header();
@@ -2640,6 +2643,7 @@ static void update_excl_users(MYSQL_THD thd __attribute__((unused)),
flogger_mutex_lock(&lock_operations);
mark_always_logged(thd);
strncpy(excl_user_buffer, new_users, sizeof(excl_user_buffer));
+ excl_user_buffer[sizeof(excl_user_buffer)-1]= 0;
excl_users= excl_user_buffer;
user_coll_fill(&excl_user_coll, excl_users, &incl_user_coll, 0);
error_header();
@@ -2771,6 +2775,7 @@ static void update_syslog_ident(MYSQL_THD thd __attribute__((unused)),
{
char *new_ident= (*(char **) save) ? *(char **) save : empty_str;
strncpy(syslog_ident_buffer, new_ident, sizeof(syslog_ident_buffer));
+ syslog_ident_buffer[sizeof(syslog_ident_buffer)-1]= 0;
syslog_ident= syslog_ident_buffer;
error_header();
fprintf(stderr, "SYSYLOG ident was changed to '%s'\n", syslog_ident);
diff --git a/scripts/galera_new_cluster.sh b/scripts/galera_new_cluster.sh
index b873192cf31..8bf2fa35cec 100755
--- a/scripts/galera_new_cluster.sh
+++ b/scripts/galera_new_cluster.sh
@@ -5,7 +5,7 @@
# the Free Software Foundation; either version 2.1 of the License, or
# (at your option) any later version.
-if [ "${1}" == "-h" -o "${1}" == "--help" ]; then
+if [ "${1}" = "-h" ] || [ "${1}" = "--help" ]; then
cat <<EOF
Usage: ${0}
diff --git a/sql-common/client_plugin.c b/sql-common/client_plugin.c
index dd87b01d932..f93e50125c5 100644
--- a/sql-common/client_plugin.c
+++ b/sql-common/client_plugin.c
@@ -375,8 +375,7 @@ mysql_load_plugin_v(MYSQL *mysql, const char *name, int type,
if (!(sym= dlsym(dlhandle, plugin_declarations_sym)))
{
errmsg= "not a plugin";
- (void)dlclose(dlhandle);
- goto err;
+ goto errc;
}
plugin= (struct st_mysql_client_plugin*)sym;
@@ -384,19 +383,19 @@ mysql_load_plugin_v(MYSQL *mysql, const char *name, int type,
if (type >=0 && type != plugin->type)
{
errmsg= "type mismatch";
- goto err;
+ goto errc;
}
if (strcmp(name, plugin->name))
{
errmsg= "name mismatch";
- goto err;
+ goto errc;
}
if (type < 0 && find_plugin(name, plugin->type))
{
errmsg= "it is already loaded";
- goto err;
+ goto errc;
}
plugin= add_plugin(mysql, plugin, dlhandle, argc, args);
@@ -406,6 +405,8 @@ mysql_load_plugin_v(MYSQL *mysql, const char *name, int type,
DBUG_PRINT ("leave", ("plugin loaded ok"));
DBUG_RETURN (plugin);
+errc:
+ dlclose(dlhandle);
err:
mysql_mutex_unlock(&LOCK_load_client_plugin);
DBUG_PRINT ("leave", ("plugin load error : %s", errmsg));
diff --git a/sql/CMakeLists.txt b/sql/CMakeLists.txt
index 87e41817857..88a4e40e373 100644
--- a/sql/CMakeLists.txt
+++ b/sql/CMakeLists.txt
@@ -62,15 +62,22 @@ SET_SOURCE_FILES_PROPERTIES(${GEN_SOURCES}
# Gen_lex_token
# Make sure sql_yacc.h is generated before compiling gen_lex_token
+
+IF(NOT CMAKE_GENERATOR MATCHES "Visual Studio")
+ SET(DEPENDS_gen_lex_token DEPENDS gen_lex_token)
+ SET(DEPENDS_gen_lex_hash DEPENDS gen_lex_hash)
+ENDIF()
+
+
IF(NOT CMAKE_CROSSCOMPILING)
- ADD_EXECUTABLE(gen_lex_token gen_lex_token.cc)
- ADD_DEPENDENCIES(gen_lex_token GenServerSource)
+ ADD_EXECUTABLE(gen_lex_token gen_lex_token.cc
+ ${CMAKE_CURRENT_BINARY_DIR}/sql_yacc.h)
ENDIF()
ADD_CUSTOM_COMMAND(
OUTPUT ${CMAKE_CURRENT_BINARY_DIR}/lex_token.h
COMMAND gen_lex_token > lex_token.h
- DEPENDS gen_lex_token
+ ${DEPENDS_gen_lex_token}
)
ADD_DEFINITIONS(-DMYSQL_SERVER -DHAVE_EVENT_SCHEDULER)
@@ -341,7 +348,7 @@ ENDIF()
ADD_CUSTOM_COMMAND(
OUTPUT ${CMAKE_CURRENT_BINARY_DIR}/lex_hash.h
COMMAND gen_lex_hash > lex_hash.h
- DEPENDS gen_lex_hash
+ ${DEPENDS_gen_lex_hash}
)
MYSQL_ADD_EXECUTABLE(mysql_tzinfo_to_sql tztime.cc COMPONENT Server)
diff --git a/sql/discover.cc b/sql/discover.cc
index d8ed718fc58..d8bf6ca79c5 100644
--- a/sql/discover.cc
+++ b/sql/discover.cc
@@ -89,8 +89,7 @@ int readfrm(const char *name, const uchar **frmdata, size_t *len)
error= 0;
err:
- if (file > 0)
- (void) mysql_file_close(file, MYF(MY_WME));
+ (void) mysql_file_close(file, MYF(MY_WME));
err_end: /* Here when no file */
DBUG_RETURN (error);
diff --git a/sql/events.cc b/sql/events.cc
index 978a1ebc710..86e85d7f757 100644
--- a/sql/events.cc
+++ b/sql/events.cc
@@ -1,5 +1,6 @@
/*
Copyright (c) 2005, 2013, Oracle and/or its affiliates.
+ Copyright (c) 2017, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@@ -243,6 +244,7 @@ common_1_lev_code:
break;
case INTERVAL_WEEK:
expr/= 7;
+ /* fall through */
default:
close_quote= FALSE;
break;
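
Many hunks in this patch only add /* fall through */ comments to switch cases that intentionally omit break (events.cc above, and later item_func.cc, item_strfunc.cc, log.cc and mysqld.cc); this is the annotation that GCC 7's -Wimplicit-fallthrough recognizes. A tiny C example of the annotated pattern, loosely mirroring the INTERVAL_WEEK case; the names and values are illustrative:

    #include <stdio.h>

    /* Scale an interval expressed in weeks down to the generic unit and let
       the case fall through to the shared default handling; the comment
       documents the missing break for -Wimplicit-fallthrough. */
    static long normalize(int unit_is_week, long expr, int *close_quote)
    {
        *close_quote = 1;
        switch (unit_is_week)
        {
        case 1:
            expr /= 7;                 /* one week = 7 of the generic unit */
            /* fall through */
        default:
            *close_quote = 0;
            break;
        }
        return expr;
    }

    int main(void)
    {
        int q;
        printf("%ld\n", normalize(1, 14, &q));  /* prints 2  */
        printf("%ld\n", normalize(0, 14, &q));  /* prints 14 */
        return 0;
    }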
diff --git a/sql/filesort.cc b/sql/filesort.cc
index 007216b392f..21b45e78c6e 100644
--- a/sql/filesort.cc
+++ b/sql/filesort.cc
@@ -953,6 +953,7 @@ write_keys(Sort_param *param, SORT_INFO *fs_info, uint count,
/* check we won't have more buffpeks than we can possibly keep in memory */
if (my_b_tell(buffpek_pointers) + sizeof(BUFFPEK) > (ulonglong)UINT_MAX)
goto err;
+ bzero(&buffpek, sizeof(buffpek));
buffpek.file_pos= my_b_tell(tempfile);
if ((ha_rows) count > param->max_rows)
count=(uint) param->max_rows; /* purecov: inspected */
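
The filesort.cc hunk zero-fills the BUFFPEK descriptor before populating it, so any padding bytes inside the struct are deterministic when the descriptor is later written to the temporary file (otherwise tools such as Valgrind report a write of uninitialized bytes). A minimal C sketch of the idea with a stand-in struct and tmpfile():

    #include <stdio.h>
    #include <string.h>

    /* A descriptor that very likely contains padding after the char. */
    struct descriptor
    {
        char tag;
        long file_pos;
        long count;
    };

    int main(void)
    {
        struct descriptor d;
        FILE *f = tmpfile();
        if (f == NULL)
            return 1;

        memset(&d, 0, sizeof(d));      /* make padding bytes deterministic */
        d.tag      = 'B';
        d.file_pos = 4096;
        d.count    = 10;

        /* Writing the whole struct now stores no uninitialized bytes. */
        fwrite(&d, sizeof(d), 1, f);
        fclose(f);
        return 0;
    }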
diff --git a/sql/handler.cc b/sql/handler.cc
index 9b66801b109..66f0bb19482 100644
--- a/sql/handler.cc
+++ b/sql/handler.cc
@@ -5685,6 +5685,20 @@ bool handler::check_table_binlog_row_based_internal(bool binlog_row)
table->file->partition_ht()->db_type != DB_TYPE_INNODB) ||
(thd->wsrep_ignore_table == true)))
return 0;
+
+ /* enforce wsrep_max_ws_rows */
+ if (WSREP(thd) && table->s->tmp_table == NO_TMP_TABLE)
+ {
+ thd->wsrep_affected_rows++;
+ if (wsrep_max_ws_rows &&
+ thd->wsrep_exec_mode != REPL_RECV &&
+ thd->wsrep_affected_rows > wsrep_max_ws_rows)
+ {
+ trans_rollback_stmt(thd) || trans_rollback(thd);
+ my_message(ER_ERROR_DURING_COMMIT, "wsrep_max_ws_rows exceeded", MYF(0));
+ return ER_ERROR_DURING_COMMIT;
+ }
+ }
#endif
return (table->s->cached_row_logging_check &&
@@ -5893,7 +5907,7 @@ int handler::ha_external_lock(THD *thd, int lock_type)
DBUG_EXECUTE_IF("external_lock_failure", error= HA_ERR_GENERIC;);
- if (error == 0)
+ if (error == 0 || lock_type == F_UNLCK)
{
m_lock_type= lock_type;
cached_table_flags= table_flags();
diff --git a/sql/item.cc b/sql/item.cc
index f4236eee013..c295c83e1e4 100644
--- a/sql/item.cc
+++ b/sql/item.cc
@@ -1,6 +1,6 @@
/*
Copyright (c) 2000, 2016, Oracle and/or its affiliates.
- Copyright (c) 2010, 2016, MariaDB
+ Copyright (c) 2010, 2017, MariaDB
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@@ -561,7 +561,8 @@ Item::Item(THD *thd):
command => we should check thd->lex->current_select on zero (thd->lex
can be uninitialised)
*/
- if (thd->lex->current_select)
+ if (thd->lex->current_select &&
+ thd->stmt_arena->is_stmt_prepare_or_first_sp_execute())
{
enum_parsing_place place=
thd->lex->current_select->parsing_place;
diff --git a/sql/item_cmpfunc.cc b/sql/item_cmpfunc.cc
index 07b5f90bf69..49135e846cd 100644
--- a/sql/item_cmpfunc.cc
+++ b/sql/item_cmpfunc.cc
@@ -5264,6 +5264,15 @@ int Regexp_processor_pcre::default_regex_flags()
return default_regex_flags_pcre(current_thd);
}
+void Regexp_processor_pcre::set_recursion_limit(THD *thd)
+{
+ long stack_used;
+ DBUG_ASSERT(thd == current_thd);
+ stack_used= available_stack_size(thd->thread_stack, &stack_used);
+ m_pcre_extra.match_limit_recursion=
+ (my_thread_stack_size - stack_used)/my_pcre_frame_size;
+}
+
/**
Convert string to lib_charset, if needed.
@@ -5355,15 +5364,77 @@ void Regexp_processor_pcre::pcre_exec_warn(int rc) const
*/
switch (rc)
{
+ case PCRE_ERROR_NULL:
+      errmsg= "pcre_exec: null argument passed";
+ break;
+ case PCRE_ERROR_BADOPTION:
+ errmsg= "pcre_exec: bad option";
+ break;
+ case PCRE_ERROR_BADMAGIC:
+ errmsg= "pcre_exec: bad magic - not a compiled regex";
+ break;
+ case PCRE_ERROR_UNKNOWN_OPCODE:
+ errmsg= "pcre_exec: error in compiled regex";
+ break;
case PCRE_ERROR_NOMEMORY:
errmsg= "pcre_exec: Out of memory";
break;
+ case PCRE_ERROR_NOSUBSTRING:
+ errmsg= "pcre_exec: no substring";
+ break;
+ case PCRE_ERROR_MATCHLIMIT:
+ errmsg= "pcre_exec: match limit exceeded";
+ break;
+ case PCRE_ERROR_CALLOUT:
+ errmsg= "pcre_exec: callout error";
+ break;
case PCRE_ERROR_BADUTF8:
errmsg= "pcre_exec: Invalid utf8 byte sequence in the subject string";
break;
+ case PCRE_ERROR_BADUTF8_OFFSET:
+ errmsg= "pcre_exec: Started at invalid location within utf8 byte sequence";
+ break;
+ case PCRE_ERROR_PARTIAL:
+ errmsg= "pcre_exec: partial match";
+ break;
+ case PCRE_ERROR_INTERNAL:
+ errmsg= "pcre_exec: internal error";
+ break;
+ case PCRE_ERROR_BADCOUNT:
+      errmsg= "pcre_exec: ovecsize is negative";
+ break;
+ case PCRE_ERROR_RECURSIONLIMIT:
+ my_snprintf(buf, sizeof(buf), "pcre_exec: recursion limit of %ld exceeded",
+ m_pcre_extra.match_limit_recursion);
+ errmsg= buf;
+ break;
+ case PCRE_ERROR_BADNEWLINE:
+ errmsg= "pcre_exec: bad newline options";
+ break;
+ case PCRE_ERROR_BADOFFSET:
+ errmsg= "pcre_exec: start offset negative or greater than string length";
+ break;
+ case PCRE_ERROR_SHORTUTF8:
+ errmsg= "pcre_exec: ended in middle of utf8 sequence";
+ break;
+ case PCRE_ERROR_JIT_STACKLIMIT:
+ errmsg= "pcre_exec: insufficient stack memory for JIT compile";
+ break;
case PCRE_ERROR_RECURSELOOP:
errmsg= "pcre_exec: Recursion loop detected";
break;
+ case PCRE_ERROR_BADMODE:
+ errmsg= "pcre_exec: compiled pattern passed to wrong bit library function";
+ break;
+ case PCRE_ERROR_BADENDIANNESS:
+ errmsg= "pcre_exec: compiled pattern passed to wrong endianness processor";
+ break;
+ case PCRE_ERROR_JIT_BADOPTION:
+ errmsg= "pcre_exec: bad jit option";
+ break;
+ case PCRE_ERROR_BADLENGTH:
+ errmsg= "pcre_exec: negative length";
+ break;
default:
/*
As other error codes should normally not happen,
@@ -5399,8 +5470,8 @@ int Regexp_processor_pcre::pcre_exec_with_warn(const pcre *code,
bool Regexp_processor_pcre::exec(const char *str, int length, int offset)
{
- m_pcre_exec_rc= pcre_exec_with_warn(m_pcre, NULL, str, length, offset, 0,
- m_SubStrVec, m_subpatterns_needed * 3);
+ m_pcre_exec_rc= pcre_exec_with_warn(m_pcre, &m_pcre_extra, str, length, offset, 0,
+ m_SubStrVec, array_elements(m_SubStrVec));
return false;
}
@@ -5410,10 +5481,10 @@ bool Regexp_processor_pcre::exec(String *str, int offset,
{
if (!(str= convert_if_needed(str, &subject_converter)))
return true;
- m_pcre_exec_rc= pcre_exec_with_warn(m_pcre, NULL,
+ m_pcre_exec_rc= pcre_exec_with_warn(m_pcre, &m_pcre_extra,
str->c_ptr_safe(), str->length(),
offset, 0,
- m_SubStrVec, m_subpatterns_needed * 3);
+ m_SubStrVec, array_elements(m_SubStrVec));
if (m_pcre_exec_rc > 0)
{
uint i;
@@ -5471,7 +5542,7 @@ Item_func_regex::fix_length_and_dec()
if (agg_arg_charsets_for_comparison(cmp_collation, args, 2))
return;
- re.init(cmp_collation.collation, 0, 0);
+ re.init(cmp_collation.collation, 0);
re.fix_owner(this, args[0], args[1]);
}
@@ -5495,7 +5566,7 @@ Item_func_regexp_instr::fix_length_and_dec()
if (agg_arg_charsets_for_comparison(cmp_collation, args, 2))
return;
- re.init(cmp_collation.collation, 0, 1);
+ re.init(cmp_collation.collation, 0);
re.fix_owner(this, args[0], args[1]);
max_length= MY_INT32_NUM_DECIMAL_DIGITS; // See also Item_func_locate
}
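
The Regexp_processor_pcre changes above attach a pcre_extra block with PCRE_EXTRA_MATCH_LIMIT_RECURSION so that a pathological pattern fails with PCRE_ERROR_RECURSIONLIMIT instead of overflowing the thread stack; set_recursion_limit() later derives the limit from the remaining stack divided by the frame size probed in init_pcre(). A hedged sketch against the classic pcre1 API showing the same mechanism with a hard-coded limit; the pattern, subject and limit are arbitrary:

    #include <pcre.h>
    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
        const char *error;
        int erroffset, ovector[30];
        const char *subject = "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaa!";
        pcre_extra extra;

        /* A pattern whose backtracking recurses heavily on this subject. */
        pcre *re = pcre_compile("^(a+)+$", 0, &error, &erroffset, NULL);
        if (re == NULL)
            return 1;

        /* Cap match-time recursion instead of letting it exhaust the stack. */
        memset(&extra, 0, sizeof(extra));
        extra.flags = PCRE_EXTRA_MATCH_LIMIT_RECURSION;
        extra.match_limit_recursion = 100;

        int rc = pcre_exec(re, &extra, subject, (int) strlen(subject),
                           0, 0, ovector, 30);
        if (rc == PCRE_ERROR_RECURSIONLIMIT)
            printf("recursion limit of %lu exceeded\n",
                   extra.match_limit_recursion);
        else
            printf("pcre_exec returned %d\n", rc);

        pcre_free(re);
        return 0;                      /* build with -lpcre */
    }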
diff --git a/sql/item_cmpfunc.h b/sql/item_cmpfunc.h
index e21e074a7a3..f19cae72cb5 100644
--- a/sql/item_cmpfunc.h
+++ b/sql/item_cmpfunc.h
@@ -2554,6 +2554,7 @@ public:
class Regexp_processor_pcre
{
pcre *m_pcre;
+ pcre_extra m_pcre_extra;
bool m_conversion_is_needed;
bool m_is_const;
int m_library_flags;
@@ -2562,7 +2563,6 @@ class Regexp_processor_pcre
String m_prev_pattern;
int m_pcre_exec_rc;
int m_SubStrVec[30];
- uint m_subpatterns_needed;
void pcre_exec_warn(int rc) const;
int pcre_exec_with_warn(const pcre *code, const pcre_extra *extra,
const char *subject, int length, int startoffset,
@@ -2576,11 +2576,14 @@ public:
m_pcre(NULL), m_conversion_is_needed(true), m_is_const(0),
m_library_flags(0),
m_data_charset(&my_charset_utf8_general_ci),
- m_library_charset(&my_charset_utf8_general_ci),
- m_subpatterns_needed(0)
- {}
+ m_library_charset(&my_charset_utf8_general_ci)
+ {
+ m_pcre_extra.flags= PCRE_EXTRA_MATCH_LIMIT_RECURSION;
+ m_pcre_extra.match_limit_recursion= 100L;
+ }
int default_regex_flags();
- void init(CHARSET_INFO *data_charset, int extra_flags, uint nsubpatterns_arg)
+ void set_recursion_limit(THD *);
+ void init(CHARSET_INFO *data_charset, int extra_flags)
{
m_library_flags= default_regex_flags() | extra_flags |
(data_charset != &my_charset_bin ?
@@ -2594,7 +2597,6 @@ public:
m_conversion_is_needed= (data_charset != &my_charset_bin) &&
!my_charset_same(data_charset, m_library_charset);
- m_subpatterns_needed= nsubpatterns_arg;
}
void fix_owner(Item_func *owner, Item *subject_arg, Item *pattern_arg);
bool compile(String *pattern, bool send_error);
diff --git a/sql/item_func.cc b/sql/item_func.cc
index 74427da4cf2..c36f177223b 100644
--- a/sql/item_func.cc
+++ b/sql/item_func.cc
@@ -1,5 +1,5 @@
/* Copyright (c) 2000, 2015, Oracle and/or its affiliates.
- Copyright (c) 2009, 2015, MariaDB
+ Copyright (c) 2009, 2017, MariaDB
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@@ -1729,6 +1729,7 @@ my_decimal *Item_func_mod::decimal_op(my_decimal *decimal_value)
return decimal_value;
case E_DEC_DIV_ZERO:
signal_divide_by_null();
+ /* fall through */
default:
null_value= 1;
return 0;
@@ -3648,6 +3649,7 @@ longlong Item_master_gtid_wait::val_int()
{
DBUG_ASSERT(fixed == 1);
longlong result= 0;
+ String *gtid_pos = args[0]->val_str(&value);
if (args[0]->null_value)
{
@@ -3659,7 +3661,6 @@ longlong Item_master_gtid_wait::val_int()
#ifdef HAVE_REPLICATION
THD* thd= current_thd;
longlong timeout_us;
- String *gtid_pos = args[0]->val_str(&value);
if (arg_count==2 && !args[1]->null_value)
timeout_us= (longlong)(1e6*args[1]->val_real());
diff --git a/sql/item_func.h b/sql/item_func.h
index 077f69fe0f5..03eb01bbcc9 100644
--- a/sql/item_func.h
+++ b/sql/item_func.h
@@ -1622,7 +1622,11 @@ public:
longlong val_int();
const char *func_name() const { return "coercibility"; }
void fix_length_and_dec() { max_length=10; maybe_null= 0; }
- table_map not_null_tables() const { return 0; }
+ bool eval_not_null_tables(void *)
+ {
+ not_null_tables_cache= 0;
+ return false;
+ }
Item* propagate_equal_fields(THD *thd, const Context &ctx, COND_EQUAL *cond)
{ return this; }
bool const_item() const { return true; }
@@ -1953,7 +1957,11 @@ public:
}
}
void cleanup();
- table_map not_null_tables() const { return 0; }
+ bool eval_not_null_tables(void *opt_arg)
+ {
+ not_null_tables_cache= 0;
+ return 0;
+ }
bool is_expensive() { return 1; }
virtual void print(String *str, enum_query_type query_type);
bool check_vcol_func_processor(void *arg)
@@ -2201,7 +2209,7 @@ public:
:Item_long_func(thd, a, b) {}
longlong val_int();
const char *func_name() const { return "master_gtid_wait"; }
- void fix_length_and_dec() { max_length= 2; maybe_null=0;}
+ void fix_length_and_dec() { max_length=2; }
bool check_vcol_func_processor(void *arg)
{
return mark_unsupported_function(func_name(), "()", arg, VCOL_IMPOSSIBLE);
@@ -2482,7 +2490,11 @@ public:
bool is_expensive_processor(void *arg) { return TRUE; }
enum Functype functype() const { return FT_FUNC; }
const char *func_name() const { return "match"; }
- table_map not_null_tables() const { return 0; }
+ bool eval_not_null_tables(void *opt_arg)
+ {
+ not_null_tables_cache= 0;
+ return 0;
+ }
bool fix_fields(THD *thd, Item **ref);
bool eq(const Item *, bool binary_cmp) const;
/* The following should be safe, even if we compare doubles */
@@ -2769,6 +2781,11 @@ public:
clone->sp_result_field= NULL;
return clone;
}
+ bool eval_not_null_tables(void *opt_arg)
+ {
+ not_null_tables_cache= 0;
+ return 0;
+ }
};
@@ -2863,8 +2880,12 @@ public:
my_decimal *val_decimal(my_decimal *);
void fix_length_and_dec();
const char *func_name() const { return "last_value"; }
- table_map not_null_tables() const { return 0; }
const Type_handler *type_handler() const { return last_value->type_handler(); }
+ bool eval_not_null_tables(void *)
+ {
+ not_null_tables_cache= 0;
+ return 0;
+ }
bool const_item() const { return 0; }
void evaluate_sideeffects();
void update_used_tables()
diff --git a/sql/item_strfunc.cc b/sql/item_strfunc.cc
index 4fcff4c0d2e..fb7a60f356e 100644
--- a/sql/item_strfunc.cc
+++ b/sql/item_strfunc.cc
@@ -225,7 +225,7 @@ String *Item_func_sha2::val_str_ascii(String *str)
break;
case 0: // SHA-256 is the default
digest_length= 256;
- /* fall trough */
+ /* fall through */
case 256:
my_sha256(digest_buf, input_ptr, input_len);
break;
@@ -271,7 +271,7 @@ void Item_func_sha2::fix_length_and_dec()
switch (sha_variant) {
case 0: // SHA-256 is the default
sha_variant= 256;
- /* fall trough */
+ /* fall through */
case 512:
case 384:
case 256:
@@ -1349,7 +1349,7 @@ void Item_func_regexp_replace::fix_length_and_dec()
if (agg_arg_charsets_for_string_result_with_comparison(collation, args, 3))
return;
max_length= MAX_BLOB_WIDTH;
- re.init(collation.collation, 0, 10);
+ re.init(collation.collation, 0);
re.fix_owner(this, args[0], args[1]);
}
@@ -1484,7 +1484,7 @@ void Item_func_regexp_substr::fix_length_and_dec()
if (agg_arg_charsets_for_string_result_with_comparison(collation, args, 2))
return;
fix_char_length(args[0]->max_char_length());
- re.init(collation.collation, 0, 10);
+ re.init(collation.collation, 0);
re.fix_owner(this, args[0], args[1]);
}
@@ -5163,7 +5163,7 @@ bool Item_dyncol_get::get_date(MYSQL_TIME *ltime, ulonglong fuzzy_date)
goto null;
case DYN_COL_INT:
signed_value= 1; // For error message
- /* fall_trough */
+ /* fall through */
case DYN_COL_UINT:
if (signed_value || val.x.ulong_value <= LONGLONG_MAX)
{
@@ -5177,7 +5177,7 @@ bool Item_dyncol_get::get_date(MYSQL_TIME *ltime, ulonglong fuzzy_date)
}
/* let double_to_datetime_with_warn() issue the warning message */
val.x.double_value= static_cast<double>(ULONGLONG_MAX);
- /* fall_trough */
+ /* fall through */
case DYN_COL_DOUBLE:
if (double_to_datetime_with_warn(val.x.double_value, ltime, fuzzy_date,
0 /* TODO */))
diff --git a/sql/item_subselect.cc b/sql/item_subselect.cc
index 09fbf3e239c..dd362cbc15e 100644
--- a/sql/item_subselect.cc
+++ b/sql/item_subselect.cc
@@ -1425,8 +1425,9 @@ Item_in_subselect::Item_in_subselect(THD *thd, Item * left_exp,
st_select_lex *select_lex):
Item_exists_subselect(thd), left_expr_cache(0), first_execution(TRUE),
in_strategy(SUBS_NOT_TRANSFORMED),
- pushed_cond_guards(NULL), is_jtbm_merged(FALSE), is_jtbm_const_tab(FALSE),
- is_flattenable_semijoin(FALSE), is_registered_semijoin(FALSE),
+ pushed_cond_guards(NULL), do_not_convert_to_sj(FALSE), is_jtbm_merged(FALSE),
+ is_jtbm_const_tab(FALSE), is_flattenable_semijoin(FALSE),
+ is_registered_semijoin(FALSE),
upper_item(0)
{
DBUG_ENTER("Item_in_subselect::Item_in_subselect");
@@ -2587,6 +2588,27 @@ bool Item_in_subselect::inject_in_to_exists_cond(JOIN *join_arg)
DBUG_ENTER("Item_in_subselect::inject_in_to_exists_cond");
DBUG_ASSERT(thd == join_arg->thd);
+ if (select_lex->min_max_opt_list.elements)
+ {
+ /*
+      MIN/MAX optimizations have been applied in opt_sum_query() to
+      Item_sum objects of the subquery of this subquery predicate.
+      Injecting a new condition invalidates these optimizations,
+      so they must be rolled back here.
+ */
+ List_iterator_fast<Item_sum> it(select_lex->min_max_opt_list);
+ Item_sum *item;
+ while ((item= it++))
+ {
+ item->clear();
+ item->reset_forced_const();
+ }
+ if (where_item)
+ where_item->update_used_tables();
+ if (having_item)
+ having_item->update_used_tables();
+ }
+
if (where_item)
{
List<Item> *and_args= NULL;
@@ -3761,7 +3783,10 @@ int subselect_single_select_engine::exec()
}
}
if (item->engine_changed(this))
+ {
+ thd->lex->current_select= save_select;
DBUG_RETURN(1);
+ }
}
if (select_lex->uncacheable &&
select_lex->uncacheable != UNCACHEABLE_EXPLAIN
diff --git a/sql/item_subselect.h b/sql/item_subselect.h
index cb60b646979..6112c1c22f4 100644
--- a/sql/item_subselect.h
+++ b/sql/item_subselect.h
@@ -505,6 +505,8 @@ public:
Item *left_expr_orig;
/* Priority of this predicate in the convert-to-semi-join-nest process. */
int sj_convert_priority;
+ /* May be TRUE only for the candidates to semi-join conversion */
+ bool do_not_convert_to_sj;
/*
Types of left_expr and subquery's select list allow to perform subquery
materialization. Currently, we set this to FALSE when it as well could
@@ -595,8 +597,8 @@ public:
Item_in_subselect(THD *thd_arg):
Item_exists_subselect(thd_arg), left_expr_cache(0), first_execution(TRUE),
in_strategy(SUBS_NOT_TRANSFORMED),
- pushed_cond_guards(NULL), func(NULL), is_jtbm_merged(FALSE),
- is_jtbm_const_tab(FALSE), upper_item(0) {}
+ pushed_cond_guards(NULL), func(NULL), do_not_convert_to_sj(FALSE),
+ is_jtbm_merged(FALSE), is_jtbm_const_tab(FALSE), upper_item(0) {}
void cleanup();
subs_type substype() { return IN_SUBS; }
void reset()
@@ -651,6 +653,8 @@ public:
*/
int get_identifier();
+ void block_conversion_to_sj () { do_not_convert_to_sj= TRUE; }
+
bool test_strategy(uchar strategy)
{ return MY_TEST(in_strategy & strategy); }
diff --git a/sql/item_sum.h b/sql/item_sum.h
index 65306ab6f48..a160d0ee522 100644
--- a/sql/item_sum.h
+++ b/sql/item_sum.h
@@ -485,6 +485,7 @@ public:
used_tables_cache= 0;
const_item_cache= true;
}
+ void reset_forced_const() { const_item_cache= false; }
virtual bool const_during_execution() const { return false; }
virtual void print(String *str, enum_query_type query_type);
void fix_num_length_and_dec();
diff --git a/sql/lock.cc b/sql/lock.cc
index 12de6ae0616..a34613fb7fe 100644
--- a/sql/lock.cc
+++ b/sql/lock.cc
@@ -422,6 +422,7 @@ void mysql_unlock_tables(THD *thd, MYSQL_LOCK *sql_lock)
void mysql_unlock_tables(THD *thd, MYSQL_LOCK *sql_lock, bool free_lock)
{
DBUG_ENTER("mysql_unlock_tables");
+ bool errors= thd->is_error();
THD_STAGE_INFO(thd, stage_unlocking_tables);
if (sql_lock->table_count)
@@ -430,6 +431,8 @@ void mysql_unlock_tables(THD *thd, MYSQL_LOCK *sql_lock, bool free_lock)
thr_multi_unlock(sql_lock->locks, sql_lock->lock_count, 0);
if (free_lock)
my_free(sql_lock);
+ if (!errors)
+ thd->clear_error();
DBUG_VOID_RETURN;
}
diff --git a/sql/log.cc b/sql/log.cc
index 0ffa4a5a82d..166180e08a4 100644
--- a/sql/log.cc
+++ b/sql/log.cc
@@ -5681,7 +5681,11 @@ int THD::binlog_write_table_map(TABLE *table, bool is_transactional,
/* Annotate event should be written not more than once */
*with_annotate= 0;
if ((error= writer.write(&anno)))
+ {
+ if (my_errno == EFBIG)
+ cache_data->set_incident();
DBUG_RETURN(error);
+ }
}
if ((error= writer.write(&the_event)))
DBUG_RETURN(error);
@@ -9220,8 +9224,10 @@ void TC_LOG_MMAP::close()
mysql_cond_destroy(&COND_pool);
mysql_cond_destroy(&COND_active);
mysql_cond_destroy(&COND_queue_busy);
+ /* fall through */
case 5:
data[0]='A'; // garble the first (signature) byte, in case mysql_file_delete fails
+ /* fall through */
case 4:
for (i=0; i < npages; i++)
{
@@ -9230,10 +9236,13 @@ void TC_LOG_MMAP::close()
mysql_mutex_destroy(&pages[i].lock);
mysql_cond_destroy(&pages[i].cond);
}
+ /* fall through */
case 3:
my_free(pages);
+ /* fall through */
case 2:
my_munmap((char*)data, (size_t)file_length);
+ /* fall through */
case 1:
mysql_file_close(fd, MYF(0));
}
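
The /* fall through */ comments inserted throughout this patch are the marker GCC's -Wimplicit-fallthrough recognizes for deliberate case fall-through, so cascades like the one above keep compiling cleanly with newer warning levels. A minimal, self-contained illustration, modeled on the size_number cascade in sql_yacc.yy rather than taken from server code:

    // Deliberate switch fall-through, annotated so that
    // g++ -Wextra (which enables -Wimplicit-fallthrough) stays quiet.
    #include <cstdio>

    static unsigned long long scale(unsigned long long n, char suffix)
    {
      int shift= 0;
      switch (suffix) {
      case 'g': case 'G':
        shift+= 10;
        /* fall through */
      case 'm': case 'M':
        shift+= 10;
        /* fall through */
      case 'k': case 'K':
        shift+= 10;
        break;
      default:
        break;
      }
      return n << shift;
    }

    int main()
    {
      std::printf("%llu\n", scale(4, 'M'));   // prints 4194304
      return 0;
    }
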
diff --git a/sql/log_event.cc b/sql/log_event.cc
index 3c062975041..24bcaf8a60b 100644
--- a/sql/log_event.cc
+++ b/sql/log_event.cc
@@ -8404,7 +8404,6 @@ int Xid_log_event::do_apply_event(rpl_group_info *rgi)
consistent.
*/
#ifdef WITH_WSREP
- /*Set wsrep_affected_rows = 0 */
thd->wsrep_affected_rows= 0;
#endif
diff --git a/sql/log_event_old.cc b/sql/log_event_old.cc
index 9ba29ddb0f6..8f1cfe17656 100644
--- a/sql/log_event_old.cc
+++ b/sql/log_event_old.cc
@@ -1587,7 +1587,7 @@ int Old_rows_log_event::do_apply_event(rpl_group_info *rgi)
rli->report(ERROR_LEVEL, thd->net.last_errno, NULL,
"Error in %s event: row application failed. %s",
get_type_str(), thd->net.last_error);
- thd->is_slave_error = 1;
+ thd->is_slave_error= 1;
break;
}
diff --git a/sql/mysqld.cc b/sql/mysqld.cc
index 74efb91cbd1..6da1bd75531 100644
--- a/sql/mysqld.cc
+++ b/sql/mysqld.cc
@@ -105,6 +105,7 @@
#include "sp_rcontext.h"
#include "sp_cache.h"
#include "sql_reload.h" // reload_acl_and_cache
+#include "pcre.h"
#ifdef HAVE_POLL_H
#include <poll.h>
@@ -3721,6 +3722,7 @@ static void init_libstrings()
#endif
}
+ulonglong my_pcre_frame_size;
static void init_pcre()
{
@@ -3728,6 +3730,8 @@ static void init_pcre()
pcre_free= pcre_stack_free= my_str_free_mysqld;
#ifndef EMBEDDED_LIBRARY
pcre_stack_guard= check_enough_stack_size_slow;
+ /* See http://pcre.org/original/doc/html/pcrestack.html */
+ my_pcre_frame_size= -pcre_exec(NULL, NULL, NULL, -999, -999, 0, NULL, 0) + 16;
#endif
}
@@ -8245,7 +8249,7 @@ static int show_default_keycache(THD *thd, SHOW_VAR *var, char *buff,
{
struct st_data {
KEY_CACHE_STATISTICS stats;
- SHOW_VAR var[8];
+ SHOW_VAR var[9];
} *data;
SHOW_VAR *v;
@@ -9356,7 +9360,10 @@ mysql_getopt_value(const char *name, uint length,
return (uchar**) &key_cache->changed_blocks_hash_size;
}
}
+ /* We return in all cases above. Let us silence -Wimplicit-fallthrough */
+ DBUG_ASSERT(0);
#ifdef HAVE_REPLICATION
+ /* fall through */
case OPT_REPLICATE_DO_DB:
case OPT_REPLICATE_DO_TABLE:
case OPT_REPLICATE_IGNORE_DB:
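
init_pcre() now caches the size of a pcre_exec() recursion frame using the probe described on the pcrestack page linked in the comment: calling pcre_exec() with a NULL pattern returns the negative of the frame size, to which mysqld adds a margin of 16. Here is a stand-alone sketch of that probe; it assumes the legacy libpcre 8.x headers and library are available (the PCRE2 API differs).

    // Stand-alone sketch of the frame-size probe used in init_pcre() above.
    // Build with: g++ probe.cc -lpcre   (assumes legacy libpcre 8.x is installed)
    #include <pcre.h>
    #include <cstdio>

    int main()
    {
      // With a NULL pattern pcre_exec() performs no matching; per the pcrestack
      // documentation the return value is the negative of the match-frame size.
      int rc= pcre_exec(NULL, NULL, NULL, -999, -999, 0, NULL, 0);
      unsigned long long frame_size= (unsigned long long)(-rc) + 16; // margin as in mysqld.cc
      std::printf("approximate pcre_exec() frame size: %llu bytes\n", frame_size);
      return 0;
    }
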
diff --git a/sql/mysqld.h b/sql/mysqld.h
index b7de4ecc324..38e42dd61f1 100644
--- a/sql/mysqld.h
+++ b/sql/mysqld.h
@@ -133,7 +133,7 @@ extern ulong slave_retried_transactions;
extern ulong slave_run_triggers_for_rbr;
extern ulonglong slave_type_conversions_options;
extern my_bool read_only, opt_readonly;
-extern my_bool lower_case_file_system;
+extern MYSQL_PLUGIN_IMPORT my_bool lower_case_file_system;
extern my_bool opt_enable_named_pipe, opt_sync_frm, opt_allow_suspicious_udfs;
extern my_bool opt_secure_auth;
extern const char *current_dbug_option;
@@ -527,6 +527,8 @@ extern pthread_t signal_thread;
extern struct st_VioSSLFd * ssl_acceptor_fd;
#endif /* HAVE_OPENSSL */
+extern ulonglong my_pcre_frame_size;
+
/*
The following variables were under INNODB_COMPABILITY_HOOKS
*/
diff --git a/sql/opt_range.cc b/sql/opt_range.cc
index 40184d7a421..9161dee7836 100644
--- a/sql/opt_range.cc
+++ b/sql/opt_range.cc
@@ -6955,7 +6955,10 @@ QUICK_SELECT_I *TRP_ROR_UNION::make_quick(PARAM *param,
{
if (!(quick= (*scan)->make_quick(param, FALSE, &quick_roru->alloc)) ||
quick_roru->push_quick_back(quick))
+ {
+ delete quick_roru;
DBUG_RETURN(NULL);
+ }
}
quick_roru->records= records;
quick_roru->read_time= read_cost;
@@ -10795,9 +10798,7 @@ QUICK_RANGE_SELECT *get_quick_select_for_ref(THD *thd, TABLE *table,
*/
thd->mem_root= old_root;
- if (!quick || create_err)
- return 0; /* no ranges found */
- if (quick->init())
+ if (!quick || create_err || quick->init())
goto err;
quick->records= records;
diff --git a/sql/opt_subselect.cc b/sql/opt_subselect.cc
index b18fb8f2ae5..1b245342f76 100644
--- a/sql/opt_subselect.cc
+++ b/sql/opt_subselect.cc
@@ -980,6 +980,25 @@ bool check_for_outer_joins(List<TABLE_LIST> *join_list)
}
+void find_and_block_conversion_to_sj(Item *to_find,
+ List_iterator_fast<Item_in_subselect> &li)
+{
+ if (to_find->type() != Item::SUBSELECT_ITEM ||
+ ((Item_subselect *) to_find)->substype() != Item_subselect::IN_SUBS)
+ return;
+ Item_in_subselect *in_subq;
+ li.rewind();
+ while ((in_subq= li++))
+ {
+ if (in_subq == to_find)
+ {
+ in_subq->block_conversion_to_sj();
+ return;
+ }
+ }
+}
+
+
/*
Convert semi-join subquery predicates into semi-join join nests
@@ -1032,7 +1051,6 @@ bool convert_join_subqueries_to_semijoins(JOIN *join)
Query_arena *arena, backup;
Item_in_subselect *in_subq;
THD *thd= join->thd;
- List_iterator<TABLE_LIST> ti(join->select_lex->leaf_tables);
DBUG_ENTER("convert_join_subqueries_to_semijoins");
if (join->select_lex->sj_subselects.is_empty())
@@ -1050,6 +1068,89 @@ bool convert_join_subqueries_to_semijoins(JOIN *join)
subq_sel->update_used_tables();
}
+ /*
+ Check all candidates for semi-join conversion that occur
+ in ON expressions of outer joins. Set the flag blocking
+ this conversion for them.
+ */
+ TABLE_LIST *tbl;
+ List_iterator<TABLE_LIST> ti(join->select_lex->leaf_tables);
+ while ((tbl= ti++))
+ {
+ TABLE_LIST *embedded;
+ TABLE_LIST *embedding= tbl;
+ do
+ {
+ embedded= embedding;
+ bool block_conversion_to_sj= false;
+ if (embedded->on_expr)
+ {
+ /*
+ Conversion of an IN subquery predicate into semi-join
+ is blocked now if the predicate occurs:
+ - in the ON expression of an outer join
+ - in the ON expression of an inner join embedded directly
+ or indirectly in the inner nest of an outer join
+ */
+ for (TABLE_LIST *tl= embedded; tl; tl= tl->embedding)
+ {
+ if (tl->outer_join)
+ {
+ block_conversion_to_sj= true;
+ break;
+ }
+ }
+ }
+ if (block_conversion_to_sj)
+ {
+ Item *cond= embedded->on_expr;
+ if (!cond)
+ ;
+ else if (cond->type() != Item::COND_ITEM)
+ find_and_block_conversion_to_sj(cond, li);
+ else if (((Item_cond*) cond)->functype() ==
+ Item_func::COND_AND_FUNC)
+ {
+ Item *item;
+ List_iterator<Item> it(*(((Item_cond*) cond)->argument_list()));
+ while ((item= it++))
+ {
+ find_and_block_conversion_to_sj(item, li);
+ }
+ }
+ }
+ embedding= embedded->embedding;
+ }
+ while (embedding &&
+ embedding->nested_join->join_list.head() == embedded);
+ }
+
+ /*
+ Block conversion to semi-joins for those candidates that
+ are encountered in the WHERE condition of a multi-table view
+ with CHECK OPTION when this view is used in UPDATE/DELETE.
+ (This limitation could probably be lifted easily.)
+ */
+ li.rewind();
+ while ((in_subq= li++))
+ {
+ if (in_subq->emb_on_expr_nest != NO_JOIN_NEST &&
+ in_subq->emb_on_expr_nest->effective_with_check)
+ {
+ in_subq->block_conversion_to_sj();
+ }
+ }
+
+ if (join->select_options & SELECT_STRAIGHT_JOIN)
+ {
+ /* Block conversion to semijoins for all candidates */
+ li.rewind();
+ while ((in_subq= li++))
+ {
+ in_subq->block_conversion_to_sj();
+ }
+ }
+
li.rewind();
/* First, convert child join's subqueries. We proceed bottom-up here */
while ((in_subq= li++))
@@ -1068,8 +1169,10 @@ bool convert_join_subqueries_to_semijoins(JOIN *join)
if (convert_join_subqueries_to_semijoins(child_join))
DBUG_RETURN(TRUE);
+
+
in_subq->sj_convert_priority=
- MY_TEST(in_subq->emb_on_expr_nest != NO_JOIN_NEST) * MAX_TABLES * 2 +
+ MY_TEST(in_subq->do_not_convert_to_sj) * MAX_TABLES * 2 +
in_subq->is_correlated * MAX_TABLES + child_join->outer_tables;
}
@@ -1102,7 +1205,7 @@ bool convert_join_subqueries_to_semijoins(JOIN *join)
bool remove_item= TRUE;
/* Stop processing if we've reached a subquery that's attached to the ON clause */
- if (in_subq->emb_on_expr_nest != NO_JOIN_NEST)
+ if (in_subq->do_not_convert_to_sj)
break;
if (in_subq->is_flattenable_semijoin)
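
The new code above walks each table's chain of embedding nests; if any nest on the chain is an outer join, every IN subquery found in that ON expression gets block_conversion_to_sj(), and the sj_convert_priority formula now keys off do_not_convert_to_sj instead of emb_on_expr_nest. A simplified model of the chain walk follows; JoinNest is a hypothetical stand-in for TABLE_LIST carrying only the fields the loop needs.

    // Simplified model of the embedding-chain test added above.
    #include <cstdio>

    struct JoinNest
    {
      const char *name;
      JoinNest   *embedding;   // enclosing nest, or nullptr at the top level
      bool        outer_join;  // true when this nest is the inner side of an outer join
    };

    // True when 'nest' or any nest enclosing it is an outer join, i.e. when an
    // IN predicate attached to its ON clause must not become a semi-join.
    static bool must_block_conversion_to_sj(const JoinNest *nest)
    {
      for (const JoinNest *tl= nest; tl; tl= tl->embedding)
        if (tl->outer_join)
          return true;
      return false;
    }

    int main()
    {
      JoinNest top=   { "top",   nullptr, false };
      JoinNest outer= { "outer", &top,    true  };   // t1 LEFT JOIN (...)
      JoinNest inner= { "inner", &outer,  false };   // inner join nested inside it

      std::printf("%s: %d\n", inner.name, must_block_conversion_to_sj(&inner)); // 1: blocked
      std::printf("%s: %d\n", top.name,   must_block_conversion_to_sj(&top));   // 0: convertible
      return 0;
    }
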
diff --git a/sql/opt_sum.cc b/sql/opt_sum.cc
index 9bc21ab3ac3..ab587b8b279 100644
--- a/sql/opt_sum.cc
+++ b/sql/opt_sum.cc
@@ -1,5 +1,5 @@
/* Copyright (c) 2000, 2011, Oracle and/or its affiliates.
- Copyright (c) 2008-2011 Monty Program Ab
+ Copyright (c) 2008, 2017, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@@ -254,6 +254,8 @@ int opt_sum_query(THD *thd,
int error= 0;
DBUG_ENTER("opt_sum_query");
+ thd->lex->current_select->min_max_opt_list.empty();
+
if (conds)
where_tables= conds->used_tables();
@@ -447,7 +449,14 @@ int opt_sum_query(THD *thd,
item_sum->aggregator_clear();
}
else
+ {
item_sum->reset_and_add();
+ /*
+ Save a reference to the item for possible rollback
+ of the min/max optimizations for this select
+ */
+ thd->lex->current_select->min_max_opt_list.push_back(item_sum);
+ }
item_sum->make_const();
recalc_const_item= 1;
break;
@@ -1042,6 +1051,7 @@ static int maxmin_in_range(bool max_fl, Field* field, COND *cond)
case Item_func::LT_FUNC:
case Item_func::LE_FUNC:
less_fl= 1;
+ /* fall through */
case Item_func::GT_FUNC:
case Item_func::GE_FUNC:
{
diff --git a/sql/rpl_gtid.cc b/sql/rpl_gtid.cc
index a2617de2757..c385434e41e 100644
--- a/sql/rpl_gtid.cc
+++ b/sql/rpl_gtid.cc
@@ -2045,7 +2045,7 @@ gtid_waiting::wait_for_pos(THD *thd, String *gtid_str, longlong timeout_us)
{
case -1:
status_var_increment(thd->status_var.master_gtid_wait_timeouts);
- /* Deliberate fall through. */
+ /* fall through */
case 0:
status_var_add(thd->status_var.master_gtid_wait_time,
microsecond_interval_timer() - before);
diff --git a/sql/rpl_mi.cc b/sql/rpl_mi.cc
index 60c887965fb..e90557efd0d 100644
--- a/sql/rpl_mi.cc
+++ b/sql/rpl_mi.cc
@@ -41,7 +41,7 @@ Master_info::Master_info(LEX_CSTRING *connection_name_arg,
master_id(0), prev_master_id(0),
using_gtid(USE_GTID_NO), events_queued_since_last_gtid(0),
gtid_reconnect_event_skip_count(0), gtid_event_seen(false),
- in_start_all_slaves(0), in_stop_all_slaves(0),
+ in_start_all_slaves(0), in_stop_all_slaves(0), in_flush_all_relay_logs(0),
users(0), killed(0)
{
char *tmp;
@@ -663,7 +663,7 @@ file '%s')", fname);
mi->connect_retry= (uint) connect_retry;
mi->ssl= (my_bool) ssl;
mi->ssl_verify_server_cert= ssl_verify_server_cert;
- mi->heartbeat_period= master_heartbeat_period;
+ mi->heartbeat_period= MY_MIN(SLAVE_MAX_HEARTBEAT_PERIOD, master_heartbeat_period);
}
DBUG_PRINT("master_info",("log_file_name: %s position: %ld",
mi->master_log_name,
@@ -798,8 +798,8 @@ int flush_master_info(Master_info* mi,
contents of file). But because of number of lines in the first line
of file we don't care about this garbage.
*/
- char heartbeat_buf[sizeof(mi->heartbeat_period) * 4]; // buffer to suffice always
- sprintf(heartbeat_buf, "%.3f", mi->heartbeat_period);
+ char heartbeat_buf[FLOATING_POINT_BUFFER];
+ my_fcvt(mi->heartbeat_period, 3, heartbeat_buf, NULL);
my_b_seek(file, 0L);
my_b_printf(file,
"%u\n%s\n%s\n%s\n%s\n%s\n%d\n%d\n%d\n%s\n%s\n%s\n%s\n%s\n%d\n%s\n%s\n%s\n%s\n%d\n%s\n%s\n"
@@ -1979,4 +1979,53 @@ void prot_store_ids(THD *thd, DYNAMIC_ARRAY *ids)
return;
}
+bool Master_info_index::flush_all_relay_logs()
+{
+ DBUG_ENTER("flush_all_relay_logs");
+ bool result= false;
+ int error= 0;
+ mysql_mutex_lock(&LOCK_active_mi);
+ for (uint i= 0; i< master_info_hash.records; i++)
+ {
+ Master_info *mi;
+ mi= (Master_info *) my_hash_element(&master_info_hash, i);
+ mi->in_flush_all_relay_logs= 0;
+ }
+ for (uint i=0; i < master_info_hash.records;)
+ {
+ Master_info *mi;
+ mi= (Master_info *)my_hash_element(&master_info_hash, i);
+ DBUG_ASSERT(mi);
+
+ if (mi->in_flush_all_relay_logs)
+ {
+ i++;
+ continue;
+ }
+ mi->in_flush_all_relay_logs= 1;
+
+ mysql_mutex_lock(&mi->sleep_lock);
+ mi->users++; // Mark used
+ mysql_mutex_unlock(&mi->sleep_lock);
+ mysql_mutex_unlock(&LOCK_active_mi);
+
+ mysql_mutex_lock(&mi->data_lock);
+ error= rotate_relay_log(mi);
+ mysql_mutex_unlock(&mi->data_lock);
+ mi->release();
+ mysql_mutex_lock(&LOCK_active_mi);
+
+ if (error)
+ {
+ result= true;
+ break;
+ }
+ /* Restart from first element as master_info_hash may have changed */
+ i= 0;
+ continue;
+ }
+ mysql_mutex_unlock(&LOCK_active_mi);
+ DBUG_RETURN(result);
+}
+
#endif /* HAVE_REPLICATION */
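
flush_all_relay_logs() must release LOCK_active_mi around rotate_relay_log(), so master_info_hash can change underneath it; the function therefore marks each Master_info with in_flush_all_relay_logs and restarts the scan from index 0 until only marked entries remain. Below is a generic sketch of that mark-and-restart scan with illustrative types and no locking shown.

    // Generic sketch of the mark-and-restart scan used by flush_all_relay_logs():
    // the container may change whenever the lock is released, so restart from the
    // beginning and skip elements already processed.  Names are illustrative.
    #include <cstdio>
    #include <vector>

    struct Source { int id; bool processed; };

    static void work_without_lock(Source &s) { std::printf("flush #%d\n", s.id); }

    int main()
    {
      std::vector<Source> sources= { {1, false}, {2, false}, {3, false} };

      for (size_t i= 0; i < sources.size(); )
      {
        if (sources[i].processed) { i++; continue; }   // already handled, move on
        sources[i].processed= true;

        // ... the lock is released here in the real code, so 'sources' and the
        //     index may be stale afterwards ...
        work_without_lock(sources[i]);

        i= 0;                                          // restart from the first element
      }
      return 0;
    }
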
diff --git a/sql/rpl_mi.h b/sql/rpl_mi.h
index 535abd25f6d..ccc1be6e5ce 100644
--- a/sql/rpl_mi.h
+++ b/sql/rpl_mi.h
@@ -302,6 +302,7 @@ class Master_info : public Slave_reporting_capability
/* gtid_event_seen is false until we receive first GTID event from master. */
bool gtid_event_seen;
bool in_start_all_slaves, in_stop_all_slaves;
+ bool in_flush_all_relay_logs;
uint users; /* Active user for object */
uint killed;
@@ -354,6 +355,7 @@ public:
bool start_all_slaves(THD *thd);
bool stop_all_slaves(THD *thd);
void free_connections();
+ bool flush_all_relay_logs();
};
diff --git a/sql/slave.cc b/sql/slave.cc
index 641bdae9e31..a7f0f003e5c 100644
--- a/sql/slave.cc
+++ b/sql/slave.cc
@@ -1,5 +1,5 @@
/* Copyright (c) 2000, 2016, Oracle and/or its affiliates.
- Copyright (c) 2009, 2016, MariaDB
+ Copyright (c) 2009, 2017, MariaDB
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@@ -6286,9 +6286,8 @@ static int queue_event(Master_info* mi,const char* buf, ulong event_len)
mi->last_queued_gtid.seq_no == 1000)
goto skip_relay_logging;
});
- /* Fall through to default case ... */
#endif
-
+ /* fall through */
default:
default_action:
DBUG_EXECUTE_IF("kill_slave_io_after_2_events",
diff --git a/sql/sp_head.cc b/sql/sp_head.cc
index f8ad3c305a7..c87a15ff927 100644
--- a/sql/sp_head.cc
+++ b/sql/sp_head.cc
@@ -1,6 +1,6 @@
/*
Copyright (c) 2002, 2016, Oracle and/or its affiliates.
- Copyright (c) 2011, 2016, MariaDB
+ Copyright (c) 2011, 2017, MariaDB
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
diff --git a/sql/sql_acl.cc b/sql/sql_acl.cc
index d63a2f2bc51..433d48a3de4 100644
--- a/sql/sql_acl.cc
+++ b/sql/sql_acl.cc
@@ -11698,12 +11698,6 @@ void fill_effective_table_privileges(THD *thd, GRANT_INFO *grant,
/* global privileges */
grant->privilege= sctx->master_access;
- if (!sctx->priv_user[0] && !sctx->priv_role[0])
- {
- DBUG_PRINT("info", ("privilege 0x%lx", grant->privilege));
- DBUG_VOID_RETURN; // it is slave
- }
-
if (!thd->db || strcmp(db, thd->db))
{
/* db privileges */
diff --git a/sql/sql_audit.cc b/sql/sql_audit.cc
index 8a523ebbf4b..8134adca13f 100644
--- a/sql/sql_audit.cc
+++ b/sql/sql_audit.cc
@@ -240,7 +240,7 @@ void mysql_audit_finalize()
/**
Initialize an Audit plug-in
-
+
@param[in] plugin
@retval FALSE OK
@@ -251,12 +251,13 @@ int initialize_audit_plugin(st_plugin_int *plugin)
{
st_mysql_audit *data= (st_mysql_audit*) plugin->plugin->info;
- if (!data->event_notify || !data->class_mask[0]) {
+ if (!data->event_notify || !data->class_mask[0])
+ {
sql_print_error("Plugin '%s' has invalid data.",
plugin->name.str);
return 1;
}
-
+
if (plugin->plugin->init && plugin->plugin->init(NULL))
{
sql_print_error("Plugin '%s' init function returned error.",
@@ -266,7 +267,7 @@ int initialize_audit_plugin(st_plugin_int *plugin)
/* Make the interface info more easily accessible */
plugin->data= plugin->plugin->info;
-
+
/* Add the bits the plugin is interested in to the global mask */
mysql_mutex_lock(&LOCK_audit_mask);
add_audit_mask(mysql_global_audit_mask, data->class_mask);
diff --git a/sql/sql_class.cc b/sql/sql_class.cc
index 076ae8b9923..69c1770da39 100644
--- a/sql/sql_class.cc
+++ b/sql/sql_class.cc
@@ -5601,6 +5601,7 @@ bool xid_cache_insert(THD *thd, XID_STATE *xid_state)
break;
case 1:
my_error(ER_XAER_DUPID, MYF(0));
+ /* fall through */
default:
xid_state->xid_cache_element= 0;
}
diff --git a/sql/sql_class.h b/sql/sql_class.h
index 58145703499..0aec247d7e6 100644
--- a/sql/sql_class.h
+++ b/sql/sql_class.h
@@ -4670,7 +4670,7 @@ public:
select_result(thd_arg), suppress_my_ok(false)
{
DBUG_ENTER("select_result_interceptor::select_result_interceptor");
- DBUG_PRINT("enter", ("this 0x%lx", (ulong) this));
+ DBUG_PRINT("enter", ("this %p", this));
DBUG_VOID_RETURN;
} /* Remove gcc warning */
uint field_count(List<Item> &fields) const { return 0; }
diff --git a/sql/sql_digest.cc b/sql/sql_digest.cc
index 18106a70475..27c33f1c64b 100644
--- a/sql/sql_digest.cc
+++ b/sql/sql_digest.cc
@@ -1,4 +1,5 @@
/* Copyright (c) 2008, 2015, Oracle and/or its affiliates. All rights reserved.
+ Copyright (c) 2017, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@@ -454,7 +455,8 @@ sql_digest_state* digest_add_token(sql_digest_state *state,
}
} while (found_unary);
}
- /* fall through, for case NULL_SYM below */
+ /* for case NULL_SYM below */
+ /* fall through */
case LEX_HOSTNAME:
case TEXT_STRING:
case NCHAR_STRING:
diff --git a/sql/sql_lex.cc b/sql/sql_lex.cc
index febd0931e48..3469ffcdbfe 100644
--- a/sql/sql_lex.cc
+++ b/sql/sql_lex.cc
@@ -1,5 +1,5 @@
/* Copyright (c) 2000, 2014, Oracle and/or its affiliates.
- Copyright (c) 2009, 2016, MariaDB
+ Copyright (c) 2009, 2017, MariaDB
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@@ -1477,12 +1477,14 @@ static int lex_one_token(YYSTYPE *yylval, THD *thd)
state= MY_LEX_HEX_NUMBER;
break;
}
+ /* fall through */
case MY_LEX_IDENT_OR_BIN:
if (lip->yyPeek() == '\'')
{ // Found b'bin-number'
state= MY_LEX_BIN_NUMBER;
break;
}
+ /* fall through */
case MY_LEX_IDENT:
const char *start;
#if defined(USE_MB) && defined(USE_MB_IDENT)
@@ -1824,6 +1826,7 @@ static int lex_one_token(YYSTYPE *yylval, THD *thd)
break;
}
/* " used for strings */
+ /* fall through */
case MY_LEX_STRING: // Incomplete text string
{
uint sep;
@@ -2182,6 +2185,7 @@ void st_select_lex::init_query()
leaf_tables_prep.empty();
leaf_tables.empty();
item_list.empty();
+ min_max_opt_list.empty();
join= 0;
having= prep_having= where= prep_where= 0;
cond_pushed_into_where= cond_pushed_into_having= 0;
diff --git a/sql/sql_lex.h b/sql/sql_lex.h
index 4ac407b260d..84ab8df0864 100644
--- a/sql/sql_lex.h
+++ b/sql/sql_lex.h
@@ -845,6 +845,11 @@ public:
*/
List<Item_func_match> *ftfunc_list;
List<Item_func_match> ftfunc_list_alloc;
+ /*
+ The list of items to which the MIN/MAX optimizations of opt_sum_query()
+ have been applied. Used to roll back those optimizations when needed.
+ */
+ List<Item_sum> min_max_opt_list;
JOIN *join; /* after JOIN::prepare it is pointer to corresponding JOIN */
List<TABLE_LIST> top_join_list; /* join list of the top level */
List<TABLE_LIST> *join_list; /* list for the currently parsed join */
diff --git a/sql/sql_parse.cc b/sql/sql_parse.cc
index 7169b5dd068..3b9c1c81fd7 100644
--- a/sql/sql_parse.cc
+++ b/sql/sql_parse.cc
@@ -3534,8 +3534,8 @@ mysql_execute_command(THD *thd)
MYF(0));
goto error;
}
- /* no break; fall through */
}
+ /* fall through */
case SQLCOM_SHOW_STATUS_PROC:
case SQLCOM_SHOW_STATUS_FUNC:
case SQLCOM_SHOW_DATABASES:
@@ -3549,7 +3549,7 @@ mysql_execute_command(THD *thd)
case SQLCOM_SELECT:
if (WSREP_CLIENT(thd) && wsrep_sync_wait(thd))
goto error;
-
+ /* fall through */
case SQLCOM_SHOW_PLUGINS:
case SQLCOM_SHOW_VARIABLES:
case SQLCOM_SHOW_CHARSETS:
@@ -4391,8 +4391,8 @@ end_with_restore_list:
/* mysql_update return 2 if we need to switch to multi-update */
if (up_result != 2)
break;
- /* Fall through */
}
+ /* Fall through */
case SQLCOM_UPDATE_MULTI:
{
DBUG_ASSERT(first_table == all_tables && first_table != 0);
@@ -4510,6 +4510,7 @@ end_with_restore_list:
}
#endif
}
+ /* fall through */
case SQLCOM_INSERT:
{
DBUG_ASSERT(first_table == all_tables && first_table != 0);
@@ -5442,6 +5443,7 @@ end_with_restore_list:
initialize this variable because RESET shares the same code as FLUSH
*/
lex->no_write_to_binlog= 1;
+ /* fall through */
case SQLCOM_FLUSH:
{
int write_to_binlog;
@@ -7354,12 +7356,6 @@ bool check_fk_parent_table_access(THD *thd,
****************************************************************************/
-#if STACK_DIRECTION < 0
-#define used_stack(A,B) (long) (A - B)
-#else
-#define used_stack(A,B) (long) (B - A)
-#endif
-
#ifndef DBUG_OFF
long max_stack_used;
#endif
@@ -7376,7 +7372,7 @@ bool check_stack_overrun(THD *thd, long margin,
{
long stack_used;
DBUG_ASSERT(thd == current_thd);
- if ((stack_used=used_stack(thd->thread_stack,(char*) &stack_used)) >=
+ if ((stack_used= available_stack_size(thd->thread_stack, &stack_used)) >=
(long) (my_thread_stack_size - margin))
{
thd->is_fatal_error= 1;
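
check_stack_overrun() now relies on available_stack_size() from include/my_sys.h (also touched by this commit) instead of the local STACK_DIRECTION-dependent used_stack() macro; both estimate stack consumption as the distance between the thread's recorded stack base and the address of a local variable. The following is only a hedged illustration of that idea; the pointer arithmetic is, strictly speaking, implementation-defined, which is one reason to keep it in a single shared helper.

    // Illustration only: approximate stack consumption as the distance between a
    // recorded "stack start" address and the address of a current local variable.
    // Comparing addresses of unrelated objects like this is implementation-defined.
    #include <cstdio>
    #include <cstdlib>

    static long stack_used(const void *base, const void *current)
    {
      return std::labs((long)((const char*)base - (const char*)current));
    }

    static void recurse(const void *stack_base, int depth)
    {
      char local;
      std::printf("depth %d: ~%ld bytes of stack used\n",
                  depth, stack_used(stack_base, &local));
      if (depth < 3)
        recurse(stack_base, depth + 1);
    }

    int main()
    {
      char stack_base;               // address near the top of main()'s frame
      recurse(&stack_base, 1);
      return 0;
    }
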
diff --git a/sql/sql_plugin.cc b/sql/sql_plugin.cc
index d53ef34d1e8..919ced1a3eb 100644
--- a/sql/sql_plugin.cc
+++ b/sql/sql_plugin.cc
@@ -1,6 +1,6 @@
/*
Copyright (c) 2005, 2013, Oracle and/or its affiliates.
- Copyright (c) 2010, 2014, SkySQL Ab.
+ Copyright (c) 2010, 2017, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@@ -1864,8 +1864,8 @@ static bool plugin_load_list(MEM_ROOT *tmp_root, const char *list)
switch ((*(p++)= *(list++))) {
case '\0':
list= NULL; /* terminate the loop */
- /* fall through */
#ifndef __WIN__
+ /* fall through */
case ':': /* can't use this as delimiter as it may be drive letter */
#endif
case ';':
@@ -1908,6 +1908,7 @@ static bool plugin_load_list(MEM_ROOT *tmp_root, const char *list)
str->str= p;
continue;
}
+ /* fall through */
default:
str->length++;
continue;
diff --git a/sql/sql_prepare.cc b/sql/sql_prepare.cc
index 0e15bf45cc7..e0d03fc3173 100644
--- a/sql/sql_prepare.cc
+++ b/sql/sql_prepare.cc
@@ -1,5 +1,5 @@
/* Copyright (c) 2002, 2015, Oracle and/or its affiliates.
- Copyright (c) 2008, 2016, MariaDB
+ Copyright (c) 2008, 2017, MariaDB
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@@ -2343,7 +2343,7 @@ static bool check_prepared_statement(Prepared_statement *stmt)
/* mysql_test_update returns 2 if we need to switch to multi-update */
if (res != 2)
break;
-
+ /* fall through */
case SQLCOM_UPDATE_MULTI:
res= mysql_test_multiupdate(stmt, tables, res == 2);
break;
@@ -3426,7 +3426,7 @@ void mysql_stmt_get_longdata(THD *thd, char *packet, ulong packet_length)
{
stmt->state= Query_arena::STMT_ERROR;
stmt->last_errno= thd->get_stmt_da()->sql_errno();
- strncpy(stmt->last_error, thd->get_stmt_da()->message(), MYSQL_ERRMSG_SIZE);
+ strmake_buf(stmt->last_error, thd->get_stmt_da()->message());
}
thd->set_stmt_da(save_stmt_da);
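
The strmake_buf() change matters because strncpy() does not NUL-terminate the destination when the source is at least as long as the buffer, while MariaDB's strmake-style helpers always do. A small illustration follows; copy_truncate() is an illustrative stand-in, not the real strmake_buf().

    // Illustration of why the strncpy() call above was unsafe.
    #include <cstdio>
    #include <cstring>

    static void copy_truncate(char *dst, size_t dst_size, const char *src)
    {
      std::strncpy(dst, src, dst_size - 1);
      dst[dst_size - 1]= '\0';          // always leave a terminated string
    }

    int main()
    {
      char buf[8];
      std::strncpy(buf, "0123456789", sizeof(buf)); // buf is NOT NUL-terminated now
      copy_truncate(buf, sizeof(buf), "0123456789");
      std::printf("%s\n", buf);                     // prints "0123456"
      return 0;
    }
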
diff --git a/sql/sql_reload.cc b/sql/sql_reload.cc
index 400c230ea43..f54c0b88110 100644
--- a/sql/sql_reload.cc
+++ b/sql/sql_reload.cc
@@ -181,8 +181,12 @@ bool reload_acl_and_cache(THD *thd, unsigned long long options,
slave is not likely to have the same connection names.
*/
tmp_write_to_binlog= 0;
-
- if (!(mi= (get_master_info(&connection_name,
+ if (connection_name.length == 0)
+ {
+ if (master_info_index->flush_all_relay_logs())
+ *write_to_binlog= -1;
+ }
+ else if (!(mi= (get_master_info(&connection_name,
Sql_condition::WARN_LEVEL_ERROR))))
{
result= 1;
diff --git a/sql/sql_repl.cc b/sql/sql_repl.cc
index 98009827cb4..c5fd142f620 100644
--- a/sql/sql_repl.cc
+++ b/sql/sql_repl.cc
@@ -3912,9 +3912,6 @@ bool mysql_show_binlog_events(THD* thd)
Protocol::SEND_NUM_ROWS | Protocol::SEND_EOF))
DBUG_RETURN(TRUE);
- Format_description_log_event *description_event= new
- Format_description_log_event(3); /* MySQL 4.0 by default */
-
DBUG_ASSERT(thd->lex->sql_command == SQLCOM_SHOW_BINLOG_EVENTS ||
thd->lex->sql_command == SQLCOM_SHOW_RELAYLOG_EVENTS);
@@ -3935,6 +3932,9 @@ bool mysql_show_binlog_events(THD* thd)
binary_log= &(mi->rli.relay_log);
}
+ Format_description_log_event *description_event= new
+ Format_description_log_event(3); /* MySQL 4.0 by default */
+
if (binary_log->is_open())
{
SELECT_LEX_UNIT *unit= &thd->lex->unit;
diff --git a/sql/sql_select.cc b/sql/sql_select.cc
index ecffa2b4976..b8297af627b 100644
--- a/sql/sql_select.cc
+++ b/sql/sql_select.cc
@@ -1991,7 +1991,8 @@ JOIN::optimize_inner()
having= new (thd->mem_root) Item_int(thd, (longlong) 0,1);
zero_result_cause= "Impossible HAVING noticed after reading const tables";
error= 0;
- DBUG_RETURN(0);
+ select_lex->mark_const_derived(zero_result_cause);
+ goto setup_subq_exit;
}
}
@@ -3384,7 +3385,8 @@ void JOIN::exec_inner()
condtions may be arbitrarily costly, and because the optimize phase
might not have produced a complete executable plan for EXPLAINs.
*/
- if (exec_const_cond && !(select_options & SELECT_DESCRIBE) &&
+ if (!zero_result_cause &&
+ exec_const_cond && !(select_options & SELECT_DESCRIBE) &&
!exec_const_cond->val_int())
zero_result_cause= "Impossible WHERE noticed after reading const tables";
@@ -9725,12 +9727,20 @@ make_join_select(JOIN *join,SQL_SELECT *select,COND *cond)
/*
Step #2: Extract WHERE/ON parts
*/
+ uint i;
+ for (i= join->top_join_tab_count - 1; i >= join->const_tables; i--)
+ {
+ if (!join->join_tab[i].bush_children)
+ break;
+ }
+ uint last_top_base_tab_idx= i;
+
table_map save_used_tables= 0;
used_tables=((select->const_tables=join->const_table_map) |
OUTER_REF_TABLE_BIT | RAND_TABLE_BIT);
JOIN_TAB *tab;
table_map current_map;
- uint i= join->const_tables;
+ i= join->const_tables;
for (tab= first_depth_first_tab(join); tab;
tab= next_depth_first_tab(join, tab), i++)
{
@@ -9769,7 +9779,7 @@ make_join_select(JOIN *join,SQL_SELECT *select,COND *cond)
Following force including random expression in last table condition.
It solve problem with select like SELECT * FROM t1 WHERE rand() > 0.5
*/
- if (tab == join->join_tab + join->top_join_tab_count - 1)
+ if (tab == join->join_tab + last_top_base_tab_idx)
current_map|= RAND_TABLE_BIT;
used_tables|=current_map;
@@ -9809,10 +9819,10 @@ make_join_select(JOIN *join,SQL_SELECT *select,COND *cond)
save_used_tables= 0;
}
else
- {
- tmp= make_cond_for_table(thd, cond, used_tables, current_map, i,
+ {
+ tmp= make_cond_for_table(thd, cond, used_tables, current_map, i,
FALSE, FALSE);
- }
+ }
/* Add conditions added by add_not_null_conds(). */
if (tab->select_cond)
add_cond_and_fix(thd, &tmp, tab->select_cond);
@@ -14517,7 +14527,8 @@ simplify_joins(JOIN *join, List<TABLE_LIST> *join_list, COND *conds, bool top,
table->table->maybe_null= FALSE;
table->outer_join= 0;
if (!(straight_join || table->straight))
- table->dep_tables= table->embedding? table->embedding->dep_tables: 0;
+ table->dep_tables= table->embedding && !table->embedding->sj_subq_pred ?
+ table->embedding->dep_tables : 0;
if (table->on_expr)
{
/* Add ON expression to the WHERE or upper-level ON condition. */
diff --git a/sql/sql_show.cc b/sql/sql_show.cc
index 7f2a3c4cd0b..b2ee8a2eef0 100644
--- a/sql/sql_show.cc
+++ b/sql/sql_show.cc
@@ -1,5 +1,5 @@
/* Copyright (c) 2000, 2015, Oracle and/or its affiliates.
- Copyright (c) 2009, 2016, MariaDB
+ Copyright (c) 2009, 2017, MariaDB
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@@ -1095,6 +1095,7 @@ public:
is_handled= FALSE;
break;
}
+ /* fall through */
case ER_COLUMNACCESS_DENIED_ERROR:
case ER_VIEW_NO_EXPLAIN: /* Error was anonymized, ignore all the same. */
case ER_PROCACCESS_DENIED_ERROR:
diff --git a/sql/sql_table.cc b/sql/sql_table.cc
index 462b78aeb62..90be57868cd 100644
--- a/sql/sql_table.cc
+++ b/sql/sql_table.cc
@@ -6896,7 +6896,8 @@ bool alter_table_manage_keys(TABLE *table, int indexes_were_disabled,
case Alter_info::LEAVE_AS_IS:
if (!indexes_were_disabled)
break;
- /* fall-through: disabled indexes */
+ /* disabled indexes */
+ /* fall through */
case Alter_info::DISABLE:
error= table->file->ha_disable_indexes(HA_KEY_SWITCH_NONUNIQ_SAVE);
}
@@ -9002,7 +9003,9 @@ bool mysql_alter_table(THD *thd, const char *new_db, const char *new_name,
TODO don't create the frm in the first place
*/
- deletefrm(alter_ctx.get_tmp_path());
+ const char *path= alter_ctx.get_tmp_path();
+ table->file->ha_create_partitioning_metadata(path, NULL, CHF_DELETE_FLAG);
+ deletefrm(path);
my_free(const_cast<uchar*>(frm.str));
goto end_inplace;
}
@@ -9807,7 +9810,9 @@ copy_data_between_tables(THD *thd, TABLE *from, TABLE *to,
}
if (to->file->ha_end_bulk_insert() && error <= 0)
{
- to->file->print_error(my_errno,MYF(0));
+ /* Give error, if not already given */
+ if (!thd->is_error())
+ to->file->print_error(my_errno,MYF(0));
error= 1;
}
to->file->extra(HA_EXTRA_NO_IGNORE_DUP_KEY);
diff --git a/sql/sql_yacc.yy b/sql/sql_yacc.yy
index a92adc470ab..bda8d3fc7f3 100644
--- a/sql/sql_yacc.yy
+++ b/sql/sql_yacc.yy
@@ -4737,9 +4737,11 @@ size_number:
case 'g':
case 'G':
text_shift_number+=10;
+ /* fall through */
case 'm':
case 'M':
text_shift_number+=10;
+ /* fall through */
case 'k':
case 'K':
text_shift_number+=10;
diff --git a/sql/sys_vars.cc b/sql/sys_vars.cc
index e5fe26c6b94..6558d930e72 100644
--- a/sql/sys_vars.cc
+++ b/sql/sys_vars.cc
@@ -1740,7 +1740,10 @@ Sys_var_gtid_binlog_state::do_check(THD *thd, set_var *var)
return true;
}
if (res->length() == 0)
+ {
list= NULL;
+ list_len= 0;
+ }
else if (!(list= gtid_parse_string_to_list(res->ptr(), res->length(),
&list_len)))
{
diff --git a/sql/table.cc b/sql/table.cc
index 9c3613a879c..0a23d2f1f41 100644
--- a/sql/table.cc
+++ b/sql/table.cc
@@ -2056,6 +2056,7 @@ int TABLE_SHARE::init_from_binary_frm_image(THD *thd, bool write,
keyinfo= share->key_info;
uint primary_key= my_strcasecmp(system_charset_info, share->keynames.type_names[0],
primary_key_name) ? MAX_KEY : 0;
+ KEY* key_first_info;
if (primary_key >= MAX_KEY && keyinfo->flags & HA_NOSAME)
{
@@ -2135,19 +2136,38 @@ int TABLE_SHARE::init_from_binary_frm_image(THD *thd, bool write,
keyinfo->name_length+1);
}
+ if (!key)
+ key_first_info= keyinfo;
+
if (ext_key_parts > share->key_parts && key)
{
KEY_PART_INFO *new_key_part= (keyinfo-1)->key_part +
(keyinfo-1)->ext_key_parts;
uint add_keyparts_for_this_key= add_first_key_parts;
+ uint length_bytes= 0, len_null_byte= 0, ext_key_length= 0;
+ Field *field;
/*
Do not extend the key that contains a component
defined over the beginning of a field.
*/
for (i= 0; i < keyinfo->user_defined_key_parts; i++)
- {
+ {
uint fieldnr= keyinfo->key_part[i].fieldnr;
+ field= share->field[keyinfo->key_part[i].fieldnr-1];
+
+ if (field->null_ptr)
+ len_null_byte= HA_KEY_NULL_LENGTH;
+
+ if (field->type() == MYSQL_TYPE_BLOB ||
+ field->real_type() == MYSQL_TYPE_VARCHAR ||
+ field->type() == MYSQL_TYPE_GEOMETRY)
+ {
+ length_bytes= HA_KEY_BLOB_LENGTH;
+ }
+
+ ext_key_length+= keyinfo->key_part[i].length + len_null_byte
+ + length_bytes;
if (share->field[fieldnr-1]->key_length() !=
keyinfo->key_part[i].length)
{
@@ -2156,6 +2176,23 @@ int TABLE_SHARE::init_from_binary_frm_image(THD *thd, bool write,
}
}
+ if (add_keyparts_for_this_key)
+ {
+ for (i= 0; i < add_keyparts_for_this_key; i++)
+ {
+ uint pk_part_length= key_first_info->key_part[i].store_length;
+ if (keyinfo->ext_key_part_map & 1<<i)
+ {
+ if (ext_key_length + pk_part_length > MAX_KEY_LENGTH)
+ {
+ add_keyparts_for_this_key= i;
+ break;
+ }
+ ext_key_length+= pk_part_length;
+ }
+ }
+ }
+
if (add_keyparts_for_this_key < (keyinfo->ext_key_parts -
keyinfo->user_defined_key_parts))
{
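
The init_from_binary_frm_image() hunk estimates the stored length of a secondary key (user-defined parts plus a NULL byte for nullable fields and a length prefix for BLOB/VARCHAR/GEOMETRY parts) and then appends primary-key parts to the extended key only while the total stays within the maximum key length. A rough model of that length budget is shown below; the constants and field descriptions are illustrative, not exact server values.

    // Rough model of the key-length budget applied above.
    #include <cstdio>
    #include <vector>

    struct KeyPart { unsigned length; bool nullable; bool var_length; };

    static const unsigned NULL_BYTE= 1, LENGTH_BYTES= 2, MAX_KEY_LEN= 3072;

    int main()
    {
      std::vector<KeyPart> secondary= { { 767, true, true } };     // e.g. VARCHAR prefix
      std::vector<unsigned> pk_part_store_length= { 4, 2048, 1024 };

      unsigned ext_key_length= 0;
      for (const KeyPart &kp : secondary)
        ext_key_length+= kp.length + (kp.nullable ? NULL_BYTE : 0)
                                   + (kp.var_length ? LENGTH_BYTES : 0);

      // Append primary-key parts only while the extended key still fits.
      unsigned added= 0;
      for (unsigned len : pk_part_store_length)
      {
        if (ext_key_length + len > MAX_KEY_LEN)
          break;
        ext_key_length+= len;
        added++;
      }
      std::printf("kept %u of %zu pk parts, ext length %u\n",
                  added, pk_part_store_length.size(), ext_key_length);
      return 0;
    }
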
diff --git a/sql/wsrep_thd.cc b/sql/wsrep_thd.cc
index 4acf8a3bf1e..f8db20ace99 100644
--- a/sql/wsrep_thd.cc
+++ b/sql/wsrep_thd.cc
@@ -384,7 +384,8 @@ static void wsrep_replication_process(THD *thd)
case WSREP_TRX_MISSING:
/* these suggests a bug in provider code */
WSREP_WARN("bad return from recv() call: %d", rcode);
- /* fall through to node shutdown */
+ /* Shut down this node. */
+ /* fall through */
case WSREP_FATAL:
/* Cluster connectivity is lost.
*
diff --git a/storage/connect/CMakeLists.txt b/storage/connect/CMakeLists.txt
index a602084b5bd..81441892215 100644
--- a/storage/connect/CMakeLists.txt
+++ b/storage/connect/CMakeLists.txt
@@ -18,10 +18,10 @@ SET(CONNECT_PLUGIN_DYNAMIC "connect")
SET(CONNECT_SOURCES
ha_connect.cc connect.cc user_connect.cc mycat.cc
-fmdlex.c osutil.c plugutil.c rcmsg.c rcmsg.h
+fmdlex.c osutil.c rcmsg.c rcmsg.h
array.cpp blkfil.cpp colblk.cpp csort.cpp
-filamap.cpp filamdbf.cpp filamfix.cpp filamgz.cpp filamtxt.cpp
-filter.cpp json.cpp jsonudf.cpp maputil.cpp myconn.cpp myutil.cpp plgdbutl.cpp
+filamap.cpp filamdbf.cpp filamfix.cpp filamgz.cpp filamtxt.cpp filter.cpp
+json.cpp jsonudf.cpp maputil.cpp myconn.cpp myutil.cpp plgdbutl.cpp plugutil.cpp
reldef.cpp tabcol.cpp tabdos.cpp tabext.cpp tabfix.cpp tabfmt.cpp tabjson.cpp
table.cpp tabmul.cpp tabmysql.cpp taboccur.cpp tabpivot.cpp tabsys.cpp tabtbl.cpp
tabutil.cpp tabvir.cpp tabxcl.cpp valblk.cpp value.cpp xindex.cpp xobject.cpp
@@ -38,7 +38,7 @@ user_connect.h valblk.h value.h xindex.h xobject.h xtable.h)
# Definitions that are shared for all OSes
#
add_definitions( -DMARIADB -DFORCE_INIT_OF_VARS -Dconnect_EXPORTS)
-add_definitions( -DHUGE_SUPPORT -DGZ_SUPPORT -DPIVOT_SUPPORT )
+add_definitions( -DHUGE_SUPPORT -DGZ_SUPPORT -DPIVOT_SUPPORT -DUSE_TRY )
#
@@ -270,8 +270,8 @@ IF(CONNECT_WITH_JDBC)
# Find required libraries and include directories
SET (JAVA_SOURCES JdbcInterface.java)
add_jar(JdbcInterface ${JAVA_SOURCES})
- install_jar(JdbcInterface DESTINATION ${INSTALL_PLUGINDIR} COMPONENT connect-engine)
INSTALL(FILES ${CMAKE_CURRENT_SOURCE_DIR}/JavaWrappers.jar
+ ${CMAKE_CURRENT_BINARY_DIR}/JdbcInterface.jar
DESTINATION ${INSTALL_PLUGINDIR} COMPONENT connect-engine)
add_definitions(-DJDBC_SUPPORT)
ELSE()
diff --git a/storage/connect/array.cpp b/storage/connect/array.cpp
index 1998ab890e9..6e0da312ca3 100644
--- a/storage/connect/array.cpp
+++ b/storage/connect/array.cpp
@@ -155,6 +155,7 @@ ARRAY::ARRAY(PGLOBAL g, int type, int size, int length, int prec)
switch (type) {
case TYPE_STRING:
Len = length;
+ /* fall through */
case TYPE_SHORT:
case TYPE_INT:
case TYPE_DOUBLE:
@@ -518,8 +519,8 @@ bool ARRAY::FilTest(PGLOBAL g, PVAL valp, OPVAL opc, int opm)
vp = valp;
} else if (opc != OP_EXIST) {
- sprintf(g->Message, MSG(MISSING_ARG), opc);
- longjmp(g->jumper[g->jump_level], TYPE_ARRAY);
+ sprintf(g->Message, MSG(MISSING_ARG), opc);
+ throw TYPE_ARRAY;
} else // OP_EXIST
return Nval > 0;
@@ -592,6 +593,7 @@ int ARRAY::Convert(PGLOBAL g, int k, PVAL vp)
switch (Type) {
case TYPE_DOUBLE:
prec = 2;
+ /* fall through */
case TYPE_SHORT:
case TYPE_INT:
case TYPE_DATE:
@@ -681,15 +683,15 @@ void ARRAY::SetPrecision(PGLOBAL g, int p)
{
if (Vblp == NULL) {
strcpy(g->Message, MSG(PREC_VBLP_NULL));
- longjmp(g->jumper[g->jump_level], TYPE_ARRAY);
+ throw TYPE_ARRAY;
} // endif Vblp
bool was = Vblp->IsCi();
if (was && !p) {
strcpy(g->Message, MSG(BAD_SET_CASE));
- longjmp(g->jumper[g->jump_level], TYPE_ARRAY);
- } // endif Vblp
+ throw TYPE_ARRAY;
+ } // endif Vblp
if (was || !p)
return;
@@ -699,7 +701,7 @@ void ARRAY::SetPrecision(PGLOBAL g, int p)
if (!was && Type == TYPE_STRING)
// Must be resorted to eliminate duplicate strings
if (Sort(g))
- longjmp(g->jumper[g->jump_level], TYPE_ARRAY);
+ throw TYPE_ARRAY;
} // end of SetPrecision
@@ -977,14 +979,14 @@ PSZ ARRAY::MakeArrayList(PGLOBAL g)
size_t z, len = 2;
if (Type == TYPE_LIST)
- return "(?" "?" "?)"; // To be implemented
+ return (PSZ)("(?" "?" "?)"); // To be implemented
z = MY_MAX(24, GetTypeSize(Type, Len) + 4);
tp = (char*)PlugSubAlloc(g, NULL, z);
for (i = 0; i < Nval; i++) {
Value->SetValue_pvblk(Vblp, i);
- Value->Print(g, tp, z);
+ Value->Prints(g, tp, z);
len += strlen(tp);
} // enfor i
@@ -996,7 +998,7 @@ PSZ ARRAY::MakeArrayList(PGLOBAL g)
for (i = 0; i < Nval;) {
Value->SetValue_pvblk(Vblp, i);
- Value->Print(g, tp, z);
+ Value->Prints(g, tp, z);
strcat(p, tp);
strcat(p, (++i == Nval) ? ")" : ",");
} // enfor i
@@ -1010,7 +1012,7 @@ PSZ ARRAY::MakeArrayList(PGLOBAL g)
/***********************************************************************/
/* Make file output of ARRAY contents. */
/***********************************************************************/
-void ARRAY::Print(PGLOBAL g, FILE *f, uint n)
+void ARRAY::Printf(PGLOBAL g, FILE *f, uint n)
{
char m[64];
int lim = MY_MIN(Nval,10);
@@ -1027,7 +1029,7 @@ void ARRAY::Print(PGLOBAL g, FILE *f, uint n)
if (Vblp)
for (int i = 0; i < lim; i++) {
Value->SetValue_pvblk(Vblp, i);
- Value->Print(g, f, n+4);
+ Value->Printf(g, f, n+4);
} // endfor i
} else
@@ -1038,7 +1040,7 @@ void ARRAY::Print(PGLOBAL g, FILE *f, uint n)
/***********************************************************************/
/* Make string output of ARRAY contents. */
/***********************************************************************/
-void ARRAY::Print(PGLOBAL, char *ps, uint z)
+void ARRAY::Prints(PGLOBAL, char *ps, uint z)
{
if (z < 16)
return;
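
The CONNECT changes in this commit replace setjmp/longjmp error unwinding with C++ exceptions: error sites throw an integer type code or a message string, and the entry points catch both int and const char *. A minimal, self-contained illustration of that pattern follows; the names and messages are made up, not CONNECT's.

    // Minimal illustration of the error-handling pattern adopted in the
    // CONNECT changes above: throw an int code or a C string, catch both.
    #include <cstdio>
    #include <cstring>

    static char message[256];

    static void read_column(bool fail_with_code)
    {
      if (fail_with_code) {
        std::snprintf(message, sizeof(message), "undefined access method");
        throw 1;                       // analogous to "throw TYPE_COLBLK;"
      }
      throw "feature not implemented"; // analogous to "throw g->Message;"
    }

    int main()
    {
      for (int i= 0; i < 2; i++) {
        try {
          read_column(i == 0);
        } catch (int n) {
          std::printf("Exception %d: %s\n", n, message);
        } catch (const char *msg) {
          std::strncpy(message, msg, sizeof(message) - 1);
          message[sizeof(message) - 1]= '\0';
          std::printf("Exception: %s\n", message);
        }
      }
      return 0;
    }
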
diff --git a/storage/connect/array.h b/storage/connect/array.h
index dfc3638de8a..bd38344de06 100644
--- a/storage/connect/array.h
+++ b/storage/connect/array.h
@@ -56,8 +56,8 @@ class DllExport ARRAY : public XOBJECT, public CSORT { // Array descblock
virtual bool Compare(PXOB) {assert(false); return false;}
virtual bool SetFormat(PGLOBAL, FORMAT&) {assert(false); return false;}
//virtual int CheckSpcCol(PTDB, int) {return 0;}
- virtual void Print(PGLOBAL g, FILE *f, uint n);
- virtual void Print(PGLOBAL g, char *ps, uint z);
+ virtual void Printf(PGLOBAL g, FILE *f, uint n);
+ virtual void Prints(PGLOBAL g, char *ps, uint z);
// void Empty(void);
void SetPrecision(PGLOBAL g, int p);
bool AddValue(PGLOBAL g, PSZ sp);
diff --git a/storage/connect/blkfil.cpp b/storage/connect/blkfil.cpp
index 50535d20a3d..3a0292481be 100644
--- a/storage/connect/blkfil.cpp
+++ b/storage/connect/blkfil.cpp
@@ -1,11 +1,11 @@
/************* BlkFil C++ Program Source Code File (.CPP) **************/
/* PROGRAM NAME: BLKFIL */
/* ------------- */
-/* Version 2.5 */
+/* Version 2.6 */
/* */
/* COPYRIGHT: */
/* ---------- */
-/* (C) Copyright to the author Olivier BERTRAND 2004-2015 */
+/* (C) Copyright to the author Olivier BERTRAND 2004-2017 */
/* */
/* WHAT THIS PROGRAM DOES: */
/* ----------------------- */
@@ -56,7 +56,7 @@ BLOCKFILTER::BLOCKFILTER(PTDBDOS tdbp, int op)
/***********************************************************************/
/* Make file output of BLOCKFILTER contents. */
/***********************************************************************/
-void BLOCKFILTER::Print(PGLOBAL, FILE *f, uint n)
+void BLOCKFILTER::Printf(PGLOBAL, FILE *f, uint n)
{
char m[64];
@@ -70,7 +70,7 @@ void BLOCKFILTER::Print(PGLOBAL, FILE *f, uint n)
/***********************************************************************/
/* Make string output of BLOCKFILTER contents. */
/***********************************************************************/
-void BLOCKFILTER::Print(PGLOBAL, char *ps, uint z)
+void BLOCKFILTER::Prints(PGLOBAL, char *ps, uint z)
{
strncat(ps, "BlockFilter(s)", z);
} // end of Print
@@ -595,8 +595,8 @@ BLKFILIN::BLKFILIN(PGLOBAL g, PTDBDOS tdbp, int op, int opm, PXOB *xp)
if (Colp->GetResultType() != Type) {
sprintf(g->Message, "BLKFILIN: %s", MSG(VALTYPE_NOMATCH));
- longjmp(g->jumper[g->jump_level], 99);
- } else if (Colp->GetValue()->IsCi())
+ throw g->Message;
+ } else if (Colp->GetValue()->IsCi())
Arap->SetPrecision(g, 1); // Case insensitive
Sorted = Colp->IsSorted() > 0;
@@ -995,7 +995,7 @@ int BLOCKINDEX::BlockEval(PGLOBAL g)
/***********************************************************************/
/* Make file output of BLOCKINDEX contents. */
/***********************************************************************/
-void BLOCKINDEX::Print(PGLOBAL g, FILE *f, UINT n)
+void BLOCKINDEX::Printf(PGLOBAL g, FILE *f, UINT n)
{
char m[64];
@@ -1013,7 +1013,7 @@ void BLOCKINDEX::Print(PGLOBAL g, FILE *f, UINT n)
/***********************************************************************/
/* Make string output of BLOCKINDEX contents. */
/***********************************************************************/
-void BLOCKINDEX::Print(PGLOBAL g, char *ps, UINT z)
+void BLOCKINDEX::Prints(PGLOBAL g, char *ps, UINT z)
{
strncat(ps, "BlockIndex(es)", z);
} // end of Print
diff --git a/storage/connect/blkfil.h b/storage/connect/blkfil.h
index 00b00139042..61b02c53c14 100644
--- a/storage/connect/blkfil.h
+++ b/storage/connect/blkfil.h
@@ -27,8 +27,8 @@ class DllExport BLOCKFILTER : public BLOCK { /* Block Filter */
// Methods
virtual void Reset(PGLOBAL) = 0;
virtual int BlockEval(PGLOBAL) = 0;
- virtual void Print(PGLOBAL g, FILE *f, uint n);
- virtual void Print(PGLOBAL g, char *ps, uint z);
+ virtual void Printf(PGLOBAL g, FILE *f, uint n);
+ virtual void Prints(PGLOBAL g, char *ps, uint z);
protected:
BLOCKFILTER(void) {} // Standard constructor not to be used
@@ -234,8 +234,8 @@ class DllExport BLOCKINDEX : public BLOCK { /* Indexing Test Block */
// Methods
void Reset(void);
virtual int BlockEval(PGLOBAL);
- virtual void Print(PGLOBAL g, FILE *f, UINT n);
- virtual void Print(PGLOBAL g, char *ps, UINT z);
+ virtual void Printf(PGLOBAL g, FILE *f, UINT n);
+ virtual void Prints(PGLOBAL g, char *ps, UINT z);
protected:
BLOCKINDEX(void) {} // Standard constructor not to be used
diff --git a/storage/connect/block.h b/storage/connect/block.h
index aa4edde5ec9..8ac7be80988 100644
--- a/storage/connect/block.h
+++ b/storage/connect/block.h
@@ -44,8 +44,8 @@ class DllExport BLOCK {
return (PlugSubAlloc(g, p, size));
} // end of new
- virtual void Print(PGLOBAL, FILE *, uint) {} // Produce file desc
- virtual void Print(PGLOBAL, char *, uint) {} // Produce string desc
+ virtual void Printf(PGLOBAL, FILE *, uint) {} // Produce file desc
+ virtual void Prints(PGLOBAL, char *, uint) {} // Produce string desc
#if !defined(__BORLANDC__)
// Avoid warning C4291 by defining a matching dummy delete operator
diff --git a/storage/connect/catalog.h b/storage/connect/catalog.h
index 70304c410cc..48347d7519e 100644
--- a/storage/connect/catalog.h
+++ b/storage/connect/catalog.h
@@ -36,7 +36,7 @@ typedef struct _curtab {
/* Defines the structure used to get column catalog info. */
/***********************************************************************/
typedef struct _colinfo {
- char *Name;
+ PCSZ Name;
int Type;
int Offset;
int Length;
@@ -45,9 +45,9 @@ typedef struct _colinfo {
int Scale;
int Opt;
int Freq;
- char *Remark;
- char *Datefmt;
- char *Fieldfmt;
+ PCSZ Remark;
+ PCSZ Datefmt;
+ PCSZ Fieldfmt;
ushort Flags; // Used by MariaDB CONNECT handlers
} COLINFO, *PCOLINFO;
@@ -68,11 +68,9 @@ class DllExport CATALOG {
bool GetDefHuge(void) {return DefHuge;}
void SetDefHuge(bool b) {DefHuge = b;}
char *GetCbuf(void) {return Cbuf;}
-//char *GetDataPath(void) {return (char*)DataPath;}
// Methods
virtual void Reset(void) {}
-//virtual void SetDataPath(PGLOBAL g, const char *path) {}
virtual bool CheckName(PGLOBAL, char*) {return true;}
virtual bool ClearName(PGLOBAL, PSZ) {return true;}
virtual PRELDEF MakeOneTableDesc(PGLOBAL, LPCSTR, LPCSTR) {return NULL;}
@@ -102,7 +100,6 @@ class DllExport CATALOG {
int Cblen; /* Length of suballoc. buffer */
CURTAB Ctb; /* Used to enumerate tables */
bool DefHuge; /* true: tables default to huge */
-//LPCSTR DataPath; /* Is the Path of DB data dir */
}; // end of class CATALOG
#endif // __CATALOG__H
diff --git a/storage/connect/colblk.cpp b/storage/connect/colblk.cpp
index 58841387249..324d59ab40e 100644
--- a/storage/connect/colblk.cpp
+++ b/storage/connect/colblk.cpp
@@ -195,10 +195,10 @@ int COLBLK::GetLengthEx(void)
/* corresponding to this column and convert it to buffer type. */
/***********************************************************************/
void COLBLK::ReadColumn(PGLOBAL g)
- {
+{
sprintf(g->Message, MSG(UNDEFINED_AM), "ReadColumn");
- longjmp(g->jumper[g->jump_level], TYPE_COLBLK);
- } // end of ReadColumn
+ throw TYPE_COLBLK;
+} // end of ReadColumn
/***********************************************************************/
/* WriteColumn: what this routine does is to access the last line */
@@ -206,15 +206,15 @@ void COLBLK::ReadColumn(PGLOBAL g)
/* corresponding to this column from the column buffer and type. */
/***********************************************************************/
void COLBLK::WriteColumn(PGLOBAL g)
- {
+{
sprintf(g->Message, MSG(UNDEFINED_AM), "WriteColumn");
- longjmp(g->jumper[g->jump_level], TYPE_COLBLK);
- } // end of WriteColumn
+ throw TYPE_COLBLK;
+} // end of WriteColumn
/***********************************************************************/
/* Make file output of a column descriptor block. */
/***********************************************************************/
-void COLBLK::Print(PGLOBAL, FILE *f, uint n)
+void COLBLK::Printf(PGLOBAL, FILE *f, uint n)
{
char m[64];
int i;
@@ -237,7 +237,7 @@ void COLBLK::Print(PGLOBAL, FILE *f, uint n)
/***********************************************************************/
/* Make string output of a column descriptor block. */
/***********************************************************************/
-void COLBLK::Print(PGLOBAL, char *ps, uint)
+void COLBLK::Prints(PGLOBAL, char *ps, uint)
{
sprintf(ps, "R%d.%s", To_Tdb->GetTdb_No(), Name);
} // end of Print
@@ -260,10 +260,10 @@ SPCBLK::SPCBLK(PCOLUMN cp)
/* corresponding to this column from the column buffer and type. */
/***********************************************************************/
void SPCBLK::WriteColumn(PGLOBAL g)
- {
+{
sprintf(g->Message, MSG(SPCOL_READONLY), Name);
- longjmp(g->jumper[g->jump_level], TYPE_COLBLK);
- } // end of WriteColumn
+ throw TYPE_COLBLK;
+} // end of WriteColumn
/***********************************************************************/
/* RIDBLK constructor for the ROWID special column. */
@@ -377,7 +377,7 @@ PRTBLK::PRTBLK(PCOLUMN cp) : SPCBLK(cp)
void PRTBLK::ReadColumn(PGLOBAL g)
{
if (Pname == NULL) {
- char *p;
+ const char *p;
Pname = To_Tdb->GetDef()->GetStringCatInfo(g, "partname", "?");
p = strrchr(Pname, '#');
@@ -407,7 +407,7 @@ SIDBLK::SIDBLK(PCOLUMN cp) : SPCBLK(cp)
void SIDBLK::ReadColumn(PGLOBAL)
{
//if (Sname == NULL) {
- Sname = (char*)To_Tdb->GetServer();
+ Sname = To_Tdb->GetServer();
Value->SetValue_psz(Sname);
// } // endif Sname
diff --git a/storage/connect/colblk.h b/storage/connect/colblk.h
index c64f9d95129..608aa040787 100644
--- a/storage/connect/colblk.h
+++ b/storage/connect/colblk.h
@@ -72,8 +72,8 @@ class DllExport COLBLK : public XOBJECT {
virtual void SetTo_Val(PVAL) {}
virtual void ReadColumn(PGLOBAL g);
virtual void WriteColumn(PGLOBAL g);
- virtual void Print(PGLOBAL g, FILE *, uint);
- virtual void Print(PGLOBAL g, char *, uint);
+ virtual void Printf(PGLOBAL g, FILE *, uint);
+ virtual void Prints(PGLOBAL g, char *, uint);
virtual bool VarSize(void) {return false;}
bool InitValue(PGLOBAL g);
@@ -154,7 +154,7 @@ class DllExport FIDBLK : public SPCBLK {
virtual void ReadColumn(PGLOBAL g);
protected:
- PSZ Fn; // The current To_File of the table
+ PCSZ Fn; // The current To_File of the table
OPVAL Op; // The file part operator
}; // end of class FIDBLK
@@ -178,7 +178,7 @@ class DllExport TIDBLK : public SPCBLK {
TIDBLK(void) {}
// Members
- PSZ Tname; // The current table name
+ PCSZ Tname; // The current table name
}; // end of class TIDBLK
/***********************************************************************/
@@ -201,7 +201,7 @@ class DllExport PRTBLK : public SPCBLK {
PRTBLK(void) {}
// Members
- PSZ Pname; // The current partition name
+ PCSZ Pname; // The current partition name
}; // end of class PRTBLK
/***********************************************************************/
@@ -224,7 +224,7 @@ class DllExport SIDBLK : public SPCBLK {
SIDBLK(void) {}
// Members
- PSZ Sname; // The current server name
+ PCSZ Sname; // The current server name
}; // end of class SIDBLK
#endif // __COLBLK__H
diff --git a/storage/connect/connect.cc b/storage/connect/connect.cc
index 098119e7be1..e15cc724b85 100644
--- a/storage/connect/connect.cc
+++ b/storage/connect/connect.cc
@@ -1,4 +1,4 @@
-/* Copyright (C) Olivier Bertrand 2004 - 2015
+/* Copyright (C) Olivier Bertrand 2004 - 2017
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@@ -15,10 +15,10 @@
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02111-1301 USA */
/***********************************************************************/
-/* Author Olivier BERTRAND bertrandop@gmail.com 2004-2015 */
+/* Author Olivier BERTRAND bertrandop@gmail.com 2004-2017 */
/* */
-/* WHAT THIS PROGRAM DOES: */
-/* ----------------------- */
+/* WHAT THIS PROGRAM DOES: */
+/* ----------------------- */
/* This program are the CONNECT general purpose semantic routines. */
/***********************************************************************/
#ifdef USE_PRAGMA_IMPLEMENTATION
@@ -117,11 +117,10 @@ bool CntCheckDB(PGLOBAL g, PHC handler, const char *pathname)
handler);
// Set the database path for this table
- handler->SetDataPath(g, pathname);
+ if (handler->SetDataPath(g, pathname))
+ return true;
if (dbuserp->Catalog) {
-// ((MYCAT *)dbuserp->Catalog)->SetHandler(handler); done later
-// ((MYCAT *)dbuserp->Catalog)->SetDataPath(g, pathname);
return false; // Nothing else to do
} // endif Catalog
@@ -138,9 +137,6 @@ bool CntCheckDB(PGLOBAL g, PHC handler, const char *pathname)
if (!(dbuserp->Catalog= new MYCAT(handler)))
return true;
-//((MYCAT *)dbuserp->Catalog)->SetDataPath(g, pathname);
-//dbuserp->UseTemp= TMP_AUTO;
-
/*********************************************************************/
/* All is correct. */
/*********************************************************************/
@@ -172,7 +168,7 @@ bool CntInfo(PGLOBAL g, PTDB tp, PXF info)
// info->mean_rec_length= tdbp->GetLrecl();
info->mean_rec_length= 0;
- info->data_file_name= (b) ? NULL : tdbp->GetFile(g);
+ info->data_file_name= (b) ? NULL : (char*)tdbp->GetFile(g);
return true;
} else {
info->data_file_length= 0;
@@ -188,49 +184,43 @@ bool CntInfo(PGLOBAL g, PTDB tp, PXF info)
/* GetTDB: Get the table description block of a CONNECT table. */
/***********************************************************************/
PTDB CntGetTDB(PGLOBAL g, LPCSTR name, MODE mode, PHC h)
- {
- int rc;
- PTDB tdbp;
- PTABLE tabp;
- PDBUSER dup= PlgGetUser(g);
- volatile PCATLG cat= (dup) ? dup->Catalog : NULL; // Safe over longjmp
-
- if (trace)
- printf("CntGetTDB: name=%s mode=%d cat=%p\n", name, mode, cat);
+{
+ PTDB tdbp;
+ PTABLE tabp;
+ PDBUSER dup = PlgGetUser(g);
+ volatile PCATLG cat = (dup) ? dup->Catalog : NULL; // Safe over longjmp
- if (!cat)
- return NULL;
+ if (trace)
+ printf("CntGetTDB: name=%s mode=%d cat=%p\n", name, mode, cat);
- // Save stack and allocation environment and prepare error return
- if (g->jump_level == MAX_JUMP) {
- strcpy(g->Message, MSG(TOO_MANY_JUMPS));
- return NULL;
- } // endif jump_level
+ if (!cat)
+ return NULL;
- if ((rc= setjmp(g->jumper[++g->jump_level])) != 0) {
- tdbp= NULL;
- goto err;
- } // endif rc
+ try {
+ // Get table object from the catalog
+ tabp = new(g) XTAB(name);
- // Get table object from the catalog
- tabp= new(g) XTAB(name);
+ if (trace)
+ printf("CntGetTDB: tabp=%p\n", tabp);
- if (trace)
- printf("CntGetTDB: tabp=%p\n", tabp);
+ // Perhaps this should be made thread safe
+ ((MYCAT*)cat)->SetHandler(h);
- // Perhaps this should be made thread safe
- ((MYCAT*)cat)->SetHandler(h);
+ if (!(tdbp = cat->GetTable(g, tabp, mode)))
+ printf("CntGetTDB: %s\n", g->Message);
- if (!(tdbp= cat->GetTable(g, tabp, mode)))
- printf("CntGetTDB: %s\n", g->Message);
+ } catch (int n) {
+ if (trace)
+ htrc("Exception %d: %s\n", n, g->Message);
+ } catch (const char *msg) {
+ strcpy(g->Message, msg);
+ } // end catch
- err:
if (trace)
printf("Returning tdbp=%p mode=%d\n", tdbp, mode);
- g->jump_level--;
return tdbp;
- } // end of CntGetTDB
+} // end of CntGetTDB
/***********************************************************************/
/* OPENTAB: Open a Table. */
@@ -239,7 +229,7 @@ bool CntOpenTable(PGLOBAL g, PTDB tdbp, MODE mode, char *c1, char *c2,
bool del, PHC)
{
char *p;
- int i, n, rc;
+ int i, n;
bool rcop= true;
PCOL colp;
//PCOLUMN cp;
@@ -254,120 +244,116 @@ bool CntOpenTable(PGLOBAL g, PTDB tdbp, MODE mode, char *c1, char *c2,
return true;
} // endif tdbp
- // Save stack and allocation environment and prepare error return
- if (g->jump_level == MAX_JUMP) {
- strcpy(g->Message, MSG(TOO_MANY_JUMPS));
- return true;
- } // endif jump_level
-
- if ((rc= setjmp(g->jumper[++g->jump_level])) != 0) {
- goto err;
- } // endif rc
-
- if (!c1) {
- if (mode == MODE_INSERT)
- // Allocate all column blocks for that table
- tdbp->ColDB(g, NULL, 0);
-
- } else for (p= c1; *p; p+= n) {
- // Allocate only used column blocks
- if (trace)
- printf("Allocating column %s\n", p);
-
- g->Message[0] = 0; // To check whether ColDB made an error message
- colp= tdbp->ColDB(g, p, 0);
-
- if (!colp && !(mode == MODE_INSERT && tdbp->IsSpecial(p))) {
- if (g->Message[0] == 0)
- sprintf(g->Message, MSG(COL_ISNOT_TABLE), p, tdbp->GetName());
-
- goto err;
- } // endif colp
-
- n= strlen(p) + 1;
- } // endfor p
-
- for (i= 0, colp= tdbp->GetColumns(); colp; i++, colp= colp->GetNext()) {
- if (colp->InitValue(g))
- goto err;
-
- if (mode == MODE_INSERT)
- // Allow type conversion
- if (colp->SetBuffer(g, colp->GetValue(), true, false))
- goto err;
-
- colp->AddColUse(U_P); // For PLG tables
- } // endfor colp
-
- /*********************************************************************/
- /* In Update mode, the updated column blocks must be distinct from */
- /* the read column blocks. So make a copy of the TDB and allocate */
- /* its column blocks in mode write (required by XML tables). */
- /*********************************************************************/
- if (mode == MODE_UPDATE) {
- PTDBASE utp;
-
- if (!(utp= (PTDBASE)tdbp->Duplicate(g))) {
- sprintf(g->Message, MSG(INV_UPDT_TABLE), tdbp->GetName());
- goto err;
- } // endif tp
-
- if (!c2)
- // Allocate all column blocks for that table
- utp->ColDB(g, NULL, 0);
- else for (p= c2; *p; p+= n) {
- // Allocate only used column blocks
- colp= utp->ColDB(g, p, 0);
- n= strlen(p) + 1;
- } // endfor p
-
- for (i= 0, colp= utp->GetColumns(); colp; i++, colp= colp->GetNext()) {
- if (colp->InitValue(g))
- goto err;
-
- if (colp->SetBuffer(g, colp->GetValue(), true, false))
- goto err;
-
- } // endfor colp
-
- // Attach the updated columns list to the main table
- tdbp->SetSetCols(utp->GetColumns());
- } else if (tdbp && mode == MODE_INSERT)
- tdbp->SetSetCols(tdbp->GetColumns());
-
- // Now do open the physical table
- if (trace)
- printf("Opening table %s in mode %d tdbp=%p\n",
- tdbp->GetName(), mode, tdbp);
-
-//tdbp->SetMode(mode);
-
- if (del/* && (tdbp->GetFtype() != RECFM_NAF*/) {
- // To avoid erasing the table when doing a partial delete
- // make a fake Next
+ try {
+ if (!c1) {
+ if (mode == MODE_INSERT)
+ // Allocate all column blocks for that table
+ tdbp->ColDB(g, NULL, 0);
+
+ } else for (p = c1; *p; p += n) {
+ // Allocate only used column blocks
+ if (trace)
+ printf("Allocating column %s\n", p);
+
+ g->Message[0] = 0; // To check whether ColDB made an error message
+ colp = tdbp->ColDB(g, p, 0);
+
+ if (!colp && !(mode == MODE_INSERT && tdbp->IsSpecial(p))) {
+ if (g->Message[0] == 0)
+ sprintf(g->Message, MSG(COL_ISNOT_TABLE), p, tdbp->GetName());
+
+ throw 1;
+ } // endif colp
+
+ n = strlen(p) + 1;
+ } // endfor p
+
+ for (i = 0, colp = tdbp->GetColumns(); colp; i++, colp = colp->GetNext()) {
+ if (colp->InitValue(g))
+ throw 2;
+
+ if (mode == MODE_INSERT)
+ // Allow type conversion
+ if (colp->SetBuffer(g, colp->GetValue(), true, false))
+ throw 3;
+
+ colp->AddColUse(U_P); // For PLG tables
+ } // endfor colp
+
+ /*******************************************************************/
+ /* In Update mode, the updated column blocks must be distinct from */
+ /* the read column blocks. So make a copy of the TDB and allocate */
+ /* its column blocks in mode write (required by XML tables). */
+ /*******************************************************************/
+ if (mode == MODE_UPDATE) {
+ PTDBASE utp;
+
+ if (!(utp = (PTDBASE)tdbp->Duplicate(g))) {
+ sprintf(g->Message, MSG(INV_UPDT_TABLE), tdbp->GetName());
+ throw 4;
+ } // endif tp
+
+ if (!c2)
+ // Allocate all column blocks for that table
+ utp->ColDB(g, NULL, 0);
+ else for (p = c2; *p; p += n) {
+ // Allocate only used column blocks
+ colp = utp->ColDB(g, p, 0);
+ n = strlen(p) + 1;
+ } // endfor p
+
+ for (i = 0, colp = utp->GetColumns(); colp; i++, colp = colp->GetNext()) {
+ if (colp->InitValue(g))
+ throw 5;
+
+ if (colp->SetBuffer(g, colp->GetValue(), true, false))
+ throw 6;
+
+ } // endfor colp
+
+ // Attach the updated columns list to the main table
+ tdbp->SetSetCols(utp->GetColumns());
+ } else if (tdbp && mode == MODE_INSERT)
+ tdbp->SetSetCols(tdbp->GetColumns());
+
+ // Now do open the physical table
+ if (trace)
+ printf("Opening table %s in mode %d tdbp=%p\n",
+ tdbp->GetName(), mode, tdbp);
+
+ //tdbp->SetMode(mode);
+
+ if (del/* && (tdbp->GetFtype() != RECFM_NAF*/) {
+ // To avoid erasing the table when doing a partial delete
+ // make a fake Next
// PDOSDEF ddp= new(g) DOSDEF;
// PTDB tp= new(g) TDBDOS(ddp, NULL);
- tdbp->SetNext((PTDB)1);
- dup->Check &= ~CHK_DELETE;
- } // endif del
+ tdbp->SetNext((PTDB)1);
+ dup->Check &= ~CHK_DELETE;
+ } // endif del
- if (trace)
- printf("About to open the table: tdbp=%p\n", tdbp);
+ if (trace)
+ printf("About to open the table: tdbp=%p\n", tdbp);
- if (mode != MODE_ANY && mode != MODE_ALTER) {
- if (tdbp->OpenDB(g)) {
- printf("%s\n", g->Message);
- goto err;
- } else
- tdbp->SetNext(NULL);
+ if (mode != MODE_ANY && mode != MODE_ALTER) {
+ if (tdbp->OpenDB(g)) {
+ printf("%s\n", g->Message);
+ throw 7;
+ } else
+ tdbp->SetNext(NULL);
+
+ } // endif mode
- } // endif mode
+ rcop = false;
- rcop= false;
+ } catch (int n) {
+ if (trace)
+ htrc("Exception %d: %s\n", n, g->Message);
+ } catch (const char *msg) {
+ strcpy(g->Message, msg);
+ } // end catch
- err:
- g->jump_level--;
return rcop;
} // end of CntOpenTable
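
The CntOpenTable hunk above is the template for most of this commit: the setjmp/longjmp recovery (jump_level bookkeeping, goto err) is replaced by a try block whose helpers throw either an int code or a message string. A minimal standalone sketch of that shape, with invented names rather than the CONNECT ones:

    #include <cstdio>
    #include <cstring>

    struct Global { char Message[512]; };   // stand-in for PGLOBAL

    static void do_step(Global *g, bool fail) {
      if (fail) {
        std::snprintf(g->Message, sizeof g->Message, "step failed");
        throw 1;                            // numeric code, like "throw 4" above
      }
    }

    // Returns true on error, false on success, like CntOpenTable's rcop.
    static bool open_like(Global *g, bool fail) {
      bool rcop = true;
      try {
        do_step(g, fail);                   // helpers may throw int or const char*
        rcop = false;                       // only reached when nothing threw
      } catch (int n) {
        std::fprintf(stderr, "Exception %d: %s\n", n, g->Message);
      } catch (const char *msg) {
        std::strcpy(g->Message, msg);       // message-style throws land here
      }
      return rcop;
    }

The catch clauses leave rcop at its failure value, so no separate err: label or jump_level-- is needed.
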
@@ -387,50 +373,40 @@ bool CntRewindTable(PGLOBAL g, PTDB tdbp)
/* Evaluate all columns after a record is read. */
/***********************************************************************/
RCODE EvalColumns(PGLOBAL g, PTDB tdbp, bool reset, bool mrr)
- {
+{
RCODE rc= RC_OK;
PCOL colp;
- // Save stack and allocation environment and prepare error return
- if (g->jump_level == MAX_JUMP) {
- if (trace) {
- strcpy(g->Message, MSG(TOO_MANY_JUMPS));
- printf("EvalColumns: %s\n", g->Message);
- } // endif
-
- return RC_FX;
- } // endif jump_level
-
- if (setjmp(g->jumper[++g->jump_level]) != 0) {
- if (trace)
- printf("Error reading columns: %s\n", g->Message);
+ try {
+ for (colp = tdbp->GetColumns(); rc == RC_OK && colp;
+ colp = colp->GetNext()) {
+ if (reset)
+ colp->Reset();
- rc= RC_FX;
- goto err;
- } // endif rc
+ // Virtual columns are computed by MariaDB
+ if (!colp->GetColUse(U_VIRTUAL) && (!mrr || colp->GetKcol()))
+ if (colp->Eval(g))
+ rc = RC_FX;
- for (colp= tdbp->GetColumns(); rc == RC_OK && colp;
- colp= colp->GetNext()) {
- if (reset)
- colp->Reset();
+ } // endfor colp
- // Virtual columns are computed by MariaDB
- if (!colp->GetColUse(U_VIRTUAL) && (!mrr || colp->GetKcol()))
- if (colp->Eval(g))
- rc= RC_FX;
+ } catch (int n) {
+ if (trace)
+ printf("Error %d reading columns: %s\n", n, g->Message);
- } // endfor colp
+ rc = RC_FX;
+ } catch (const char *msg) {
+ strcpy(g->Message, msg);
+ } // end catch
- err:
- g->jump_level--;
return rc;
- } // end of EvalColumns
+} // end of EvalColumns
/***********************************************************************/
/* ReadNext: Read next record sequentially. */
/***********************************************************************/
RCODE CntReadNext(PGLOBAL g, PTDB tdbp)
- {
+{
RCODE rc;
if (!tdbp)
@@ -445,76 +421,66 @@ RCODE CntReadNext(PGLOBAL g, PTDB tdbp)
((PTDBASE)tdbp)->ResetKindex(g, NULL);
} // endif index
- // Save stack and allocation environment and prepare error return
- if (g->jump_level == MAX_JUMP) {
- strcpy(g->Message, MSG(TOO_MANY_JUMPS));
- return RC_FX;
- } // endif jump_level
-
- if ((setjmp(g->jumper[++g->jump_level])) != 0) {
- rc= RC_FX;
- goto err;
- } // endif rc
+ try {
+ // Do it now to avoid double eval when filtering
+ for (PCOL colp = tdbp->GetColumns(); colp; colp = colp->GetNext())
+ colp->Reset();
- // Do it now to avoid double eval when filtering
- for (PCOL colp= tdbp->GetColumns(); colp; colp= colp->GetNext())
- colp->Reset();
+ do {
+ if ((rc = (RCODE)tdbp->ReadDB(g)) == RC_OK)
+ if (!ApplyFilter(g, tdbp->GetFilter()))
+ rc = RC_NF;
- do {
- if ((rc= (RCODE)tdbp->ReadDB(g)) == RC_OK)
- if (!ApplyFilter(g, tdbp->GetFilter()))
- rc= RC_NF;
+ } while (rc == RC_NF);
- } while (rc == RC_NF);
+ if (rc == RC_OK)
+ rc = EvalColumns(g, tdbp, false);
- if (rc == RC_OK)
- rc= EvalColumns(g, tdbp, false);
+ } catch (int) {
+ rc = RC_FX;
+ } catch (const char *msg) {
+ strcpy(g->Message, msg);
+ rc = RC_FX;
+ } // end catch
- err:
- g->jump_level--;
return rc;
- } // end of CntReadNext
+} // end of CntReadNext
/***********************************************************************/
/* WriteRow: Insert a new row into a table. */
/***********************************************************************/
RCODE CntWriteRow(PGLOBAL g, PTDB tdbp)
- {
- RCODE rc;
- PCOL colp;
-//PTDBASE tp= (PTDBASE)tdbp;
-
- if (!tdbp)
- return RC_FX;
-
- // Save stack and allocation environment and prepare error return
- if (g->jump_level == MAX_JUMP) {
- strcpy(g->Message, MSG(TOO_MANY_JUMPS));
- return RC_FX;
- } // endif jump_level
-
- if (setjmp(g->jumper[++g->jump_level]) != 0) {
- printf("%s\n", g->Message);
- rc= RC_FX;
- goto err;
- } // endif rc
-
- // Store column values in table write buffer(s)
- for (colp= tdbp->GetSetCols(); colp; colp= colp->GetNext())
- if (!colp->GetColUse(U_VIRTUAL))
- colp->WriteColumn(g);
-
- if (tdbp->IsIndexed())
- // Index values must be sorted before updating
- rc= (RCODE)((PTDBDOS)tdbp)->GetTxfp()->StoreValues(g, true);
- else
- // Return result code from write operation
- rc= (RCODE)tdbp->WriteDB(g);
-
- err:
- g->jump_level--;
- return rc;
- } // end of CntWriteRow
+{
+ RCODE rc;
+ PCOL colp;
+ //PTDBASE tp= (PTDBASE)tdbp;
+
+ if (!tdbp)
+ return RC_FX;
+
+ try {
+ // Store column values in table write buffer(s)
+ for (colp = tdbp->GetSetCols(); colp; colp = colp->GetNext())
+ if (!colp->GetColUse(U_VIRTUAL))
+ colp->WriteColumn(g);
+
+ if (tdbp->IsIndexed())
+ // Index values must be sorted before updating
+ rc = (RCODE)((PTDBDOS)tdbp)->GetTxfp()->StoreValues(g, true);
+ else
+ // Return result code from write operation
+ rc = (RCODE)tdbp->WriteDB(g);
+
+ } catch (int n) {
+ printf("Exception %d: %s\n", n, g->Message);
+ rc = RC_FX;
+ } catch (const char *msg) {
+ strcpy(g->Message, msg);
+ rc = RC_FX;
+ } // end catch
+
+ return rc;
+} // end of CntWriteRow
/***********************************************************************/
/* UpdateRow: Update a row into a table. */
@@ -562,88 +528,78 @@ RCODE CntDeleteRow(PGLOBAL g, PTDB tdbp, bool all)
/* CLOSETAB: Close a table. */
/***********************************************************************/
int CntCloseTable(PGLOBAL g, PTDB tdbp, bool nox, bool abort)
- {
- int rc= RC_OK;
-//TDBASE *tbxp= (PTDBASE)tdbp;
-
- if (!tdbp)
- return rc; // Nothing to do
- else if (tdbp->GetUse() != USE_OPEN) {
- if (tdbp->GetAmType() == TYPE_AM_XML)
- tdbp->CloseDB(g); // Opened by GetMaxSize
-
- return rc;
- } // endif !USE_OPEN
-
- if (trace)
- printf("CntCloseTable: tdbp=%p mode=%d nox=%d abort=%d\n",
- tdbp, tdbp->GetMode(), nox, abort);
-
- if (tdbp->GetMode() == MODE_DELETE && tdbp->GetUse() == USE_OPEN) {
- if (tdbp->IsIndexed())
- rc= ((PTDBDOS)tdbp)->GetTxfp()->DeleteSortedRows(g);
-
- if (!rc)
- rc= tdbp->DeleteDB(g, RC_EF); // Specific A.M. delete routine
-
- } else if (tdbp->GetMode() == MODE_UPDATE && tdbp->IsIndexed())
- rc= ((PTDBDOX)tdbp)->Txfp->UpdateSortedRows(g);
-
- switch(rc) {
- case RC_FX:
- abort= true;
- break;
- case RC_INFO:
- PushWarning(g, tdbp);
- break;
- } // endswitch rc
-
- // Prepare error return
- if (g->jump_level == MAX_JUMP) {
- strcpy(g->Message, MSG(TOO_MANY_JUMPS));
- rc= RC_FX;
- goto err;
- } // endif
-
- if ((rc = setjmp(g->jumper[++g->jump_level])) != 0) {
- rc= RC_FX;
- g->jump_level--;
- goto err;
- } // endif
-
- // This will close the table file(s) and also finalize write
- // operations such as Insert, Update, or Delete.
- tdbp->SetAbort(abort);
- tdbp->CloseDB(g);
- tdbp->SetAbort(false);
- g->jump_level--;
-
- if (trace > 1)
- printf("Table %s closed\n", tdbp->GetName());
-
-//if (!((PTDBDOX)tdbp)->GetModified())
-// return 0;
-
- if (nox || tdbp->GetMode() == MODE_READ || tdbp->GetMode() == MODE_ANY)
- return 0;
-
- if (trace > 1)
- printf("About to reset opt\n");
-
- if (!tdbp->IsRemote()) {
- // Make all the eventual indexes
- PTDBDOX tbxp = (PTDBDOX)tdbp;
- tbxp->ResetKindex(g, NULL);
- tbxp->SetKey_Col(NULL);
- rc = tbxp->ResetTableOpt(g, true, tbxp->GetDef()->Indexable() == 1);
- } // endif remote
-
- err:
- if (trace > 1)
- printf("Done rc=%d\n", rc);
-
- return (rc == RC_OK || rc == RC_INFO) ? 0 : rc;
- } // end of CntCloseTable
+{
+ int rc = RC_OK;
+ //TDBASE *tbxp= (PTDBASE)tdbp;
+
+ if (!tdbp)
+ return rc; // Nothing to do
+ else if (tdbp->GetUse() != USE_OPEN) {
+ if (tdbp->GetAmType() == TYPE_AM_XML)
+ tdbp->CloseDB(g); // Opened by GetMaxSize
+
+ return rc;
+ } // endif !USE_OPEN
+
+ if (trace)
+ printf("CntCloseTable: tdbp=%p mode=%d nox=%d abort=%d\n",
+ tdbp, tdbp->GetMode(), nox, abort);
+
+ if (tdbp->GetMode() == MODE_DELETE && tdbp->GetUse() == USE_OPEN) {
+ if (tdbp->IsIndexed())
+ rc = ((PTDBDOS)tdbp)->GetTxfp()->DeleteSortedRows(g);
+
+ if (!rc)
+ rc = tdbp->DeleteDB(g, RC_EF); // Specific A.M. delete routine
+
+ } else if (tdbp->GetMode() == MODE_UPDATE && tdbp->IsIndexed())
+ rc = ((PTDBDOX)tdbp)->Txfp->UpdateSortedRows(g);
+
+ switch (rc) {
+ case RC_FX:
+ abort = true;
+ break;
+ case RC_INFO:
+ PushWarning(g, tdbp);
+ break;
+ } // endswitch rc
+
+ try {
+ // This will close the table file(s) and also finalize write
+ // operations such as Insert, Update, or Delete.
+ tdbp->SetAbort(abort);
+ tdbp->CloseDB(g);
+ tdbp->SetAbort(false);
+
+ if (trace > 1)
+ printf("Table %s closed\n", tdbp->GetName());
+
+ if (!nox && tdbp->GetMode() != MODE_READ && tdbp->GetMode() != MODE_ANY) {
+ if (trace > 1)
+ printf("About to reset opt\n");
+
+ if (!tdbp->IsRemote()) {
+ // Make all the eventual indexes
+ PTDBDOX tbxp = (PTDBDOX)tdbp;
+ tbxp->ResetKindex(g, NULL);
+ tbxp->SetKey_Col(NULL);
+ rc = tbxp->ResetTableOpt(g, true, tbxp->GetDef()->Indexable() == 1);
+ } // endif remote
+
+ } // endif nox
+
+ } catch (int) {
+ rc = RC_FX;
+ } catch (const char *msg) {
+ strcpy(g->Message, msg);
+ rc = RC_FX;
+ } // end catch
+
+ if (trace > 1)
+ htrc("Done rc=%d\n", rc);
+
+ return (rc == RC_OK || rc == RC_INFO) ? 0 : rc;
+} // end of CntCloseTable
/***********************************************************************/
/* Load and initialize the use of an index. */
@@ -752,8 +708,9 @@ RCODE CntIndexRead(PGLOBAL g, PTDB ptdb, OPVAL op,
sprintf(g->Message, MSG(TABLE_NO_INDEX), ptdb->GetName());
return RC_FX;
} else if (x == 2) {
- // Remote index
- if (op != OP_SAME && ptdb->ReadKey(g, op, kr))
+ // Remote index. Only used in read mode
+ if ((ptdb->GetMode() == MODE_READ || ptdb->GetMode() == MODE_READX)
+ && op != OP_SAME && ptdb->ReadKey(g, op, kr))
return RC_FX;
goto rnd;
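
The catch (int) in CntReadNext pairs with ApplyFilter, which (in the filter.cpp hunk further down) now throws TYPE_FILTER instead of longjmp'ing when evaluation fails. A rough sketch of that control flow, with simplified names and a placeholder constant:

    #include <cstring>

    enum { RC_OK, RC_NF, RC_FX };
    const int TYPE_FILTER_CODE = 7;          // placeholder, not CONNECT's value

    struct Global { char Message[512]; };

    // Returns whether the row matches; throws when evaluation itself fails.
    static bool apply_filter(Global *g, bool valid, bool match) {
      if (!valid) {
        std::strcpy(g->Message, "filter evaluation error");
        throw TYPE_FILTER_CODE;              // unwinds to the caller's catch
      }
      return match;
    }

    static int read_next(Global *g, bool valid, bool match) {
      int rc = RC_OK;
      try {
        if (!apply_filter(g, valid, match))
          rc = RC_NF;                        // row rejected, caller reads on
      } catch (int) {
        rc = RC_FX;                          // any thrown code becomes fatal
      }
      return rc;
    }
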
diff --git a/storage/connect/csort.h b/storage/connect/csort.h
index 55ff6268a4b..6e700059881 100644
--- a/storage/connect/csort.h
+++ b/storage/connect/csort.h
@@ -49,8 +49,8 @@ class DllExport CSORT {
public:
// Methods
int Qsort(PGLOBAL g, int n); /* Sort calling routine */
-//virtual void Print(PGLOBAL g, FILE *f, uint n);
-//virtual void Print(PGLOBAL g, char *ps, uint z);
+//virtual void Printf(PGLOBAL g, FILE *f, uint n);
+//virtual void Prints(PGLOBAL g, char *ps, uint z);
#ifdef DEBTRACE
int GetNcmp(void) {return num_comp;}
#endif
diff --git a/storage/connect/domdoc.cpp b/storage/connect/domdoc.cpp
index 1622ec16c68..e24e10835c1 100644
--- a/storage/connect/domdoc.cpp
+++ b/storage/connect/domdoc.cpp
@@ -58,13 +58,15 @@ void CloseXMLFile(PGLOBAL g, PFBLOCK fp, bool all)
if (xp && xp->Count > 1 && !all) {
xp->Count--;
} else if (xp && xp->Count > 0) {
- try {
+ try {
if (xp->Docp)
xp->Docp->Release();
- } catch(_com_error e) {
- sprintf(g->Message, "%s %s", MSG(COM_ERROR), e.Description());
- } catch(...) {}
+ } catch(_com_error e) {
+ char *p = _com_util::ConvertBSTRToString(e.Description());
+ sprintf(g->Message, "%s %s", MSG(COM_ERROR), p);
+ delete[] p;
+ } catch(...) {}
CoUninitialize();
xp->Count = 0;
@@ -89,7 +91,7 @@ DOMDOC::DOMDOC(char *nsl, char *nsdf, char *enc, PFBLOCK fp)
/******************************************************************/
/* Initialize XML parser and check library compatibility. */
/******************************************************************/
-bool DOMDOC::Initialize(PGLOBAL g, char *entry, bool zipped)
+bool DOMDOC::Initialize(PGLOBAL g, PCSZ entry, bool zipped)
{
if (zipped && InitZip(g, entry))
return true;
@@ -155,7 +157,7 @@ PFBLOCK DOMDOC::LinkXblock(PGLOBAL g, MODE m, int rc, char *fn)
/******************************************************************/
/* Create the XML node. */
/******************************************************************/
-bool DOMDOC::NewDoc(PGLOBAL g, char *ver)
+bool DOMDOC::NewDoc(PGLOBAL g, PCSZ ver)
{
char buf[64];
MSXML2::IXMLDOMProcessingInstructionPtr pip;
@@ -490,9 +492,9 @@ PXATTR DOMNODE::GetAttribute(PGLOBAL g, char *name, PXATTR ap)
/******************************************************************/
/* Add a new element child node to this node and return it. */
/******************************************************************/
-PXNODE DOMNODE::AddChildNode(PGLOBAL g, char *name, PXNODE np)
+PXNODE DOMNODE::AddChildNode(PGLOBAL g, PCSZ name, PXNODE np)
{
- char *p, *pn;
+ const char *p, *pn;
// char *p, *pn, *epf, *pf = NULL;
MSXML2::IXMLDOMNodePtr ep;
// _bstr_t uri((wchar_t*)NULL);
@@ -585,7 +587,7 @@ PXATTR DOMNODE::AddProperty(PGLOBAL g, char *name, PXATTR ap)
/******************************************************************/
/* Add a new text node to this node. */
/******************************************************************/
-void DOMNODE::AddText(PGLOBAL g, char *txtp)
+void DOMNODE::AddText(PGLOBAL g, PCSZ txtp)
{
MSXML2::IXMLDOMTextPtr tp= Docp->createTextNode((_bstr_t)txtp);
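
The CloseXMLFile fix no longer passes the _com_error description straight to sprintf; the BSTR is converted to a narrow string first and that copy is released with delete[]. A hedged Windows-only sketch of the idiom (comdef.h is assumed, it is not part of this diff):

    #if defined(_WIN32)
    #include <cstdio>
    #include <comdef.h>    // _com_error, _com_util::ConvertBSTRToString

    static void format_com_error(char *msg, size_t len, const _com_error &e) {
      // Description() yields a BSTR; convert it to a new[]-allocated char*.
      char *p = _com_util::ConvertBSTRToString(e.Description());
      std::snprintf(msg, len, "COM error: %s", p ? p : "(no description)");
      delete[] p;          // ConvertBSTRToString allocates with new[]
    }
    #endif
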
diff --git a/storage/connect/domdoc.h b/storage/connect/domdoc.h
index 7f269002d59..dd8936097e2 100644
--- a/storage/connect/domdoc.h
+++ b/storage/connect/domdoc.h
@@ -37,9 +37,9 @@ class DOMDOC : public XMLDOCUMENT {
virtual void SetNofree(bool b) {} // Only libxml2
// Methods
- virtual bool Initialize(PGLOBAL g, char *entry, bool zipped);
+ virtual bool Initialize(PGLOBAL g, PCSZ entry, bool zipped);
virtual bool ParseFile(PGLOBAL g, char *fn);
- virtual bool NewDoc(PGLOBAL g, char *ver);
+ virtual bool NewDoc(PGLOBAL g, PCSZ ver);
virtual void AddComment(PGLOBAL g, char *com);
virtual PXNODE GetRoot(PGLOBAL g);
virtual PXNODE NewRoot(PGLOBAL g, char *name);
@@ -78,9 +78,9 @@ class DOMNODE : public XMLNODE {
virtual PXLIST SelectNodes(PGLOBAL g, char *xp, PXLIST lp);
virtual PXNODE SelectSingleNode(PGLOBAL g, char *xp, PXNODE np);
virtual PXATTR GetAttribute(PGLOBAL g, char *name, PXATTR ap);
- virtual PXNODE AddChildNode(PGLOBAL g, char *name, PXNODE np);
+ virtual PXNODE AddChildNode(PGLOBAL g, PCSZ name, PXNODE np);
virtual PXATTR AddProperty(PGLOBAL g, char *name, PXATTR ap);
- virtual void AddText(PGLOBAL g, char *txtp);
+ virtual void AddText(PGLOBAL g, PCSZ txtp);
virtual void DeleteChild(PGLOBAL g, PXNODE dnp);
protected:
diff --git a/storage/connect/filamap.cpp b/storage/connect/filamap.cpp
index 8fffaca3d06..84dff422db7 100644
--- a/storage/connect/filamap.cpp
+++ b/storage/connect/filamap.cpp
@@ -301,10 +301,9 @@ int MAPFAM::SkipRecord(PGLOBAL g, bool header)
PDBUSER dup = (PDBUSER)g->Activityp->Aptr;
// Skip this record
- while (*Mempos++ != '\n') ; // What about Unix ???
-
- if (Mempos >= Top)
- return RC_EF;
+ while (*Mempos++ != '\n') // What about Unix ???
+ if (Mempos == Top)
+ return RC_EF;
// Update progress information
dup->ProgCur = GetPos();
@@ -320,7 +319,7 @@ int MAPFAM::SkipRecord(PGLOBAL g, bool header)
/***********************************************************************/
int MAPFAM::ReadBuffer(PGLOBAL g)
{
- int rc, len;
+ int rc, len, n = 1;
// Are we at the end of the memory
if (Mempos >= Top) {
@@ -362,10 +361,14 @@ int MAPFAM::ReadBuffer(PGLOBAL g)
Placed = false;
// Immediately calculate next position (Used by DeleteDB)
- while (*Mempos++ != '\n') ; // What about Unix ???
+ while (*Mempos++ != '\n') // What about Unix ???
+ if (Mempos == Top) {
+ n = 0;
+ break;
+ } // endif Mempos
// Set caller line buffer
- len = (Mempos - Fpos) - 1;
+ len = (Mempos - Fpos) - n;
// Don't rely on ENDING setting
if (len > 0 && *(Mempos - 2) == '\r')
@@ -619,7 +622,9 @@ int MBKFAM::ReadBuffer(PGLOBAL g)
} // endif's
// Immediately calculate next position (Used by DeleteDB)
- while (*Mempos++ != '\n') ; // What about Unix ???
+ while (*Mempos++ != '\n') // What about Unix ???
+ if (Mempos == Top)
+ break;
// Set caller line buffer
len = (Mempos - Fpos) - Ending;
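
The MAPFAM hunks bound every end-of-line scan by Top, so a final record that lacks a newline can no longer walk past the mapped region. The same guard, reduced to a standalone helper:

    // Advance pos past the next '\n', or stop at top if none remains.
    // Returns false when the end of the mapping was reached first.
    static bool skip_line(const char *&pos, const char *top) {
      while (pos < top)
        if (*pos++ == '\n')
          return true;     // newline consumed; pos is the next record
      return false;        // last line had no terminating newline
    }

ReadBuffer applies the same idea to the computed line length: n drops to 0 when the scan stops at Top rather than on a newline.
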
diff --git a/storage/connect/filamdbf.cpp b/storage/connect/filamdbf.cpp
index 55feaa02bc4..2ac0071a92d 100644
--- a/storage/connect/filamdbf.cpp
+++ b/storage/connect/filamdbf.cpp
@@ -128,7 +128,7 @@ typedef struct _descriptor {
/* Moves file pointer to byte 32; fills buffer at buf with */
/* first 32 bytes of file. */
/****************************************************************************/
-static int dbfhead(PGLOBAL g, FILE *file, PSZ fn, DBFHEADER *buf)
+static int dbfhead(PGLOBAL g, FILE *file, PCSZ fn, DBFHEADER *buf)
{
char endmark[2];
int dbc = 2, rc = RC_OK;
@@ -186,7 +186,7 @@ static int dbfhead(PGLOBAL g, FILE *file, PSZ fn, DBFHEADER *buf)
/* DBFColumns: constructs the result blocks containing the description */
/* of all the columns of a DBF file that will be retrieved by #GetData. */
/****************************************************************************/
-PQRYRES DBFColumns(PGLOBAL g, char *dp, const char *fn, bool info)
+PQRYRES DBFColumns(PGLOBAL g, PCSZ dp, PCSZ fn, bool info)
{
int buftyp[] = {TYPE_STRING, TYPE_SHORT, TYPE_STRING,
TYPE_INT, TYPE_INT, TYPE_SHORT};
@@ -393,7 +393,7 @@ DBFBASE::DBFBASE(DBFBASE *txfp)
/* and header length. Set Records, check that Reclen is equal to lrecl and */
/* return the header length or 0 in case of error. */
/****************************************************************************/
-int DBFBASE::ScanHeader(PGLOBAL g, PSZ fn, int lrecl, int *rln, char *defpath)
+int DBFBASE::ScanHeader(PGLOBAL g, PCSZ fn, int lrecl, int *rln, PCSZ defpath)
{
int rc;
char filename[_MAX_PATH];
@@ -503,7 +503,8 @@ bool DBFFAM::OpenTableFile(PGLOBAL g)
break;
} // endif
- // Selective delete, pass thru
+ // Selective delete
+ /* fall through */
case MODE_UPDATE:
UseTemp = Tdbp->IsUsingTemp(g);
strcpy(opmode, (UseTemp) ? "rb" : "r+b");
@@ -623,6 +624,7 @@ bool DBFFAM::AllocateBuffer(PGLOBAL g)
case 'L': // Large (big) integer
case 'T': // Tiny integer
c = 'N'; // Numeric
+ /* fall through */
case 'N': // Numeric (integer)
case 'F': // Float (double)
descp->Decimals = (uchar)cdp->F.Prec;
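
Several hunks in this commit only add /* fall through */ before a case label that deliberately continues into the next one, which silences the implicit-fallthrough warnings of newer compilers without changing behaviour. In miniature:

    static char normalize_dbf_type(char c, bool &numeric) {
      switch (c) {
        case 'L':            // large (big) integer
        case 'T':            // tiny integer
          c = 'N';           // remap to numeric...
          /* fall through */ // ...and share the numeric handling below
        case 'N':
        case 'F':
          numeric = true;
          break;
        default:
          numeric = false;
      }
      return c;
    }
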
diff --git a/storage/connect/filamdbf.h b/storage/connect/filamdbf.h
index 66458a10eaa..640fc349b4c 100644
--- a/storage/connect/filamdbf.h
+++ b/storage/connect/filamdbf.h
@@ -19,7 +19,7 @@ typedef class DBMFAM *PDBMFAM;
/****************************************************************************/
/* Functions used externally. */
/****************************************************************************/
-PQRYRES DBFColumns(PGLOBAL g, char *dp, const char *fn, bool info);
+PQRYRES DBFColumns(PGLOBAL g, PCSZ dp, PCSZ fn, bool info);
/****************************************************************************/
/* This is the base class for dBASE file access methods. */
@@ -31,7 +31,7 @@ class DllExport DBFBASE {
DBFBASE(PDBF txfp);
// Implementation
- int ScanHeader(PGLOBAL g, PSZ fname, int lrecl, int *rlen, char *defpath);
+ int ScanHeader(PGLOBAL g, PCSZ fname, int lrecl, int *rlen, PCSZ defpath);
protected:
// Default constructor, not to be used
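
Much of the signature churn (ScanHeader, DBFColumns and many more below) swaps PSZ/char* parameters for PCSZ. Assuming PCSZ is CONNECT's const char* typedef (its definition is outside this diff), the point is that literals and read-only buffers can be passed without casts:

    typedef char       *PSZ;    // writable string (old parameter type)
    typedef const char *PCSZ;   // read-only string (new parameter type)

    static int scan_header_old(PSZ fn, PSZ path)   { return fn && path ? 0 : -1; }
    static int scan_header_new(PCSZ fn, PCSZ path) { return fn && path ? 0 : -1; }

    int main() {
      scan_header_new("table.dbf", "/var/data");                  // no casts
      scan_header_old((char*)"table.dbf", (char*)"/var/data");    // old form
      return 0;
    }
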
diff --git a/storage/connect/filamfix.cpp b/storage/connect/filamfix.cpp
index cd25429318a..1d6194b154d 100644
--- a/storage/connect/filamfix.cpp
+++ b/storage/connect/filamfix.cpp
@@ -761,7 +761,8 @@ bool BGXFAM::BigWrite(PGLOBAL g, HANDLE h, void *inbuf, int req)
htrc("after write req=%d brc=%d nbw=%d\n", req, brc, nbw);
if (!brc || nbw != len) {
- char buf[256], *fn = (h == Hfile) ? To_File : "Tempfile";
+ char buf[256];
+ PCSZ fn = (h == Hfile) ? To_File : "Tempfile";
if (brc)
strcpy(buf, MSG(BAD_BYTE_NUM));
@@ -919,7 +920,8 @@ bool BGXFAM::OpenTableFile(PGLOBAL g)
break;
} // endif
- // Selective delete, pass thru
+ // Selective delete
+ /* fall through */
case MODE_UPDATE:
UseTemp = Tdbp->IsUsingTemp(g);
oflag |= (UseTemp) ? O_RDONLY : O_RDWR;
diff --git a/storage/connect/filamgz.cpp b/storage/connect/filamgz.cpp
index dc6f277ee27..df366ef15f9 100644
--- a/storage/connect/filamgz.cpp
+++ b/storage/connect/filamgz.cpp
@@ -920,8 +920,8 @@ int ZLBFAM::GetFileLength(PGLOBAL g)
/***********************************************************************/
bool ZLBFAM::AllocateBuffer(PGLOBAL g)
{
- char *msg;
- int n, zrc;
+ PCSZ msg;
+ int n, zrc;
#if 0
if (!Optimized && Tdbp->NeedIndexing(g)) {
diff --git a/storage/connect/filamtxt.cpp b/storage/connect/filamtxt.cpp
index e53cdcd9ba9..c456ee9e9b7 100644
--- a/storage/connect/filamtxt.cpp
+++ b/storage/connect/filamtxt.cpp
@@ -1,11 +1,11 @@
/*********** File AM Txt C++ Program Source Code File (.CPP) ***********/
/* PROGRAM NAME: FILAMTXT */
/* ------------- */
-/* Version 1.6 */
+/* Version 1.7 */
/* */
/* COPYRIGHT: */
/* ---------- */
-/* (C) Copyright to the author Olivier BERTRAND 2005-2015 */
+/* (C) Copyright to the author Olivier BERTRAND 2005-2017 */
/* */
/* WHAT THIS PROGRAM DOES: */
/* ----------------------- */
@@ -71,8 +71,23 @@ TXTFAM::TXTFAM(PDOSDEF tdp)
{
Tdbp = NULL;
To_Fb = NULL;
- To_File = tdp->Fn;
- Lrecl = tdp->Lrecl;
+
+ if (tdp) {
+ To_File = tdp->Fn;
+ Lrecl = tdp->Lrecl;
+ Eof = tdp->Eof;
+ Ending = tdp->Ending;
+ } else {
+ To_File = NULL;
+ Lrecl = 0;
+ Eof = false;
+#if defined(__WIN__)
+ Ending = 2;
+#else
+ Ending = 1;
+#endif
+ } // endif tdp
+
Placed = false;
IsRead = true;
Blocked = false;
@@ -103,8 +118,6 @@ TXTFAM::TXTFAM(PDOSDEF tdp)
Blksize = 0;
Fpos = Spos = Tpos = 0;
Padded = false;
- Eof = tdp->Eof;
- Ending = tdp->Ending;
Abort = false;
CrLf = (char*)(Ending == 1 ? "\n" : "\r\n");
} // end of TXTFAM standard constructor
@@ -561,6 +574,7 @@ bool DOSFAM::OpenTableFile(PGLOBAL g)
// Selective delete, pass thru
Bin = true;
+ /* fall through */
case MODE_UPDATE:
if ((UseTemp = Tdbp->IsUsingTemp(g))) {
strcpy(opmode, "r");
@@ -973,7 +987,7 @@ int DOSFAM::DeleteRecords(PGLOBAL g, int irc)
} else {
/*****************************************************************/
- /* Move of eventual preceding lines is not required here. */
+ /* Move of eventual preceding lines is not required here. */
/* Set the target file as being the source file itself. */
/* Set the future Tpos, and give Spos a value to block copying. */
/*****************************************************************/
@@ -1161,13 +1175,13 @@ int DOSFAM::RenameTempFile(PGLOBAL g)
if (rename(filename, filetemp)) { // Save file for security
sprintf(g->Message, MSG(RENAME_ERROR),
filename, filetemp, strerror(errno));
- longjmp(g->jumper[g->jump_level], 51);
- } else if (rename(tempname, filename)) {
+ throw 51;
+ } else if (rename(tempname, filename)) {
sprintf(g->Message, MSG(RENAME_ERROR),
tempname, filename, strerror(errno));
rc = rename(filetemp, filename); // Restore saved file
- longjmp(g->jumper[g->jump_level], 52);
- } else if (remove(filetemp)) {
+ throw 52;
+ } else if (remove(filetemp)) {
sprintf(g->Message, MSG(REMOVE_ERROR),
filetemp, strerror(errno));
rc = RC_INFO; // Acceptable
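
The reworked TXTFAM constructor tolerates a null definition pointer and falls back to platform defaults for the record ending (two bytes for CR/LF on Windows, one elsewhere). A condensed sketch of that defensive initialization, with invented field names:

    struct FileDef { const char *fn; int lrecl; bool eof; int ending; };

    struct TextFam {
      const char *to_file;
      int         lrecl;
      bool        eof;
      int         ending;      // number of line-ending bytes

      explicit TextFam(const FileDef *def) {
        if (def) {             // normal case: copy from the definition
          to_file = def->fn;
          lrecl   = def->lrecl;
          eof     = def->eof;
          ending  = def->ending;
        } else {               // no definition yet: safe defaults
          to_file = nullptr;
          lrecl   = 0;
          eof     = false;
    #if defined(_WIN32)
          ending  = 2;         // "\r\n"
    #else
          ending  = 1;         // "\n"
    #endif
        }
      }
    };
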
diff --git a/storage/connect/filamtxt.h b/storage/connect/filamtxt.h
index ae8f74a9830..1fdae8fcd37 100644
--- a/storage/connect/filamtxt.h
+++ b/storage/connect/filamtxt.h
@@ -80,7 +80,7 @@ class DllExport TXTFAM : public BLOCK {
protected:
// Members
PTDBDOS Tdbp; // To table class
- PSZ To_File; // Points to table file name
+ PCSZ To_File; // Points to table file name
PFBLOCK To_Fb; // Pointer to file block
PPARM To_Pos; // Pointer to position list
PPARM To_Sos; // Pointer to start position list
diff --git a/storage/connect/filamvct.cpp b/storage/connect/filamvct.cpp
index fdc5433f4a4..537f77d01ac 100755
--- a/storage/connect/filamvct.cpp
+++ b/storage/connect/filamvct.cpp
@@ -143,9 +143,9 @@ int VCTFAM::GetFileLength(PGLOBAL g)
{
if (Split) {
// Get the total file length
- char filename[_MAX_PATH];
- char *savfile = To_File;
- int i, len = 0;
+ char filename[_MAX_PATH];
+ PCSZ savfile = To_File;
+ int i, len = 0;
// Initialize the array of file structures
if (!Colfn) {
@@ -313,8 +313,8 @@ int VCTFAM::Cardinality(PGLOBAL g)
// and Last must be set from the file cardinality.
// Only happens when called by sub classes.
char filename[_MAX_PATH];
- PSZ savfn = To_File;
- int len, clen, card = -1;
+ PCSZ savfn = To_File;
+ int len, clen, card = -1;
PCOLDEF cdp = Tdbp->GetDef()->GetCols();
if (!Colfn) {
@@ -368,7 +368,7 @@ int VCTFAM::GetRowID(void)
/***********************************************************************/
/* VCT Create an empty file for Vector formatted tables. */
/***********************************************************************/
-bool VCTFAM::MakeEmptyFile(PGLOBAL g, char *fn)
+bool VCTFAM::MakeEmptyFile(PGLOBAL g, PCSZ fn)
{
// Vector formatted file: this will create an empty file of the
// required length if it does not exists yet.
@@ -440,6 +440,7 @@ bool VCTFAM::OpenTableFile(PGLOBAL g)
} // endif
// Selective delete, pass thru
+ /* fall through */
case MODE_UPDATE:
UseTemp = Tdbp->IsUsingTemp(g);
strcpy(opmode, (UseTemp) ? "rb" : "r+b");
@@ -559,41 +560,42 @@ bool VCTFAM::AllocateBuffer(PGLOBAL g)
/* Do initial action when inserting. */
/***********************************************************************/
bool VCTFAM::InitInsert(PGLOBAL g)
- {
+{
+ bool rc = false;
+
// We come here in MODE_INSERT only
if (Last == Nrec) {
CurBlk = Block;
CurNum = 0;
AddBlock = !MaxBlk;
} else {
- int rc;
PVCTCOL cp = (PVCTCOL)Tdbp->GetColumns();
// The starting point must be at the end of file as for append.
CurBlk = Block - 1;
CurNum = Last;
- // Prepare error return
- if (g->jump_level == MAX_JUMP) {
- strcpy(g->Message, MSG(TOO_MANY_JUMPS));
- return true;
- } // endif
+ try {
+ // Last block must be updated by new values
+ for (; cp; cp = (PVCTCOL)cp->Next)
+ cp->ReadBlock(g);
- if ((rc = setjmp(g->jumper[++g->jump_level])) != 0) {
- g->jump_level--;
- return true;
- } // endif
+ } catch (int n) {
+ if (trace)
+ htrc("Exception %d: %s\n", n, g->Message);
+ rc = true;
+ } catch (const char *msg) {
+ strcpy(g->Message, msg);
+ rc = true;
+ } // end catch
- // Last block must be updated by new values
- for (; cp; cp = (PVCTCOL)cp->Next)
- cp->ReadBlock(g);
-
- g->jump_level--;
} // endif Last
- // We are not currently using a temporary file for Insert
- T_Stream = Stream;
- return false;
+ if (!rc)
+ // We are not currently using a temporary file for Insert
+ T_Stream = Stream;
+
+ return rc;
} // end of InitInsert
/***********************************************************************/
@@ -878,8 +880,9 @@ int VCTFAM::DeleteRecords(PGLOBAL g, int irc)
/***********************************************************************/
bool VCTFAM::OpenTempFile(PGLOBAL g)
{
- char *opmode, tempname[_MAX_PATH];
- bool rc = false;
+ PCSZ opmode;
+ char tempname[_MAX_PATH];
+ bool rc = false;
/*********************************************************************/
/* Open the temporary file, Spos is at the beginning of file. */
@@ -1107,7 +1110,7 @@ void VCTFAM::CloseTableFile(PGLOBAL g, bool abort)
} else if (AddBlock) {
// Last block was not written
rc = ResetTableSize(g, CurBlk, Nrec);
- longjmp(g->jumper[g->jump_level], 44);
+ throw 44;
} // endif
} else if (mode == MODE_UPDATE) {
@@ -1527,8 +1530,8 @@ bool VCMFAM::AllocateBuffer(PGLOBAL g)
/* Do initial action when inserting. */
/***********************************************************************/
bool VCMFAM::InitInsert(PGLOBAL g)
- {
- int rc;
+{
+ bool rc = false;
volatile PVCTCOL cp = (PVCTCOL)Tdbp->GetColumns();
// We come here in MODE_INSERT only
@@ -1542,24 +1545,22 @@ bool VCMFAM::InitInsert(PGLOBAL g)
CurNum = Last;
} // endif Last
- // Prepare error return
- if (g->jump_level == MAX_JUMP) {
- strcpy(g->Message, MSG(TOO_MANY_JUMPS));
- return true;
- } // endif
-
- if ((rc = setjmp(g->jumper[++g->jump_level])) != 0) {
- g->jump_level--;
- return true;
- } // endif
+ try {
+ // Initialize the column block pointer
+ for (; cp; cp = (PVCTCOL)cp->Next)
+ cp->ReadBlock(g);
- // Initialize the column block pointer
- for (; cp; cp = (PVCTCOL)cp->Next)
- cp->ReadBlock(g);
+ } catch (int n) {
+ if (trace)
+ htrc("Exception %d: %s\n", n, g->Message);
+ rc = true;
+ } catch (const char *msg) {
+ strcpy(g->Message, msg);
+ rc = true;
+ } // end catch
- g->jump_level--;
- return false;
- } // end of InitInsert
+ return rc;
+} // end of InitInsert
/***********************************************************************/
/* Data Base write routine for VMP access method. */
@@ -1918,6 +1919,7 @@ bool VECFAM::OpenTableFile(PGLOBAL g)
} // endif filter
// Selective delete, pass thru
+ /* fall through */
case MODE_UPDATE:
UseTemp = Tdbp->IsUsingTemp(g);
strcpy(opmode, (UseTemp) ? "rb": "r+b");
@@ -1998,7 +2000,7 @@ bool VECFAM::OpenTableFile(PGLOBAL g)
/***********************************************************************/
/* Open the file corresponding to one column. */
/***********************************************************************/
-bool VECFAM::OpenColumnFile(PGLOBAL g, char *opmode, int i)
+bool VECFAM::OpenColumnFile(PGLOBAL g, PCSZ opmode, int i)
{
char filename[_MAX_PATH];
PDBUSER dup = PlgGetUser(g);
@@ -2503,7 +2505,7 @@ void VECFAM::CloseTableFile(PGLOBAL g, bool abort)
if (wrc != RC_FX)
rc = ResetTableSize(g, Block, Last);
else
- longjmp(g->jumper[g->jump_level], 44);
+ throw 44;
} else if (mode == MODE_UPDATE) {
if (UseTemp && !InitUpdate && !Abort) {
@@ -3143,7 +3145,8 @@ bool BGVFAM::BigWrite(PGLOBAL g, HANDLE h, void *inbuf, int req)
htrc("after write req=%d brc=%d nbw=%d\n", req, brc, nbw);
if (!brc || nbw != len) {
- char buf[256], *fn = (h == Hfile) ? To_File : "Tempfile";
+ char buf[256];
+ PCSZ fn = (h == Hfile) ? To_File : "Tempfile";
if (brc)
strcpy(buf, MSG(BAD_BYTE_NUM));
@@ -3319,7 +3322,7 @@ bool BGVFAM::SetBlockInfo(PGLOBAL g)
/***********************************************************************/
/* VEC Create an empty file for new Vector formatted tables. */
/***********************************************************************/
-bool BGVFAM::MakeEmptyFile(PGLOBAL g, char *fn)
+bool BGVFAM::MakeEmptyFile(PGLOBAL g, PCSZ fn)
{
// Vector formatted file this will create an empty file of the
// required length if it does not exists yet.
@@ -3329,7 +3332,7 @@ bool BGVFAM::MakeEmptyFile(PGLOBAL g, char *fn)
PlugSetPath(filename, fn, Tdbp->GetPath());
#if defined(__WIN__)
- char *p;
+ PCSZ p;
DWORD rc;
bool brc;
LARGE_INTEGER of;
@@ -3584,6 +3587,7 @@ bool BGVFAM::OpenTableFile(PGLOBAL g)
} // endif
// Selective delete, pass thru
+ /* fall through */
case MODE_UPDATE:
UseTemp = Tdbp->IsUsingTemp(g);
oflag = (UseTemp) ? O_RDONLY : O_RDWR;
@@ -4164,8 +4168,8 @@ void BGVFAM::CloseTableFile(PGLOBAL g, bool abort)
} else if (AddBlock) {
// Last block was not written
rc = ResetTableSize(g, CurBlk, Nrec);
- longjmp(g->jumper[g->jump_level], 44);
- } // endif
+ throw 44;
+ } // endif
} else if (mode == MODE_UPDATE) {
// Write back to file any pending modifications
diff --git a/storage/connect/filamvct.h b/storage/connect/filamvct.h
index be66232acfb..85982403270 100644
--- a/storage/connect/filamvct.h
+++ b/storage/connect/filamvct.h
@@ -61,7 +61,7 @@ class DllExport VCTFAM : public FIXFAM {
virtual bool WriteBlock(PGLOBAL g, PVCTCOL colp);
protected:
- virtual bool MakeEmptyFile(PGLOBAL g, char *fn);
+ virtual bool MakeEmptyFile(PGLOBAL g, PCSZ fn);
virtual bool OpenTempFile(PGLOBAL g);
virtual bool MoveLines(PGLOBAL g) {return false;}
virtual bool MoveIntermediateLines(PGLOBAL g, bool *b = NULL);
@@ -160,7 +160,7 @@ class DllExport VECFAM : public VCTFAM {
virtual bool MoveLines(PGLOBAL g);
virtual bool MoveIntermediateLines(PGLOBAL g, bool *b = NULL);
virtual int RenameTempFile(PGLOBAL g);
- bool OpenColumnFile(PGLOBAL g, char *opmode, int i);
+ bool OpenColumnFile(PGLOBAL g, PCSZ opmode, int i);
// Members
FILE* *Streams; // Points to Dos file structure array
@@ -235,7 +235,7 @@ class BGVFAM : public VCTFAM {
bool BigSeek(PGLOBAL g, HANDLE h, BIGINT pos, bool b = false);
bool BigRead(PGLOBAL g, HANDLE h, void *inbuf, int req);
bool BigWrite(PGLOBAL g, HANDLE h, void *inbuf, int req);
- virtual bool MakeEmptyFile(PGLOBAL g, char *fn);
+ virtual bool MakeEmptyFile(PGLOBAL g, PCSZ fn);
virtual bool OpenTempFile(PGLOBAL g);
virtual bool MoveIntermediateLines(PGLOBAL g, bool *b = NULL);
virtual bool CleanUnusedSpace(PGLOBAL g);
diff --git a/storage/connect/filamzip.cpp b/storage/connect/filamzip.cpp
index 3d157da5e87..eb06ee7ad1e 100644
--- a/storage/connect/filamzip.cpp
+++ b/storage/connect/filamzip.cpp
@@ -1,7 +1,7 @@
/*********** File AM Zip C++ Program Source Code File (.CPP) ***********/
/* PROGRAM NAME: FILAMZIP */
/* ------------- */
-/* Version 1.1 */
+/* Version 1.3 */
/* */
/* COPYRIGHT: */
/* ---------- */
@@ -45,12 +45,12 @@
#define WRITEBUFFERSIZE (16384)
-bool ZipLoadFile(PGLOBAL g, char *zfn, char *fn, char *entry, bool append, bool mul);
+bool ZipLoadFile(PGLOBAL g, PCSZ zfn, PCSZ fn, PCSZ entry, bool append, bool mul);
/***********************************************************************/
/* Compress a file in zip when creating a table. */
/***********************************************************************/
-static bool ZipFile(PGLOBAL g, ZIPUTIL *zutp, char *fn, char *entry, char *buf)
+static bool ZipFile(PGLOBAL g, ZIPUTIL *zutp, PCSZ fn, PCSZ entry, char *buf)
{
int rc = RC_OK, size_read, size_buf = WRITEBUFFERSIZE;
FILE *fin;
@@ -88,7 +88,7 @@ static bool ZipFile(PGLOBAL g, ZIPUTIL *zutp, char *fn, char *entry, char *buf)
/***********************************************************************/
/* Find and Compress several files in zip when creating a table. */
/***********************************************************************/
-static bool ZipFiles(PGLOBAL g, ZIPUTIL *zutp, char *pat, char *buf)
+static bool ZipFiles(PGLOBAL g, ZIPUTIL *zutp, PCSZ pat, char *buf)
{
char filename[_MAX_PATH];
int rc;
@@ -203,7 +203,7 @@ static bool ZipFiles(PGLOBAL g, ZIPUTIL *zutp, char *pat, char *buf)
/***********************************************************************/
/* Load and Compress a file in zip when creating a table. */
/***********************************************************************/
-bool ZipLoadFile(PGLOBAL g, char *zfn, char *fn, char *entry, bool append, bool mul)
+bool ZipLoadFile(PGLOBAL g, PCSZ zfn, PCSZ fn, PCSZ entry, bool append, bool mul)
{
char *buf;
bool err;
@@ -228,7 +228,7 @@ bool ZipLoadFile(PGLOBAL g, char *zfn, char *fn, char *entry, bool append, bool
/***********************************************************************/
/* Constructors. */
/***********************************************************************/
-ZIPUTIL::ZIPUTIL(PSZ tgt)
+ZIPUTIL::ZIPUTIL(PCSZ tgt)
{
zipfile = NULL;
target = tgt;
@@ -269,7 +269,7 @@ void ZIPUTIL::getTime(tm_zip& tmZip)
/* append: set true to append the zip file */
/* return: true if open, false otherwise. */
/***********************************************************************/
-bool ZIPUTIL::open(PGLOBAL g, char *filename, bool append)
+bool ZIPUTIL::open(PGLOBAL g, PCSZ filename, bool append)
{
if (!zipfile && !(zipfile = zipOpen64(filename,
append ? APPEND_STATUS_ADDINZIP
@@ -295,7 +295,7 @@ void ZIPUTIL::close()
/***********************************************************************/
/* OpenTableFile: Open a DOS/UNIX table file from a ZIP file. */
/***********************************************************************/
-bool ZIPUTIL::OpenTable(PGLOBAL g, MODE mode, char *fn, bool append)
+bool ZIPUTIL::OpenTable(PGLOBAL g, MODE mode, PCSZ fn, bool append)
{
/*********************************************************************/
/* The file will be compressed. */
@@ -338,10 +338,10 @@ bool ZIPUTIL::OpenTable(PGLOBAL g, MODE mode, char *fn, bool append)
/***********************************************************************/
/* Add target in zip file. */
/***********************************************************************/
-bool ZIPUTIL::addEntry(PGLOBAL g, char *entry)
+bool ZIPUTIL::addEntry(PGLOBAL g, PCSZ entry)
{
//?? we dont need the stinking time
- zip_fileinfo zi = { 0, 0, 0, 0, 0, 0, 0, 0, 0 };
+ zip_fileinfo zi = { {0, 0, 0, 0, 0, 0}, 0, 0, 0 };
getTime(zi.tmz_date);
target = entry;
@@ -382,10 +382,11 @@ void ZIPUTIL::closeEntry()
/***********************************************************************/
/* Constructors. */
/***********************************************************************/
-UNZIPUTL::UNZIPUTL(PSZ tgt, bool mul)
+UNZIPUTL::UNZIPUTL(PCSZ tgt, bool mul)
{
zipfile = NULL;
target = tgt;
+ pwd = NULL;
fp = NULL;
memory = NULL;
size = 0;
@@ -401,6 +402,26 @@ UNZIPUTL::UNZIPUTL(PSZ tgt, bool mul)
#endif
} // end of UNZIPUTL standard constructor
+UNZIPUTL::UNZIPUTL(PDOSDEF tdp)
+{
+ zipfile = NULL;
+ target = tdp->GetEntry();
+ pwd = tdp->Pwd;
+ fp = NULL;
+ memory = NULL;
+ size = 0;
+ entryopen = false;
+ multiple = tdp->GetMul();
+ memset(fn, 0, sizeof(fn));
+
+ // Init the case mapping table.
+#if defined(__WIN__)
+ for (int i = 0; i < 256; ++i) mapCaseTable[i] = toupper(i);
+#else
+ for (int i = 0; i < 256; ++i) mapCaseTable[i] = i;
+#endif
+} // end of UNZIPUTL standard constructor
+
#if 0
UNZIPUTL::UNZIPUTL(PZIPUTIL zutp)
{
@@ -418,8 +439,8 @@ UNZIPUTL::UNZIPUTL(PZIPUTIL zutp)
/* This code is the copyright property of Alessandro Felice Cantatore. */
/* http://xoomer.virgilio.it/acantato/dev/wildcard/wildmatch.html */
/***********************************************************************/
-bool UNZIPUTL::WildMatch(PSZ pat, PSZ str) {
- PSZ s, p;
+bool UNZIPUTL::WildMatch(PCSZ pat, PCSZ str) {
+ PCSZ s, p;
bool star = FALSE;
loopStart:
@@ -453,7 +474,7 @@ starCheck:
/* param: filename path and the filename of the zip file to open. */
/* return: true if open, false otherwise. */
/***********************************************************************/
-bool UNZIPUTL::open(PGLOBAL g, char *filename)
+bool UNZIPUTL::open(PGLOBAL g, PCSZ filename)
{
if (!zipfile && !(zipfile = unzOpen64(filename)))
sprintf(g->Message, "Zipfile open error on %s", filename);
@@ -543,7 +564,7 @@ int UNZIPUTL::nextEntry(PGLOBAL g)
/***********************************************************************/
/* OpenTableFile: Open a DOS/UNIX table file from a ZIP file. */
/***********************************************************************/
-bool UNZIPUTL::OpenTable(PGLOBAL g, MODE mode, char *fn)
+bool UNZIPUTL::OpenTable(PGLOBAL g, MODE mode, PCSZ fn)
{
/*********************************************************************/
/* The file will be decompressed into virtual memory. */
@@ -581,7 +602,7 @@ bool UNZIPUTL::OpenTable(PGLOBAL g, MODE mode, char *fn)
if (openEntry(g))
return true;
- if (size > 0) {
+ if (size > 0) {
/*******************************************************************/
/* Link a Fblock. This make possible to automatically close it */
/* in case of error g->jump. */
@@ -613,6 +634,28 @@ bool UNZIPUTL::OpenTable(PGLOBAL g, MODE mode, char *fn)
} // end of OpenTableFile
/***********************************************************************/
+/* Insert only if the entry does not exist. */
+/***********************************************************************/
+bool UNZIPUTL::IsInsertOk(PGLOBAL g, PCSZ fn)
+{
+ bool ok = true, b = open(g, fn);
+
+ if (!b) {
+ if (!target || *target == 0) {
+ unz_global_info64 ginfo;
+ int err = unzGetGlobalInfo64(zipfile, &ginfo);
+
+ ok = !(err == UNZ_OK && ginfo.number_entry > 0);
+ } else // Check if the target exist
+ ok = (unzLocateFile(zipfile, target, 0) != UNZ_OK);
+
+ unzClose(zipfile);
+ } // endif b
+
+ return ok;
+} // end of IsInsertOk
+
+/***********************************************************************/
/* Open target in zip file. */
/***********************************************************************/
bool UNZIPUTL::openEntry(PGLOBAL g)
@@ -625,18 +668,24 @@ bool UNZIPUTL::openEntry(PGLOBAL g)
if (rc != UNZ_OK) {
sprintf(g->Message, "unzGetCurrentFileInfo64 rc=%d", rc);
return true;
- } else if ((rc = unzOpenCurrentFile(zipfile)) != UNZ_OK) {
+ } else if ((rc = unzOpenCurrentFilePassword(zipfile, pwd)) != UNZ_OK) {
sprintf(g->Message, "unzOpen fn=%s rc=%d", fn, rc);
return true;
} // endif rc
size = finfo.uncompressed_size;
- memory = new char[size + 1];
+
+ try {
+ memory = new char[size + 1];
+ } catch (...) {
+ strcpy(g->Message, "Out of memory");
+ return true;
+ } // end try/catch
if ((rc = unzReadCurrentFile(zipfile, memory, size)) < 0) {
sprintf(g->Message, "unzReadCurrentFile rc = %d", rc);
unzCloseCurrentFile(zipfile);
- free(memory);
+ delete[] memory;
memory = NULL;
entryopen = false;
} else {
@@ -661,7 +710,7 @@ void UNZIPUTL::closeEntry()
} // endif entryopen
if (memory) {
- free(memory);
+ delete[] memory;
memory = NULL;
} // endif memory
@@ -675,15 +724,17 @@ void UNZIPUTL::closeEntry()
UNZFAM::UNZFAM(PDOSDEF tdp) : MAPFAM(tdp)
{
zutp = NULL;
- target = tdp->GetEntry();
- mul = tdp->GetMul();
+ tdfp = tdp;
+ //target = tdp->GetEntry();
+ //mul = tdp->GetMul();
} // end of UNZFAM standard constructor
UNZFAM::UNZFAM(PUNZFAM txfp) : MAPFAM(txfp)
{
zutp = txfp->zutp;
- target = txfp->target;
- mul = txfp->mul;
+ tdfp = txfp->tdfp;
+ //target = txfp->target;
+ //mul = txfp->mul;
} // end of UNZFAM copy constructor
/***********************************************************************/
@@ -711,7 +762,13 @@ int UNZFAM::Cardinality(PGLOBAL g)
int card = -1;
int len = GetFileLength(g);
- card = (len / (int)Lrecl) * 2; // Estimated ???
+ if (len) {
+ // Estimated ???
+ card = (len / (int)Lrecl) * 2;
+ card = card ? card : 10; // Lrecl can be too big
+ } else
+ card = 0;
+
return card;
} // end of Cardinality
@@ -726,7 +783,7 @@ bool UNZFAM::OpenTableFile(PGLOBAL g)
/*********************************************************************/
/* Allocate the ZIP utility class. */
/*********************************************************************/
- zutp = new(g) UNZIPUTL(target, mul);
+ zutp = new(g) UNZIPUTL(tdfp);
// We used the file name relative to recorded datapath
PlugSetPath(filename, To_File, Tdbp->GetPath());
@@ -841,17 +898,19 @@ void UNZFAM::CloseTableFile(PGLOBAL g, bool)
UZXFAM::UZXFAM(PDOSDEF tdp) : MPXFAM(tdp)
{
zutp = NULL;
- target = tdp->GetEntry();
- mul = tdp->GetMul();
+ tdfp = tdp;
+ //target = tdp->GetEntry();
+ //mul = tdp->GetMul();
//Lrecl = tdp->GetLrecl();
} // end of UZXFAM standard constructor
UZXFAM::UZXFAM(PUZXFAM txfp) : MPXFAM(txfp)
{
zutp = txfp->zutp;
- target = txfp->target;
- mul = txfp->mul;
-//Lrecl = txfp->Lrecl;
+ tdfp = txfp->tdfp;
+ //target = txfp->target;
+ //mul = txfp->mul;
+ //Lrecl = txfp->Lrecl;
} // end of UZXFAM copy constructor
/***********************************************************************/
@@ -907,7 +966,7 @@ bool UZXFAM::OpenTableFile(PGLOBAL g)
/* Allocate the ZIP utility class. */
/*********************************************************************/
if (!zutp)
- zutp = new(g)UNZIPUTL(target, mul);
+ zutp = new(g)UNZIPUTL(tdfp);
// We used the file name relative to recorded datapath
PlugSetPath(filename, To_File, Tdbp->GetPath());
@@ -969,6 +1028,25 @@ bool ZIPFAM::OpenTableFile(PGLOBAL g)
{
char filename[_MAX_PATH];
MODE mode = Tdbp->GetMode();
+ int len = TXTFAM::GetFileLength(g);
+
+ // We used the file name relative to recorded datapath
+ PlugSetPath(filename, To_File, Tdbp->GetPath());
+
+ if (len < 0)
+ return true;
+ else if (!append && len > 0) {
+ strcpy(g->Message, "No insert into existing zip file");
+ return true;
+ } else if (append && len > 0) {
+ UNZIPUTL *zutp = new(g) UNZIPUTL(target, false);
+
+ if (!zutp->IsInsertOk(g, filename)) {
+ strcpy(g->Message, "No insert into existing entry");
+ return true;
+ } // endif Ok
+
+ } // endif's
/*********************************************************************/
/* Allocate the ZIP utility class. */
@@ -1028,15 +1106,31 @@ ZPXFAM::ZPXFAM(PDOSDEF tdp) : FIXFAM(tdp)
target = tdp->GetEntry();
append = tdp->GetAppend();
//Lrecl = tdp->GetLrecl();
-} // end of UZXFAM standard constructor
+} // end of ZPXFAM standard constructor
/***********************************************************************/
/* OpenTableFile: Open a DOS/UNIX table file from a ZIP file. */
/***********************************************************************/
bool ZPXFAM::OpenTableFile(PGLOBAL g)
{
- char filename[_MAX_PATH];
- MODE mode = Tdbp->GetMode();
+ char filename[_MAX_PATH];
+ MODE mode = Tdbp->GetMode();
+ int len = TXTFAM::GetFileLength(g);
+
+ if (len < 0)
+ return true;
+ else if (!append && len > 0) {
+ strcpy(g->Message, "No insert into existing zip file");
+ return true;
+ } else if (append && len > 0) {
+ UNZIPUTL *zutp = new(g) UNZIPUTL(target, false);
+
+ if (!zutp->IsInsertOk(g, filename)) {
+ strcpy(g->Message, "No insert into existing entry");
+ return true;
+ } // endif Ok
+
+ } // endif's
/*********************************************************************/
/* Allocate the ZIP utility class. */
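
In filamzip.cpp the entry is now opened with unzOpenCurrentFilePassword (a null password behaves like the old unzOpenCurrentFile), the buffer allocation is guarded, and the buffer is released with delete[] to match new[] instead of free(). A trimmed sketch against the minizip API, with error handling reduced:

    #include <cstdio>
    #include "unzip.h"   // minizip

    // Read the currently located entry into a new[] buffer of 'size' bytes.
    // Returns nullptr and fills msg on failure; caller must delete[] the result.
    static char *read_current_entry(unzFile zf, const char *pwd,
                                    unsigned size, char *msg, size_t len) {
      if (unzOpenCurrentFilePassword(zf, pwd) != UNZ_OK) {   // pwd may be null
        std::snprintf(msg, len, "cannot open zip entry");
        return nullptr;
      }

      char *memory = nullptr;
      try {
        memory = new char[size + 1];           // may throw std::bad_alloc
      } catch (...) {
        std::snprintf(msg, len, "Out of memory");
        unzCloseCurrentFile(zf);
        return nullptr;
      }

      if (unzReadCurrentFile(zf, memory, size) < 0) {
        std::snprintf(msg, len, "unzReadCurrentFile failed");
        unzCloseCurrentFile(zf);
        delete[] memory;                       // matches new[], not free()
        return nullptr;
      }

      memory[size] = 0;
      unzCloseCurrentFile(zf);
      return memory;
    }
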
diff --git a/storage/connect/filamzip.h b/storage/connect/filamzip.h
index 3160703bd20..be17d954728 100644
--- a/storage/connect/filamzip.h
+++ b/storage/connect/filamzip.h
@@ -1,5 +1,5 @@
/************** filamzip H Declares Source Code File (.H) **************/
-/* Name: filamzip.h Version 1.1 */
+/* Name: filamzip.h Version 1.2 */
/* */
/* (C) Copyright to the author Olivier BERTRAND 2016-2017 */
/* */
@@ -27,16 +27,13 @@ typedef class ZPXFAM *PZPXFAM;
class DllExport ZIPUTIL : public BLOCK {
public:
// Constructor
- ZIPUTIL(PSZ tgt);
+ ZIPUTIL(PCSZ tgt);
//ZIPUTIL(ZIPUTIL *zutp);
- // Implementation
- //PTXF Duplicate(PGLOBAL g) { return (PTXF) new(g)UNZFAM(this); }
-
// Methods
- bool OpenTable(PGLOBAL g, MODE mode, char *fn, bool append);
- bool open(PGLOBAL g, char *fn, bool append);
- bool addEntry(PGLOBAL g, char *entry);
+ bool OpenTable(PGLOBAL g, MODE mode, PCSZ fn, bool append);
+ bool open(PGLOBAL g, PCSZ fn, bool append);
+ bool addEntry(PGLOBAL g, PCSZ entry);
void close(void);
void closeEntry(void);
int writeEntry(PGLOBAL g, char *buf, int len);
@@ -44,15 +41,10 @@ class DllExport ZIPUTIL : public BLOCK {
// Members
zipFile zipfile; // The ZIP container file
- PSZ target; // The target file name
-//unz_file_info finfo; // The current file info
+ PCSZ target; // The target file name
+ PCSZ pwd; // The ZIP file password
PFBLOCK fp;
-//char *memory;
-//uint size;
-//int multiple; // Multiple targets
bool entryopen; // True when open current entry
-//char fn[FILENAME_MAX]; // The current entry file name
-//char mapCaseTable[256];
}; // end of ZIPUTIL
/***********************************************************************/
@@ -61,25 +53,27 @@ class DllExport ZIPUTIL : public BLOCK {
class DllExport UNZIPUTL : public BLOCK {
public:
// Constructor
- UNZIPUTL(PSZ tgt, bool mul);
-//UNZIPUTL(UNZIPUTL *zutp);
+ UNZIPUTL(PCSZ tgt, bool mul);
+ UNZIPUTL(PDOSDEF tdp);
// Implementation
//PTXF Duplicate(PGLOBAL g) { return (PTXF) new(g)UNZFAM(this); }
// Methods
- bool OpenTable(PGLOBAL g, MODE mode, char *fn);
- bool open(PGLOBAL g, char *fn);
+ bool OpenTable(PGLOBAL g, MODE mode, PCSZ fn);
+ bool open(PGLOBAL g, PCSZ fn);
bool openEntry(PGLOBAL g);
void close(void);
void closeEntry(void);
- bool WildMatch(PSZ pat, PSZ str);
+ bool WildMatch(PCSZ pat, PCSZ str);
int findEntry(PGLOBAL g, bool next);
int nextEntry(PGLOBAL g);
+ bool IsInsertOk(PGLOBAL g, PCSZ fn);
// Members
unzFile zipfile; // The ZIP container file
- PSZ target; // The target file name
+ PCSZ target; // The target file name
+ PCSZ pwd; // The ZIP file password
unz_file_info finfo; // The current file info
PFBLOCK fp;
char *memory;
@@ -119,8 +113,7 @@ class DllExport UNZFAM : public MAPFAM {
protected:
// Members
UNZIPUTL *zutp;
- PSZ target;
- bool mul;
+ PDOSDEF tdfp;
}; // end of UNZFAM
/***********************************************************************/
@@ -147,8 +140,7 @@ class DllExport UZXFAM : public MPXFAM {
protected:
// Members
UNZIPUTL *zutp;
- PSZ target;
- bool mul;
+ PDOSDEF tdfp;
}; // end of UZXFAM
/***********************************************************************/
@@ -175,8 +167,9 @@ class DllExport ZIPFAM : public DOSFAM {
protected:
// Members
ZIPUTIL *zutp;
- PSZ target;
+ PCSZ target;
bool append;
+//bool replace;
}; // end of ZIPFAM
/***********************************************************************/
@@ -200,7 +193,7 @@ class DllExport ZPXFAM : public FIXFAM {
protected:
// Members
ZIPUTIL *zutp;
- PSZ target;
+ PCSZ target;
bool append;
}; // end of ZPXFAM
diff --git a/storage/connect/filter.cpp b/storage/connect/filter.cpp
index 262d6b58a70..da44b129ccb 100644
--- a/storage/connect/filter.cpp
+++ b/storage/connect/filter.cpp
@@ -1,7 +1,7 @@
/***************** Filter C++ Class Filter Code (.CPP) *****************/
-/* Name: FILTER.CPP Version 3.9 */
+/* Name: FILTER.CPP Version 4.0 */
/* */
-/* (C) Copyright to the author Olivier BERTRAND 1998-2014 */
+/* (C) Copyright to the author Olivier BERTRAND 1998-2017 */
/* */
/* This file contains the class FILTER function code. */
/***********************************************************************/
@@ -87,8 +87,8 @@ BYTE OpBmp(PGLOBAL g, OPVAL opc)
case OP_EXIST: bt = 0x00; break;
default:
sprintf(g->Message, MSG(BAD_FILTER_OP), opc);
- longjmp(g->jumper[g->jump_level], TYPE_ARRAY);
- } // endswitch opc
+ throw TYPE_ARRAY;
+ } // endswitch opc
return bt;
} // end of OpBmp
@@ -1193,7 +1193,7 @@ bool FILTER::Convert(PGLOBAL g, bool having)
Arg(0) = pXVOID;
} // endif void
- // pass thru
+ // fall through
case OP_IN:
// For IN operator do optimize if operand is an array
if (GetArgType(1) != TYPE_ARRAY)
@@ -1260,6 +1260,7 @@ bool FILTER::Eval(PGLOBAL g)
} // endif Opm
// For modified operators, pass thru
+ /* fall through */
case OP_IN:
case OP_EXIST:
// For IN operations, special processing is done here
@@ -1408,7 +1409,7 @@ PFIL FILTER::Copy(PTABS t)
/*********************************************************************/
/* Make file output of FILTER contents. */
/*********************************************************************/
-void FILTER::Print(PGLOBAL g, FILE *f, uint n)
+void FILTER::Printf(PGLOBAL g, FILE *f, uint n)
{
char m[64];
@@ -1430,7 +1431,7 @@ void FILTER::Print(PGLOBAL g, FILE *f, uint n)
if (lin && fp->GetArgType(i) == TYPE_FILTER)
fprintf(f, "%s Filter at %p\n", m, fp->Arg(i));
else
- fp->Arg(i)->Print(g, f, n + 2);
+ fp->Arg(i)->Printf(g, f, n + 2);
} // endfor i
@@ -1441,7 +1442,7 @@ void FILTER::Print(PGLOBAL g, FILE *f, uint n)
/***********************************************************************/
/* Make string output of TABLE contents (z should be checked). */
/***********************************************************************/
-void FILTER::Print(PGLOBAL g, char *ps, uint z)
+void FILTER::Prints(PGLOBAL g, char *ps, uint z)
{
#define FLEN 100
@@ -1469,7 +1470,7 @@ void FILTER::Print(PGLOBAL g, char *ps, uint z)
bcp = bxp;
p = bcp->Cold;
n = FLEN;
- fp->Arg(0)->Print(g, p, n);
+ fp->Arg(0)->Prints(g, p, n);
n = FLEN - strlen(p);
switch (fp->Opc) {
@@ -1515,7 +1516,7 @@ void FILTER::Print(PGLOBAL g, char *ps, uint z)
n = FLEN - strlen(p);
p += strlen(p);
- fp->Arg(1)->Print(g, p, n);
+ fp->Arg(1)->Prints(g, p, n);
} else
if (!bcp) {
strncat(ps, "???", z);
@@ -1711,7 +1712,7 @@ PFIL PrepareFilter(PGLOBAL g, PFIL fp, bool having)
break; // Remove eventual ending separator(s)
// if (fp->Convert(g, having))
-// longjmp(g->jumper[g->jump_level], TYPE_FILTER);
+// throw TYPE_ARRAY;
filp = fp;
fp = fp->Next;
@@ -1744,7 +1745,7 @@ DllExport bool ApplyFilter(PGLOBAL g, PFIL filp)
// return TRUE;
if (filp->Eval(g))
- longjmp(g->jumper[g->jump_level], TYPE_FILTER);
+ throw TYPE_FILTER;
if (trace > 1)
htrc("PlugFilter filp=%p result=%d\n",
diff --git a/storage/connect/filter.h b/storage/connect/filter.h
index 15730e2cc44..22d1e4ed4be 100644
--- a/storage/connect/filter.h
+++ b/storage/connect/filter.h
@@ -61,8 +61,8 @@ class DllExport FILTER : public XOBJECT { /* Filter description block */
//virtual PXOB CheckSubQuery(PGLOBAL, PSQL);
//virtual bool CheckLocal(PTDB);
//virtual int CheckSpcCol(PTDB tdbp, int n);
- virtual void Print(PGLOBAL g, FILE *f, uint n);
- virtual void Print(PGLOBAL g, char *ps, uint z);
+ virtual void Printf(PGLOBAL g, FILE *f, uint n);
+ virtual void Prints(PGLOBAL g, char *ps, uint z);
// PFIL Linearize(bool nosep);
// PFIL Link(PGLOBAL g, PFIL fil2);
// PFIL RemoveLastSep(void);
diff --git a/storage/connect/global.h b/storage/connect/global.h
index 4d01a3ff05b..a2030fdb5d0 100644
--- a/storage/connect/global.h
+++ b/storage/connect/global.h
@@ -1,6 +1,6 @@
/***********************************************************************/
/* GLOBAL.H: Declaration file used by all CONNECT implementations. */
-/* (C) Copyright Olivier Bertrand 1993-2014 */
+/* (C) Copyright Olivier Bertrand 1993-2017 */
/***********************************************************************/
/***********************************************************************/
@@ -59,7 +59,7 @@
#define NO_IVAL -95684275 /* Used by GetIntegerOption */
#define VMLANG 370 /* Size of olf VM lang blocks */
#define MAX_JUMP 24 /* Maximum jump level number */
-#define MAX_STR 1024 /* Maximum string length */
+#define MAX_STR 4160 /* Maximum message length */
#define STR_SIZE 501 /* Length of char strings. */
#define STD_INPUT 0 /* Standard language input */
#define STD_OUTPUT 1 /* Standard language output */
@@ -229,9 +229,10 @@ typedef struct _parm {
typedef struct _global { /* Global structure */
void *Sarea; /* Points to work area */
uint Sarea_Size; /* Work area size */
- PACTIVITY Activityp, ActivityStart;
+ PACTIVITY Activityp;
char Message[MAX_STR];
- int Createas; /* To pass info to created table */
+ ulong More; /* Used by jsonudf */
+ int Createas; /* To pass info to created table */
void *Xchk; /* indexes in create/alter */
short Alchecked; /* Checked for ALTER */
short Mrr; /* True when doing mrr */
diff --git a/storage/connect/ha_connect.cc b/storage/connect/ha_connect.cc
index 1b6078cc3d9..887d692ba69 100644
--- a/storage/connect/ha_connect.cc
+++ b/storage/connect/ha_connect.cc
@@ -172,9 +172,9 @@
#define JSONMAX 10 // JSON Default max grp size
extern "C" {
- char version[]= "Version 1.05.0003 February 27, 2017";
+ char version[]= "Version 1.06.0001 April 17, 2017";
#if defined(__WIN__)
- char compver[]= "Version 1.05.0003 " __DATE__ " " __TIME__;
+ char compver[]= "Version 1.06.0001 " __DATE__ " " __TIME__;
char slash= '\\';
#else // !__WIN__
char slash= '/';
@@ -213,10 +213,11 @@ PQRYRES JSONColumns(PGLOBAL g, char *db, PTOS topt, bool info);
PQRYRES XMLColumns(PGLOBAL g, char *db, char *tab, PTOS topt, bool info);
int TranslateJDBCType(int stp, char *tn, int prec, int& len, char& v);
void PushWarning(PGLOBAL g, THD *thd, int level);
-bool CheckSelf(PGLOBAL g, TABLE_SHARE *s, const char *host,
- const char *db, char *tab, const char *src, int port);
-bool ZipLoadFile(PGLOBAL, char*, char*, char*, bool, bool);
+bool CheckSelf(PGLOBAL g, TABLE_SHARE *s, PCSZ host, PCSZ db,
+ PCSZ tab, PCSZ src, int port);
+bool ZipLoadFile(PGLOBAL, PCSZ, PCSZ, PCSZ, bool, bool);
bool ExactInfo(void);
+void mongo_init(bool);
USETEMP UseTemp(void);
int GetConvSize(void);
TYPCONV GetTypeConv(void);
@@ -509,7 +510,7 @@ ha_create_table_option connect_table_option_list[]=
HA_TOPTION_NUMBER("LRECL", lrecl, 0, 0, INT_MAX32, 1),
HA_TOPTION_NUMBER("BLOCK_SIZE", elements, 0, 0, INT_MAX32, 1),
//HA_TOPTION_NUMBER("ESTIMATE", estimate, 0, 0, INT_MAX32, 1),
- HA_TOPTION_NUMBER("MULTIPLE", multiple, 0, 0, 2, 1),
+ HA_TOPTION_NUMBER("MULTIPLE", multiple, 0, 0, 3, 1),
HA_TOPTION_NUMBER("HEADER", header, 0, 0, 3, 1),
HA_TOPTION_NUMBER("QUOTED", quoted, (ulonglong) -1, 0, 3, 1),
HA_TOPTION_NUMBER("ENDING", ending, (ulonglong) -1, 0, INT_MAX32, 1),
@@ -670,7 +671,7 @@ static int connect_init_func(void *p)
sql_print_information("CONNECT: %s", version);
#endif // !__WIN__
-#ifdef LIBXML2_SUPPORT
+#if defined(LIBXML2_SUPPORT)
XmlInitParserLib();
#endif // LIBXML2_SUPPORT
@@ -1018,15 +1019,14 @@ ulonglong ha_connect::table_flags() const
/****************************************************************************/
/* Return the value of an option specified in an option list. */
/****************************************************************************/
-char *GetListOption(PGLOBAL g, const char *opname,
- const char *oplist, const char *def)
+PCSZ GetListOption(PGLOBAL g, PCSZ opname, PCSZ oplist, PCSZ def)
{
if (!oplist)
return (char*)def;
char key[16], val[256];
char *pk, *pv, *pn;
- char *opval= (char*)def;
+ PCSZ opval= def;
int n;
for (pk= (char*)oplist; pk; pk= ++pn) {
@@ -1062,9 +1062,9 @@ char *GetListOption(PGLOBAL g, const char *opname,
/****************************************************************************/
/* Return the value of a string option or NULL if not specified. */
/****************************************************************************/
-char *GetStringTableOption(PGLOBAL g, PTOS options, char *opname, char *sdef)
+PCSZ GetStringTableOption(PGLOBAL g, PTOS options, PCSZ opname, PCSZ sdef)
{
- const char *opval= NULL;
+ PCSZ opval= NULL;
if (!options)
return sdef;
@@ -1107,10 +1107,10 @@ char *GetStringTableOption(PGLOBAL g, PTOS options, char *opname, char *sdef)
/****************************************************************************/
/* Return the value of a Boolean option or bdef if not specified. */
/****************************************************************************/
-bool GetBooleanTableOption(PGLOBAL g, PTOS options, char *opname, bool bdef)
+bool GetBooleanTableOption(PGLOBAL g, PTOS options, PCSZ opname, bool bdef)
{
- bool opval= bdef;
- char *pv;
+ bool opval= bdef;
+ PCSZ pv;
if (!options)
return bdef;
@@ -1138,7 +1138,7 @@ bool GetBooleanTableOption(PGLOBAL g, PTOS options, char *opname, bool bdef)
/****************************************************************************/
/* Return the value of an integer option or NO_IVAL if not specified. */
/****************************************************************************/
-int GetIntegerTableOption(PGLOBAL g, PTOS options, char *opname, int idef)
+int GetIntegerTableOption(PGLOBAL g, PTOS options, PCSZ opname, int idef)
{
ulonglong opval= (ulonglong) NO_IVAL;
@@ -1160,10 +1160,10 @@ int GetIntegerTableOption(PGLOBAL g, PTOS options, char *opname, int idef)
opval= (options->compressed);
if ((ulonglong) opval == (ulonglong)NO_IVAL) {
- char *pv;
+ PCSZ pv;
if ((pv= GetListOption(g, opname, options->oplist)))
- opval= CharToNumber(pv, strlen(pv), ULONGLONG_MAX, true);
+ opval= CharToNumber((char*)pv, strlen(pv), ULONGLONG_MAX, true);
else
return idef;
@@ -1188,7 +1188,7 @@ PTOS ha_connect::GetTableOptionStruct(TABLE_SHARE *s)
/****************************************************************************/
/* Return the string eventually formatted with partition name. */
/****************************************************************************/
-char *ha_connect::GetRealString(const char *s)
+char *ha_connect::GetRealString(PCSZ s)
{
char *sv;
@@ -1205,10 +1205,10 @@ char *ha_connect::GetRealString(const char *s)
/****************************************************************************/
/* Return the value of a string option or sdef if not specified. */
/****************************************************************************/
-char *ha_connect::GetStringOption(char *opname, char *sdef)
+PCSZ ha_connect::GetStringOption(PCSZ opname, PCSZ sdef)
{
- char *opval= NULL;
- PTOS options= GetTableOptionStruct();
+ PCSZ opval= NULL;
+ PTOS options= GetTableOptionStruct();
if (!stricmp(opname, "Connect")) {
LEX_CSTRING cnc= (tshp) ? tshp->connect_string
@@ -1267,7 +1267,7 @@ char *ha_connect::GetStringOption(char *opname, char *sdef)
/****************************************************************************/
/* Return the value of a Boolean option or bdef if not specified. */
/****************************************************************************/
-bool ha_connect::GetBooleanOption(char *opname, bool bdef)
+bool ha_connect::GetBooleanOption(PCSZ opname, bool bdef)
{
bool opval;
PTOS options= GetTableOptionStruct();
@@ -1284,7 +1284,7 @@ bool ha_connect::GetBooleanOption(char *opname, bool bdef)
/* Set the value of the opname option (does not work for oplist options) */
/* Currently used only to set the Sepindex value. */
/****************************************************************************/
-bool ha_connect::SetBooleanOption(char *opname, bool b)
+bool ha_connect::SetBooleanOption(PCSZ opname, bool b)
{
PTOS options= GetTableOptionStruct();
@@ -1302,7 +1302,7 @@ bool ha_connect::SetBooleanOption(char *opname, bool b)
/****************************************************************************/
/* Return the value of an integer option or NO_IVAL if not specified. */
/****************************************************************************/
-int ha_connect::GetIntegerOption(char *opname)
+int ha_connect::GetIntegerOption(PCSZ opname)
{
int opval;
PTOS options= GetTableOptionStruct();
@@ -1322,7 +1322,7 @@ int ha_connect::GetIntegerOption(char *opname)
/* Set the value of the opname option (does not work for oplist options) */
/* Currently used only to set the Lrecl value. */
/****************************************************************************/
-bool ha_connect::SetIntegerOption(char *opname, int n)
+bool ha_connect::SetIntegerOption(PCSZ opname, int n)
{
PTOS options= GetTableOptionStruct();
@@ -1427,7 +1427,7 @@ void *ha_connect::GetColumnOption(PGLOBAL g, void *field, PCOLINFO pcf)
case MYSQL_TYPE_VARCHAR:
case MYSQL_TYPE_VAR_STRING:
pcf->Flags |= U_VAR;
- /* no break */
+ /* fall through */
default:
pcf->Type= MYSQLtoPLG(fp->type(), &v);
break;
@@ -1522,7 +1522,7 @@ PXOS ha_connect::GetIndexOptionStruct(KEY *kp)
/****************************************************************************/
/* Return a Boolean index option or false if not specified. */
/****************************************************************************/
-bool ha_connect::GetIndexOption(KEY *kp, char *opname)
+bool ha_connect::GetIndexOption(KEY *kp, PCSZ opname)
{
bool opval= false;
PXOS options= GetIndexOptionStruct(kp);
@@ -1534,7 +1534,7 @@ bool ha_connect::GetIndexOption(KEY *kp, char *opname)
opval= options->mapped;
} else if (kp->comment.str && kp->comment.length) {
- char *pv, *oplist= strz(xp->g, kp->comment);
+ PCSZ pv, oplist= strz(xp->g, kp->comment);
if ((pv= GetListOption(xp->g, opname, oplist)))
opval= (!*pv || *pv == 'y' || *pv == 'Y' || atoi(pv) != 0);
@@ -1739,9 +1739,9 @@ void ha_connect::AddColName(char *cp, Field *fp)
/***********************************************************************/
/* This function sets the current database path. */
/***********************************************************************/
-void ha_connect::SetDataPath(PGLOBAL g, const char *path)
+bool ha_connect::SetDataPath(PGLOBAL g, const char *path)
{
- datapath= SetPath(g, path);
+ return (!(datapath= SetPath(g, path)));
} // end of SetDataPath
/****************************************************************************/
@@ -1895,40 +1895,36 @@ int ha_connect::OpenTable(PGLOBAL g, bool del)
bool ha_connect::CheckColumnList(PGLOBAL g)
{
// Check the list of used fields (columns)
- int rc;
bool brc= false;
PCOL colp;
Field* *field;
Field* fp;
MY_BITMAP *map= table->read_set;
- // Save stack and allocation environment and prepare error return
- if (g->jump_level == MAX_JUMP) {
- strcpy(g->Message, MSG(TOO_MANY_JUMPS));
- return true;
- } // endif jump_level
-
- if ((rc= setjmp(g->jumper[++g->jump_level])) == 0) {
+ try {
for (field= table->field; fp= *field; field++)
if (bitmap_is_set(map, fp->field_index)) {
if (!(colp= tdbp->ColDB(g, (PSZ)fp->field_name.str, 0))) {
sprintf(g->Message, "Column %s not found in %s",
fp->field_name.str, tdbp->GetName());
- brc= true;
- goto fin;
- } // endif colp
+ throw 1;
+ } // endif colp
if ((brc= colp->InitValue(g)))
- goto fin;
+ throw 2;
colp->AddColUse(U_P); // For PLG tables
} // endif
- } else
- brc= true;
+ } catch (int n) {
+ if (trace)
+ htrc("Exception %d: %s\n", n, g->Message);
+ brc = true;
+ } catch (const char *msg) {
+ strcpy(g->Message, msg);
+ brc = true;
+ } // end catch
- fin:
- g->jump_level--;
return brc;
} // end of CheckColumnList
@@ -1965,7 +1961,8 @@ int ha_connect::CloseTable(PGLOBAL g)
/***********************************************************************/
int ha_connect::MakeRecord(char *buf)
{
- char *p, *fmt, val[32];
+ PCSZ fmt;
+ char *p, val[32];
int rc= 0;
Field* *field;
Field *fp;
@@ -2101,7 +2098,7 @@ int ha_connect::ScanRecord(PGLOBAL g, const uchar *)
{
char attr_buffer[1024];
char data_buffer[1024];
- char *fmt;
+ PCSZ fmt;
int rc= 0;
PCOL colp;
PVAL value, sdvalin;
@@ -2274,7 +2271,7 @@ bool ha_connect::MakeKeyWhere(PGLOBAL g, PSTRG qry, OPVAL vop, char q,
const uchar *ptr;
//uint i, rem, len, klen, stlen;
uint i, rem, len, stlen;
- bool nq, both, oom= false;
+ bool nq, both, oom;
OPVAL op;
Field *fp;
const key_range *ranges[2];
@@ -2302,9 +2299,9 @@ bool ha_connect::MakeKeyWhere(PGLOBAL g, PSTRG qry, OPVAL vop, char q,
continue;
if (both && i > 0)
- oom|= qry->Append(") AND (");
+ qry->Append(") AND (");
else
- oom|= qry->Append(" WHERE (");
+ qry->Append(" WHERE (");
// klen= len= ranges[i]->length;
len= ranges[i]->length;
@@ -2317,14 +2314,14 @@ bool ha_connect::MakeKeyWhere(PGLOBAL g, PSTRG qry, OPVAL vop, char q,
nq= fp->str_needs_quotes();
if (kpart != kfp->key_part)
- oom|= qry->Append(" AND ");
+ qry->Append(" AND ");
if (q) {
- oom|= qry->Append(q);
- oom|= qry->Append((PSZ)fp->field_name.str);
- oom|= qry->Append(q);
+ qry->Append(q);
+ qry->Append((PSZ)fp->field_name.str);
+ qry->Append(q);
} else
- oom|= qry->Append((PSZ)fp->field_name.str);
+ qry->Append((PSZ)fp->field_name.str);
switch (ranges[i]->flag) {
case HA_READ_KEY_EXACT:
@@ -2349,10 +2346,10 @@ bool ha_connect::MakeKeyWhere(PGLOBAL g, PSTRG qry, OPVAL vop, char q,
goto err;
} // endswitch flag
- oom|= qry->Append((PSZ)GetValStr(op, false));
+ qry->Append((PSZ)GetValStr(op, false));
if (nq)
- oom|= qry->Append('\'');
+ qry->Append('\'');
if (kpart->key_part_flag & HA_VAR_LENGTH_PART) {
String varchar;
@@ -2360,17 +2357,17 @@ bool ha_connect::MakeKeyWhere(PGLOBAL g, PSTRG qry, OPVAL vop, char q,
varchar.set_quick((char*)ptr + HA_KEY_BLOB_LENGTH,
var_length, &my_charset_bin);
- oom|= qry->Append(varchar.ptr(), varchar.length(), nq);
+ qry->Append(varchar.ptr(), varchar.length(), nq);
} else {
char strbuff[MAX_FIELD_WIDTH];
String str(strbuff, sizeof(strbuff), kpart->field->charset()), *res;
res= fp->val_str(&str, ptr);
- oom|= qry->Append(res->ptr(), res->length(), nq);
+ qry->Append(res->ptr(), res->length(), nq);
} // endif flag
if (nq)
- oom |= qry->Append('\'');
+ qry->Append('\'');
if (stlen >= len)
break;
@@ -2385,7 +2382,9 @@ bool ha_connect::MakeKeyWhere(PGLOBAL g, PSTRG qry, OPVAL vop, char q,
} // endfor i
- if ((oom|= qry->Append(")")))
+ qry->Append(')');
+
+ if ((oom= qry->IsTruncated()))
strcpy(g->Message, "Out of memory");
dbug_tmp_restore_column_map(table->write_set, old_map);
@@ -2698,6 +2697,8 @@ PCFIL ha_connect::CheckCond(PGLOBAL g, PCFIL filp, const Item *cond)
if (x)
return NULL;
+ else
+ pb0= pb1= pb2= ph0= ph1= ph2= NULL;
if (trace)
htrc("Cond: Ftype=%d name=%s\n", cond_item->functype(),
@@ -2800,6 +2801,7 @@ PCFIL ha_connect::CheckCond(PGLOBAL g, PCFIL filp, const Item *cond)
case Item_func::LIKE_FUNC: vop= OP_LIKE; break;
case Item_func::ISNOTNULL_FUNC:
neg = true;
+ /* fall through */
case Item_func::ISNULL_FUNC: vop= OP_NULL; break;
case Item_func::IN_FUNC: vop= OP_IN;
case Item_func::BETWEEN:
@@ -3053,7 +3055,6 @@ const COND *ha_connect::cond_push(const COND *cond)
DBUG_ENTER("ha_connect::cond_push");
if (tdbp) {
- int rc;
PGLOBAL& g= xp->g;
AMT tty= tdbp->GetAmType();
bool x= (tty == TYPE_AM_MYX || tty == TYPE_AM_XDBC);
@@ -3061,58 +3062,66 @@ const COND *ha_connect::cond_push(const COND *cond)
tty == TYPE_AM_TBL || tty == TYPE_AM_MYSQL ||
tty == TYPE_AM_PLG || tty == TYPE_AM_JDBC || x);
- // Save stack and allocation environment and prepare error return
- if (g->jump_level == MAX_JUMP) {
- strcpy(g->Message, MSG(TOO_MANY_JUMPS));
- DBUG_RETURN(cond);
- } // endif jump_level
+ // This should never happen but is done to avoid crashing
+ try {
+ if (b) {
+ PCFIL filp;
+ int rc;
- // This should never happen but is done to avoid crashing
- if ((rc= setjmp(g->jumper[++g->jump_level])) != 0)
- goto fin;
+ if ((filp = tdbp->GetCondFil()) && tdbp->GetCond() == cond &&
+ filp->Idx == active_index && filp->Type == tty)
+ goto fin;
- if (b) {
- PCFIL filp;
- int rc;
+ filp = new(g) CONDFIL(active_index, tty);
+ rc = filp->Init(g, this);
- if ((filp= tdbp->GetCondFil()) && filp->Cond == cond &&
- filp->Idx == active_index && filp->Type == tty)
- goto fin; // Already done
+ if (rc == RC_INFO) {
+ filp->Having = (char*)PlugSubAlloc(g, NULL, 256);
+ *filp->Having = 0;
+ } else if (rc == RC_FX)
+ goto fin;
- filp= new(g) CONDFIL(cond, active_index, tty);
- rc = filp->Init(g, this);
+ filp->Body = (char*)PlugSubAlloc(g, NULL, (x) ? 128 : 0);
+ *filp->Body = 0;
- if (rc == RC_INFO) {
- filp->Having = (char*)PlugSubAlloc(g, NULL, 256);
- *filp->Having = 0;
- } else if (rc == RC_FX)
- goto fin;
+ if (CheckCond(g, filp, cond)) {
+ if (filp->Having && strlen(filp->Having) > 255)
+ goto fin; // Memory collapse
- filp->Body = (char*)PlugSubAlloc(g, NULL, (x) ? 128 : 0);
- *filp->Body = 0;
+ if (trace)
+ htrc("cond_push: %s\n", filp->Body);
- if (CheckCond(g, filp, cond)) {
- if (filp->Having && strlen(filp->Having) > 255)
- goto fin; // Memory collapse
+ tdbp->SetCond(cond);
- if (trace)
- htrc("cond_push: %s\n", filp->Body);
+ if (!x)
+ PlugSubAlloc(g, NULL, strlen(filp->Body) + 1);
+ else
+ cond = NULL; // Does this work?
- if (!x)
- PlugSubAlloc(g, NULL, strlen(filp->Body) + 1);
- else
- cond= NULL; // Does this work?
+ tdbp->SetCondFil(filp);
+ } else if (x && cond)
+ tdbp->SetCondFil(filp); // Wrong filter
- tdbp->SetCondFil(filp);
- } else if (x && cond)
- tdbp->SetCondFil(filp); // Wrong filter
+ } else if (tdbp->CanBeFiltered()) {
+ if (!tdbp->GetCond() || tdbp->GetCond() != cond) {
+ tdbp->SetFilter(CondFilter(g, (Item *)cond));
- } else if (tty != TYPE_AM_JSN && tty != TYPE_AM_JSON)
- tdbp->SetFilter(CondFilter(g, (Item *)cond));
+ if (tdbp->GetFilter())
+ tdbp->SetCond(cond);
- fin:
- g->jump_level--;
- } // endif tdbp
+ } // endif cond
+
+ } // endif tty
+
+ } catch (int n) {
+ if (trace)
+ htrc("Exception %d: %s\n", n, g->Message);
+ } catch (const char *msg) {
+ strcpy(g->Message, msg);
+ } // end catch
+
+ fin:;
+ } // endif tdbp
// Let MySQL do the filtering
DBUG_RETURN(cond);
@@ -3262,26 +3271,36 @@ int ha_connect::optimize(THD* thd, HA_CHECK_OPT*)
PGLOBAL& g= xp->g;
PDBUSER dup= PlgGetUser(g);
- // Ignore error on the opt file
- dup->Check &= ~CHK_OPT;
- tdbp= GetTDB(g);
- dup->Check |= CHK_OPT;
+ try {
+ // Ignore error on the opt file
+ dup->Check &= ~CHK_OPT;
+ tdbp = GetTDB(g);
+ dup->Check |= CHK_OPT;
- if (tdbp && !tdbp->IsRemote()) {
- bool dop= IsTypeIndexable(GetRealType(NULL));
- bool dox= (tdbp->GetDef()->Indexable() == 1);
+ if (tdbp && !tdbp->IsRemote()) {
+ bool dop = IsTypeIndexable(GetRealType(NULL));
+ bool dox = (tdbp->GetDef()->Indexable() == 1);
- if ((rc= ((PTDBASE)tdbp)->ResetTableOpt(g, dop, dox))) {
- if (rc == RC_INFO) {
- push_warning(thd, Sql_condition::WARN_LEVEL_WARN, 0, g->Message);
- rc= 0;
- } else
- rc= HA_ERR_INTERNAL_ERROR;
+ if ((rc = ((PTDBASE)tdbp)->ResetTableOpt(g, dop, dox))) {
+ if (rc == RC_INFO) {
+ push_warning(thd, Sql_condition::WARN_LEVEL_WARN, 0, g->Message);
+ rc = 0;
+ } else
+ rc = HA_ERR_INTERNAL_ERROR;
- } // endif rc
+ } // endif rc
- } else if (!tdbp)
- rc= HA_ERR_INTERNAL_ERROR;
+ } else if (!tdbp)
+ rc = HA_ERR_INTERNAL_ERROR;
+
+ } catch (int n) {
+ if (trace)
+ htrc("Exception %d: %s\n", n, g->Message);
+ rc = HA_ERR_INTERNAL_ERROR;
+ } catch (const char *msg) {
+ strcpy(g->Message, msg);
+ rc = HA_ERR_INTERNAL_ERROR;
+ } // end catch
return rc;
} // end of optimize
@@ -3976,8 +3995,12 @@ int ha_connect::rnd_pos(uchar *buf, uchar *pos)
tdbp->SetFilter(NULL);
rc= rnd_next(buf);
- } else
- rc= HA_ERR_KEY_NOT_FOUND;
+ } else {
+ PGLOBAL g = GetPlug((table) ? table->in_use : NULL, xp);
+ strcpy(g->Message, "Not supported by this table type");
+ my_message(ER_ILLEGAL_HA, g->Message, MYF(0));
+ rc= HA_ERR_INTERNAL_ERROR;
+ } // endif SetRecpos
DBUG_RETURN(rc);
} // end of rnd_pos
@@ -4043,9 +4066,13 @@ int ha_connect::info(uint flag)
} // endif xmod
// This is necessary for getting file length
- if (table)
- SetDataPath(g, table->s->db.str);
- else
+ if (table) {
+ if (SetDataPath(g, table->s->db.str)) {
+ my_message(ER_UNKNOWN_ERROR, g->Message, MYF(0));
+ DBUG_RETURN(HA_ERR_INTERNAL_ERROR);
+ } // endif SetDataPath
+
+ } else
DBUG_RETURN(HA_ERR_INTERNAL_ERROR); // Should never happen
if (!(tdbp= GetTDB(g)))
@@ -4195,35 +4222,36 @@ bool ha_connect::check_privileges(THD *thd, PTOS options, const char *dbn, bool
case TAB_INI:
case TAB_VEC:
case TAB_JSON:
- if (options->filename && *options->filename) {
- if (!quick) {
- char *s, path[FN_REFLEN], dbpath[FN_REFLEN];
+ if (options->filename && *options->filename) {
+ if (!quick) {
+ char path[FN_REFLEN], dbpath[FN_REFLEN];
+
+ strcpy(dbpath, mysql_real_data_home);
+
+ if (db)
#if defined(__WIN__)
- s= "\\";
+ strcat(strcat(dbpath, db), "\\");
#else // !__WIN__
- s= "/";
+ strcat(strcat(dbpath, db), "/");
#endif // !__WIN__
- strcpy(dbpath, mysql_real_data_home);
-
- if (db)
- strcat(strcat(dbpath, db), s);
- (void) fn_format(path, options->filename, dbpath, "",
- MY_RELATIVE_PATH | MY_UNPACK_FILENAME);
+ (void)fn_format(path, options->filename, dbpath, "",
+ MY_RELATIVE_PATH | MY_UNPACK_FILENAME);
- if (!is_secure_file_path(path)) {
- my_error(ER_OPTION_PREVENTS_STATEMENT, MYF(0), "--secure-file-priv");
- return true;
- } // endif path
- }
- } else
+ if (!is_secure_file_path(path)) {
+ my_error(ER_OPTION_PREVENTS_STATEMENT, MYF(0), "--secure-file-priv");
+ return true;
+ } // endif path
+ }
+ } else
return false;
- /* Fall through to check FILE_ACL */
+ /* check FILE_ACL */
+ /* fall through */
case TAB_ODBC:
case TAB_JDBC:
case TAB_MYSQL:
- case TAB_DIR:
+ case TAB_DIR:
case TAB_MAC:
case TAB_WMI:
case TAB_ZIP:
@@ -4528,9 +4556,11 @@ int ha_connect::external_lock(THD *thd, int lock_type)
DBUG_RETURN(0);
} else if (g->Xchk) {
if (!tdbp) {
- if (!(tdbp= GetTDB(g)))
- DBUG_RETURN(HA_ERR_INTERNAL_ERROR);
- else if (!tdbp->GetDef()->Indexable()) {
+ if (!(tdbp = GetTDB(g))) {
+// DBUG_RETURN(HA_ERR_INTERNAL_ERROR); causes assert error
+ push_warning(thd, Sql_condition::WARN_LEVEL_WARN, 0, g->Message);
+ DBUG_RETURN(0);
+ } else if (!tdbp->GetDef()->Indexable()) {
sprintf(g->Message, "external_lock: Table %s is not indexable", tdbp->GetName());
// DBUG_RETURN(HA_ERR_INTERNAL_ERROR); causes assert error
push_warning(thd, Sql_condition::WARN_LEVEL_WARN, 0, g->Message);
@@ -4614,7 +4644,9 @@ int ha_connect::external_lock(THD *thd, int lock_type)
push_warning(thd, Sql_condition::WARN_LEVEL_WARN,
0, g->Message);
rc= 0;
- } // endif MakeIndex
+ //my_message(ER_UNKNOWN_ERROR, g->Message, MYF(0));
+ //rc= HA_ERR_INTERNAL_ERROR;
+ } // endif MakeIndex
} else if (tdbp->GetDef()->Indexable() == 3) {
if (CheckVirtualIndex(NULL)) {
@@ -4635,9 +4667,12 @@ int ha_connect::external_lock(THD *thd, int lock_type)
// Make it a warning to avoid crash
push_warning(thd, Sql_condition::WARN_LEVEL_WARN, 0, g->Message);
rc= 0;
- } // endif Close
+ //my_message(ER_UNKNOWN_ERROR, g->Message, MYF(0));
+ //rc = HA_ERR_INTERNAL_ERROR;
+ } // endif Close
locked= 0;
+// m_lock_type= lock_type;
xmod= MODE_ANY; // For info commands
DBUG_RETURN(rc);
} // endif MODE_ANY
@@ -4995,8 +5030,8 @@ ha_rows ha_connect::records_in_range(uint inx, key_range *min_key,
} // end of records_in_range
// Used to check whether a MYSQL table is created on itself
-bool CheckSelf(PGLOBAL g, TABLE_SHARE *s, const char *host,
- const char *db, char *tab, const char *src, int port)
+bool CheckSelf(PGLOBAL g, TABLE_SHARE *s, PCSZ host,
+ PCSZ db, PCSZ tab, PCSZ src, int port)
{
if (src)
return false;
@@ -5232,41 +5267,41 @@ static int connect_assisted_discovery(handlerton *, THD* thd,
TABLE_SHARE *table_s,
HA_CREATE_INFO *create_info)
{
- char v=0;
- const char *fncn= "?";
- const char *user, *fn, *db, *host, *pwd, *sep, *tbl, *src;
- const char *col, *ocl, *rnk, *pic, *fcl, *skc, *zfn;
- char *tab, *dsn, *shm, *dpath;
+ char v=0;
+ PCSZ fncn= "?";
+ PCSZ user, fn, db, host, pwd, sep, tbl, src;
+ PCSZ col, ocl, rnk, pic, fcl, skc, zfn;
+ char *tab, *dsn, *shm, *dpath;
#if defined(__WIN__)
- char *nsp= NULL, *cls= NULL;
+ PCSZ nsp= NULL, cls= NULL;
#endif // __WIN__
-//int hdr, mxe;
- int port = 0, mxr = 0, rc = 0, mul = 0, lrecl = 0;
+//int hdr, mxe;
+ int port = 0, mxr = 0, rc = 0, mul = 0, lrecl = 0;
#if defined(ODBC_SUPPORT)
- POPARM sop= NULL;
- char *ucnc= NULL;
- bool cnc= false;
- int cto= -1, qto= -1;
+ POPARM sop= NULL;
+ PCSZ ucnc= NULL;
+ bool cnc= false;
+ int cto= -1, qto= -1;
#endif // ODBC_SUPPORT
#if defined(JDBC_SUPPORT)
- PJPARM sjp= NULL;
- char *driver= NULL;
- char *url= NULL;
-//char *prop= NULL;
- char *tabtyp= NULL;
+ PJPARM sjp= NULL;
+ PCSZ driver= NULL;
+ char *url= NULL;
+//char *prop= NULL;
+ PCSZ tabtyp= NULL;
#endif // JDBC_SUPPORT
- uint tm, fnc= FNC_NO, supfnc= (FNC_NO | FNC_COL);
- bool bif, ok= false, dbf= false;
- TABTYPE ttp= TAB_UNDEF;
- PQRYRES qrp= NULL;
- PCOLRES crp;
- PCONNECT xp= NULL;
- PGLOBAL g= GetPlug(thd, xp);
- PDBUSER dup= PlgGetUser(g);
- PCATLG cat= (dup) ? dup->Catalog : NULL;
- PTOS topt= table_s->option_struct;
- char buf[1024];
- String sql(buf, sizeof(buf), system_charset_info);
+ uint tm, fnc= FNC_NO, supfnc= (FNC_NO | FNC_COL);
+ bool bif, ok= false, dbf= false;
+ TABTYPE ttp= TAB_UNDEF;
+ PQRYRES qrp= NULL;
+ PCOLRES crp;
+ PCONNECT xp= NULL;
+ PGLOBAL g= GetPlug(thd, xp);
+ PDBUSER dup= PlgGetUser(g);
+ PCATLG cat= (dup) ? dup->Catalog : NULL;
+ PTOS topt= table_s->option_struct;
+ char buf[1024];
+ String sql(buf, sizeof(buf), system_charset_info);
sql.copy(STRING_WITH_LEN("CREATE TABLE whatever ("), system_charset_info);
@@ -5291,7 +5326,7 @@ static int connect_assisted_discovery(handlerton *, THD* thd,
if (topt->oplist) {
host= GetListOption(g, "host", topt->oplist, "localhost");
user= GetListOption(g, "user", topt->oplist,
- (ttp == TAB_ODBC ? NULL : "root"));
+ ((ttp == TAB_ODBC || ttp == TAB_JDBC) ? NULL : "root"));
// Default value db can come from the DBNAME=xxx option.
db= GetListOption(g, "database", topt->oplist, db);
col= GetListOption(g, "colist", topt->oplist, col);
@@ -5328,132 +5363,122 @@ static int connect_assisted_discovery(handlerton *, THD* thd,
#endif // ZIP_SUPPORT
} else {
host= "localhost";
- user= (ttp == TAB_ODBC ? NULL : "root");
+ user= ((ttp == TAB_ODBC || ttp == TAB_JDBC) ? NULL : "root");
} // endif option_list
if (!(shm= (char*)db))
db= table_s->db.str; // Default value
- // Save stack and allocation environment and prepare error return
- if (g->jump_level == MAX_JUMP) {
- strcpy(g->Message, MSG(TOO_MANY_JUMPS));
- goto jer;
- } // endif jump_level
-
- if ((rc= setjmp(g->jumper[++g->jump_level])) != 0) {
- my_message(ER_UNKNOWN_ERROR, g->Message, MYF(0));
- goto err;
- } // endif rc
-
- // Check table type
- if (ttp == TAB_UNDEF) {
- topt->type= (src) ? "MYSQL" : (tab) ? "PROXY" : "DOS";
- ttp= GetTypeID(topt->type);
- sprintf(g->Message, "No table_type. Was set to %s", topt->type);
- push_warning(thd, Sql_condition::WARN_LEVEL_WARN, 0, g->Message);
- } else if (ttp == TAB_NIY) {
- sprintf(g->Message, "Unsupported table type %s", topt->type);
- my_message(ER_UNKNOWN_ERROR, g->Message, MYF(0));
- goto err;
- } // endif ttp
-
- if (!tab) {
- if (ttp == TAB_TBL) {
- // Make tab the first table of the list
- char *p;
-
- if (!tbl) {
- strcpy(g->Message, "Missing table list");
- my_message(ER_UNKNOWN_ERROR, g->Message, MYF(0));
- goto err;
- } // endif tbl
-
- tab= PlugDup(g, tbl);
-
- if ((p= strchr(tab, ',')))
- *p= 0;
-
- if ((p=strchr(tab, '.'))) {
- *p= 0;
- db= tab;
- tab= p + 1;
- } // endif p
-
- } else if (ttp != TAB_ODBC || !(fnc & (FNC_TABLE | FNC_COL)))
- tab= (char*) table_s->table_name.str; // Default value
-
- } // endif tab
-
- switch (ttp) {
+ try {
+ // Check table type
+ if (ttp == TAB_UNDEF) {
+ topt->type = (src) ? "MYSQL" : (tab) ? "PROXY" : "DOS";
+ ttp = GetTypeID(topt->type);
+ sprintf(g->Message, "No table_type. Was set to %s", topt->type);
+ push_warning(thd, Sql_condition::WARN_LEVEL_WARN, 0, g->Message);
+ } else if (ttp == TAB_NIY) {
+ sprintf(g->Message, "Unsupported table type %s", topt->type);
+ rc = HA_ERR_INTERNAL_ERROR;
+ goto err;
+ } // endif ttp
+
+ if (!tab) {
+ if (ttp == TAB_TBL) {
+ // Make tab the first table of the list
+ char *p;
+
+ if (!tbl) {
+ strcpy(g->Message, "Missing table list");
+ rc = HA_ERR_INTERNAL_ERROR;
+ goto err;
+ } // endif tbl
+
+ tab = PlugDup(g, tbl);
+
+ if ((p = strchr(tab, ',')))
+ *p = 0;
+
+ if ((p = strchr(tab, '.'))) {
+ *p = 0;
+ db = tab;
+ tab = p + 1;
+ } // endif p
+
+ } else if (ttp != TAB_ODBC || !(fnc & (FNC_TABLE | FNC_COL)))
+ tab = (char *) table_s->table_name.str; // Default value
+
+ } // endif tab
+
+ switch (ttp) {
#if defined(ODBC_SUPPORT)
- case TAB_ODBC:
- dsn= strz(g, create_info->connect_string);
+ case TAB_ODBC:
+ dsn = strz(g, create_info->connect_string);
- if (fnc & (FNC_DSN | FNC_DRIVER)) {
- ok= true;
+ if (fnc & (FNC_DSN | FNC_DRIVER)) {
+ ok = true;
#if defined(PROMPT_OK)
- } else if (!stricmp(thd->main_security_ctx.host, "localhost")
- && cop == 1) {
- if ((dsn = ODBCCheckConnection(g, dsn, cop)) != NULL) {
- thd->make_lex_string(&create_info->connect_string, dsn, strlen(dsn));
- ok= true;
- } // endif dsn
+ } else if (!stricmp(thd->main_security_ctx.host, "localhost")
+ && cop == 1) {
+ if ((dsn = ODBCCheckConnection(g, dsn, cop)) != NULL) {
+ thd->make_lex_string(&create_info->connect_string, dsn, strlen(dsn));
+ ok = true;
+ } // endif dsn
#endif // PROMPT_OK
- } else if (!dsn) {
- sprintf(g->Message, "Missing %s connection string", topt->type);
- } else {
- // Store ODBC additional parameters
- sop= (POPARM)PlugSubAlloc(g, NULL, sizeof(ODBCPARM));
- sop->User= (char*)user;
- sop->Pwd= (char*)pwd;
- sop->Cto= cto;
- sop->Qto= qto;
- sop->UseCnc= cnc;
- ok= true;
- } // endif's
-
- supfnc |= (FNC_TABLE | FNC_DSN | FNC_DRIVER);
- break;
+ } else if (!dsn) {
+ sprintf(g->Message, "Missing %s connection string", topt->type);
+ } else {
+ // Store ODBC additional parameters
+ sop = (POPARM)PlugSubAlloc(g, NULL, sizeof(ODBCPARM));
+ sop->User = (char*)user;
+ sop->Pwd = (char*)pwd;
+ sop->Cto = cto;
+ sop->Qto = qto;
+ sop->UseCnc = cnc;
+ ok = true;
+ } // endif's
+
+ supfnc |= (FNC_TABLE | FNC_DSN | FNC_DRIVER);
+ break;
#endif // ODBC_SUPPORT
#if defined(JDBC_SUPPORT)
- case TAB_JDBC:
- if (fnc & FNC_DRIVER) {
- ok= true;
- } else if (!(url= strz(g, create_info->connect_string))) {
- strcpy(g->Message, "Missing URL");
- } else {
- // Store JDBC additional parameters
- int rc;
- PJDBCDEF jdef= new(g) JDBCDEF();
-
- jdef->SetName(create_info->alias);
- sjp= (PJPARM)PlugSubAlloc(g, NULL, sizeof(JDBCPARM));
- sjp->Driver= driver;
-// sjp->Properties = prop;
- sjp->Fsize= 0;
- sjp->Scrollable= false;
-
- if ((rc = jdef->ParseURL(g, url, false)) == RC_OK) {
- sjp->Url= url;
- sjp->User= (char*)user;
- sjp->Pwd= (char*)pwd;
- ok= true;
- } else if (rc == RC_NF) {
- if (jdef->GetTabname())
- tab= jdef->GetTabname();
-
- ok= jdef->SetParms(sjp);
- } // endif rc
-
- } // endif's
-
- supfnc |= (FNC_DRIVER | FNC_TABLE);
- break;
+ case TAB_JDBC:
+ if (fnc & FNC_DRIVER) {
+ ok = true;
+ } else if (!(url = strz(g, create_info->connect_string))) {
+ strcpy(g->Message, "Missing URL");
+ } else {
+ // Store JDBC additional parameters
+ int rc;
+ PJDBCDEF jdef = new(g) JDBCDEF();
+
+ jdef->SetName(create_info->alias);
+ sjp = (PJPARM)PlugSubAlloc(g, NULL, sizeof(JDBCPARM));
+ sjp->Driver = driver;
+ // sjp->Properties = prop;
+ sjp->Fsize = 0;
+ sjp->Scrollable = false;
+
+ if ((rc = jdef->ParseURL(g, url, false)) == RC_OK) {
+ sjp->Url = url;
+ sjp->User = (char*)user;
+ sjp->Pwd = (char*)pwd;
+ ok = true;
+ } else if (rc == RC_NF) {
+ if (jdef->GetTabname())
+ tab = (char*)jdef->GetTabname();
+
+ ok = jdef->SetParms(sjp);
+ } // endif rc
+
+ } // endif's
+
+ supfnc |= (FNC_DRIVER | FNC_TABLE);
+ break;
#endif // JDBC_SUPPORT
case TAB_DBF:
dbf= true;
- // Passthru
+ // fall through
case TAB_CSV:
if (!fn && fnc != FNC_NO)
sprintf(g->Message, "Missing %s file name", topt->type);
@@ -5462,415 +5487,372 @@ static int connect_assisted_discovery(handlerton *, THD* thd,
else
ok= true;
- break;
- case TAB_MYSQL:
- ok= true;
+ break;
+ case TAB_MYSQL:
+ ok = true;
- if (create_info->connect_string.str &&
- create_info->connect_string.length) {
- PMYDEF mydef= new(g) MYSQLDEF();
+ if (create_info->connect_string.str &&
+ create_info->connect_string.length) {
+ PMYDEF mydef = new(g) MYSQLDEF();
- dsn= strz(g, create_info->connect_string);
- mydef->SetName(create_info->alias);
+ dsn = strz(g, create_info->connect_string);
+ mydef->SetName(create_info->alias);
- if (!mydef->ParseURL(g, dsn, false)) {
- if (mydef->GetHostname())
- host= mydef->GetHostname();
+ if (!mydef->ParseURL(g, dsn, false)) {
+ if (mydef->GetHostname())
+ host = mydef->GetHostname();
- if (mydef->GetUsername())
- user= mydef->GetUsername();
+ if (mydef->GetUsername())
+ user = mydef->GetUsername();
- if (mydef->GetPassword())
- pwd= mydef->GetPassword();
+ if (mydef->GetPassword())
+ pwd = mydef->GetPassword();
- if (mydef->GetTabschema())
- db = mydef->GetTabschema();
+ if (mydef->GetTabschema())
+ db = mydef->GetTabschema();
- if (mydef->GetTabname())
- tab= mydef->GetTabname();
+ if (mydef->GetTabname())
+ tab = (char*)mydef->GetTabname();
- if (mydef->GetPortnumber())
- port= mydef->GetPortnumber();
+ if (mydef->GetPortnumber())
+ port = mydef->GetPortnumber();
- } else
- ok= false;
+ } else
+ ok = false;
- } else if (!user)
- user= "root";
+ } else if (!user)
+ user = "root";
- if (ok && CheckSelf(g, table_s, host, db, tab, src, port))
- ok= false;
+ if (ok && CheckSelf(g, table_s, host, db, tab, src, port))
+ ok = false;
- break;
+ break;
#if defined(__WIN__)
- case TAB_WMI:
- ok= true;
- break;
+ case TAB_WMI:
+ ok = true;
+ break;
#endif // __WIN__
#if defined(PIVOT_SUPPORT)
- case TAB_PIVOT:
- supfnc= FNC_NO;
+ case TAB_PIVOT:
+ supfnc = FNC_NO;
#endif // PIVOT_SUPPORT
- case TAB_PRX:
- case TAB_TBL:
- case TAB_XCL:
- case TAB_OCCUR:
- if (!src && !stricmp(tab, create_info->alias) &&
- (!db || !stricmp(db, table_s->db.str)))
- sprintf(g->Message, "A %s table cannot refer to itself", topt->type);
- else
- ok= true;
+ case TAB_PRX:
+ case TAB_TBL:
+ case TAB_XCL:
+ case TAB_OCCUR:
+ if (!src && !stricmp(tab, create_info->alias) &&
+ (!db || !stricmp(db, table_s->db.str)))
+ sprintf(g->Message, "A %s table cannot refer to itself", topt->type);
+ else
+ ok = true;
- break;
- case TAB_OEM:
- if (topt->module && topt->subtype)
- ok= true;
- else
- strcpy(g->Message, "Missing OEM module or subtype");
+ break;
+ case TAB_OEM:
+ if (topt->module && topt->subtype)
+ ok = true;
+ else
+ strcpy(g->Message, "Missing OEM module or subtype");
- break;
+ break;
#if defined(LIBXML2_SUPPORT) || defined(DOMDOC_SUPPORT)
- case TAB_XML:
+ case TAB_XML:
#endif // LIBXML2_SUPPORT || DOMDOC_SUPPORT
- case TAB_JSON:
- if (!fn && !zfn && !mul)
- sprintf(g->Message, "Missing %s file name", topt->type);
- else
- ok= true;
-
- break;
- case TAB_VIR:
- ok= true;
- break;
- default:
- sprintf(g->Message, "Cannot get column info for table type %s", topt->type);
- break;
- } // endif ttp
+ case TAB_JSON:
+ if (!fn && !zfn && !mul)
+ sprintf(g->Message, "Missing %s file name", topt->type);
+ else
+ ok = true;
- // Check for supported catalog function
- if (ok && !(supfnc & fnc)) {
- sprintf(g->Message, "Unsupported catalog function %s for table type %s",
- fncn, topt->type);
- ok= false;
- } // endif supfnc
-
- if (src && fnc != FNC_NO) {
- strcpy(g->Message, "Cannot make catalog table from srcdef");
- ok= false;
- } // endif src
-
- if (ok) {
- char *cnm, *rem, *dft, *xtra, *key, *fmt;
- int i, len, prec, dec, typ, flg;
-
-// if (cat)
-// cat->SetDataPath(g, table_s->db.str);
-// else
-// return HA_ERR_INTERNAL_ERROR; // Should never happen
-
- dpath= SetPath(g, table_s->db.str);
+ break;
+ case TAB_VIR:
+ ok = true;
+ break;
+ default:
+ sprintf(g->Message, "Cannot get column info for table type %s", topt->type);
+ break;
+ } // endif ttp
+
+ // Check for supported catalog function
+ if (ok && !(supfnc & fnc)) {
+ sprintf(g->Message, "Unsupported catalog function %s for table type %s",
+ fncn, topt->type);
+ ok = false;
+ } // endif supfnc
+
+ if (src && fnc != FNC_NO) {
+ strcpy(g->Message, "Cannot make catalog table from srcdef");
+ ok = false;
+ } // endif src
+
+ if (ok) {
+ const char *cnm, *rem;
+ char *dft, *xtra, *key, *fmt;
+ int i, len, prec, dec, typ, flg;
+
+ if (!(dpath = SetPath(g, table_s->db.str))) {
+ rc = HA_ERR_INTERNAL_ERROR;
+ goto err;
+ } // endif dpath
- if (src && ttp != TAB_PIVOT && ttp != TAB_ODBC && ttp != TAB_JDBC) {
- qrp= SrcColumns(g, host, db, user, pwd, src, port);
+ if (src && ttp != TAB_PIVOT && ttp != TAB_ODBC && ttp != TAB_JDBC) {
+ qrp = SrcColumns(g, host, db, user, pwd, src, port);
- if (qrp && ttp == TAB_OCCUR)
- if (OcrSrcCols(g, qrp, col, ocl, rnk)) {
- my_message(ER_UNKNOWN_ERROR, g->Message, MYF(0));
- goto err;
- } // endif OcrSrcCols
+ if (qrp && ttp == TAB_OCCUR)
+ if (OcrSrcCols(g, qrp, col, ocl, rnk)) {
+ rc = HA_ERR_INTERNAL_ERROR;
+ goto err;
+ } // endif OcrSrcCols
- } else switch (ttp) {
- case TAB_DBF:
- qrp= DBFColumns(g, dpath, fn, fnc == FNC_COL);
- break;
+ } else switch (ttp) {
+ case TAB_DBF:
+ qrp = DBFColumns(g, dpath, fn, fnc == FNC_COL);
+ break;
#if defined(ODBC_SUPPORT)
- case TAB_ODBC:
- switch (fnc) {
- case FNC_NO:
- case FNC_COL:
- if (src) {
- qrp= ODBCSrcCols(g, dsn, (char*)src, sop);
- src= NULL; // for next tests
- } else
- qrp= ODBCColumns(g, dsn, shm, tab, NULL,
- mxr, fnc == FNC_COL, sop);
+ case TAB_ODBC:
+ switch (fnc) {
+ case FNC_NO:
+ case FNC_COL:
+ if (src) {
+ qrp = ODBCSrcCols(g, dsn, (char*)src, sop);
+ src = NULL; // for next tests
+ } else
+ qrp = ODBCColumns(g, dsn, shm, tab, NULL,
+ mxr, fnc == FNC_COL, sop);
- break;
- case FNC_TABLE:
- qrp= ODBCTables(g, dsn, shm, tab, NULL, mxr, true, sop);
- break;
- case FNC_DSN:
- qrp= ODBCDataSources(g, mxr, true);
- break;
- case FNC_DRIVER:
- qrp= ODBCDrivers(g, mxr, true);
- break;
- default:
- sprintf(g->Message, "invalid catfunc %s", fncn);
- break;
- } // endswitch info
+ break;
+ case FNC_TABLE:
+ qrp = ODBCTables(g, dsn, shm, tab, NULL, mxr, true, sop);
+ break;
+ case FNC_DSN:
+ qrp = ODBCDataSources(g, mxr, true);
+ break;
+ case FNC_DRIVER:
+ qrp = ODBCDrivers(g, mxr, true);
+ break;
+ default:
+ sprintf(g->Message, "invalid catfunc %s", fncn);
+ break;
+ } // endswitch info
- break;
+ break;
#endif // ODBC_SUPPORT
#if defined(JDBC_SUPPORT)
- case TAB_JDBC:
- switch (fnc) {
- case FNC_NO:
- case FNC_COL:
- if (src) {
- qrp= JDBCSrcCols(g, (char*)src, sjp);
- src= NULL; // for next tests
- } else
- qrp= JDBCColumns(g, shm, tab, NULL, mxr, fnc == FNC_COL, sjp);
+ case TAB_JDBC:
+ switch (fnc) {
+ case FNC_NO:
+ case FNC_COL:
+ if (src) {
+ qrp = JDBCSrcCols(g, (char*)src, sjp);
+ src = NULL; // for next tests
+ } else
+ qrp = JDBCColumns(g, shm, tab, NULL, mxr, fnc == FNC_COL, sjp);
- break;
- case FNC_TABLE:
- qrp= JDBCTables(g, shm, tab, tabtyp, mxr, true, sjp);
- break;
+ break;
+ case FNC_TABLE:
+ qrp = JDBCTables(g, shm, tab, tabtyp, mxr, true, sjp);
+ break;
#if 0
- case FNC_DSN:
- qrp= JDBCDataSources(g, mxr, true);
- break;
+ case FNC_DSN:
+ qrp = JDBCDataSources(g, mxr, true);
+ break;
#endif // 0
- case FNC_DRIVER:
- qrp= JDBCDrivers(g, mxr, true);
- break;
- default:
- sprintf(g->Message, "invalid catfunc %s", fncn);
- break;
- } // endswitch info
+ case FNC_DRIVER:
+ qrp = JDBCDrivers(g, mxr, true);
+ break;
+ default:
+ sprintf(g->Message, "invalid catfunc %s", fncn);
+ break;
+ } // endswitch info
- break;
+ break;
#endif // JDBC_SUPPORT
- case TAB_MYSQL:
- qrp= MyColumns(g, thd, host, db, user, pwd, tab,
- NULL, port, fnc == FNC_COL);
- break;
- case TAB_CSV:
- qrp = CSVColumns(g, dpath, topt, fnc == FNC_COL);
- break;
+ case TAB_MYSQL:
+ qrp = MyColumns(g, thd, host, db, user, pwd, tab,
+ NULL, port, fnc == FNC_COL);
+ break;
+ case TAB_CSV:
+ qrp = CSVColumns(g, dpath, topt, fnc == FNC_COL);
+ break;
#if defined(__WIN__)
- case TAB_WMI:
- qrp= WMIColumns(g, nsp, cls, fnc == FNC_COL);
- break;
+ case TAB_WMI:
+ qrp = WMIColumns(g, nsp, cls, fnc == FNC_COL);
+ break;
#endif // __WIN__
- case TAB_PRX:
- case TAB_TBL:
- case TAB_XCL:
- case TAB_OCCUR:
- bif= fnc == FNC_COL;
- qrp= TabColumns(g, thd, db, tab, bif);
-
- if (!qrp && bif && fnc != FNC_COL) // tab is a view
- qrp= MyColumns(g, thd, host, db, user, pwd, tab, NULL, port, false);
-
- if (qrp && ttp == TAB_OCCUR && fnc != FNC_COL)
- if (OcrColumns(g, qrp, col, ocl, rnk)) {
- my_message(ER_UNKNOWN_ERROR, g->Message, MYF(0));
- goto err;
- } // endif OcrColumns
+ case TAB_PRX:
+ case TAB_TBL:
+ case TAB_XCL:
+ case TAB_OCCUR:
+ bif = fnc == FNC_COL;
+ qrp = TabColumns(g, thd, db, tab, bif);
+
+ if (!qrp && bif && fnc != FNC_COL) // tab is a view
+ qrp = MyColumns(g, thd, host, db, user, pwd, tab, NULL, port, false);
+
+ if (qrp && ttp == TAB_OCCUR && fnc != FNC_COL)
+ if (OcrColumns(g, qrp, col, ocl, rnk)) {
+ rc = HA_ERR_INTERNAL_ERROR;
+ goto err;
+ } // endif OcrColumns
- break;
+ break;
#if defined(PIVOT_SUPPORT)
- case TAB_PIVOT:
- qrp= PivotColumns(g, tab, src, pic, fcl, skc, host, db, user, pwd, port);
- break;
+ case TAB_PIVOT:
+ qrp = PivotColumns(g, tab, src, pic, fcl, skc, host, db, user, pwd, port);
+ break;
#endif // PIVOT_SUPPORT
- case TAB_VIR:
- qrp= VirColumns(g, fnc == FNC_COL);
- break;
- case TAB_JSON:
- qrp= JSONColumns(g, (char*)db, topt, fnc == FNC_COL);
- break;
+ case TAB_VIR:
+ qrp = VirColumns(g, fnc == FNC_COL);
+ break;
+ case TAB_JSON:
+ qrp = JSONColumns(g, (char*)db, topt, fnc == FNC_COL);
+ break;
#if defined(LIBXML2_SUPPORT) || defined(DOMDOC_SUPPORT)
- case TAB_XML:
- qrp= XMLColumns(g, (char*)db, tab, topt, fnc == FNC_COL);
- break;
+ case TAB_XML:
+ qrp = XMLColumns(g, (char*)db, tab, topt, fnc == FNC_COL);
+ break;
#endif // LIBXML2_SUPPORT || DOMDOC_SUPPORT
- case TAB_OEM:
- qrp= OEMColumns(g, topt, tab, (char*)db, fnc == FNC_COL);
- break;
- default:
- strcpy(g->Message, "System error during assisted discovery");
- break;
- } // endswitch ttp
-
- if (!qrp) {
- my_message(ER_UNKNOWN_ERROR, g->Message, MYF(0));
- goto err;
- } // endif !qrp
-
- if (fnc != FNC_NO || src || ttp == TAB_PIVOT) {
- // Catalog like table
- for (crp= qrp->Colresp; !rc && crp; crp= crp->Next) {
- cnm= (ttp == TAB_PIVOT) ? crp->Name : encode(g, crp->Name);
- typ= crp->Type;
- len= crp->Length;
- dec= crp->Prec;
- flg= crp->Flag;
- v= (crp->Kdata->IsUnsigned()) ? 'U' : crp->Var;
- tm= (crp->Kdata->IsNullable()) ? 0 : NOT_NULL_FLAG;
-
- if (!len && typ == TYPE_STRING)
- len= 256; // STRBLK's have 0 length
-
- // Now add the field
- if (add_field(&sql, cnm, typ, len, dec, NULL, tm,
- NULL, NULL, NULL, NULL, flg, dbf, v))
- rc= HA_ERR_OUT_OF_MEM;
- } // endfor crp
-
- } else {
- char *schem= NULL;
- char *tn= NULL;
-
- // Not a catalog table
- if (!qrp->Nblin) {
- if (tab)
- sprintf(g->Message, "Cannot get columns from %s", tab);
- else
- strcpy(g->Message, "Fail to retrieve columns");
-
- my_message(ER_UNKNOWN_ERROR, g->Message, MYF(0));
- goto err;
- } // endif !nblin
-
- for (i= 0; !rc && i < qrp->Nblin; i++) {
- typ= len= prec= dec= 0;
- tm= NOT_NULL_FLAG;
- cnm= (char*)"noname";
- dft= xtra= key= fmt= tn= NULL;
- v= ' ';
- rem= NULL;
-
- for (crp= qrp->Colresp; crp; crp= crp->Next)
- switch (crp->Fld) {
- case FLD_NAME:
- if (ttp == TAB_PRX ||
- (ttp == TAB_CSV && topt->data_charset &&
- (!stricmp(topt->data_charset, "UTF8") ||
- !stricmp(topt->data_charset, "UTF-8"))))
- cnm= crp->Kdata->GetCharValue(i);
- else
- cnm= encode(g, crp->Kdata->GetCharValue(i));
-
- break;
- case FLD_TYPE:
- typ= crp->Kdata->GetIntValue(i);
- v = (crp->Nulls) ? crp->Nulls[i] : 0;
- break;
- case FLD_TYPENAME:
- tn= crp->Kdata->GetCharValue(i);
- break;
- case FLD_PREC:
- // PREC must be always before LENGTH
- len= prec= crp->Kdata->GetIntValue(i);
- break;
- case FLD_LENGTH:
- len= crp->Kdata->GetIntValue(i);
- break;
- case FLD_SCALE:
- dec = (!crp->Kdata->IsNull(i)) ? crp->Kdata->GetIntValue(i) : -1;
- break;
- case FLD_NULL:
- if (crp->Kdata->GetIntValue(i))
- tm= 0; // Nullable
-
- break;
- case FLD_FORMAT:
- fmt= (crp->Kdata) ? crp->Kdata->GetCharValue(i) : NULL;
- break;
- case FLD_REM:
- rem= crp->Kdata->GetCharValue(i);
- break;
-// case FLD_CHARSET:
- // No good because remote table is already translated
-// if (*(csn= crp->Kdata->GetCharValue(i)))
-// cs= get_charset_by_name(csn, 0);
-
-// break;
- case FLD_DEFAULT:
- dft= crp->Kdata->GetCharValue(i);
- break;
- case FLD_EXTRA:
- xtra= crp->Kdata->GetCharValue(i);
-
- // Auto_increment is not supported yet
- if (!stricmp(xtra, "AUTO_INCREMENT"))
- xtra= NULL;
-
- break;
- case FLD_KEY:
- if (ttp == TAB_VIR)
- key= crp->Kdata->GetCharValue(i);
+ case TAB_OEM:
+ qrp = OEMColumns(g, topt, tab, (char*)db, fnc == FNC_COL);
+ break;
+ default:
+ strcpy(g->Message, "System error during assisted discovery");
+ break;
+ } // endswitch ttp
- break;
- case FLD_SCHEM:
+ if (!qrp) {
+ rc = HA_ERR_INTERNAL_ERROR;
+ goto err;
+ } // endif !qrp
+
+ if (fnc != FNC_NO || src || ttp == TAB_PIVOT) {
+ // Catalog like table
+ for (crp = qrp->Colresp; !rc && crp; crp = crp->Next) {
+ cnm = (ttp == TAB_PIVOT) ? crp->Name : encode(g, crp->Name);
+ typ = crp->Type;
+ len = crp->Length;
+ dec = crp->Prec;
+ flg = crp->Flag;
+ v = (crp->Kdata->IsUnsigned()) ? 'U' : crp->Var;
+ tm = (crp->Kdata->IsNullable()) ? 0 : NOT_NULL_FLAG;
+
+ if (!len && typ == TYPE_STRING)
+ len = 256; // STRBLK's have 0 length
+
+ // Now add the field
+ if (add_field(&sql, cnm, typ, len, dec, NULL, tm,
+ NULL, NULL, NULL, NULL, flg, dbf, v))
+ rc = HA_ERR_OUT_OF_MEM;
+ } // endfor crp
+
+ } else {
+ char *schem = NULL;
+ char *tn = NULL;
+
+ // Not a catalog table
+ if (!qrp->Nblin) {
+ if (tab)
+ sprintf(g->Message, "Cannot get columns from %s", tab);
+ else
+ strcpy(g->Message, "Fail to retrieve columns");
+
+ rc = HA_ERR_INTERNAL_ERROR;
+ goto err;
+ } // endif !nblin
+
+ for (i = 0; !rc && i < qrp->Nblin; i++) {
+ typ = len = prec = dec = 0;
+ tm = NOT_NULL_FLAG;
+ cnm = (char*)"noname";
+ dft = xtra = key = fmt = tn = NULL;
+ v = ' ';
+ rem = NULL;
+
+ for (crp = qrp->Colresp; crp; crp = crp->Next)
+ switch (crp->Fld) {
+ case FLD_NAME:
+ if (ttp == TAB_PRX ||
+ (ttp == TAB_CSV && topt->data_charset &&
+ (!stricmp(topt->data_charset, "UTF8") ||
+ !stricmp(topt->data_charset, "UTF-8"))))
+ cnm = crp->Kdata->GetCharValue(i);
+ else
+ cnm = encode(g, crp->Kdata->GetCharValue(i));
+
+ break;
+ case FLD_TYPE:
+ typ = crp->Kdata->GetIntValue(i);
+ v = (crp->Nulls) ? crp->Nulls[i] : 0;
+ break;
+ case FLD_TYPENAME:
+ tn = crp->Kdata->GetCharValue(i);
+ break;
+ case FLD_PREC:
+ // PREC must be always before LENGTH
+ len = prec = crp->Kdata->GetIntValue(i);
+ break;
+ case FLD_LENGTH:
+ len = crp->Kdata->GetIntValue(i);
+ break;
+ case FLD_SCALE:
+ dec = (!crp->Kdata->IsNull(i)) ? crp->Kdata->GetIntValue(i) : -1;
+ break;
+ case FLD_NULL:
+ if (crp->Kdata->GetIntValue(i))
+ tm = 0; // Nullable
+
+ break;
+ case FLD_FORMAT:
+ fmt = (crp->Kdata) ? crp->Kdata->GetCharValue(i) : NULL;
+ break;
+ case FLD_REM:
+ rem = crp->Kdata->GetCharValue(i);
+ break;
+ // case FLD_CHARSET:
+ // No good because remote table is already translated
+ // if (*(csn= crp->Kdata->GetCharValue(i)))
+ // cs= get_charset_by_name(csn, 0);
+
+ // break;
+ case FLD_DEFAULT:
+ dft = crp->Kdata->GetCharValue(i);
+ break;
+ case FLD_EXTRA:
+ xtra = crp->Kdata->GetCharValue(i);
+
+ // Auto_increment is not supported yet
+ if (!stricmp(xtra, "AUTO_INCREMENT"))
+ xtra = NULL;
+
+ break;
+ case FLD_KEY:
+ if (ttp == TAB_VIR)
+ key = crp->Kdata->GetCharValue(i);
+
+ break;
+ case FLD_SCHEM:
#if defined(ODBC_SUPPORT) || defined(JDBC_SUPPORT)
- if ((ttp == TAB_ODBC || ttp == TAB_JDBC) && crp->Kdata) {
- if (schem && stricmp(schem, crp->Kdata->GetCharValue(i))) {
- sprintf(g->Message,
- "Several %s tables found, specify DBNAME", tab);
- my_message(ER_UNKNOWN_ERROR, g->Message, MYF(0));
- goto err;
- } else if (!schem)
- schem= crp->Kdata->GetCharValue(i);
-
- } // endif ttp
+ if ((ttp == TAB_ODBC || ttp == TAB_JDBC) && crp->Kdata) {
+ if (schem && stricmp(schem, crp->Kdata->GetCharValue(i))) {
+ sprintf(g->Message,
+ "Several %s tables found, specify DBNAME", tab);
+ rc = HA_ERR_INTERNAL_ERROR;
+ goto err;
+ } else if (!schem)
+ schem = crp->Kdata->GetCharValue(i);
+
+ } // endif ttp
#endif // ODBC_SUPPORT || JDBC_SUPPORT
- default:
- break; // Ignore
- } // endswitch Fld
+ default:
+ break; // Ignore
+ } // endswitch Fld
#if defined(ODBC_SUPPORT)
- if (ttp == TAB_ODBC) {
- int plgtyp;
- bool w= false; // Wide character type
-
- // typ must be PLG type, not SQL type
- if (!(plgtyp= TranslateSQLType(typ, dec, prec, v, w))) {
- if (GetTypeConv() == TPC_SKIP) {
- // Skip this column
- sprintf(g->Message, "Column %s skipped (unsupported type %d)",
- cnm, typ);
- push_warning(thd, Sql_condition::WARN_LEVEL_WARN, 0, g->Message);
- continue;
- } else {
- sprintf(g->Message, "Unsupported SQL type %d", typ);
- my_message(ER_UNKNOWN_ERROR, g->Message, MYF(0));
- goto err;
- } // endif type_conv
-
- } else
- typ= plgtyp;
-
- switch (typ) {
- case TYPE_STRING:
- if (w) {
- sprintf(g->Message, "Column %s is wide characters", cnm);
- push_warning(thd, Sql_condition::WARN_LEVEL_NOTE, 0, g->Message);
- } // endif w
-
- break;
- case TYPE_DOUBLE:
- // Some data sources do not count dec in length (prec)
- prec += (dec + 2); // To be safe
- break;
- case TYPE_DECIM:
- prec= len;
- break;
- default:
- dec= 0;
- } // endswitch typ
-
- } else
-#endif // ODBC_SUPPORT
-#if defined(JDBC_SUPPORT)
- if (ttp == TAB_JDBC) {
+ if (ttp == TAB_ODBC) {
int plgtyp;
+ bool w = false; // Wide character type
// typ must be PLG type, not SQL type
- if (!(plgtyp= TranslateJDBCType(typ, tn, dec, prec, v))) {
+ if (!(plgtyp = TranslateSQLType(typ, dec, prec, v, w))) {
if (GetTypeConv() == TPC_SKIP) {
// Skip this column
sprintf(g->Message, "Column %s skipped (unsupported type %d)",
@@ -5879,55 +5861,107 @@ static int connect_assisted_discovery(handlerton *, THD* thd,
continue;
} else {
sprintf(g->Message, "Unsupported SQL type %d", typ);
- my_message(ER_UNKNOWN_ERROR, g->Message, MYF(0));
+ rc = HA_ERR_INTERNAL_ERROR;
goto err;
} // endif type_conv
} else
- typ= plgtyp;
+ typ = plgtyp;
switch (typ) {
- case TYPE_DOUBLE:
- case TYPE_DECIM:
- // Some data sources do not count dec in length (prec)
- prec += (dec + 2); // To be safe
- break;
- default:
- dec= 0;
+ case TYPE_STRING:
+ if (w) {
+ sprintf(g->Message, "Column %s is wide characters", cnm);
+ push_warning(thd, Sql_condition::WARN_LEVEL_NOTE, 0, g->Message);
+ } // endif w
+
+ break;
+ case TYPE_DOUBLE:
+ // Some data sources do not count dec in length (prec)
+ prec += (dec + 2); // To be safe
+ break;
+ case TYPE_DECIM:
+ prec = len;
+ break;
+ default:
+ dec = 0;
} // endswitch typ
} else
#endif // ODBC_SUPPORT
- // Make the arguments as required by add_fields
- if (typ == TYPE_DOUBLE)
- prec= len;
+#if defined(JDBC_SUPPORT)
+ if (ttp == TAB_JDBC) {
+ int plgtyp;
+
+ // typ must be PLG type, not SQL type
+ if (!(plgtyp = TranslateJDBCType(typ, tn, dec, prec, v))) {
+ if (GetTypeConv() == TPC_SKIP) {
+ // Skip this column
+ sprintf(g->Message, "Column %s skipped (unsupported type %d)",
+ cnm, typ);
+ push_warning(thd, Sql_condition::WARN_LEVEL_WARN, 0, g->Message);
+ continue;
+ } else {
+ sprintf(g->Message, "Unsupported SQL type %d", typ);
+ rc = HA_ERR_INTERNAL_ERROR;
+ goto err;
+ } // endif type_conv
+
+ } else
+ typ = plgtyp;
+
+ switch (typ) {
+ case TYPE_DOUBLE:
+ case TYPE_DECIM:
+ // Some data sources do not count dec in length (prec)
+ prec += (dec + 2); // To be safe
+ break;
+ default:
+ dec = 0;
+ } // endswitch typ
- if (typ == TYPE_DATE)
- prec= 0;
+ } else
+#endif // JDBC_SUPPORT
+ // Make the arguments as required by add_fields
+ if (typ == TYPE_DOUBLE)
+ prec = len;
- // Now add the field
- if (add_field(&sql, cnm, typ, prec, dec, key, tm, rem, dft, xtra,
- fmt, 0, dbf, v))
- rc= HA_ERR_OUT_OF_MEM;
- } // endfor i
+ if (typ == TYPE_DATE)
+ prec = 0;
- } // endif fnc
+ // Now add the field
+ if (add_field(&sql, cnm, typ, prec, dec, key, tm, rem, dft, xtra,
+ fmt, 0, dbf, v))
+ rc = HA_ERR_OUT_OF_MEM;
+ } // endfor i
- if (!rc)
- rc= init_table_share(thd, table_s, create_info, &sql);
+ } // endif fnc
- g->jump_level--;
- PopUser(xp);
- return rc;
- } // endif ok
+ if (!rc)
+ rc = init_table_share(thd, table_s, create_info, &sql);
- my_message(ER_UNKNOWN_ERROR, g->Message, MYF(0));
+ //g->jump_level--;
+ //PopUser(xp);
+ //return rc;
+ } else {
+ rc = HA_ERR_UNSUPPORTED;
+ } // endif ok
+
+ } catch (int n) {
+ if (trace)
+ htrc("Exception %d: %s\n", n, g->Message);
+ rc = HA_ERR_INTERNAL_ERROR;
+ } catch (const char *msg) {
+ strcpy(g->Message, msg);
+ rc = HA_ERR_INTERNAL_ERROR;
+ } // end catch
err:
- g->jump_level--;
- jer:
+ if (rc)
+ my_message(ER_UNKNOWN_ERROR, g->Message, MYF(0));
+
PopUser(xp);
- return HA_ERR_INTERNAL_ERROR;
+ return rc;
} // end of connect_assisted_discovery
/**
@@ -6092,8 +6126,8 @@ int ha_connect::create(const char *name, TABLE *table_arg,
if (!part_info)
#endif // WITH_PARTITION_STORAGE_ENGINE
{const char *src= options->srcdef;
- char *host, *db, *tab= (char*)options->tabname;
- int port;
+ PCSZ host, db, tab= options->tabname;
+ int port;
host= GetListOption(g, "host", options->oplist, NULL);
db= GetStringOption("database", NULL);
@@ -6137,8 +6171,8 @@ int ha_connect::create(const char *name, TABLE *table_arg,
} // endswitch ttp
if (type == TAB_XML) {
- bool dom; // True: MS-DOM, False libxml2
- char *xsup= GetListOption(g, "Xmlsup", options->oplist, "*");
+ bool dom; // True: MS-DOM, False libxml2
+ PCSZ xsup= GetListOption(g, "Xmlsup", options->oplist, "*");
// Note that if no support is specified, the default is MS-DOM
// on Windows and libxml2 otherwise
@@ -6398,15 +6432,15 @@ int ha_connect::create(const char *name, TABLE *table_arg,
if (options->zipped) {
// Check whether the zip entry must be made from a file
- char *fn = GetListOption(g, "Load", options->oplist, NULL);
+ PCSZ fn = GetListOption(g, "Load", options->oplist, NULL);
if (fn) {
- char zbuf[_MAX_PATH], buf[_MAX_PATH], dbpath[_MAX_PATH];
- char *entry = GetListOption(g, "Entry", options->oplist, NULL);
- char *a = GetListOption(g, "Append", options->oplist, "NO");
- bool append = *a == '1' || *a == 'Y' || *a == 'y' || !stricmp(a, "ON");
- char *m = GetListOption(g, "Mulentries", options->oplist, "NO");
- bool mul = *m == '1' || *m == 'Y' || *m == 'y' || !stricmp(m, "ON");
+ char zbuf[_MAX_PATH], buf[_MAX_PATH], dbpath[_MAX_PATH];
+ PCSZ entry = GetListOption(g, "Entry", options->oplist, NULL);
+ PCSZ a = GetListOption(g, "Append", options->oplist, "NO");
+ bool append = *a == '1' || *a == 'Y' || *a == 'y' || !stricmp(a, "ON");
+ PCSZ m = GetListOption(g, "Mulentries", options->oplist, "NO");
+ bool mul = *m == '1' || *m == 'Y' || *m == 'y' || !stricmp(m, "ON");
if (!entry && !mul) {
my_message(ER_UNKNOWN_ERROR, "Missing entry name", MYF(0));
@@ -6474,11 +6508,10 @@ int ha_connect::create(const char *name, TABLE *table_arg,
PDBUSER dup= PlgGetUser(g);
PCATLG cat= (dup) ? dup->Catalog : NULL;
- SetDataPath(g, table_arg->s->db.str);
-
- if (cat) {
-// cat->SetDataPath(g, table_arg->s->db.str);
-
+ if (SetDataPath(g, table_arg->s->db.str)) {
+ my_message(ER_UNKNOWN_ERROR, g->Message, MYF(0));
+ rc = HA_ERR_INTERNAL_ERROR;
+ } else if (cat) {
#if defined(WITH_PARTITION_STORAGE_ENGINE)
if (part_info)
strncpy(partname,
@@ -6534,8 +6567,9 @@ bool ha_connect::FileExists(const char *fn, bool bf)
return true;
if (table) {
- char *s, tfn[_MAX_PATH], filename[_MAX_PATH], path[_MAX_PATH];
- bool b= false;
+ const char *s;
+ char tfn[_MAX_PATH], filename[_MAX_PATH], path[_MAX_PATH];
+ bool b= false;
int n;
struct stat info;
@@ -6592,9 +6626,9 @@ bool ha_connect::CheckString(const char *str1, const char *str2)
/**
check whether a string option has changed
*/
-bool ha_connect::SameString(TABLE *tab, char *opn)
+bool ha_connect::SameString(TABLE *tab, PCSZ opn)
{
- char *str1, *str2;
+ PCSZ str1, str2;
tshp= tab->s; // The altered table
str1= GetStringOption(opn);
@@ -6606,7 +6640,7 @@ bool ha_connect::SameString(TABLE *tab, char *opn)
/**
check whether a Boolean option has changed
*/
-bool ha_connect::SameBool(TABLE *tab, char *opn)
+bool ha_connect::SameBool(TABLE *tab, PCSZ opn)
{
bool b1, b2;
@@ -6620,7 +6654,7 @@ bool ha_connect::SameBool(TABLE *tab, char *opn)
/**
check whether an integer option has changed
*/
-bool ha_connect::SameInt(TABLE *tab, char *opn)
+bool ha_connect::SameInt(TABLE *tab, PCSZ opn)
{
int i1, i2;
@@ -6799,7 +6833,7 @@ ha_connect::check_if_supported_inplace_alter(TABLE *altered_table,
// Conversion to outward table is only allowed for file based
// tables whose file does not exist.
tshp= altered_table->s;
- char *fn= GetStringOption("filename");
+ PCSZ fn= GetStringOption("filename");
tshp= NULL;
if (FileExists(fn, false)) {
@@ -7044,10 +7078,10 @@ maria_declare_plugin(connect)
PLUGIN_LICENSE_GPL,
connect_init_func, /* Plugin Init */
connect_done_func, /* Plugin Deinit */
- 0x0105, /* version number (1.05) */
+ 0x0106, /* version number (1.06) */
NULL, /* status variables */
connect_system_variables, /* system variables */
- "1.05.0003", /* string version */
- MariaDB_PLUGIN_MATURITY_GAMMA /* maturity */
+ "1.06.0001", /* string version */
+ MariaDB_PLUGIN_MATURITY_BETA /* maturity */
}
maria_declare_plugin_end;
diff --git a/storage/connect/ha_connect.h b/storage/connect/ha_connect.h
index de735668133..3788a3882b6 100644
--- a/storage/connect/ha_connect.h
+++ b/storage/connect/ha_connect.h
@@ -61,7 +61,7 @@ public:
oldopn= newopn= NULL;
oldpix= newpix= NULL;}
- inline char *SetName(PGLOBAL g, char *name) {return PlugDup(g, name);}
+ inline char *SetName(PGLOBAL g, PCSZ name) {return PlugDup(g, name);}
bool oldsep; // Sepindex before create/alter
bool newsep; // Sepindex after create/alter
@@ -168,18 +168,18 @@ public:
static bool connect_init(void);
static bool connect_end(void);
TABTYPE GetRealType(PTOS pos= NULL);
- char *GetRealString(const char *s);
- char *GetStringOption(char *opname, char *sdef= NULL);
+ char *GetRealString(PCSZ s);
+ PCSZ GetStringOption(PCSZ opname, PCSZ sdef= NULL);
PTOS GetTableOptionStruct(TABLE_SHARE *s= NULL);
- bool GetBooleanOption(char *opname, bool bdef);
- bool SetBooleanOption(char *opname, bool b);
- int GetIntegerOption(char *opname);
- bool GetIndexOption(KEY *kp, char *opname);
- bool CheckString(const char *str1, const char *str2);
- bool SameString(TABLE *tab, char *opn);
- bool SetIntegerOption(char *opname, int n);
- bool SameInt(TABLE *tab, char *opn);
- bool SameBool(TABLE *tab, char *opn);
+ bool GetBooleanOption(PCSZ opname, bool bdef);
+ bool SetBooleanOption(PCSZ opname, bool b);
+ int GetIntegerOption(PCSZ opname);
+ bool GetIndexOption(KEY *kp, PCSZ opname);
+ bool CheckString(PCSZ str1, PCSZ str2);
+ bool SameString(TABLE *tab, PCSZ opn);
+ bool SetIntegerOption(PCSZ opname, int n);
+ bool SameInt(TABLE *tab, PCSZ opn);
+ bool SameBool(TABLE *tab, PCSZ opn);
bool FileExists(const char *fn, bool bf);
bool NoFieldOptionChange(TABLE *tab);
PFOS GetFieldOptionStruct(Field *fp);
@@ -187,8 +187,8 @@ public:
PXOS GetIndexOptionStruct(KEY *kp);
PIXDEF GetIndexInfo(TABLE_SHARE *s= NULL);
bool CheckVirtualIndex(TABLE_SHARE *s);
- const char *GetDBName(const char *name);
- const char *GetTableName(void);
+ PCSZ GetDBName(PCSZ name);
+ PCSZ GetTableName(void);
char *GetPartName(void);
//int GetColNameLen(Field *fp);
//char *GetColName(Field *fp);
@@ -197,9 +197,9 @@ public:
bool IsSameIndex(PIXDEF xp1, PIXDEF xp2);
bool IsPartitioned(void);
bool IsUnique(uint n);
- char *GetDataPath(void) {return (char*)datapath;}
+ PCSZ GetDataPath(void) {return datapath;}
- void SetDataPath(PGLOBAL g, const char *path);
+ bool SetDataPath(PGLOBAL g, PCSZ path);
PTDB GetTDB(PGLOBAL g);
int OpenTable(PGLOBAL g, bool del= false);
bool CheckColumnList(PGLOBAL g);
@@ -513,7 +513,7 @@ protected:
ulong hnum; // The number of this handler
query_id_t valid_query_id; // The one when tdbp was allocated
query_id_t creat_query_id; // The one when handler was allocated
- char *datapath; // Is the Path of DB data directory
+ PCSZ datapath; // Is the Path of DB data directory
PTDB tdbp; // To table class object
PVAL sdvalin1; // Used to convert date values
PVAL sdvalin2; // Used to convert date values
diff --git a/storage/connect/jdbccat.h b/storage/connect/jdbccat.h
index 7108aa376ce..0b87df8bb51 100644
--- a/storage/connect/jdbccat.h
+++ b/storage/connect/jdbccat.h
@@ -4,10 +4,10 @@
typedef struct jdbc_parms {
int CheckSize(int rows);
- char *Driver; // JDBC driver
- char *Url; // Driver URL
- char *User; // User connect info
- char *Pwd; // Password connect info
+ PCSZ Driver; // JDBC driver
+ PCSZ Url; // Driver URL
+ PCSZ User; // User connect info
+ PCSZ Pwd; // Password connect info
//char *Properties; // Connection property list
//int Cto; // Connect timeout
//int Qto; // Query timeout
@@ -19,12 +19,12 @@ typedef struct jdbc_parms {
/* JDBC catalog function prototypes. */
/***********************************************************************/
#if defined(PROMPT_OK)
-char *JDBCCheckConnection(PGLOBAL g, char *dsn, int cop);
+char *JDBCCheckConnection(PGLOBAL g, PCSZ dsn, int cop);
#endif // PROMPT_OK
//PQRYRES JDBCDataSources(PGLOBAL g, int maxres, bool info);
-PQRYRES JDBCColumns(PGLOBAL g, char *db, char *table,
- char *colpat, int maxres, bool info, PJPARM sop);
-PQRYRES JDBCSrcCols(PGLOBAL g, char *src, PJPARM sop);
-PQRYRES JDBCTables(PGLOBAL g, char *db, char *tabpat,
- char *tabtyp, int maxres, bool info, PJPARM sop);
+PQRYRES JDBCColumns(PGLOBAL g, PCSZ db, PCSZ table,
+ PCSZ colpat, int maxres, bool info, PJPARM sop);
+PQRYRES JDBCSrcCols(PGLOBAL g, PCSZ src, PJPARM sop);
+PQRYRES JDBCTables(PGLOBAL g, PCSZ db, PCSZ tabpat,
+ PCSZ tabtyp, int maxres, bool info, PJPARM sop);
PQRYRES JDBCDrivers(PGLOBAL g, int maxres, bool info);
diff --git a/storage/connect/jdbconn.cpp b/storage/connect/jdbconn.cpp
index c1d077406b7..f162a7ae645 100644
--- a/storage/connect/jdbconn.cpp
+++ b/storage/connect/jdbconn.cpp
@@ -189,8 +189,8 @@ int TranslateJDBCType(int stp, char *tn, int prec, int& len, char& v)
/***********************************************************************/
/* Allocate the structure used to refer to the result set. */
/***********************************************************************/
-static JCATPARM *AllocCatInfo(PGLOBAL g, JCATINFO fid, char *db,
- char *tab, PQRYRES qrp)
+static JCATPARM *AllocCatInfo(PGLOBAL g, JCATINFO fid, PCSZ db,
+ PCSZ tab, PQRYRES qrp)
{
JCATPARM *cap;
@@ -213,7 +213,7 @@ static JCATPARM *AllocCatInfo(PGLOBAL g, JCATINFO fid, char *db,
/* JDBCColumns: constructs the result blocks containing all columns */
/* of a JDBC table that will be retrieved by GetData commands. */
/***********************************************************************/
-PQRYRES JDBCColumns(PGLOBAL g, char *db, char *table, char *colpat,
+PQRYRES JDBCColumns(PGLOBAL g, PCSZ db, PCSZ table, PCSZ colpat,
int maxres, bool info, PJPARM sjp)
{
int buftyp[] = {TYPE_STRING, TYPE_STRING, TYPE_STRING, TYPE_STRING,
@@ -316,7 +316,7 @@ PQRYRES JDBCColumns(PGLOBAL g, char *db, char *table, char *colpat,
/* JDBCSrcCols: constructs the result blocks containing the */
/* description of all the columns of a Srcdef option. */
/**************************************************************************/
-PQRYRES JDBCSrcCols(PGLOBAL g, char *src, PJPARM sjp)
+PQRYRES JDBCSrcCols(PGLOBAL g, PCSZ src, PJPARM sjp)
{
char *sqry;
PQRYRES qrp;
@@ -330,7 +330,7 @@ PQRYRES JDBCSrcCols(PGLOBAL g, char *src, PJPARM sjp)
sqry = (char*)PlugSubAlloc(g, NULL, strlen(src) + 2);
sprintf(sqry, src, "1=1"); // dummy where clause
} else
- sqry = src;
+ sqry = (char*)src;
qrp = jcp->GetMetaData(g, sqry);
jcp->Close();
@@ -341,7 +341,7 @@ PQRYRES JDBCSrcCols(PGLOBAL g, char *src, PJPARM sjp)
/* JDBCTables: constructs the result blocks containing all tables in */
/* an JDBC database that will be retrieved by GetData commands. */
/**************************************************************************/
-PQRYRES JDBCTables(PGLOBAL g, char *db, char *tabpat, char *tabtyp,
+PQRYRES JDBCTables(PGLOBAL g, PCSZ db, PCSZ tabpat, PCSZ tabtyp,
int maxres, bool info, PJPARM sjp)
{
int buftyp[] = {TYPE_STRING, TYPE_STRING, TYPE_STRING,
@@ -1059,7 +1059,7 @@ int JDBConn::Open(PJPARM sop)
/***********************************************************************/
/* Execute an SQL command. */
/***********************************************************************/
-int JDBConn::ExecSQLcommand(char *sql)
+int JDBConn::ExecSQLcommand(PCSZ sql)
{
int rc;
jint n;
@@ -1142,7 +1142,7 @@ int JDBConn::Fetch(int pos)
/***********************************************************************/
/* Restart from beginning of result set */
/***********************************************************************/
-int JDBConn::Rewind(char *sql)
+int JDBConn::Rewind(PCSZ sql)
{
int rbuf = -1;
@@ -1200,7 +1200,7 @@ void JDBConn::SetColumnValue(int rank, PSZ name, PVAL val)
if (rank == 0)
if (!name || (jn = env->NewStringUTF(name)) == nullptr) {
sprintf(g->Message, "Fail to allocate jstring %s", SVP(name));
- longjmp(g->jumper[g->jump_level], TYPE_AM_JDBC);
+ throw TYPE_AM_JDBC;
} // endif name
// Returns 666 in case of error
@@ -1208,7 +1208,7 @@ void JDBConn::SetColumnValue(int rank, PSZ name, PVAL val)
if (Check((ctyp == 666) ? -1 : 1)) {
sprintf(g->Message, "Getting ctyp: %s", Msg);
- longjmp(g->jumper[g->jump_level], TYPE_AM_JDBC);
+ throw TYPE_AM_JDBC;
} // endif Check
if (val->GetNullable())
@@ -1227,7 +1227,8 @@ void JDBConn::SetColumnValue(int rank, PSZ name, PVAL val)
case 12: // VARCHAR
case -1: // LONGVARCHAR
case 1: // CHAR
- if (jb)
+ case 3: // DECIMAL
+ if (jb && ctyp != 3)
cn = (jstring)jb;
else if (!gmID(g, chrfldid, "StringField", "(ILjava/lang/String;)Ljava/lang/String;"))
cn = (jstring)env->CallObjectMethod(job, chrfldid, (jint)rank, jn);
@@ -1253,7 +1254,7 @@ void JDBConn::SetColumnValue(int rank, PSZ name, PVAL val)
break;
case 8: // DOUBLE
case 2: // NUMERIC
- case 3: // DECIMAL
+//case 3: // DECIMAL
if (!gmID(g, dblfldid, "DoubleField", "(ILjava/lang/String;)D"))
val->SetValue((double)env->CallDoubleMethod(job, dblfldid, rank, jn));
else
@@ -1314,7 +1315,7 @@ void JDBConn::SetColumnValue(int rank, PSZ name, PVAL val)
env->DeleteLocalRef(jn);
sprintf(g->Message, "SetColumnValue: %s rank=%d ctyp=%d", Msg, rank, (int)ctyp);
- longjmp(g->jumper[g->jump_level], TYPE_AM_JDBC);
+ throw TYPE_AM_JDBC;
} // endif Check
if (rank == 0)
@@ -1325,7 +1326,7 @@ void JDBConn::SetColumnValue(int rank, PSZ name, PVAL val)
/***********************************************************************/
/* Prepare an SQL statement for insert. */
/***********************************************************************/
-bool JDBConn::PrepareSQL(char *sql)
+bool JDBConn::PrepareSQL(PCSZ sql)
{
bool b = true;
PGLOBAL& g = m_G;
@@ -1348,7 +1349,7 @@ bool JDBConn::PrepareSQL(char *sql)
/***********************************************************************/
/* Execute an SQL query that returns a result set. */
/***********************************************************************/
-int JDBConn::ExecuteQuery(char *sql)
+int JDBConn::ExecuteQuery(PCSZ sql)
{
int rc = RC_FX;
jint ncol;
@@ -1376,7 +1377,7 @@ int JDBConn::ExecuteQuery(char *sql)
/***********************************************************************/
/* Execute an SQL query and get the affected rows. */
/***********************************************************************/
-int JDBConn::ExecuteUpdate(char *sql)
+int JDBConn::ExecuteUpdate(PCSZ sql)
{
int rc = RC_FX;
jint n;
@@ -1404,7 +1405,7 @@ int JDBConn::ExecuteUpdate(char *sql)
/***********************************************************************/
/* Get the number of lines of the result set. */
/***********************************************************************/
-int JDBConn::GetResultSize(char *sql, JDBCCOL *colp)
+int JDBConn::GetResultSize(PCSZ sql, JDBCCOL *colp)
{
int rc, n = 0;
@@ -1642,7 +1643,7 @@ bool JDBConn::SetParam(JDBCCOL *colp)
/* GetMetaData: constructs the result blocks containing the */
/* description of all the columns of an SQL command. */
/**************************************************************************/
- PQRYRES JDBConn::GetMetaData(PGLOBAL g, char *src)
+ PQRYRES JDBConn::GetMetaData(PGLOBAL g, PCSZ src)
{
static int buftyp[] = {TYPE_STRING, TYPE_INT, TYPE_INT,
TYPE_INT, TYPE_INT};
@@ -1844,7 +1845,7 @@ bool JDBConn::SetParam(JDBCCOL *colp)
PGLOBAL& g = m_G;
// void *buffer;
int i, ncol;
- PSZ fnc = "Unknown";
+ PCSZ fnc = "Unknown";
uint n;
short len, tp;
int crow = 0;
diff --git a/storage/connect/jdbconn.h b/storage/connect/jdbconn.h
index 9d428142839..73271c8f5be 100644
--- a/storage/connect/jdbconn.h
+++ b/storage/connect/jdbconn.h
@@ -46,9 +46,9 @@ enum JCATINFO {
typedef struct tagJCATPARM {
JCATINFO Id; // Id to indicate function
PQRYRES Qrp; // Result set pointer
- char *DB; // Database (Schema)
- char *Tab; // Table name or pattern
- char *Pat; // Table type or column pattern
+ PCSZ DB; // Database (Schema)
+ PCSZ Tab; // Table name or pattern
+ PCSZ Pat; // Table type or column pattern
} JCATPARM;
typedef jint(JNICALL *CRTJVM) (JavaVM **, void **, void *);
@@ -77,7 +77,7 @@ public:
JDBConn(PGLOBAL g, TDBJDBC *tdbp);
int Open(PJPARM sop);
- int Rewind(char *sql);
+ int Rewind(PCSZ sql);
void Close(void);
PQRYRES AllocateResult(PGLOBAL g);
@@ -96,19 +96,19 @@ public:
//void SetQueryTimeout(DWORD sec) {m_QueryTimeout = sec;}
//void SetUserName(PSZ user) {m_User = user;}
//void SetUserPwd(PSZ pwd) {m_Pwd = pwd;}
- int GetResultSize(char *sql, JDBCCOL *colp);
- int ExecuteQuery(char *sql);
- int ExecuteUpdate(char *sql);
+ int GetResultSize(PCSZ sql, JDBCCOL *colp);
+ int ExecuteQuery(PCSZ sql);
+ int ExecuteUpdate(PCSZ sql);
int Fetch(int pos = 0);
- bool PrepareSQL(char *sql);
+ bool PrepareSQL(PCSZ sql);
int ExecuteSQL(void);
bool SetParam(JDBCCOL *colp);
- int ExecSQLcommand(char *sql);
+ int ExecSQLcommand(PCSZ sql);
void SetColumnValue(int rank, PSZ name, PVAL val);
int GetCatInfo(JCATPARM *cap);
//bool GetDataSources(PQRYRES qrp);
bool GetDrivers(PQRYRES qrp);
- PQRYRES GetMetaData(PGLOBAL g, char *src);
+ PQRYRES GetMetaData(PGLOBAL g, PCSZ src);
public:
// Set static variables
@@ -174,16 +174,10 @@ protected:
jmethodID timfldid; // The TimeField method ID
jmethodID tspfldid; // The TimestampField method ID
jmethodID bigfldid; // The BigintField method ID
- //DWORD m_LoginTimeout;
-//DWORD m_QueryTimeout;
-//DWORD m_UpdateOptions;
- char *Msg;
+ PCSZ Msg;
char *m_Wrap;
char m_IDQuoteChar[2];
-//PSZ m_Driver;
-//PSZ m_Url;
-//PSZ m_User;
- PSZ m_Pwd;
+ PCSZ m_Pwd;
int m_Ncol;
int m_Aff;
int m_Rows;
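
For readers skimming the char* to PCSZ conversions above, a minimal sketch of what the change buys, assuming CONNECT's usual typedef (roughly "typedef const char *PCSZ;"); the sketch is illustrative only, with hypothetical names, and is not part of the patch.

    // Illustration only, not part of the patch. PCSZ is assumed to be
    // CONNECT's "typedef const char *PCSZ;".
    typedef const char *PCSZ;

    // Before: a char* parameter cannot take a string literal without a
    // cast and suggests the callee may modify the buffer.
    static int ExecOld(char *sql) { return sql ? 0 : -1; }

    // After: PCSZ accepts literals and const buffers directly and
    // documents read-only use, which is all these SQL helpers need.
    static int ExecNew(PCSZ sql) { return sql ? 0 : -1; }

    int main() {
      // ExecOld("SELECT 1");      // ill-formed in C++ without a const_cast
      return ExecNew("SELECT 1");  // compiles cleanly
    }
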
diff --git a/storage/connect/json.cpp b/storage/connect/json.cpp
index b473871e9f7..f8b1caa13e2 100644
--- a/storage/connect/json.cpp
+++ b/storage/connect/json.cpp
@@ -60,7 +60,7 @@ char *GetExceptionDesc(PGLOBAL g, unsigned int e);
/***********************************************************************/
PJSON ParseJson(PGLOBAL g, char *s, int len, int *ptyp, bool *comma)
{
- int i, rc, pretty = (ptyp) ? *ptyp : 3;
+ int i, pretty = (ptyp) ? *ptyp : 3;
bool b = false, pty[3] = {true, true, true};
PJSON jsp = NULL;
STRG src;
@@ -81,117 +81,91 @@ PJSON ParseJson(PGLOBAL g, char *s, int len, int *ptyp, bool *comma)
if (s[0] == '[' && (s[1] == '\n' || (s[1] == '\r' && s[2] == '\n')))
pty[0] = false;
-
- // Save stack and allocation environment and prepare error return
- if (g->jump_level == MAX_JUMP) {
- strcpy(g->Message, MSG(TOO_MANY_JUMPS));
- return NULL;
- } // endif jump_level
-
-#if defined(SE_CATCH)
- // Let's try to recover from any kind of interrupt
- _se_translator_function f = _set_se_translator(trans_func);
-
try {
-#endif // SE_CATCH --------------------- try section --------------------
- if ((rc = setjmp(g->jumper[++g->jump_level])) != 0) {
- goto err;
- } // endif rc
-
-#if defined(SE_CATCH) // ------------- end of try section -----------------
- } catch (SE_Exception e) {
- sprintf(g->Message, "ParseJson: exception doing setjmp: %s (rc=%hd)",
- GetExceptionDesc(g, e.nSE), e.nSE);
- _set_se_translator(f);
- goto err;
- } catch (...) {
- strcpy(g->Message, "Exception doing setjmp");
- _set_se_translator(f);
- goto err;
- } // end of try-catches
-
- _set_se_translator(f);
-#endif // SE_CATCH
-
- for (i = 0; i < len; i++)
- switch (s[i]) {
- case '[':
- if (jsp)
- goto tryit;
- else if (!(jsp = ParseArray(g, ++i, src, pty)))
- goto err;
-
- break;
- case '{':
- if (jsp)
- goto tryit;
- else if (!(jsp = ParseObject(g, ++i, src, pty)))
- goto err;
-
- break;
- case ' ':
- case '\t':
- case '\n':
- case '\r':
- break;
- case ',':
- if (jsp && (pretty == 1 || pretty == 3)) {
- if (comma)
- *comma = true;
-
- pty[0] = pty[2] = false;
- break;
- } // endif pretty
-
- sprintf(g->Message, "Unexpected ',' (pretty=%d)", pretty);
- goto err;
- case '(':
- b = true;
- break;
- case ')':
- if (b) {
- b = false;
- break;
- } // endif b
-
- default:
- if (jsp)
- goto tryit;
- else if (!(jsp = ParseValue(g, i, src, pty)))
- goto err;
-
- break;
- }; // endswitch s[i]
-
- if (!jsp)
- sprintf(g->Message, "Invalid Json string '%.*s'", 50, s);
- else if (ptyp && pretty == 3) {
- *ptyp = 3; // Not recognized pretty
-
- for (i = 0; i < 3; i++)
- if (pty[i]) {
- *ptyp = i;
- break;
- } // endif pty
-
- } // endif ptyp
-
- g->jump_level--;
- return jsp;
+ for (i = 0; i < len; i++)
+ switch (s[i]) {
+ case '[':
+ if (jsp)
+ goto tryit;
+ else if (!(jsp = ParseArray(g, ++i, src, pty)))
+ throw 1;
+
+ break;
+ case '{':
+ if (jsp)
+ goto tryit;
+ else if (!(jsp = ParseObject(g, ++i, src, pty)))
+ throw 2;
+
+ break;
+ case ' ':
+ case '\t':
+ case '\n':
+ case '\r':
+ break;
+ case ',':
+ if (jsp && (pretty == 1 || pretty == 3)) {
+ if (comma)
+ *comma = true;
+
+ pty[0] = pty[2] = false;
+ break;
+ } // endif pretty
+
+ sprintf(g->Message, "Unexpected ',' (pretty=%d)", pretty);
+ throw 3;
+ case '(':
+ b = true;
+ break;
+ case ')':
+ if (b) {
+ b = false;
+ break;
+ } // endif b
+
+ default:
+ if (jsp)
+ goto tryit;
+ else if (!(jsp = ParseValue(g, i, src, pty)))
+ throw 4;
+
+ break;
+ }; // endswitch s[i]
+
+ if (!jsp)
+ sprintf(g->Message, "Invalid Json string '%.*s'", 50, s);
+ else if (ptyp && pretty == 3) {
+ *ptyp = 3; // Not recognized pretty
+
+ for (i = 0; i < 3; i++)
+ if (pty[i]) {
+ *ptyp = i;
+ break;
+ } // endif pty
+
+ } // endif ptyp
+
+ } catch (int n) {
+ if (trace)
+ htrc("Exception %d: %s\n", n, g->Message);
+ jsp = NULL;
+ } catch (const char *msg) {
+ strcpy(g->Message, msg);
+ jsp = NULL;
+ } // end catch
+
+ return jsp;
tryit:
if (pty[0] && (!pretty || pretty > 2)) {
if ((jsp = ParseArray(g, (i = 0), src, pty)) && ptyp && pretty == 3)
*ptyp = (pty[0]) ? 0 : 3;
- g->jump_level--;
return jsp;
} else
strcpy(g->Message, "More than one item in file");
-err:
- g->jump_level--;
- return NULL;
+ return NULL;
} // end of ParseJson
/***********************************************************************/
@@ -335,16 +309,16 @@ PJVAL ParseValue(PGLOBAL g, int& i, STRG& src, bool *pty)
PJVAL jvp = new(g) JVALUE;
for (; i < len; i++)
- switch (s[i]) {
- case '\n':
- pty[0] = pty[1] = false;
- case '\r':
- case ' ':
- case '\t':
- break;
- default:
- goto suite;
- } // endswitch
+ switch (s[i]) {
+ case '\n':
+ pty[0] = pty[1] = false;
+ case '\r':
+ case ' ':
+ case '\t':
+ break;
+ default:
+ goto suite;
+ } // endswitch
suite:
switch (s[i]) {
@@ -533,7 +507,7 @@ PVAL ParseNumeric(PGLOBAL g, int& i, STRG& src)
if (!has_e)
goto err;
- // passthru
+ // fall through
case '-':
if (found_digit)
goto err;
@@ -585,78 +559,75 @@ PVAL ParseNumeric(PGLOBAL g, int& i, STRG& src)
PSZ Serialize(PGLOBAL g, PJSON jsp, char *fn, int pretty)
{
PSZ str = NULL;
- bool b = false, err = true;
- JOUT *jp;
+ bool b = false, err = true;
+ JOUT *jp;
FILE *fs = NULL;
g->Message[0] = 0;
- // Save stack and allocation environment and prepare error return
- if (g->jump_level == MAX_JUMP) {
- strcpy(g->Message, MSG(TOO_MANY_JUMPS));
- return NULL;
- } // endif jump_level
-
- if (setjmp(g->jumper[++g->jump_level])) {
- str = NULL;
- goto fin;
- } // endif jmp
-
- if (!jsp) {
- strcpy(g->Message, "Null json tree");
- goto fin;
- } else if (!fn) {
- // Serialize to a string
- jp = new(g) JOUTSTR(g);
- b = pretty == 1;
- } else {
- if (!(fs = fopen(fn, "wb"))) {
- sprintf(g->Message, MSG(OPEN_MODE_ERROR),
- "w", (int)errno, fn);
- strcat(strcat(g->Message, ": "), strerror(errno));
- goto fin;;
- } else if (pretty >= 2) {
- // Serialize to a pretty file
- jp = new(g)JOUTPRT(g, fs);
+ try {
+ if (!jsp) {
+ strcpy(g->Message, "Null json tree");
+ throw 1;
+ } else if (!fn) {
+ // Serialize to a string
+ jp = new(g) JOUTSTR(g);
+ b = pretty == 1;
} else {
- // Serialize to a flat file
- b = true;
- jp = new(g)JOUTFILE(g, fs, pretty);
- } // endif's
-
- } // endif's
-
- switch (jsp->GetType()) {
- case TYPE_JAR:
- err = SerializeArray(jp, (PJAR)jsp, b);
- break;
- case TYPE_JOB:
- err = ((b && jp->Prty()) && jp->WriteChr('\t'));
- err |= SerializeObject(jp, (PJOB)jsp);
- break;
- case TYPE_JVAL:
- err = SerializeValue(jp, (PJVAL)jsp);
- break;
- default:
- strcpy(g->Message, "Invalid json tree");
- } // endswitch Type
+ if (!(fs = fopen(fn, "wb"))) {
+ sprintf(g->Message, MSG(OPEN_MODE_ERROR),
+ "w", (int)errno, fn);
+ strcat(strcat(g->Message, ": "), strerror(errno));
+ throw 2;
+ } else if (pretty >= 2) {
+ // Serialize to a pretty file
+ jp = new(g)JOUTPRT(g, fs);
+ } else {
+ // Serialize to a flat file
+ b = true;
+ jp = new(g)JOUTFILE(g, fs, pretty);
+ } // endif's
+
+ } // endif's
+
+ switch (jsp->GetType()) {
+ case TYPE_JAR:
+ err = SerializeArray(jp, (PJAR)jsp, b);
+ break;
+ case TYPE_JOB:
+ err = ((b && jp->Prty()) && jp->WriteChr('\t'));
+ err |= SerializeObject(jp, (PJOB)jsp);
+ break;
+ case TYPE_JVAL:
+ err = SerializeValue(jp, (PJVAL)jsp);
+ break;
+ default:
+ strcpy(g->Message, "Invalid json tree");
+ } // endswitch Type
+
+ if (fs) {
+ fputs(EL, fs);
+ fclose(fs);
+ str = (err) ? NULL : strcpy(g->Message, "Ok");
+ } else if (!err) {
+ str = ((JOUTSTR*)jp)->Strp;
+ jp->WriteChr('\0');
+ PlugSubAlloc(g, NULL, ((JOUTSTR*)jp)->N);
+ } else {
+ if (!g->Message[0])
+ strcpy(g->Message, "Error in Serialize");
- if (fs) {
- fputs(EL, fs);
- fclose(fs);
- str = (err) ? NULL : strcpy(g->Message, "Ok");
- } else if (!err) {
- str = ((JOUTSTR*)jp)->Strp;
- jp->WriteChr('\0');
- PlugSubAlloc(g, NULL, ((JOUTSTR*)jp)->N);
- } else {
- if (!g->Message[0])
- strcpy(g->Message, "Error in Serialize");
+ } // endif's
- } // endif's
+ } catch (int n) {
+ if (trace)
+ htrc("Exception %d: %s\n", n, g->Message);
+ str = NULL;
+ } catch (const char *msg) {
+ strcpy(g->Message, msg);
+ str = NULL;
+ } // end catch
-fin:
- g->jump_level--;
return str;
} // end of Serialize
@@ -826,7 +797,7 @@ bool JOUTSTR::Escape(const char *s)
case '\r':
case '\b':
case '\f': WriteChr('\\');
- // passthru
+ // fall through
default:
WriteChr(s[i]);
break;
@@ -965,7 +936,7 @@ return false;
/***********************************************************************/
/* Add a new pair to an Object. */
/***********************************************************************/
-PJPR JOBJECT::AddPair(PGLOBAL g, PSZ key)
+PJPR JOBJECT::AddPair(PGLOBAL g, PCSZ key)
{
PJPR jpp = new(g) JPAIR(key);
@@ -1051,7 +1022,7 @@ bool JOBJECT::Merge(PGLOBAL g, PJSON jsp)
/***********************************************************************/
/* Set or add a value corresponding to the given key. */
/***********************************************************************/
-void JOBJECT::SetValue(PGLOBAL g, PJVAL jvp, PSZ key)
+void JOBJECT::SetValue(PGLOBAL g, PJVAL jvp, PCSZ key)
{
PJPR jp;
@@ -1071,7 +1042,7 @@ void JOBJECT::SetValue(PGLOBAL g, PJVAL jvp, PSZ key)
/***********************************************************************/
/* Delete a value corresponding to the given key. */
/***********************************************************************/
-void JOBJECT::DeleteKey(PSZ key)
+void JOBJECT::DeleteKey(PCSZ key)
{
PJPR jp, *pjp = &First;
@@ -1250,10 +1221,10 @@ JVALUE::JVALUE(PGLOBAL g, PVAL valp) : JSON()
/***********************************************************************/
/* Constructor for a given string. */
/***********************************************************************/
-JVALUE::JVALUE(PGLOBAL g, PSZ strp) : JSON()
+JVALUE::JVALUE(PGLOBAL g, PCSZ strp) : JSON()
{
Jsp = NULL;
- Value = AllocateValue(g, strp, TYPE_STRING);
+ Value = AllocateValue(g, (void*)strp, TYPE_STRING);
Next = NULL;
Del = false;
} // end of JVALUE constructor
@@ -1374,7 +1345,7 @@ void JVALUE::SetTiny(PGLOBAL g, char n)
{
Value = AllocateValue(g, &n, TYPE_TINY);
Jsp = NULL;
-} // end of SetInteger
+} // end of SetTiny
/***********************************************************************/
/* Set the Value's value as the given big integer. */
@@ -1408,6 +1379,6 @@ void JVALUE::SetString(PGLOBAL g, PSZ s, short c)
/***********************************************************************/
bool JVALUE::IsNull(void)
{
- return (Jsp) ? Jsp->IsNull() : (Value) ? Value->IsZero() : true;
+ return (Jsp) ? Jsp->IsNull() : (Value) ? Value->IsNull() : true;
} // end of IsNull
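
The json.cpp hunks above replace the setjmp/longjmp recovery with C++ exceptions: parse and serialize errors throw an int (after filling g->Message) or a const char*, and both are caught at the end of the function so callers still just see a NULL result. Below is a minimal standalone sketch of that shape, with stdio standing in for CONNECT's htrc/trace globals; every name in it is hypothetical and it is not part of the patch.

    #include <cstdio>
    #include <cstring>

    static char message[256];                  // stands in for g->Message

    static const char *ParseLike(const char *s) {
      const char *result = nullptr;
      try {
        if (!s)
          throw "Null input string";           // caught below as const char*
        if (*s != '[' && *s != '{') {
          std::snprintf(message, sizeof(message), "Invalid Json string '%.50s'", s);
          throw 1;                             // caught below as int; message already set
        }
        result = s;                            // "parsed" successfully
      } catch (int n) {
        std::fprintf(stderr, "Exception %d: %s\n", n, message);
      } catch (const char *msg) {
        std::strncpy(message, msg, sizeof(message) - 1);
        std::fprintf(stderr, "%s\n", message);
      }
      return result;                           // NULL on error, as in ParseJson
    }

    int main() { return ParseLike("{\"ok\":1}") ? 0 : 1; }
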
diff --git a/storage/connect/json.h b/storage/connect/json.h
index 4ea169e1b18..49675ce8559 100644
--- a/storage/connect/json.h
+++ b/storage/connect/json.h
@@ -125,14 +125,14 @@ class JPAIR : public BLOCK {
friend PJOB ParseObject(PGLOBAL, int&, STRG&, bool*);
friend bool SerializeObject(JOUT *, PJOB);
public:
- JPAIR(PSZ key) : BLOCK() {Key = key; Val = NULL; Next = NULL;}
+ JPAIR(PCSZ key) : BLOCK() {Key = key; Val = NULL; Next = NULL;}
- inline PSZ GetKey(void) {return Key;}
+ inline PCSZ GetKey(void) {return Key;}
inline PJVAL GetVal(void) {return Val;}
inline PJPR GetNext(void) {return Next;}
protected:
- PSZ Key; // This pair key name
+ PCSZ Key; // This pair key name
PJVAL Val; // To the value of the pair
PJPR Next; // To the next pair
}; // end of class JPAIR
@@ -150,7 +150,7 @@ class JSON : public BLOCK {
virtual JTYP GetValType(void) {X return TYPE_JSON;}
virtual void InitArray(PGLOBAL g) {X}
//virtual PJVAL AddValue(PGLOBAL g, PJVAL jvp = NULL, int *x = NULL) {X return NULL;}
- virtual PJPR AddPair(PGLOBAL g, PSZ key) {X return NULL;}
+ virtual PJPR AddPair(PGLOBAL g, PCSZ key) {X return NULL;}
virtual PJAR GetKeyList(PGLOBAL g) {X return NULL;}
virtual PJVAL GetValue(const char *key) {X return NULL;}
virtual PJOB GetObject(void) {return NULL;}
@@ -166,13 +166,13 @@ class JSON : public BLOCK {
virtual PSZ GetText(PGLOBAL g, PSZ text) {X return NULL;}
virtual bool Merge(PGLOBAL g, PJSON jsp) { X return true; }
virtual bool SetValue(PGLOBAL g, PJVAL jvp, int i) { X return true; }
- virtual void SetValue(PGLOBAL g, PJVAL jvp, PSZ key) {X}
+ virtual void SetValue(PGLOBAL g, PJVAL jvp, PCSZ key) {X}
virtual void SetValue(PVAL valp) {X}
virtual void SetValue(PJSON jsp) {X}
virtual void SetString(PGLOBAL g, PSZ s, short c) {X}
virtual void SetInteger(PGLOBAL g, int n) {X}
virtual void SetFloat(PGLOBAL g, double f) {X}
- virtual void DeleteKey(char *k) {X}
+ virtual void DeleteKey(PCSZ k) {X}
virtual bool DeleteValue(int i) {X return true;}
virtual bool IsNull(void) {X return true;}
@@ -195,14 +195,14 @@ class JOBJECT : public JSON {
virtual void Clear(void) {First = Last = NULL; Size = 0;}
virtual JTYP GetType(void) {return TYPE_JOB;}
virtual PJPR GetFirst(void) {return First;}
- virtual PJPR AddPair(PGLOBAL g, PSZ key);
+ virtual PJPR AddPair(PGLOBAL g, PCSZ key);
virtual PJOB GetObject(void) {return this;}
virtual PJVAL GetValue(const char* key);
virtual PJAR GetKeyList(PGLOBAL g);
virtual PSZ GetText(PGLOBAL g, PSZ text);
virtual bool Merge(PGLOBAL g, PJSON jsp);
- virtual void SetValue(PGLOBAL g, PJVAL jvp, PSZ key);
- virtual void DeleteKey(char *k);
+ virtual void SetValue(PGLOBAL g, PJVAL jvp, PCSZ key);
+ virtual void DeleteKey(PCSZ k);
virtual bool IsNull(void);
protected:
@@ -253,7 +253,7 @@ class JVALUE : public JSON {
JVALUE(PJSON jsp) : JSON()
{Jsp = jsp; Value = NULL; Next = NULL; Del = false;}
JVALUE(PGLOBAL g, PVAL valp);
- JVALUE(PGLOBAL g, PSZ strp);
+ JVALUE(PGLOBAL g, PCSZ strp);
using JSON::GetValue;
using JSON::SetValue;
diff --git a/storage/connect/jsonudf.cpp b/storage/connect/jsonudf.cpp
index 360d0d1a82a..87e818e6108 100644
--- a/storage/connect/jsonudf.cpp
+++ b/storage/connect/jsonudf.cpp
@@ -1104,7 +1104,7 @@ static my_bool JsonInit(UDF_INIT *initid, UDF_ARGS *args,
} // endif g
g->Mrr = (args->arg_count && args->args[0]) ? 1 : 0;
- g->ActivityStart = (PACTIVITY)more;
+ g->More = more;
initid->maybe_null = mbn;
initid->max_length = reslen;
initid->ptr = (char*)g;
@@ -1448,13 +1448,13 @@ static my_bool CheckMemory(PGLOBAL g, UDF_INIT *initid, UDF_ARGS *args, uint n,
} // endif b
- ml += (unsigned long)g->ActivityStart; // more
+ ml += g->More;
if (ml > g->Sarea_Size) {
free(g->Sarea);
if (!(g->Sarea = PlugAllocMem(g, ml))) {
- char errmsg[256];
+ char errmsg[MAX_STR];
sprintf(errmsg, MSG(WORK_AREA), g->Message);
strcpy(g->Message, errmsg);
@@ -1495,7 +1495,7 @@ static PSZ MakePSZ(PGLOBAL g, UDF_ARGS *args, int i)
/*********************************************************************************/
/* Make a valid key from the passed argument. */
/*********************************************************************************/
-static PSZ MakeKey(PGLOBAL g, UDF_ARGS *args, int i)
+static PCSZ MakeKey(PGLOBAL g, UDF_ARGS *args, int i)
{
if (args->arg_count > (unsigned)i) {
int j = 0, n = args->attribute_lengths[i];
@@ -2253,7 +2253,8 @@ my_bool json_object_add_init(UDF_INIT *initid, UDF_ARGS *args, char *message)
char *json_object_add(UDF_INIT *initid, UDF_ARGS *args, char *result,
unsigned long *res_length, char *is_null, char *error)
{
- char *key, *str = NULL;
+ PCSZ key;
+ char *str = NULL;
PGLOBAL g = (PGLOBAL)initid->ptr;
if (g->Xchk) {
@@ -2358,7 +2359,7 @@ char *json_object_delete(UDF_INIT *initid, UDF_ARGS *args, char *result,
} // endif Xchk
if (!CheckMemory(g, initid, args, 1, false, true, true)) {
- char *key;
+ PCSZ key;
PJOB jobp;
PJSON jsp, top;
PJVAL jvp = MakeValue(g, args, 0, &top);
@@ -2914,7 +2915,6 @@ char *jsonget_string(UDF_INIT *initid, UDF_ARGS *args, char *result,
unsigned long *res_length, char *is_null, char *)
{
char *p, *path, *str = NULL;
- int rc;
PJSON jsp;
PJSNX jsx;
PJVAL jvp;
@@ -2922,68 +2922,64 @@ char *jsonget_string(UDF_INIT *initid, UDF_ARGS *args, char *result,
if (g->N) {
str = (char*)g->Activityp;
- goto fin;
+ goto err;
} else if (initid->const_item)
g->N = 1;
- // Save stack and allocation environment and prepare error return
- if (g->jump_level == MAX_JUMP) {
- PUSH_WARNING(MSG(TOO_MANY_JUMPS));
- *is_null = 1;
- return NULL;
- } // endif jump_level
+ try {
+ if (!g->Xchk) {
+ if (CheckMemory(g, initid, args, 1, true)) {
+ PUSH_WARNING("CheckMemory error");
+ goto err;
+ } else
+ jvp = MakeValue(g, args, 0);
- if ((rc= setjmp(g->jumper[++g->jump_level])) != 0) {
- PUSH_WARNING(g->Message);
- str = NULL;
- goto err;
- } // endif rc
+ if ((p = jvp->GetString())) {
+ if (!(jsp = ParseJson(g, p, strlen(p)))) {
+ PUSH_WARNING(g->Message);
+ goto err;
+ } // endif jsp
- if (!g->Xchk) {
- if (CheckMemory(g, initid, args, 1, true)) {
- PUSH_WARNING("CheckMemory error");
- goto err;
- } else
- jvp = MakeValue(g, args, 0);
+ } else
+ jsp = jvp->GetJson();
- if ((p = jvp->GetString())) {
- if (!(jsp = ParseJson(g, p, strlen(p)))) {
- PUSH_WARNING(g->Message);
- goto err;
- } // endif jsp
+ if (g->Mrr) { // First argument is a constant
+ g->Xchk = jsp;
+ JsonMemSave(g);
+ } // endif Mrr
} else
- jsp = jvp->GetJson();
+ jsp = (PJSON)g->Xchk;
- if (g->Mrr) { // First argument is a constant
- g->Xchk = jsp;
- JsonMemSave(g);
- } // endif Mrr
+ path = MakePSZ(g, args, 1);
+ jsx = new(g) JSNX(g, jsp, TYPE_STRING, initid->max_length);
- } else
- jsp = (PJSON)g->Xchk;
+ if (jsx->SetJpath(g, path)) {
+ PUSH_WARNING(g->Message);
+ goto err;
+ } // endif SetJpath
- path = MakePSZ(g, args, 1);
- jsx = new(g) JSNX(g, jsp, TYPE_STRING, initid->max_length);
+ jsx->ReadValue(g);
- if (jsx->SetJpath(g, path)) {
- PUSH_WARNING(g->Message);
- goto err;
- } // endif SetJpath
+ if (!jsx->GetValue()->IsNull())
+ str = jsx->GetValue()->GetCharValue();
- jsx->ReadValue(g);
-
- if (!jsx->GetValue()->IsNull())
- str = jsx->GetValue()->GetCharValue();
+ if (initid->const_item)
+ // Keep result of constant function
+ g->Activityp = (PACTIVITY)str;
- if (initid->const_item)
- // Keep result of constant function
- g->Activityp = (PACTIVITY)str;
+ } catch (int n) {
+ if (trace)
+ htrc("Exception %d: %s\n", n, g->Message);
+ PUSH_WARNING(g->Message);
+ str = NULL;
+ } catch (const char *msg) {
+ strcpy(g->Message, msg);
+ PUSH_WARNING(g->Message);
+ str = NULL;
+ } // end catch
err:
- g->jump_level--;
-
- fin:
if (!str) {
*is_null = 1;
*res_length = 0;
@@ -3254,7 +3250,7 @@ char *jsonlocate(UDF_INIT *initid, UDF_ARGS *args, char *result,
unsigned long *res_length, char *is_null, char *error)
{
char *p, *path = NULL;
- int k, rc;
+ int k;
PJVAL jvp, jvp2;
PJSON jsp;
PJSNX jsx;
@@ -3274,61 +3270,58 @@ char *jsonlocate(UDF_INIT *initid, UDF_ARGS *args, char *result,
} else if (initid->const_item)
g->N = 1;
- // Save stack and allocation environment and prepare error return
- if (g->jump_level == MAX_JUMP) {
- PUSH_WARNING(MSG(TOO_MANY_JUMPS));
- *error = 1;
- *is_null = 1;
- return NULL;
- } // endif jump_level
+ try {
+ if (!g->Xchk) {
+ if (CheckMemory(g, initid, args, 1, !g->Xchk)) {
+ PUSH_WARNING("CheckMemory error");
+ *error = 1;
+ goto err;
+ } else
+ jvp = MakeValue(g, args, 0);
- if ((rc= setjmp(g->jumper[++g->jump_level])) != 0) {
- PUSH_WARNING(g->Message);
- *error = 1;
- path = NULL;
- goto err;
- } // endif rc
+ if ((p = jvp->GetString())) {
+ if (!(jsp = ParseJson(g, p, strlen(p)))) {
+ PUSH_WARNING(g->Message);
+ goto err;
+ } // endif jsp
- if (!g->Xchk) {
- if (CheckMemory(g, initid, args, 1, !g->Xchk)) {
- PUSH_WARNING("CheckMemory error");
- *error = 1;
- goto err;
- } else
- jvp = MakeValue(g, args, 0);
+ } else
+ jsp = jvp->GetJson();
- if ((p = jvp->GetString())) {
- if (!(jsp = ParseJson(g, p, strlen(p)))) {
- PUSH_WARNING(g->Message);
- goto err;
- } // endif jsp
+ if (g->Mrr) { // First argument is a constant
+ g->Xchk = jsp;
+ JsonMemSave(g);
+ } // endif Mrr
} else
- jsp = jvp->GetJson();
+ jsp = (PJSON)g->Xchk;
- if (g->Mrr) { // First argument is a constant
- g->Xchk = jsp;
- JsonMemSave(g);
- } // endif Mrr
-
- } else
- jsp = (PJSON)g->Xchk;
+ // The item to locate
+ jvp2 = MakeValue(g, args, 1);
- // The item to locate
- jvp2 = MakeValue(g, args, 1);
+ k = (args->arg_count > 2) ? (int)*(long long*)args->args[2] : 1;
- k = (args->arg_count > 2) ? (int)*(long long*)args->args[2] : 1;
+ jsx = new(g) JSNX(g, jsp, TYPE_STRING);
+ path = jsx->Locate(g, jsp, jvp2, k);
- jsx = new(g) JSNX(g, jsp, TYPE_STRING);
- path = jsx->Locate(g, jsp, jvp2, k);
+ if (initid->const_item)
+ // Keep result of constant function
+ g->Activityp = (PACTIVITY)path;
- if (initid->const_item)
- // Keep result of constant function
- g->Activityp = (PACTIVITY)path;
+ } catch (int n) {
+ if (trace)
+ htrc("Exception %d: %s\n", n, g->Message);
+ PUSH_WARNING(g->Message);
+ *error = 1;
+ path = NULL;
+ } catch (const char *msg) {
+ strcpy(g->Message, msg);
+ PUSH_WARNING(g->Message);
+ *error = 1;
+ path = NULL;
+ } // end catch
err:
- g->jump_level--;
-
if (!path) {
*res_length = 0;
*is_null = 1;
@@ -3379,7 +3372,7 @@ char *json_locate_all(UDF_INIT *initid, UDF_ARGS *args, char *result,
unsigned long *res_length, char *is_null, char *error)
{
char *p, *path = NULL;
- int rc, mx = 10;
+ int mx = 10;
PJVAL jvp, jvp2;
PJSON jsp;
PJSNX jsx;
@@ -3400,62 +3393,59 @@ char *json_locate_all(UDF_INIT *initid, UDF_ARGS *args, char *result,
} else if (initid->const_item)
g->N = 1;
- // Save stack and allocation environment and prepare error return
- if (g->jump_level == MAX_JUMP) {
- PUSH_WARNING(MSG(TOO_MANY_JUMPS));
- *error = 1;
- *is_null = 1;
- return NULL;
- } // endif jump_level
+ try {
+ if (!g->Xchk) {
+ if (CheckMemory(g, initid, args, 1, true)) {
+ PUSH_WARNING("CheckMemory error");
+ *error = 1;
+ goto err;
+ } else
+ jvp = MakeValue(g, args, 0);
- if ((rc= setjmp(g->jumper[++g->jump_level])) != 0) {
- PUSH_WARNING(g->Message);
- *error = 1;
- path = NULL;
- goto err;
- } // endif rc
+ if ((p = jvp->GetString())) {
+ if (!(jsp = ParseJson(g, p, strlen(p)))) {
+ PUSH_WARNING(g->Message);
+ goto err;
+ } // endif jsp
- if (!g->Xchk) {
- if (CheckMemory(g, initid, args, 1, true)) {
- PUSH_WARNING("CheckMemory error");
- *error = 1;
- goto err;
- } else
- jvp = MakeValue(g, args, 0);
+ } else
+ jsp = jvp->GetJson();
- if ((p = jvp->GetString())) {
- if (!(jsp = ParseJson(g, p, strlen(p)))) {
- PUSH_WARNING(g->Message);
- goto err;
- } // endif jsp
+ if (g->Mrr) { // First argument is a constant
+ g->Xchk = jsp;
+ JsonMemSave(g);
+ } // endif Mrr
} else
- jsp = jvp->GetJson();
-
- if (g->Mrr) { // First argument is a constant
- g->Xchk = jsp;
- JsonMemSave(g);
- } // endif Mrr
+ jsp = (PJSON)g->Xchk;
- } else
- jsp = (PJSON)g->Xchk;
+ // The item to locate
+ jvp2 = MakeValue(g, args, 1);
- // The item to locate
- jvp2 = MakeValue(g, args, 1);
+ if (args->arg_count > 2)
+ mx = (int)*(long long*)args->args[2];
- if (args->arg_count > 2)
- mx = (int)*(long long*)args->args[2];
+ jsx = new(g) JSNX(g, jsp, TYPE_STRING);
+ path = jsx->LocateAll(g, jsp, jvp2, mx);
- jsx = new(g) JSNX(g, jsp, TYPE_STRING);
- path = jsx->LocateAll(g, jsp, jvp2, mx);
+ if (initid->const_item)
+ // Keep result of constant function
+ g->Activityp = (PACTIVITY)path;
- if (initid->const_item)
- // Keep result of constant function
- g->Activityp = (PACTIVITY)path;
+ } catch (int n) {
+ if (trace)
+ htrc("Exception %d: %s\n", n, g->Message);
+ PUSH_WARNING(g->Message);
+ *error = 1;
+ path = NULL;
+ } catch (const char *msg) {
+ strcpy(g->Message, msg);
+ PUSH_WARNING(g->Message);
+ *error = 1;
+ path = NULL;
+ } // end catch
err:
- g->jump_level--;
-
if (!path) {
*res_length = 0;
*is_null = 1;
@@ -3637,7 +3627,7 @@ static char *handle_item(UDF_INIT *initid, UDF_ARGS *args, char *result,
unsigned long *res_length, char *is_null, char *error)
{
char *p, *path, *str = NULL;
- int w, rc;
+ int w;
my_bool b = true;
PJSON jsp;
PJSNX jsx;
@@ -3659,78 +3649,73 @@ static char *handle_item(UDF_INIT *initid, UDF_ARGS *args, char *result,
w = 2;
else {
PUSH_WARNING("Logical error, please contact CONNECT developer");
- goto err;
+ goto fin;
} // endelse
- // Save stack and allocation environment and prepare error return
- if (g->jump_level == MAX_JUMP) {
- PUSH_WARNING(MSG(TOO_MANY_JUMPS));
- *error = 1;
- goto fin;
- } // endif jump_level
+ try {
+ if (!g->Xchk) {
+ if (CheckMemory(g, initid, args, 1, true, false, true)) {
+ PUSH_WARNING("CheckMemory error");
+ throw 1;
+ } else
+ jvp = MakeValue(g, args, 0);
- if ((rc= setjmp(g->jumper[++g->jump_level])) != 0) {
- PUSH_WARNING(g->Message);
- str = NULL;
- goto err;
- } // endif rc
+ if ((p = jvp->GetString())) {
+ if (!(jsp = ParseJson(g, p, strlen(p)))) {
+ throw 2;
+ } // endif jsp
- if (!g->Xchk) {
- if (CheckMemory(g, initid, args, 1, true, false, true)) {
- PUSH_WARNING("CheckMemory error");
- goto err;
- } else
- jvp = MakeValue(g, args, 0);
+ } else
+ jsp = jvp->GetJson();
- if ((p = jvp->GetString())) {
- if (!(jsp = ParseJson(g, p, strlen(p)))) {
- PUSH_WARNING(g->Message);
- goto err;
- } // endif jsp
+ if (g->Mrr) { // First argument is a constant
+ g->Xchk = jsp;
+ JsonMemSave(g);
+ } // endif Mrr
} else
- jsp = jvp->GetJson();
+ jsp = (PJSON)g->Xchk;
- if (g->Mrr) { // First argument is a constant
- g->Xchk = jsp;
- JsonMemSave(g);
- } // endif Mrr
+ jsx = new(g)JSNX(g, jsp, TYPE_STRING, initid->max_length, 0, true);
- } else
- jsp = (PJSON)g->Xchk;
-
- jsx = new(g)JSNX(g, jsp, TYPE_STRING, initid->max_length, 0, true);
-
- for (uint i = 1; i+1 < args->arg_count; i += 2) {
- jvp = MakeValue(gb, args, i);
- path = MakePSZ(g, args, i+1);
+ for (uint i = 1; i + 1 < args->arg_count; i += 2) {
+ jvp = MakeValue(gb, args, i);
+ path = MakePSZ(g, args, i + 1);
- if (jsx->SetJpath(g, path, false)) {
- PUSH_WARNING(g->Message);
- continue;
- } // endif SetJpath
+ if (jsx->SetJpath(g, path, false)) {
+ PUSH_WARNING(g->Message);
+ continue;
+ } // endif SetJpath
- if (w) {
- jsx->ReadValue(g);
- b = jsx->GetValue()->IsNull();
- b = (w == 1) ? b : !b;
- } // endif w
+ if (w) {
+ jsx->ReadValue(g);
+ b = jsx->GetValue()->IsNull();
+ b = (w == 1) ? b : !b;
+ } // endif w
- if (b && jsx->WriteValue(gb, jvp))
- PUSH_WARNING(g->Message);
+ if (b && jsx->WriteValue(gb, jvp))
+ PUSH_WARNING(g->Message);
- } // endfor i
+ } // endfor i
- // In case of error or file, return unchanged argument
- if (!(str = MakeResult(g, args, jsp, INT_MAX32)))
- str = MakePSZ(g, args, 0);
+ // In case of error or file, return unchanged argument
+ if (!(str = MakeResult(g, args, jsp, INT_MAX32)))
+ str = MakePSZ(g, args, 0);
- if (g->N)
- // Keep result of constant function
- g->Activityp = (PACTIVITY)str;
+ if (g->N)
+ // Keep result of constant function
+ g->Activityp = (PACTIVITY)str;
-err:
- g->jump_level--;
+ } catch (int n) {
+ if (trace)
+ htrc("Exception %d: %s\n", n, g->Message);
+ PUSH_WARNING(g->Message);
+ str = NULL;
+ } catch (const char *msg) {
+ strcpy(g->Message, msg);
+ PUSH_WARNING(g->Message);
+ str = NULL;
+ } // end catch
fin:
if (!str) {
@@ -4557,7 +4542,7 @@ char *jbin_object_add(UDF_INIT *initid, UDF_ARGS *args, char *result,
} // endif bsp
if (!CheckMemory(g, initid, args, 2, false, true, true)) {
- char *key;
+ PCSZ key;
PJOB jobp;
PJVAL jvp = MakeValue(g, args, 0, &top);
PJSON jsp = jvp->GetJson();
@@ -4637,7 +4622,7 @@ char *jbin_object_delete(UDF_INIT *initid, UDF_ARGS *args, char *result,
} // endif bsp
if (!CheckMemory(g, initid, args, 1, false, true, true)) {
- char *key;
+ PCSZ key;
PJOB jobp;
PJVAL jvp = MakeValue(g, args, 0, &top);
PJSON jsp = jvp->GetJson();
diff --git a/storage/connect/jsonudf.h b/storage/connect/jsonudf.h
index d2890421c62..5f4b98a0652 100644
--- a/storage/connect/jsonudf.h
+++ b/storage/connect/jsonudf.h
@@ -232,7 +232,7 @@ extern "C" {
/*********************************************************************************/
typedef struct _jpn {
enum JTYP Type;
- PSZ Key;
+ PCSZ Key;
int N;
} JPN, *PJPN;
diff --git a/storage/connect/libdoc.cpp b/storage/connect/libdoc.cpp
index 2470d37c353..700d247da38 100644
--- a/storage/connect/libdoc.cpp
+++ b/storage/connect/libdoc.cpp
@@ -68,9 +68,9 @@ class LIBXMLDOC : public XMLDOCUMENT {
virtual void SetNofree(bool b) {Nofreelist = b;}
// Methods
- virtual bool Initialize(PGLOBAL g, char *entry, bool zipped);
+ virtual bool Initialize(PGLOBAL g, PCSZ entry, bool zipped);
virtual bool ParseFile(PGLOBAL g, char *fn);
- virtual bool NewDoc(PGLOBAL g, char *ver);
+ virtual bool NewDoc(PGLOBAL g, PCSZ ver);
virtual void AddComment(PGLOBAL g, char *com);
virtual PXNODE GetRoot(PGLOBAL g);
virtual PXNODE NewRoot(PGLOBAL g, char *name);
@@ -119,9 +119,9 @@ class XML2NODE : public XMLNODE {
virtual PXLIST SelectNodes(PGLOBAL g, char *xp, PXLIST lp);
virtual PXNODE SelectSingleNode(PGLOBAL g, char *xp, PXNODE np);
virtual PXATTR GetAttribute(PGLOBAL g, char *name, PXATTR ap);
- virtual PXNODE AddChildNode(PGLOBAL g, char *name, PXNODE np);
+ virtual PXNODE AddChildNode(PGLOBAL g, PCSZ name, PXNODE np);
virtual PXATTR AddProperty(PGLOBAL g, char *name, PXATTR ap);
- virtual void AddText(PGLOBAL g, char *txtp);
+ virtual void AddText(PGLOBAL g, PCSZ txtp);
virtual void DeleteChild(PGLOBAL g, PXNODE dnp);
protected:
@@ -373,7 +373,7 @@ LIBXMLDOC::LIBXMLDOC(char *nsl, char *nsdf, char *enc, PFBLOCK fp)
/******************************************************************/
/* Initialize XML parser and check library compatibility. */
/******************************************************************/
-bool LIBXMLDOC::Initialize(PGLOBAL g, char *entry, bool zipped)
+bool LIBXMLDOC::Initialize(PGLOBAL g, PCSZ entry, bool zipped)
{
if (zipped && InitZip(g, entry))
return true;
@@ -434,7 +434,7 @@ PFBLOCK LIBXMLDOC::LinkXblock(PGLOBAL g, MODE m, int rc, char *fn)
/******************************************************************/
/* Construct and add the XML processing instruction node. */
/******************************************************************/
-bool LIBXMLDOC::NewDoc(PGLOBAL g, char *ver)
+bool LIBXMLDOC::NewDoc(PGLOBAL g, PCSZ ver)
{
if (trace)
htrc("NewDoc\n");
@@ -863,14 +863,13 @@ RCODE XML2NODE::GetContent(PGLOBAL g, char *buf, int len)
xmlFree(Content);
if ((Content = xmlNodeGetContent(Nodep))) {
- char *extra = " \t\r\n";
char *p1 = (char*)Content, *p2 = buf;
bool b = false;
// Copy content eliminating extra characters
for (; *p1; p1++)
if ((p2 - buf) < len) {
- if (strchr(extra, *p1)) {
+ if (strchr(" \t\r\n", *p1)) {
if (b) {
// This to have one blank between sub-nodes
*p2++ = ' ';
@@ -1020,19 +1019,19 @@ PXATTR XML2NODE::GetAttribute(PGLOBAL g, char *name, PXATTR ap)
/******************************************************************/
/* Add a new child node to this node and return it. */
/******************************************************************/
-PXNODE XML2NODE::AddChildNode(PGLOBAL g, char *name, PXNODE np)
+PXNODE XML2NODE::AddChildNode(PGLOBAL g, PCSZ name, PXNODE np)
{
- char *p, *pn, *pf = NULL;
+ char *p, *pn, *pf = NULL, *nmp = PlugDup(g, name);
if (trace)
htrc("AddChildNode: %s\n", name);
// Is a prefix specified
- if ((pn = strchr(name, ':'))) {
- pf = name;
+ if ((pn = strchr(nmp, ':'))) {
+ pf = nmp;
*pn++ = '\0'; // Separate name from prefix
} else
- pn = name;
+ pn = nmp;
// If name has the format m[n] only m is taken as node name
if ((p = strchr(pn, '[')))
@@ -1096,7 +1095,7 @@ PXATTR XML2NODE::AddProperty(PGLOBAL g, char *name, PXATTR ap)
/******************************************************************/
/* Add a new text node to this node. */
/******************************************************************/
-void XML2NODE::AddText(PGLOBAL g, char *txtp)
+void XML2NODE::AddText(PGLOBAL g, PCSZ txtp)
{
if (trace)
htrc("AddText: %s\n", txtp);
diff --git a/storage/connect/macutil.cpp b/storage/connect/macutil.cpp
index f5d3bb11fe9..b9600bdac2e 100644
--- a/storage/connect/macutil.cpp
+++ b/storage/connect/macutil.cpp
@@ -192,7 +192,7 @@ bool MACINFO::GetOneInfo(PGLOBAL g, int flag, void *v, int lv)
case 23:
break;
default:
- p = "";
+ p = PlugDup(g, "");
} // endswitch flag
} else switch (flag) {
diff --git a/storage/connect/mycat.cc b/storage/connect/mycat.cc
index 1fcd8ac78da..750cf3c0639 100644
--- a/storage/connect/mycat.cc
+++ b/storage/connect/mycat.cc
@@ -161,7 +161,7 @@ TABTYPE GetTypeID(const char *type)
#ifdef ZIP_SUPPORT
: (!stricmp(type, "ZIP")) ? TAB_ZIP
#endif
- : (!stricmp(type, "OEM")) ? TAB_OEM : TAB_NIY;
+ : (!stricmp(type, "OEM")) ? TAB_OEM : TAB_NIY;
} // end of GetTypeID
/***********************************************************************/
@@ -477,39 +477,6 @@ void MYCAT::Reset(void)
{
} // end of Reset
-#if 0
-/***********************************************************************/
-/* This function sets the current database path. */
-/***********************************************************************/
-void MYCAT::SetPath(PGLOBAL g, LPCSTR *datapath, const char *path)
- {
- if (path) {
- size_t len= strlen(path) + (*path != '.' ? 4 : 1);
- char *buf= (char*)PlugSubAlloc(g, NULL, len);
-
- if (PlugIsAbsolutePath(path))
- {
- strcpy(buf, path);
- *datapath= buf;
- return;
- }
-
- if (*path != '.') {
-#if defined(__WIN__)
- char *s= "\\";
-#else // !__WIN__
- char *s= "/";
-#endif // !__WIN__
- strcat(strcat(strcat(strcpy(buf, "."), s), path), s);
- } else
- strcpy(buf, path);
-
- *datapath= buf;
- } // endif path
-
- } // end of SetDataPath
-#endif // 0
-
/***********************************************************************/
/* GetTableDesc: retrieve a table descriptor. */
/* Look for a table descriptor matching the name and type. */
diff --git a/storage/connect/mycat.h b/storage/connect/mycat.h
index a3682b31f17..b6bdd5e5e11 100644
--- a/storage/connect/mycat.h
+++ b/storage/connect/mycat.h
@@ -98,10 +98,7 @@ class MYCAT : public CATALOG {
// Methods
void Reset(void);
-//void SetDataPath(PGLOBAL g, const char *path)
-// {SetPath(g, &DataPath, path);}
bool StoreIndex(PGLOBAL, PTABDEF) {return false;} // Temporary
-// PRELDEF GetTableDesc(PGLOBAL g, LPCSTR name,
PRELDEF GetTableDesc(PGLOBAL g, PTABLE tablep,
LPCSTR type, PRELDEF *prp = NULL);
PTDB GetTable(PGLOBAL g, PTABLE tablep,
@@ -109,9 +106,7 @@ class MYCAT : public CATALOG {
void ClearDB(PGLOBAL g);
protected:
-// PRELDEF MakeTableDesc(PGLOBAL g, LPCSTR name, LPCSTR am);
PRELDEF MakeTableDesc(PGLOBAL g, PTABLE tablep, LPCSTR am);
- //void SetPath(PGLOBAL g, LPCSTR *datapath, const char *path);
// Members
ha_connect *Hc; // The Connect handler
diff --git a/storage/connect/myconn.cpp b/storage/connect/myconn.cpp
index d05254a32a6..e68489faad5 100644
--- a/storage/connect/myconn.cpp
+++ b/storage/connect/myconn.cpp
@@ -135,10 +135,13 @@ PQRYRES MyColumns(PGLOBAL g, THD *thd, const char *host, const char *db,
FLD_KEY, FLD_SCALE, FLD_RADIX, FLD_NULL,
FLD_REM, FLD_NO, FLD_DEFAULT, FLD_EXTRA,
FLD_CHARSET};
- unsigned int length[] = {0, 4, 16, 4, 4, 4, 4, 4, 0, 0, 0, 0, 0};
- char *fld, *colname, *chset, *fmt, v, buf[128], uns[16], zero[16];
+ //unsigned int length[] = {0, 4, 16, 4, 4, 4, 4, 4, 0, 0, 0, 0, 0};
+ unsigned int length[] = {0, 4, 0, 4, 4, 4, 4, 4, 0, 0, 0, 0, 0};
+ PCSZ fmt;
+ char *fld, *colname, *chset, v, buf[128], uns[16], zero[16];
int i, n, nf, ncol = sizeof(buftyp) / sizeof(int);
int len, type, prec, rc, k = 0;
+ bool b;
PQRYRES qrp;
PCOLRES crp;
MYSQLC myc;
@@ -157,7 +160,7 @@ PQRYRES MyColumns(PGLOBAL g, THD *thd, const char *host, const char *db,
/* Do an evaluation of the result size. */
/********************************************************************/
STRING cmd(g, 64, "SHOW FULL COLUMNS FROM ");
- bool b = cmd.Append((PSZ)table);
+ b = cmd.Append((PSZ)table);
b |= cmd.Append(" FROM ");
b |= cmd.Append((PSZ)(db ? db : PlgGetUser(g)->DBName));
@@ -232,11 +235,31 @@ PQRYRES MyColumns(PGLOBAL g, THD *thd, const char *host, const char *db,
fld = myc.GetCharField(1);
prec = 0;
len = 0;
- v = (chset && !strcmp(chset, "binary")) ? 'B' : 0;
+// v = (chset && !strcmp(chset, "binary")) ? 'B' : 0;
+ v = 0;
*uns = 0;
*zero = 0;
-
- switch ((nf = sscanf(fld, "%[^(](%d,%d", buf, &len, &prec))) {
+ b = false;
+
+ if (!strnicmp(fld, "enum", 4)) {
+ char *p2, *p1 = fld + 6; // to skip enum('
+
+ while (true) {
+ p2 = strchr(p1, '\'');
+ len = MY_MAX(len, p2 - p1);
+ if (*++p2 != ',') break;
+ p1 = p2 + 2;
+ } // endwhile
+
+ v = (len > 255) ? 'V' : 0;
+ strcpy(buf, "enum");
+ b = true;
+ } else if (!strnicmp(fld, "set", 3)) {
+ len = (int)strlen(fld) - 2;
+ v = 'V';
+ strcpy(buf, "set");
+ b = true;
+ } else switch ((nf = sscanf(fld, "%[^(](%d,%d", buf, &len, &prec))) {
case 3:
nf = sscanf(fld, "%[^(](%d,%d) %s %s", buf, &len, &prec, uns, zero);
break;
@@ -271,7 +294,7 @@ PQRYRES MyColumns(PGLOBAL g, THD *thd, const char *host, const char *db,
colname, len);
PushWarning(g, thd);
v = 'V';
- } else
+ } else
len = MY_MIN(len, 4096);
} // endif type
@@ -286,6 +309,9 @@ PQRYRES MyColumns(PGLOBAL g, THD *thd, const char *host, const char *db,
default: crp->Nulls[i] = v; break;
} // endswitch nf
+ if (b) // enum or set
+ nf = sscanf(fld, "%s ", buf); // get values
+
crp = crp->Next; // Type_Name
crp->Kdata->SetValue(buf, i);
@@ -849,7 +875,8 @@ MYSQL_FIELD *MYSQLC::GetNextField(void)
/***********************************************************************/
PQRYRES MYSQLC::GetResult(PGLOBAL g, bool pdb)
{
- char *fmt, v;
+ PCSZ fmt;
+ char *name, v;
int n;
bool uns;
PCOLRES *pcrp, crp;
@@ -887,8 +914,9 @@ PQRYRES MYSQLC::GetResult(PGLOBAL g, bool pdb)
memset(crp, 0, sizeof(COLRES));
crp->Ncol = ++qrp->Nbcol;
- crp->Name = (char*)PlugSubAlloc(g, NULL, fld->name_length + 1);
- strcpy(crp->Name, fld->name);
+ name = (char*)PlugSubAlloc(g, NULL, fld->name_length + 1);
+ strcpy(name, fld->name);
+ crp->Name = name;
if ((crp->Type = MYSQLtoPLG(fld->type, &v)) == TYPE_ERROR) {
sprintf(g->Message, "Type %d not supported for column %s",
diff --git a/storage/connect/mysql-test/connect/disabled.def b/storage/connect/mysql-test/connect/disabled.def
index 0e5a5fc64e3..64d7ece3fe1 100644
--- a/storage/connect/mysql-test/connect/disabled.def
+++ b/storage/connect/mysql-test/connect/disabled.def
@@ -9,8 +9,8 @@
# Do not use any TAB characters for whitespace.
#
##############################################################################
-#jdbc : Variable settings depend on machine configuration
-#jdbc_new : Variable settings depend on machine configuration
+jdbc : Variable settings depend on machine configuration
+jdbc_new : Variable settings depend on machine configuration
jdbc_oracle : Variable settings depend on machine configuration
jdbc_postgresql : Variable settings depend on machine configuration
json : TABLE_TYPE = JSON conflicts with the SQL syntax
diff --git a/storage/connect/mysql-test/connect/r/jdbc_new.result b/storage/connect/mysql-test/connect/r/jdbc_new.result
index 5cc4826213d..6f977166598 100644
--- a/storage/connect/mysql-test/connect/r/jdbc_new.result
+++ b/storage/connect/mysql-test/connect/r/jdbc_new.result
@@ -14,9 +14,7 @@ NULL NULL
SET GLOBAL time_zone='+1:00';
CREATE TABLE t1 ENGINE=CONNECT TABLE_TYPE=JDBC
CONNECTION='jdbc:mysql://127.0.0.1:SLAVE_PORT/test?user=unknown';
-SELECT * FROM t1;
-ERROR HY000: Got error 174 'Connecting: java.sql.SQLException: Access denied for user 'unknown'@'localhost' (using password: NO) rc=-2' from CONNECT
-DROP TABLE t1;
+ERROR HY000: Connecting: java.sql.SQLException: Access denied for user 'unknown'@'localhost' (using password: NO) rc=-2
CREATE TABLE t1 ENGINE=CONNECT TABLE_TYPE=JDBC
CONNECTION='jdbc:mysql://127.0.0.1:SLAVE_PORT/unknown?user=root';
ERROR HY000: Connecting: java.sql.SQLSyntaxErrorException: Unknown database 'unknown' rc=-2
diff --git a/storage/connect/mysql-test/connect/t/jdbc_new.test b/storage/connect/mysql-test/connect/t/jdbc_new.test
index 5586cf8c027..86c4ad57c5f 100644
--- a/storage/connect/mysql-test/connect/t/jdbc_new.test
+++ b/storage/connect/mysql-test/connect/t/jdbc_new.test
@@ -24,11 +24,9 @@ SET GLOBAL time_zone='+1:00';
# Bad user name
# Suppress "mysql_real_connect failed:" (printed in _DEBUG build)
--replace_result $SLAVE_MYPORT SLAVE_PORT "mysql_real_connect failed: " ""
+--error ER_UNKNOWN_ERROR
eval CREATE TABLE t1 ENGINE=CONNECT TABLE_TYPE=JDBC
CONNECTION='jdbc:mysql://127.0.0.1:$SLAVE_MYPORT/test?user=unknown';
---error ER_GET_ERRMSG
-SELECT * FROM t1;
-DROP TABLE t1;
# Bad database name
--replace_result $SLAVE_MYPORT SLAVE_PORT "mysql_real_connect failed: " ""
diff --git a/storage/connect/myutil.cpp b/storage/connect/myutil.cpp
index d4416e188c8..c2053f1c832 100644
--- a/storage/connect/myutil.cpp
+++ b/storage/connect/myutil.cpp
@@ -42,7 +42,8 @@ int MYSQLtoPLG(char *typname, char *var)
type = TYPE_INT;
else if (!stricmp(typname, "smallint"))
type = TYPE_SHORT;
- else if (!stricmp(typname, "char") || !stricmp(typname, "varchar"))
+ else if (!stricmp(typname, "char") || !stricmp(typname, "varchar") ||
+ !stricmp(typname, "enum") || !stricmp(typname, "set"))
type = TYPE_STRING;
else if (!stricmp(typname, "double") || !stricmp(typname, "float") ||
!stricmp(typname, "real"))
@@ -87,10 +88,12 @@ int MYSQLtoPLG(char *typname, char *var)
else if (!stricmp(typname, "year"))
*var = 'Y';
- } else if (type == TYPE_STRING && !stricmp(typname, "varchar"))
- // This is to make the difference between CHAR and VARCHAR
- *var = 'V';
- else if (type == TYPE_ERROR && xconv == TPC_SKIP)
+ } else if (type == TYPE_STRING) {
+ if (!stricmp(typname, "varchar"))
+ // This is to make the difference between CHAR and VARCHAR
+ *var = 'V';
+
+ } else if (type == TYPE_ERROR && xconv == TPC_SKIP)
*var = 'K';
else
*var = 0;
@@ -266,9 +269,9 @@ int MYSQLtoPLG(int mytype, char *var)
/************************************************************************/
/* Returns the format corresponding to a MySQL date type number. */
/************************************************************************/
-char *MyDateFmt(int mytype)
+PCSZ MyDateFmt(int mytype)
{
- char *fmt;
+ PCSZ fmt;
switch (mytype) {
case MYSQL_TYPE_TIMESTAMP:
@@ -294,9 +297,9 @@ char *MyDateFmt(int mytype)
/************************************************************************/
/* Returns the format corresponding to a MySQL date type name. */
/************************************************************************/
-char *MyDateFmt(char *typname)
+PCSZ MyDateFmt(char *typname)
{
- char *fmt;
+ PCSZ fmt;
if (!stricmp(typname, "datetime") || !stricmp(typname, "timestamp"))
fmt = "YYYY-MM-DD hh:mm:ss";
diff --git a/storage/connect/myutil.h b/storage/connect/myutil.h
index 9c22cfef118..6991172b39e 100644
--- a/storage/connect/myutil.h
+++ b/storage/connect/myutil.h
@@ -6,9 +6,9 @@
enum enum_field_types PLGtoMYSQL(int type, bool dbf, char var = 0);
const char *PLGtoMYSQLtype(int type, bool dbf, char var = 0);
-int MYSQLtoPLG(char *typname, char *var = NULL);
-int MYSQLtoPLG(int mytype, char *var = NULL);
-char *MyDateFmt(int mytype);
-char *MyDateFmt(char *typname);
+int MYSQLtoPLG(char *typname, char *var = NULL);
+int MYSQLtoPLG(int mytype, char *var = NULL);
+PCSZ MyDateFmt(int mytype);
+PCSZ MyDateFmt(char *typname);
#endif // __MYUTIL__H
diff --git a/storage/connect/odbccat.h b/storage/connect/odbccat.h
index 3b729bcb4bb..05b82e49727 100644
--- a/storage/connect/odbccat.h
+++ b/storage/connect/odbccat.h
@@ -3,11 +3,11 @@
#define DEFAULT_QUERY_TIMEOUT -1 // means do not set
typedef struct odbc_parms {
- char *User; // User connect info
- char *Pwd; // Password connect info
- int Cto; // Connect timeout
- int Qto; // Query timeout
- bool UseCnc; // Use SQLConnect (!SQLDriverConnect)
+ PCSZ User; // User connect info
+ PCSZ Pwd; // Password connect info
+ int Cto; // Connect timeout
+ int Qto; // Query timeout
+ bool UseCnc; // Use SQLConnect (!SQLDriverConnect)
} ODBCPARM, *POPARM;
/***********************************************************************/
@@ -17,9 +17,9 @@ typedef struct odbc_parms {
char *ODBCCheckConnection(PGLOBAL g, char *dsn, int cop);
#endif // PROMPT_OK
PQRYRES ODBCDataSources(PGLOBAL g, int maxres, bool info);
-PQRYRES ODBCColumns(PGLOBAL g, char *dsn, char *db, char *table,
- char *colpat, int maxres, bool info, POPARM sop);
+PQRYRES ODBCColumns(PGLOBAL g, PCSZ dsn, PCSZ db, PCSZ table,
+ PCSZ colpat, int maxres, bool info, POPARM sop);
PQRYRES ODBCSrcCols(PGLOBAL g, char *dsn, char *src, POPARM sop);
-PQRYRES ODBCTables(PGLOBAL g, char *dsn, char *db, char *tabpat,
- char *tabtyp, int maxres, bool info, POPARM sop);
+PQRYRES ODBCTables(PGLOBAL g, PCSZ dsn, PCSZ db, PCSZ tabpat,
+ PCSZ tabtyp, int maxres, bool info, POPARM sop);
PQRYRES ODBCDrivers(PGLOBAL g, int maxres, bool info);
diff --git a/storage/connect/odbconn.cpp b/storage/connect/odbconn.cpp
index 433e392eace..3dbc2d577d5 100644
--- a/storage/connect/odbconn.cpp
+++ b/storage/connect/odbconn.cpp
@@ -1,7 +1,7 @@
-/************ Odbconn C++ Functions Source Code File (.CPP) ************/
-/* Name: ODBCONN.CPP Version 2.2 */
+/***********************************************************************/
+/* Name: ODBCONN.CPP Version 2.3 */
/* */
-/* (C) Copyright to the author Olivier BERTRAND 1998-2016 */
+/* (C) Copyright to the author Olivier BERTRAND 1998-2017 */
/* */
/* This file contains the ODBC connection classes functions. */
/***********************************************************************/
@@ -239,47 +239,43 @@ char *ODBCCheckConnection(PGLOBAL g, char *dsn, int cop)
/***********************************************************************/
/* Allocate the structure used to refer to the result set. */
/***********************************************************************/
-static CATPARM *AllocCatInfo(PGLOBAL g, CATINFO fid, char *db,
- char *tab, PQRYRES qrp)
- {
- size_t i, m, n;
- CATPARM *cap;
+static CATPARM *AllocCatInfo(PGLOBAL g, CATINFO fid, PCSZ db,
+ PCSZ tab, PQRYRES qrp)
+{
+ size_t i, m, n;
+ CATPARM *cap;
#if defined(_DEBUG)
- assert(qrp);
+ assert(qrp);
#endif
- // Save stack and allocation environment and prepare error return
- if (g->jump_level == MAX_JUMP) {
- strcpy(g->Message, MSG(TOO_MANY_JUMPS));
- return NULL;
- } // endif jump_level
-
- if (setjmp(g->jumper[++g->jump_level]) != 0) {
- printf("%s\n", g->Message);
- cap = NULL;
- goto fin;
- } // endif rc
-
- m = (size_t)qrp->Maxres;
- n = (size_t)qrp->Nbcol;
- cap = (CATPARM *)PlugSubAlloc(g, NULL, sizeof(CATPARM));
- memset(cap, 0, sizeof(CATPARM));
- cap->Id = fid;
- cap->Qrp = qrp;
- cap->DB = (PUCHAR)db;
- cap->Tab = (PUCHAR)tab;
- cap->Vlen = (SQLLEN* *)PlugSubAlloc(g, NULL, n * sizeof(SQLLEN *));
-
- for (i = 0; i < n; i++)
- cap->Vlen[i] = (SQLLEN *)PlugSubAlloc(g, NULL, m * sizeof(SQLLEN));
-
- cap->Status = (UWORD *)PlugSubAlloc(g, NULL, m * sizeof(UWORD));
-
- fin:
- g->jump_level--;
- return cap;
- } // end of AllocCatInfo
+ try {
+ m = (size_t)qrp->Maxres;
+ n = (size_t)qrp->Nbcol;
+ cap = (CATPARM *)PlugSubAlloc(g, NULL, sizeof(CATPARM));
+ memset(cap, 0, sizeof(CATPARM));
+ cap->Id = fid;
+ cap->Qrp = qrp;
+ cap->DB = db;
+ cap->Tab = tab;
+ cap->Vlen = (SQLLEN* *)PlugSubAlloc(g, NULL, n * sizeof(SQLLEN *));
+
+ for (i = 0; i < n; i++)
+ cap->Vlen[i] = (SQLLEN *)PlugSubAlloc(g, NULL, m * sizeof(SQLLEN));
+
+ cap->Status = (UWORD *)PlugSubAlloc(g, NULL, m * sizeof(UWORD));
+
+ } catch (int n) {
+ htrc("Exeption %d: %s\n", n, g->Message);
+ cap = NULL;
+ } catch (const char *msg) {
+ strcpy(g->Message, msg);
+ printf("%s\n", g->Message);
+ cap = NULL;
+ } // end catch
+
+ return cap;
+} // end of AllocCatInfo
#if 0
/***********************************************************************/
@@ -309,8 +305,8 @@ static void ResetNullValues(CATPARM *cap)
/* ODBCColumns: constructs the result blocks containing all columns */
/* of an ODBC table that will be retrieved by GetData commands. */
/***********************************************************************/
-PQRYRES ODBCColumns(PGLOBAL g, char *dsn, char *db, char *table,
- char *colpat, int maxres, bool info, POPARM sop)
+PQRYRES ODBCColumns(PGLOBAL g, PCSZ dsn, PCSZ db, PCSZ table,
+ PCSZ colpat, int maxres, bool info, POPARM sop)
{
int buftyp[] = {TYPE_STRING, TYPE_STRING, TYPE_STRING, TYPE_STRING,
TYPE_SHORT, TYPE_STRING, TYPE_INT, TYPE_INT,
@@ -383,7 +379,7 @@ PQRYRES ODBCColumns(PGLOBAL g, char *dsn, char *db, char *table,
if (!(cap = AllocCatInfo(g, CAT_COL, db, table, qrp)))
return NULL;
- cap->Pat = (PUCHAR)colpat;
+ cap->Pat = colpat;
/************************************************************************/
/* Now get the results into blocks. */
@@ -618,8 +614,8 @@ PQRYRES ODBCDataSources(PGLOBAL g, int maxres, bool info)
/* ODBCTables: constructs the result blocks containing all tables in */
/* an ODBC database that will be retrieved by GetData commands. */
/**************************************************************************/
-PQRYRES ODBCTables(PGLOBAL g, char *dsn, char *db, char *tabpat,
- char *tabtyp, int maxres, bool info, POPARM sop)
+PQRYRES ODBCTables(PGLOBAL g, PCSZ dsn, PCSZ db, PCSZ tabpat, PCSZ tabtyp,
+ int maxres, bool info, POPARM sop)
{
int buftyp[] = {TYPE_STRING, TYPE_STRING, TYPE_STRING,
TYPE_STRING, TYPE_STRING};
@@ -681,7 +677,7 @@ PQRYRES ODBCTables(PGLOBAL g, char *dsn, char *db, char *tabpat,
if (!(cap = AllocCatInfo(g, CAT_TAB, db, tabpat, qrp)))
return NULL;
- cap->Pat = (PUCHAR)tabtyp;
+ cap->Pat = tabtyp;
if (trace)
htrc("Getting table results ncol=%d\n", cap->Qrp->Nbcol);
@@ -879,7 +875,7 @@ PQRYRES ODBCStatistics(PGLOBAL g, ODBConn *op, char *dsn, char *pat,
/***********************************************************************/
/* Implementation of DBX class. */
/***********************************************************************/
-DBX::DBX(RETCODE rc, PSZ msg)
+DBX::DBX(RETCODE rc, PCSZ msg)
{
m_RC = rc;
m_Msg = msg;
@@ -1020,7 +1016,7 @@ bool ODBConn::Check(RETCODE rc)
/***********************************************************************/
/* DB exception throw routines. */
/***********************************************************************/
-void ODBConn::ThrowDBX(RETCODE rc, PSZ msg, HSTMT hstmt)
+void ODBConn::ThrowDBX(RETCODE rc, PCSZ msg, HSTMT hstmt)
{
DBX* xp = new(m_G) DBX(rc, msg);
@@ -1030,7 +1026,7 @@ void ODBConn::ThrowDBX(RETCODE rc, PSZ msg, HSTMT hstmt)
} // end of ThrowDBX
-void ODBConn::ThrowDBX(PSZ msg)
+void ODBConn::ThrowDBX(PCSZ msg)
{
DBX* xp = new(m_G) DBX(0, "Error");
@@ -1110,7 +1106,7 @@ void ODBConn::OnSetOptions(HSTMT hstmt)
/***********************************************************************/
/* Open: connect to a data source. */
/***********************************************************************/
-int ODBConn::Open(PSZ ConnectString, POPARM sop, DWORD options)
+int ODBConn::Open(PCSZ ConnectString, POPARM sop, DWORD options)
{
PGLOBAL& g = m_G;
//ASSERT_VALID(this);
@@ -1192,7 +1188,7 @@ void ODBConn::AllocConnect(DWORD Options)
#if defined(_DEBUG)
if (Options & traceSQL) {
- SQLSetConnectOption(m_hdbc, SQL_OPT_TRACEFILE, (DWORD)"xodbc.out");
+ SQLSetConnectOption(m_hdbc, SQL_OPT_TRACEFILE, (SQLULEN)"xodbc.out");
SQLSetConnectOption(m_hdbc, SQL_OPT_TRACE, 1);
} // endif
#endif // _DEBUG
@@ -1215,7 +1211,7 @@ void ODBConn::AllocConnect(DWORD Options)
// Turn on cursor lib support
if (Options & useCursorLib)
- rc = SQLSetConnectOption(m_hdbc, SQL_ODBC_CURSORS, SQL_CUR_USE_ODBC);
+ rc = SQLSetConnectOption(m_hdbc, SQL_ODBC_CURSORS, SQL_CUR_USE_DRIVER);
return;
} // end of AllocConnect
@@ -1921,7 +1917,7 @@ bool ODBConn::ExecSQLcommand(char *sql)
/* GetMetaData: constructs the result blocks containing the */
/* description of all the columns of an SQL command. */
/**************************************************************************/
-PQRYRES ODBConn::GetMetaData(PGLOBAL g, char *dsn, char *src)
+PQRYRES ODBConn::GetMetaData(PGLOBAL g, PCSZ dsn, PCSZ src)
{
static int buftyp[] = {TYPE_STRING, TYPE_SHORT, TYPE_INT,
TYPE_SHORT, TYPE_SHORT};
@@ -2244,7 +2240,7 @@ int ODBConn::GetCatInfo(CATPARM *cap)
void *buffer;
int i, irc;
bool b;
- PSZ fnc = "Unknown";
+ PCSZ fnc = "Unknown";
UWORD n;
SWORD ncol, len, tp;
SQLULEN crow = 0;
@@ -2283,22 +2279,20 @@ int ODBConn::GetCatInfo(CATPARM *cap)
// Now do call the proper ODBC API
switch (cap->Id) {
case CAT_TAB:
-// rc = SQLSetStmtAttr(hstmt, SQL_ATTR_METADATA_ID,
-// (SQLPOINTER)false, 0);
fnc = "SQLTables";
rc = SQLTables(hstmt, name.ptr(2), name.length(2),
name.ptr(1), name.length(1),
name.ptr(0), name.length(0),
- cap->Pat, cap->Pat ? SQL_NTS : 0);
+ (SQLCHAR *)cap->Pat,
+ cap->Pat ? SQL_NTS : 0);
break;
case CAT_COL:
-// rc = SQLSetStmtAttr(hstmt, SQL_ATTR_METADATA_ID,
-// (SQLPOINTER)true, 0);
fnc = "SQLColumns";
rc = SQLColumns(hstmt, name.ptr(2), name.length(2),
name.ptr(1), name.length(1),
name.ptr(0), name.length(0),
- cap->Pat, cap->Pat ? SQL_NTS : 0);
+ (SQLCHAR *)cap->Pat,
+ cap->Pat ? SQL_NTS : 0);
break;
case CAT_KEY:
fnc = "SQLPrimaryKeys";
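The odbconn.cpp hunks above replace the old setjmp/longjmp error path in AllocCatInfo with C++ exception handling: the allocator runs inside a single try block and catches both the integer codes and the C-string messages that the patched CONNECT code throws elsewhere (for example the "throw 1234" in PlugSubAlloc further down). The following is only a minimal, self-contained sketch of that convention; the names Global, sub_alloc and alloc_cat_info are illustrative and not taken from the patch.

#include <cstdio>
#include <cstring>
#include <cstddef>

struct Global { char Message[512]; };

// Illustrative stand-in for PlugSubAlloc(): throws an int code when the
// work area cannot satisfy the request, as the patched allocator does.
static void sub_alloc(Global *g, std::size_t size, std::size_t free_bytes) {
  if (size > free_bytes) {
    std::snprintf(g->Message, sizeof(g->Message),
                  "Not enough memory for request of %zu", size);
    throw 1234;
  }
}

// Same shape as the rewritten AllocCatInfo/PlgAllocResult: one try block,
// two catch clauses, failure reported through the return value.
static bool alloc_cat_info(Global *g) {
  try {
    sub_alloc(g, 64, 16);                 // may throw
    return true;
  } catch (int n) {
    std::printf("Exception %d: %s\n", n, g->Message);
  } catch (const char *msg) {
    std::strcpy(g->Message, msg);
  }
  return false;
}

int main() {
  Global g;
  return alloc_cat_info(&g) ? 0 : 1;
}

The caller-visible contract stays the same as before the patch: a NULL (or false) result plus a diagnostic in g->Message, only without unwinding through g->jumper[].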
diff --git a/storage/connect/odbconn.h b/storage/connect/odbconn.h
index 063985218ec..5abb8354160 100644
--- a/storage/connect/odbconn.h
+++ b/storage/connect/odbconn.h
@@ -54,9 +54,9 @@ enum CATINFO {CAT_TAB = 1, /* SQLTables */
typedef struct tagCATPARM {
CATINFO Id; // Id to indicate function
PQRYRES Qrp; // Result set pointer
- PUCHAR DB; // Database (Schema)
- PUCHAR Tab; // Table name or pattern
- PUCHAR Pat; // Table type or column pattern
+ PCSZ DB; // Database (Schema)
+ PCSZ Tab; // Table name or pattern
+ PCSZ Pat; // Table type or column pattern
SQLLEN* *Vlen; // To array of indicator values
UWORD *Status; // To status block
// For SQLStatistics
@@ -80,23 +80,23 @@ class DBX : public BLOCK {
friend class ODBConn;
// Construction (by ThrowDBX only) -- destruction
protected:
- DBX(RETCODE rc, PSZ msg = NULL);
+ DBX(RETCODE rc, PCSZ msg = NULL);
public:
//virtual ~DBX() {}
//void operator delete(void*, PGLOBAL, void*) {};
// Implementation (use ThrowDBX to create)
RETCODE GetRC(void) {return m_RC;}
- PSZ GetMsg(void) {return m_Msg;}
- const char *GetErrorMessage(int i);
+ PCSZ GetMsg(void) {return m_Msg;}
+ PCSZ GetErrorMessage(int i);
protected:
bool BuildErrorMessage(ODBConn* pdb, HSTMT hstmt = SQL_NULL_HSTMT);
// Attributes
RETCODE m_RC;
- PSZ m_Msg;
- PSZ m_ErrMsg[MAX_NUM_OF_MSG];
+ PCSZ m_Msg;
+ PCSZ m_ErrMsg[MAX_NUM_OF_MSG];
}; // end of DBX class definition
/***********************************************************************/
@@ -119,7 +119,7 @@ class ODBConn : public BLOCK {
noOdbcDialog = 0x0008, // Don't display ODBC Connect dialog
forceOdbcDialog = 0x0010}; // Always display ODBC connect dialog
- int Open(PSZ ConnectString, POPARM sop, DWORD Options = 0);
+ int Open(PCSZ ConnectString, POPARM sop, DWORD Options = 0);
int Rewind(char *sql, ODBCCOL *tocols);
void Close(void);
PQRYRES AllocateResult(PGLOBAL g);
@@ -131,7 +131,7 @@ class ODBConn : public BLOCK {
bool IsOpen(void) {return m_hdbc != SQL_NULL_HDBC;}
PSZ GetStringInfo(ushort infotype);
int GetMaxValue(ushort infotype);
- PSZ GetConnect(void) {return m_Connect;}
+ PCSZ GetConnect(void) {return m_Connect;}
public:
// Operations
@@ -149,7 +149,7 @@ class ODBConn : public BLOCK {
int GetCatInfo(CATPARM *cap);
bool GetDataSources(PQRYRES qrp);
bool GetDrivers(PQRYRES qrp);
- PQRYRES GetMetaData(PGLOBAL g, char *dsn, char *src);
+ PQRYRES GetMetaData(PGLOBAL g, PCSZ dsn, PCSZ src);
public:
// Set special options
@@ -162,8 +162,8 @@ class ODBConn : public BLOCK {
// ODBC operations
protected:
bool Check(RETCODE rc);
- void ThrowDBX(RETCODE rc, PSZ msg, HSTMT hstmt = SQL_NULL_HSTMT);
- void ThrowDBX(PSZ msg);
+ void ThrowDBX(RETCODE rc, PCSZ msg, HSTMT hstmt = SQL_NULL_HSTMT);
+ void ThrowDBX(PCSZ msg);
void AllocConnect(DWORD dwOptions);
void Connect(void);
bool DriverConnect(DWORD Options);
@@ -187,9 +187,9 @@ class ODBConn : public BLOCK {
DWORD m_UpdateOptions;
DWORD m_RowsetSize;
char m_IDQuoteChar[2];
- PSZ m_Connect;
- PSZ m_User;
- PSZ m_Pwd;
+ PCSZ m_Connect;
+ PCSZ m_User;
+ PCSZ m_Pwd;
int m_Catver;
int m_Rows;
int m_Fetch;
diff --git a/storage/connect/os.h b/storage/connect/os.h
index 2dc603fdcda..8056a272990 100644
--- a/storage/connect/os.h
+++ b/storage/connect/os.h
@@ -17,13 +17,16 @@ typedef off_t off64_t;
#if defined(__WIN__)
typedef __int64 BIGINT;
+typedef _Null_terminated_ const char *PCSZ;
#else // !__WIN__
typedef longlong BIGINT;
#define FILE_BEGIN SEEK_SET
#define FILE_CURRENT SEEK_CUR
#define FILE_END SEEK_END
+typedef const char *PCSZ;
#endif // !__WIN__
+
#if !defined(__WIN__)
typedef const void *LPCVOID;
typedef const char *LPCTSTR;
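The os.h hunk above introduces PCSZ as a const char * typedef (carrying the SAL _Null_terminated_ annotation on Windows), and the rest of the patch migrates read-only string parameters and members from PSZ to PCSZ. A small sketch of the effect, with an illustrative show() function that is not part of the patch:

#include <cstdio>

typedef char *PSZ;          // old style: callers must supply writable strings
typedef const char *PCSZ;   // new style: string literals are accepted as-is

// Illustrative only; any read-only string parameter in the patch works the same way.
static void show(PCSZ what, PCSZ value) {
  std::printf("%s=%s\n", what, value ? value : "(null)");
}

int main() {
  show("Type", "DOS");      // fine: literals bind directly to const char *
  // With PSZ parameters this call would need casts (or trip
  // -Wwrite-strings), which is what the PSZ -> PCSZ migration removes.
  return 0;
}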
diff --git a/storage/connect/osutil.c b/storage/connect/osutil.c
index 2e9e120b0c8..66743c7403b 100644
--- a/storage/connect/osutil.c
+++ b/storage/connect/osutil.c
@@ -43,34 +43,6 @@ PSZ strlwr(PSZ p)
return (p);
} /* end of strlwr */
-#if defined(NOT_USED) /*&& !defined(sun) && !defined(LINUX) && !defined(AIX)*/
-/***********************************************************************/
-/* Define stricmp function not existing in some UNIX libraries. */
-/***********************************************************************/
-int stricmp(char *str1, char *str2)
- {
- register int i;
- int n;
- char c;
- char *sup1 = malloc(strlen(str1) + 1);
- char *sup2 = malloc(strlen(str2) + 1);
-
- for (i = 0; c = str1[i]; i++)
- sup1[i] = toupper(c);
-
- sup1[i] = 0;
-
- for (i = 0; c = str2[i]; i++)
- sup2[i] = toupper(c);
-
- sup2[i] = 0;
- n = strcmp(sup1, sup2);
- free(sup1);
- free(sup2);
- return (n);
- } /* end of stricmp */
-#endif /* sun */
-
/***********************************************************************/
/* Define the splitpath function not existing in the UNIX library. */
/***********************************************************************/
@@ -143,13 +115,6 @@ my_bool CloseFileHandle(HANDLE h)
return (close(h)) ? TRUE : FALSE;
} /* end of CloseFileHandle */
-#if 0
-void Sleep(DWORD time)
- {
- //FIXME: TODO
- } /* end of Sleep */
-#endif
-
int GetLastError()
{
return errno;
@@ -210,21 +175,4 @@ BOOL MessageBeep(uint i __attribute__((unused)))
return TRUE;
} /* end of MessageBeep */
-#if 0
-/* This function is ridiculous and should be revisited */
-DWORD FormatMessage(DWORD dwFlags, LPCVOID lpSource, DWORD dwMessageId,
- DWORD dwLanguageId, LPSTR lpBuffer, DWORD nSize, ...)
- {
- char buff[32];
- int n;
-
-//if (dwFlags & FORMAT_MESSAGE_ALLOCATE_BUFFER)
-// return 0; /* means error */
-
- n = sprintf(buff, "Error code: %d", (int) dwMessageId);
- strncpy(lpBuffer, buff, nSize);
- return min(n, nSize);
- } /* end of FormatMessage */
-#endif
-
#endif // UNIX
diff --git a/storage/connect/plgdbsem.h b/storage/connect/plgdbsem.h
index 800b1098d50..2198c44c200 100644
--- a/storage/connect/plgdbsem.h
+++ b/storage/connect/plgdbsem.h
@@ -80,7 +80,8 @@ enum TABTYPE {TAB_UNDEF = 0, /* Table of undefined type */
TAB_DMY = 25, /* DMY Dummy tables NIY */
TAB_JDBC = 26, /* Table accessed via JDBC */
TAB_ZIP = 27, /* ZIP file info table */
- TAB_NIY = 28}; /* Table not implemented yet */
+// TAB_MONGO = 28, /* Table retrieved from MongoDB */
+ TAB_NIY = 30}; /* Table not implemented yet */
enum AMT {TYPE_AM_ERROR = 0, /* Type not defined */
TYPE_AM_ROWID = 1, /* ROWID type (special column) */
@@ -143,7 +144,8 @@ enum AMT {TYPE_AM_ERROR = 0, /* Type not defined */
TYPE_AM_MYX = 193, /* MYSQL EXEC access method type */
TYPE_AM_CAT = 195, /* Catalog access method type no */
TYPE_AM_ZIP = 198, /* ZIP access method type no */
- TYPE_AM_OUT = 200}; /* Output relations (storage) */
+ TYPE_AM_MGO = 199, /* MGO access method type no */
+ TYPE_AM_OUT = 200}; /* Output relations (storage) */
enum RECFM {RECFM_NAF = -2, /* Not a file */
RECFM_OEM = -1, /* OEM file access method */
@@ -553,7 +555,7 @@ typedef struct _qryres {
typedef struct _colres {
PCOLRES Next; /* To next result column */
PCOL Colp; /* To matching column block */
- PSZ Name; /* Column header */
+ PCSZ Name; /* Column header */
PVBLK Kdata; /* Column block of values */
char *Nulls; /* Column null value array */
int Type; /* Internal type */
@@ -583,7 +585,7 @@ void PlugLineDB(PGLOBAL, PSZ, short, void *, uint);
char *SetPath(PGLOBAL g, const char *path);
char *ExtractFromPath(PGLOBAL, char *, char *, OPVAL);
void AddPointer(PTABS, void *);
-PDTP MakeDateFormat(PGLOBAL, PSZ, bool, bool, int);
+PDTP MakeDateFormat(PGLOBAL, PCSZ, bool, bool, int);
int ExtractDate(char *, PDTP, int, int val[6]);
/**************************************************************************/
@@ -615,11 +617,10 @@ DllExport void *PlgDBrealloc(PGLOBAL, void *, MBLOCK&, size_t);
DllExport void NewPointer(PTABS, void *, void *);
//lExport char *GetIni(int n= 0); // Not used anymore
DllExport void SetTrc(void);
-DllExport char *GetListOption(PGLOBAL, const char *, const char *,
- const char *def=NULL);
-DllExport char *GetStringTableOption(PGLOBAL, PTOS, char *, char *);
-DllExport bool GetBooleanTableOption(PGLOBAL, PTOS, char *, bool);
-DllExport int GetIntegerTableOption(PGLOBAL, PTOS, char *, int);
+DllExport PCSZ GetListOption(PGLOBAL, PCSZ, PCSZ, PCSZ def=NULL);
+DllExport PCSZ GetStringTableOption(PGLOBAL, PTOS, PCSZ, PCSZ);
+DllExport bool GetBooleanTableOption(PGLOBAL, PTOS, PCSZ, bool);
+DllExport int GetIntegerTableOption(PGLOBAL, PTOS, PCSZ, int);
#define MSGID_NONE 0
#define MSGID_CANNOT_OPEN 1
diff --git a/storage/connect/plgdbutl.cpp b/storage/connect/plgdbutl.cpp
index 1910cdcdec8..b6f59bac8cf 100644
--- a/storage/connect/plgdbutl.cpp
+++ b/storage/connect/plgdbutl.cpp
@@ -238,88 +238,84 @@ void ptrc(char const *fmt, ...)
PQRYRES PlgAllocResult(PGLOBAL g, int ncol, int maxres, int ids,
int *buftyp, XFLD *fldtyp,
unsigned int *length, bool blank, bool nonull)
- {
+{
char cname[NAM_LEN+1];
int i;
PCOLRES *pcrp, crp;
PQRYRES qrp;
- // Save stack and allocation environment and prepare error return
- if (g->jump_level == MAX_JUMP) {
- strcpy(g->Message, MSG(TOO_MANY_JUMPS));
- return NULL;
- } // endif jump_level
-
- if (setjmp(g->jumper[++g->jump_level]) != 0) {
- printf("%s\n", g->Message);
- qrp = NULL;
- goto fin;
- } // endif rc
-
- /************************************************************************/
- /* Allocate the structure used to contain the result set. */
- /************************************************************************/
- qrp = (PQRYRES)PlugSubAlloc(g, NULL, sizeof(QRYRES));
- pcrp = &qrp->Colresp;
- qrp->Continued = false;
- qrp->Truncated = false;
- qrp->Info = false;
- qrp->Suball = true;
- qrp->Maxres = maxres;
- qrp->Maxsize = 0;
- qrp->Nblin = 0;
- qrp->Nbcol = 0; // will be ncol
- qrp->Cursor = 0;
- qrp->BadLines = 0;
-
- for (i = 0; i < ncol; i++) {
- *pcrp = (PCOLRES)PlugSubAlloc(g, NULL, sizeof(COLRES));
- crp = *pcrp;
- pcrp = &crp->Next;
- memset(crp, 0, sizeof(COLRES));
- crp->Colp = NULL;
- crp->Ncol = ++qrp->Nbcol;
- crp->Type = buftyp[i];
- crp->Length = length[i];
- crp->Clen = GetTypeSize(crp->Type, length[i]);
- crp->Prec = 0;
-
- if (ids > 0) {
+ try {
+ /**********************************************************************/
+ /* Allocate the structure used to contain the result set. */
+ /**********************************************************************/
+ qrp = (PQRYRES)PlugSubAlloc(g, NULL, sizeof(QRYRES));
+ pcrp = &qrp->Colresp;
+ qrp->Continued = false;
+ qrp->Truncated = false;
+ qrp->Info = false;
+ qrp->Suball = true;
+ qrp->Maxres = maxres;
+ qrp->Maxsize = 0;
+ qrp->Nblin = 0;
+ qrp->Nbcol = 0; // will be ncol
+ qrp->Cursor = 0;
+ qrp->BadLines = 0;
+
+ for (i = 0; i < ncol; i++) {
+ *pcrp = (PCOLRES)PlugSubAlloc(g, NULL, sizeof(COLRES));
+ crp = *pcrp;
+ pcrp = &crp->Next;
+ memset(crp, 0, sizeof(COLRES));
+ crp->Colp = NULL;
+ crp->Ncol = ++qrp->Nbcol;
+ crp->Type = buftyp[i];
+ crp->Length = length[i];
+ crp->Clen = GetTypeSize(crp->Type, length[i]);
+ crp->Prec = 0;
+
+ if (ids > 0) {
#if defined(XMSG)
- // Get header from message file
- strncpy(cname, PlugReadMessage(g, ids + crp->Ncol, NULL), NAM_LEN);
- cname[NAM_LEN] = 0; // for truncated long names
+ // Get header from message file
+ strncpy(cname, PlugReadMessage(g, ids + crp->Ncol, NULL), NAM_LEN);
+ cname[NAM_LEN] = 0; // for truncated long names
#else // !XMSG
- GetRcString(ids + crp->Ncol, cname, sizeof(cname));
+ GetRcString(ids + crp->Ncol, cname, sizeof(cname));
#endif // !XMSG
- crp->Name = (PSZ)PlugDup(g, cname);
- } else
- crp->Name = NULL; // Will be set by caller
+ crp->Name = (PSZ)PlugDup(g, cname);
+ } else
+ crp->Name = NULL; // Will be set by caller
- if (fldtyp)
- crp->Fld = fldtyp[i];
- else
- crp->Fld = FLD_NO;
+ if (fldtyp)
+ crp->Fld = fldtyp[i];
+ else
+ crp->Fld = FLD_NO;
- // Allocate the Value Block that will contain data
- if (crp->Length || nonull)
- crp->Kdata = AllocValBlock(g, NULL, crp->Type, maxres,
- crp->Length, 0, true, blank, false);
- else
- crp->Kdata = NULL;
+ // Allocate the Value Block that will contain data
+ if (crp->Length || nonull)
+ crp->Kdata = AllocValBlock(g, NULL, crp->Type, maxres,
+ crp->Length, 0, true, blank, false);
+ else
+ crp->Kdata = NULL;
- if (trace)
- htrc("Column(%d) %s type=%d len=%d value=%p\n",
- crp->Ncol, crp->Name, crp->Type, crp->Length, crp->Kdata);
+ if (trace)
+ htrc("Column(%d) %s type=%d len=%d value=%p\n",
+ crp->Ncol, crp->Name, crp->Type, crp->Length, crp->Kdata);
- } // endfor i
+ } // endfor i
- *pcrp = NULL;
+ *pcrp = NULL;
- fin:
- g->jump_level--;
- return qrp;
- } // end of PlgAllocResult
+ } catch (int n) {
+ htrc("Exception %d: %s\n", n, g->Message);
+ qrp = NULL;
+ } catch (const char *msg) {
+ strcpy(g->Message, msg);
+ htrc("%s\n", g->Message);
+ qrp = NULL;
+ } // end catch
+
+ return qrp;
+} // end of PlgAllocResult
/***********************************************************************/
/* Allocate and initialize the new DB User Block. */
@@ -365,8 +361,8 @@ PCATLG PlgGetCatalog(PGLOBAL g, bool jump)
if (!cat && jump) {
// Raise exception so caller doesn't have to check return value
strcpy(g->Message, MSG(NO_ACTIVE_DB));
- longjmp(g->jumper[g->jump_level], 1);
- } // endif cat
+ throw 1;
+ } // endif cat
return cat;
} // end of PlgGetCatalog
@@ -391,26 +387,27 @@ char *SetPath(PGLOBAL g, const char *path)
char *buf= NULL;
if (path) {
- size_t len= strlen(path) + (*path != '.' ? 4 : 1);
+ size_t len = strlen(path) + (*path != '.' ? 4 : 1);
+
+ if (!(buf = (char*)PlgDBSubAlloc(g, NULL, len)))
+ return NULL;
- buf= (char*)PlugSubAlloc(g, NULL, len);
-
if (PlugIsAbsolutePath(path)) {
- strcpy(buf, path);
- return buf;
- } // endif path
+ strcpy(buf, path);
+ return buf;
+ } // endif path
if (*path != '.') {
#if defined(__WIN__)
- char *s= "\\";
+ const char *s = "\\";
#else // !__WIN__
- char *s= "/";
+ const char *s = "/";
#endif // !__WIN__
strcat(strcat(strcat(strcpy(buf, "."), s), path), s);
} else
strcpy(buf, path);
- } // endif path
+ } // endif path
return buf;
} // end of SetPath
@@ -448,7 +445,7 @@ char *ExtractFromPath(PGLOBAL g, char *pBuff, char *FileName, OPVAL op)
static bool PlugCheckPattern(PGLOBAL g, LPCSTR string, LPCSTR pat)
{
if (pat && strlen(pat)) {
- // This leaves 512 bytes (MAX_STR / 2) for each components
+    // This leaves 2048 bytes (MAX_STR / 2) for each component
LPSTR name = g->Message + MAX_STR / 2;
strlwr(strcpy(name, string));
@@ -476,8 +473,8 @@ bool PlugEvalLike(PGLOBAL g, LPCSTR strg, LPCSTR pat, bool ci)
tp = g->Message;
else if (!(tp = new char[strlen(pat) + strlen(strg) + 2])) {
strcpy(g->Message, MSG(NEW_RETURN_NULL));
- longjmp(g->jumper[g->jump_level], OP_LIKE);
- } /* endif tp */
+ throw OP_LIKE;
+ } /* endif tp */
sp = tp + strlen(pat) + 1;
strlwr(strcpy(tp, pat)); /* Make a lower case copy of pat */
@@ -487,8 +484,8 @@ bool PlugEvalLike(PGLOBAL g, LPCSTR strg, LPCSTR pat, bool ci)
tp = g->Message; /* Use this as temporary work space. */
else if (!(tp = new char[strlen(pat) + 1])) {
strcpy(g->Message, MSG(NEW_RETURN_NULL));
- longjmp(g->jumper[g->jump_level], OP_LIKE);
- } /* endif tp */
+ throw OP_LIKE;
+ } /* endif tp */
strcpy(tp, pat); /* Make a copy to be worked into */
sp = (char*)strg;
@@ -676,7 +673,7 @@ void PlugConvertConstant(PGLOBAL g, void* & value, short& type)
/* format and a Strftime output format. Flag if not 0 indicates that */
/* non quoted blanks are not included in the output format. */
/***********************************************************************/
-PDTP MakeDateFormat(PGLOBAL g, PSZ dfmt, bool in, bool out, int flag)
+PDTP MakeDateFormat(PGLOBAL g, PCSZ dfmt, bool in, bool out, int flag)
{
int rc;
PDTP pdp = (PDTP)PlugSubAlloc(g, NULL, sizeof(DATPAR));
@@ -685,7 +682,7 @@ PDTP MakeDateFormat(PGLOBAL g, PSZ dfmt, bool in, bool out, int flag)
htrc("MakeDateFormat: dfmt=%s\n", dfmt);
memset(pdp, 0, sizeof(DATPAR));
- pdp->Format = pdp->Curp = dfmt;
+ pdp->Format = pdp->Curp = PlugDup(g, dfmt);
pdp->Outsize = 2 * strlen(dfmt) + 1;
if (in)
@@ -727,10 +724,11 @@ PDTP MakeDateFormat(PGLOBAL g, PSZ dfmt, bool in, bool out, int flag)
/***********************************************************************/
int ExtractDate(char *dts, PDTP pdp, int defy, int val[6])
{
- char *fmt, c, d, e, W[8][12];
- int i, k, m, numval;
- int n, y = 30;
- bool b = true; // true for null dates
+ PCSZ fmt;
+ char c, d, e, W[8][12];
+ int i, k, m, numval;
+ int n, y = 30;
+ bool b = true; // true for null dates
if (pdp)
fmt = pdp->InFmt;
@@ -917,7 +915,7 @@ int PlugCloseFile(PGLOBAL g __attribute__((unused)), PFBLOCK fp, bool all)
fp->Memory = NULL;
fp->Mode = MODE_ANY;
- // Passthru
+ // fall through
case TYPE_FB_HANDLE:
if (fp->Handle && fp->Handle != INVALID_HANDLE_VALUE)
if (CloseFileHandle(fp->Handle))
@@ -1255,7 +1253,7 @@ void *PlgDBalloc(PGLOBAL g, void *area, MBLOCK& mp)
// in the area, do allocate from virtual storage.
#if defined(__WIN__)
if (mp.Size >= BIGMEM)
- mp.Memp = VirtualAlloc(NULL, mp.Size, MEM_COMMIT, PAGE_READWRITE);
+ mp.Memp = VirtualAlloc(NULL, mp.Size, MEM_COMMIT | MEM_RESERVE, PAGE_READWRITE);
else
#endif
mp.Memp = malloc(mp.Size);
@@ -1494,7 +1492,7 @@ void PlugPutOut(PGLOBAL g, FILE *f, short t, void *v, uint n)
case TYPE_TABLE:
case TYPE_TDB:
case TYPE_XOBJECT:
- ((PBLOCK)v)->Print(g, f, n-2);
+ ((PBLOCK)v)->Printf(g, f, n-2);
break;
default:
@@ -1520,8 +1518,8 @@ DllExport void NewPointer(PTABS t, void *oldv, void *newv)
PGLOBAL g = t->G;
sprintf(g->Message, "NewPointer: %s", MSG(MEM_ALLOC_ERROR));
- longjmp(g->jumper[g->jump_level], 3);
- } else {
+ throw 3;
+ } else {
tp->Next = t->P1;
tp->Num = 0;
t->P1 = tp;
@@ -1557,15 +1555,15 @@ int FileComp(PGLOBAL g, char *file1, char *file2)
sprintf(g->Message, MSG(OPEN_MODE_ERROR),
"rb", (int)errno, fn[i]);
strcat(strcat(g->Message, ": "), strerror(errno));
- longjmp(g->jumper[g->jump_level], 666);
-// } else
+ throw 666;
+ // } else
// len[i] = 0; // File does not exist yet
} else {
if ((len[i] = _filelength(h[i])) < 0) {
sprintf(g->Message, MSG(FILELEN_ERROR), "_filelength", fn[i]);
- longjmp(g->jumper[g->jump_level], 666);
- } // endif len
+ throw 666;
+ } // endif len
} // endif h
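Besides the longjmp-to-throw conversions, the plgdbutl.cpp hunk makes MakeDateFormat work on a private copy of the format (via PlugDup) now that the parameter is PCSZ, so the parser can advance Curp without touching the caller's string, and it asks VirtualAlloc for MEM_COMMIT | MEM_RESERVE on large blocks. A rough sketch of the copy-before-parse idea; DatePar, dup_string and make_date_format are placeholder names, and dup_string only imitates PlugDup with plain malloc:

#include <cstdlib>
#include <cstring>

struct DatePar {
  const char *Format;   // read-only original text, as in the patched DATPAR
  char       *Curp;     // parser cursor, walks over a private copy
};

// Illustrative stand-in for PlugDup(): copy into memory the caller frees.
static char *dup_string(const char *s) {
  char *p = static_cast<char *>(std::malloc(std::strlen(s) + 1));
  if (p) std::strcpy(p, s);
  return p;
}

static bool make_date_format(DatePar *pdp, const char *dfmt) {
  char *copy = dup_string(dfmt);
  if (!copy) return false;
  pdp->Format = copy;   // both fields point at the duplicate ...
  pdp->Curp   = copy;   // ... so tokenizing Curp cannot clobber dfmt
  return true;
}

int main() {
  DatePar dp{};
  bool ok = make_date_format(&dp, "DD/MM/YYYY");
  std::free(const_cast<char *>(dp.Format));
  return ok ? 0 : 1;
}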
diff --git a/storage/connect/plgxml.cpp b/storage/connect/plgxml.cpp
index eb31e24235b..f3d3a010266 100644
--- a/storage/connect/plgxml.cpp
+++ b/storage/connect/plgxml.cpp
@@ -45,7 +45,7 @@ XMLDOCUMENT::XMLDOCUMENT(char *nsl, char *nsdf, char *enc)
/******************************************************************/
/* Initialize zipped file processing. */
/******************************************************************/
-bool XMLDOCUMENT::InitZip(PGLOBAL g, char *entry)
+bool XMLDOCUMENT::InitZip(PGLOBAL g, PCSZ entry)
{
#if defined(ZIP_SUPPORT)
bool mul = (entry) ? strchr(entry, '*') || strchr(entry, '?') : false;
@@ -173,7 +173,7 @@ void XMLNODE::Delete(PXNODE dnp)
/******************************************************************/
/* Store a string in Buf, enventually reallocating it. */
/******************************************************************/
-char *XMLNODE::BufAlloc(PGLOBAL g, char *p, int n)
+char *XMLNODE::BufAlloc(PGLOBAL g, const char *p, int n)
{
if (Len < n) {
Len = n;
diff --git a/storage/connect/plgxml.h b/storage/connect/plgxml.h
index 6870764c503..82629e4c7db 100644
--- a/storage/connect/plgxml.h
+++ b/storage/connect/plgxml.h
@@ -76,9 +76,9 @@ class XMLDOCUMENT : public BLOCK {
virtual void SetNofree(bool b) = 0;
// Methods
- virtual bool Initialize(PGLOBAL, char *, bool) = 0;
+ virtual bool Initialize(PGLOBAL, PCSZ, bool) = 0;
virtual bool ParseFile(PGLOBAL, char *) = 0;
- virtual bool NewDoc(PGLOBAL, char *) = 0;
+ virtual bool NewDoc(PGLOBAL, PCSZ) = 0;
virtual void AddComment(PGLOBAL, char *) = 0;
virtual PXNODE GetRoot(PGLOBAL) = 0;
virtual PXNODE NewRoot(PGLOBAL, char *) = 0;
@@ -95,7 +95,7 @@ class XMLDOCUMENT : public BLOCK {
// Utility
bool MakeNSlist(PGLOBAL g);
- bool InitZip(PGLOBAL g, char *entry);
+ bool InitZip(PGLOBAL g, PCSZ entry);
char *GetMemDoc(PGLOBAL g, char *fn);
void CloseZip(void);
@@ -131,15 +131,15 @@ class XMLNODE : public BLOCK {
virtual PXLIST SelectNodes(PGLOBAL, char *, PXLIST = NULL) = 0;
virtual PXNODE SelectSingleNode(PGLOBAL, char *, PXNODE = NULL) = 0;
virtual PXATTR GetAttribute(PGLOBAL, char *, PXATTR = NULL) = 0;
- virtual PXNODE AddChildNode(PGLOBAL, char *, PXNODE = NULL) = 0;
+ virtual PXNODE AddChildNode(PGLOBAL, PCSZ, PXNODE = NULL) = 0;
virtual PXATTR AddProperty(PGLOBAL, char *, PXATTR = NULL) = 0;
- virtual void AddText(PGLOBAL, char *) = 0;
+ virtual void AddText(PGLOBAL, PCSZ) = 0;
virtual void DeleteChild(PGLOBAL, PXNODE) = 0;
protected:
PXNODE NewChild(PXNODE ncp);
void Delete(PXNODE dnp);
- char *BufAlloc(PGLOBAL g, char *p, int n);
+ char *BufAlloc(PGLOBAL g, const char *p, int n);
// Constructor
XMLNODE(PXDOC dp);
diff --git a/storage/connect/plugutil.c b/storage/connect/plugutil.cpp
index bfac8a5fd99..f0822526b98 100644
--- a/storage/connect/plugutil.c
+++ b/storage/connect/plugutil.cpp
@@ -139,31 +139,39 @@ PGLOBAL PlugInit(LPCSTR Language, uint worksize)
htrc("PlugInit: Language='%s'\n",
((!Language) ? "Null" : (char*)Language));
- if (!(g = malloc(sizeof(GLOBAL)))) {
- fprintf(stderr, MSG(GLOBAL_ERROR), (int)sizeof(GLOBAL));
- return NULL;
- } else {
+ try {
+ g = new GLOBAL;
+ } catch (...) {
+ fprintf(stderr, MSG(GLOBAL_ERROR), (int)sizeof(GLOBAL));
+ return NULL;
+ } // end try/catch
+
+ //if (!(g = (PGLOBAL)malloc(sizeof(GLOBAL)))) {
+ // fprintf(stderr, MSG(GLOBAL_ERROR), (int)sizeof(GLOBAL));
+ // return NULL;
+ // } else {
g->Sarea = NULL;
g->Createas = 0;
g->Alchecked = 0;
g->Mrr = 0;
- g->Activityp = g->ActivityStart = NULL;
+ g->Activityp = NULL;
g->Xchk = NULL;
g->N = 0;
+ g->More = 0;
strcpy(g->Message, "");
/*******************************************************************/
/* Allocate the main work segment. */
/*******************************************************************/
if (worksize && !(g->Sarea = PlugAllocMem(g, worksize))) {
- char errmsg[256];
+ char errmsg[MAX_STR];
sprintf(errmsg, MSG(WORK_AREA), g->Message);
strcpy(g->Message, errmsg);
g->Sarea_Size = 0;
} else
g->Sarea_Size = worksize;
- } /* endif g */
+ //} /* endif g */
g->jump_level = -1; /* New setting to allow recursive call of Plug */
return(g);
@@ -182,7 +190,7 @@ int PlugExit(PGLOBAL g)
if (g->Sarea)
free(g->Sarea);
- free(g);
+ delete g;
return rc;
} /* end of PlugExit */
@@ -475,7 +483,7 @@ void *PlugAllocMem(PGLOBAL g, uint size)
/***********************************************************************/
BOOL PlugSubSet(PGLOBAL g __attribute__((unused)), void *memp, uint size)
{
- PPOOLHEADER pph = memp;
+ PPOOLHEADER pph = (PPOOLHEADER)memp;
pph->To_Free = (OFFSET)sizeof(POOLHEADER);
pph->FreeBlk = size - pph->To_Free;
@@ -501,7 +509,6 @@ void *PlugSubAlloc(PGLOBAL g, void *memp, size_t size)
/*******************************************************************/
memp = g->Sarea;
-//size = ((size + 3) / 4) * 4; /* Round up size to multiple of 4 */
size = ((size + 7) / 8) * 8; /* Round up size to multiple of 8 */
pph = (PPOOLHEADER)memp;
@@ -510,26 +517,24 @@ void *PlugSubAlloc(PGLOBAL g, void *memp, size_t size)
memp, size, pph->To_Free, pph->FreeBlk);
if ((uint)size > pph->FreeBlk) { /* Not enough memory left in pool */
- char *pname = "Work";
+ PCSZ pname = "Work";
sprintf(g->Message,
"Not enough memory in %s area for request of %u (used=%d free=%d)",
- pname, (uint) size, pph->To_Free, pph->FreeBlk);
+ pname, (uint)size, pph->To_Free, pph->FreeBlk);
if (trace)
htrc("PlugSubAlloc: %s\n", g->Message);
- /* Nothing we can do if longjmp is not initialized. */
- assert(g->jump_level >= 0);
- longjmp(g->jumper[g->jump_level], 1);
+ throw 1234;
} /* endif size OS32 code */
/*********************************************************************/
/* Do the suballocation the simplest way. */
/*********************************************************************/
memp = MakePtr(memp, pph->To_Free); /* Points to suballocated block */
- pph->To_Free += size; /* New offset of pool free block */
- pph->FreeBlk -= size; /* New size of pool free block */
+ pph->To_Free += (OFFSET)size; /* New offset of pool free block */
+ pph->FreeBlk -= (uint)size; /* New size of pool free block */
if (trace > 3)
htrc("Done memp=%p used=%d free=%d\n",
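With plugutil.c renamed to plugutil.cpp, PlugInit now allocates the GLOBAL block with new inside a try/catch (matched by delete in PlugExit) instead of malloc/free, and PlugSubAlloc throws on pool exhaustion rather than longjmp'ing. A minimal sketch of the allocation side only, with placeholder names (plug_init, plug_exit, Global):

#include <cstdio>

struct Global { char Message[512]; int jump_level; };

// Sketch of the PlugInit/PlugExit pairing after the switch to new/delete.
static Global *plug_init() {
  Global *g = nullptr;
  try {
    g = new Global();          // throws std::bad_alloc on failure
  } catch (...) {
    std::fprintf(stderr, "cannot allocate Global\n");
    return nullptr;            // same contract as the old malloc NULL check
  }
  g->jump_level = -1;
  return g;
}

static void plug_exit(Global *g) {
  delete g;                    // matches the new in plug_init
}

int main() {
  Global *g = plug_init();
  plug_exit(g);
  return 0;
}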
diff --git a/storage/connect/preparse.h b/storage/connect/preparse.h
index 2892a958bdd..f16624548fb 100644
--- a/storage/connect/preparse.h
+++ b/storage/connect/preparse.h
@@ -7,14 +7,14 @@
/* Struct of variables used by the date format pre-parser. */
/***********************************************************************/
typedef struct _datpar {
- char *Format; // Points to format to decode
+ const char *Format; // Points to format to decode
char *Curp; // Points to current parsing position
char *InFmt; // Start of input format
char *OutFmt; // Start of output format
int Index[8]; // Indexes of date values
int Num; // Number of values to retrieve
int Flag; // 1: Input, 2: Output, 4: no output blank
- int Outsize; // Size of output buffers
+ int Outsize; // Size of output buffers
} DATPAR, *PDTP;
/***********************************************************************/
diff --git a/storage/connect/reldef.cpp b/storage/connect/reldef.cpp
index c6878737f1d..5284e2ef856 100644
--- a/storage/connect/reldef.cpp
+++ b/storage/connect/reldef.cpp
@@ -88,7 +88,7 @@ PTOS RELDEF::GetTopt(void)
/***********************************************************************/
/* This function sets an integer table information. */
/***********************************************************************/
-bool RELDEF::SetIntCatInfo(PSZ what, int n)
+bool RELDEF::SetIntCatInfo(PCSZ what, int n)
{
return Hc->SetIntegerOption(what, n);
} // end of SetIntCatInfo
@@ -96,7 +96,7 @@ bool RELDEF::SetIntCatInfo(PSZ what, int n)
/***********************************************************************/
/* This function returns integer table information. */
/***********************************************************************/
-int RELDEF::GetIntCatInfo(PSZ what, int idef)
+int RELDEF::GetIntCatInfo(PCSZ what, int idef)
{
int n= Hc->GetIntegerOption(what);
@@ -106,7 +106,7 @@ int RELDEF::GetIntCatInfo(PSZ what, int idef)
/***********************************************************************/
/* This function returns Boolean table information. */
/***********************************************************************/
-bool RELDEF::GetBoolCatInfo(PSZ what, bool bdef)
+bool RELDEF::GetBoolCatInfo(PCSZ what, bool bdef)
{
bool b= Hc->GetBooleanOption(what, bdef);
@@ -116,9 +116,10 @@ bool RELDEF::GetBoolCatInfo(PSZ what, bool bdef)
/***********************************************************************/
/* This function returns size catalog information. */
/***********************************************************************/
-int RELDEF::GetSizeCatInfo(PSZ what, PSZ sdef)
+int RELDEF::GetSizeCatInfo(PCSZ what, PCSZ sdef)
{
- char * s, c;
+ char c;
+ PCSZ s;
int i, n= 0;
if (!(s= Hc->GetStringOption(what)))
@@ -128,6 +129,7 @@ int RELDEF::GetSizeCatInfo(PSZ what, PSZ sdef)
switch (toupper(c)) {
case 'M':
n *= 1024;
+ // fall through
case 'K':
n *= 1024;
} // endswitch c
@@ -138,9 +140,9 @@ int RELDEF::GetSizeCatInfo(PSZ what, PSZ sdef)
/***********************************************************************/
/* This function sets char table information in buf. */
/***********************************************************************/
-int RELDEF::GetCharCatInfo(PSZ what, PSZ sdef, char *buf, int size)
+int RELDEF::GetCharCatInfo(PCSZ what, PCSZ sdef, char *buf, int size)
{
- char *s= Hc->GetStringOption(what);
+ PCSZ s= Hc->GetStringOption(what);
strncpy(buf, ((s) ? s : sdef), size);
return size;
@@ -158,9 +160,10 @@ bool RELDEF::Partitioned(void)
/* This function returns string table information. */
/* Default parameter is "*" to get the handler default. */
/***********************************************************************/
-char *RELDEF::GetStringCatInfo(PGLOBAL g, PSZ what, PSZ sdef)
+char *RELDEF::GetStringCatInfo(PGLOBAL g, PCSZ what, PCSZ sdef)
{
- char *name, *sval= NULL, *s= Hc->GetStringOption(what, sdef);
+ char *sval = NULL;
+ PCSZ name, s= Hc->GetStringOption(what, sdef);
if (s) {
if (!Hc->IsPartitioned() ||
@@ -168,12 +171,12 @@ char *RELDEF::GetStringCatInfo(PGLOBAL g, PSZ what, PSZ sdef)
&& stricmp(what, "connect")))
sval= PlugDup(g, s);
else
- sval= s;
+ sval= (char*)s;
} else if (!stricmp(what, "filename")) {
// Return default file name
- char *ftype= Hc->GetStringOption("Type", "*");
- int i, n;
+ PCSZ ftype= Hc->GetStringOption("Type", "*");
+ int i, n;
if (IsFileType(GetTypeID(ftype))) {
name= Hc->GetPartName();
@@ -251,9 +254,9 @@ bool TABDEF::Define(PGLOBAL g, PCATLG cat,
/***********************************************************************/
/* This function returns the database data path. */
/***********************************************************************/
-PSZ TABDEF::GetPath(void)
+PCSZ TABDEF::GetPath(void)
{
- return (Database) ? (PSZ)Database : (Hc) ? Hc->GetDataPath() : NULL;
+ return (Database) ? Database : (Hc) ? Hc->GetDataPath() : NULL;
} // end of GetPath
/***********************************************************************/
@@ -277,8 +280,13 @@ int TABDEF::GetColCatInfo(PGLOBAL g)
// Take care of the column definitions
i= poff= nof= nlg= 0;
+#if defined(__WIN__)
// Offsets of HTML and DIR tables start from 0, DBF at 1
- loff= (tc == TAB_DBF) ? 1 : (tc == TAB_XML || tc == TAB_DIR) ? -1 : 0;
+ loff= (tc == TAB_DBF) ? 1 : (tc == TAB_XML || tc == TAB_DIR) ? -1 : 0;
+#else // !__WIN__
+ // Offsets of HTML tables start from 0, DIR and DBF at 1
+ loff = (tc == TAB_DBF || tc == TAB_DIR) ? 1 : (tc == TAB_XML) ? -1 : 0;
+#endif // !__WIN__
while (true) {
// Default Offset depends on table type
@@ -610,9 +618,10 @@ bool OEMDEF::DefineAM(PGLOBAL g, LPCSTR, int)
if (!*Module)
Module = Subtype;
- Desc = (char*)PlugSubAlloc(g, NULL, strlen(Module)
- + strlen(Subtype) + 3);
- sprintf(Desc, "%s(%s)", Module, Subtype);
+ char *desc = (char*)PlugSubAlloc(g, NULL, strlen(Module)
+ + strlen(Subtype) + 3);
+ sprintf(desc, "%s(%s)", Module, Subtype);
+ Desc = desc;
return false;
} // end of DefineAM
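RELDEF::GetSizeCatInfo above parses a size option with an optional K or M suffix; the new "// fall through" comment documents that 'M' deliberately drops into the 'K' case, so the value is multiplied by 1024 twice. A standalone sketch of that parsing, with an illustrative parse_size function:

#include <cctype>
#include <cstdio>

// Parse "<number>[K|M]" the way the switch above does: 'M' falls
// through to 'K', so "8M" becomes 8 * 1024 * 1024.
static int parse_size(const char *s) {
  char c = 0;
  int  n = 0;
  if (std::sscanf(s, "%d%c", &n, &c) < 1)
    return 0;
  switch (std::toupper(static_cast<unsigned char>(c))) {
    case 'M':
      n *= 1024;
      // fall through
    case 'K':
      n *= 1024;
  }
  return n;
}

int main() {
  std::printf("%d %d %d\n", parse_size("512"), parse_size("64K"), parse_size("8M"));
  return 0;
}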
diff --git a/storage/connect/reldef.h b/storage/connect/reldef.h
index 52a131dbf3d..8b19a413ade 100644
--- a/storage/connect/reldef.h
+++ b/storage/connect/reldef.h
@@ -42,13 +42,13 @@ class DllExport RELDEF : public BLOCK { // Relation definition block
// Methods
PTOS GetTopt(void);
- bool GetBoolCatInfo(PSZ what, bool bdef);
- bool SetIntCatInfo(PSZ what, int ival);
+ bool GetBoolCatInfo(PCSZ what, bool bdef);
+ bool SetIntCatInfo(PCSZ what, int ival);
bool Partitioned(void);
- int GetIntCatInfo(PSZ what, int idef);
- int GetSizeCatInfo(PSZ what, PSZ sdef);
- int GetCharCatInfo(PSZ what, PSZ sdef, char *buf, int size);
- char *GetStringCatInfo(PGLOBAL g, PSZ what, PSZ sdef);
+ int GetIntCatInfo(PCSZ what, int idef);
+ int GetSizeCatInfo(PCSZ what, PCSZ sdef);
+ int GetCharCatInfo(PCSZ what, PCSZ sdef, char *buf, int size);
+ char *GetStringCatInfo(PGLOBAL g, PCSZ what, PCSZ sdef);
virtual int Indexable(void) {return 0;}
virtual bool Define(PGLOBAL g, PCATLG cat,
LPCSTR name, LPCSTR schema, LPCSTR am) = 0;
@@ -84,7 +84,7 @@ public:
void SetNext(PTABDEF tdfp) {Next = tdfp;}
int GetMultiple(void) {return Multiple;}
int GetPseudo(void) {return Pseudo;}
- PSZ GetPath(void);
+ PCSZ GetPath(void);
//PSZ GetPath(void)
// {return (Database) ? (PSZ)Database : Cat->GetDataPath();}
bool SepIndex(void) {return GetBoolCatInfo("SepIndex", false);}
@@ -105,8 +105,8 @@ public:
protected:
// Members
- PSZ Schema; /* Table schema (for ODBC) */
- PSZ Desc; /* Table description */
+ PCSZ Schema; /* Table schema (for ODBC) */
+ PCSZ Desc; /* Table description */
uint Catfunc; /* Catalog function ID */
int Card; /* (max) number of rows in table */
int Elemt; /* Number of rows in blocks or rowset */
diff --git a/storage/connect/tabcol.cpp b/storage/connect/tabcol.cpp
index fde1baa6317..2740864a69b 100644
--- a/storage/connect/tabcol.cpp
+++ b/storage/connect/tabcol.cpp
@@ -73,7 +73,7 @@ PTABLE XTAB::Link(PTABLE tab2)
/***********************************************************************/
/* Make file output of XTAB contents. */
/***********************************************************************/
-void XTAB::Print(PGLOBAL g, FILE *f, uint n)
+void XTAB::Printf(PGLOBAL g, FILE *f, uint n)
{
char m[64];
@@ -91,7 +91,7 @@ void XTAB::Print(PGLOBAL g, FILE *f, uint n)
/***********************************************************************/
/* Make string output of XTAB contents. */
/***********************************************************************/
-void XTAB::Print(PGLOBAL, char *ps, uint z)
+void XTAB::Prints(PGLOBAL, char *ps, uint z)
{
char buf[128];
int i, n = (int)z - 1;
@@ -134,7 +134,7 @@ bool COLUMN::SetFormat(PGLOBAL g, FORMAT&)
/***********************************************************************/
/* Make file output of COLUMN contents. */
/***********************************************************************/
-void COLUMN::Print(PGLOBAL g, FILE *f, uint n)
+void COLUMN::Printf(PGLOBAL g, FILE *f, uint n)
{
char m[64];
@@ -154,7 +154,7 @@ void COLUMN::Print(PGLOBAL g, FILE *f, uint n)
/***********************************************************************/
/* Make string output of COLUMN contents. */
/***********************************************************************/
-void COLUMN::Print(PGLOBAL, char *ps, uint z)
+void COLUMN::Prints(PGLOBAL, char *ps, uint z)
{
char buf[80];
diff --git a/storage/connect/tabcol.h b/storage/connect/tabcol.h
index 3bfc37e69c1..e4657e2f261 100644
--- a/storage/connect/tabcol.h
+++ b/storage/connect/tabcol.h
@@ -38,8 +38,8 @@ class DllExport XTAB: public BLOCK { // Table Name-Schema-Srcdef block.
// Methods
PTABLE Link(PTABLE);
- void Print(PGLOBAL g, FILE *f, uint n);
- void Print(PGLOBAL g, char *ps, uint z);
+ void Printf(PGLOBAL g, FILE *f, uint n);
+ void Prints(PGLOBAL g, char *ps, uint z);
protected:
// Members
@@ -78,8 +78,8 @@ class DllExport COLUMN: public XOBJECT { // Column Name/Qualifier block.
void SetTo_Col(PCOL colp) {To_Col = colp;}
// Methods
- virtual void Print(PGLOBAL g, FILE *f, uint n);
- virtual void Print(PGLOBAL g, char *ps, uint z);
+ virtual void Printf(PGLOBAL g, FILE *f, uint n);
+ virtual void Prints(PGLOBAL g, char *ps, uint z);
// All methods below should never be used for COLUMN's
virtual void Reset(void) {assert(false);}
virtual bool Compare(PXOB) {assert(false); return false;}
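tabcol.cpp and tabcol.h rename the two Print overloads to Printf (file output) and Prints (string output), consistent with the PlugPutOut change earlier that now calls Printf. Giving the two virtuals distinct names keeps one overload from hiding the other in subclasses that override only one of them. A condensed sketch of the resulting interface; Printable and Column are placeholder class names, not the real BLOCK hierarchy:

#include <cstdio>

class Printable {
public:
  virtual ~Printable() = default;
  virtual void Printf(FILE *f, unsigned n) = 0;     // file output
  virtual void Prints(char *ps, unsigned z) = 0;    // string output
};

class Column : public Printable {
public:
  void Printf(FILE *f, unsigned n) override {
    std::fprintf(f, "%*sCOLUMN\n", static_cast<int>(n), "");
  }
  void Prints(char *ps, unsigned z) override {
    std::snprintf(ps, z, "COLUMN");
  }
};

int main() {
  Column c;
  char buf[16];
  c.Printf(stdout, 2);        // each output form has its own name,
  c.Prints(buf, sizeof buf);  // so overriding one never hides the other
  return 0;
}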
diff --git a/storage/connect/tabdos.cpp b/storage/connect/tabdos.cpp
index d2bb3d7a4af..468966e79d9 100644
--- a/storage/connect/tabdos.cpp
+++ b/storage/connect/tabdos.cpp
@@ -1,11 +1,11 @@
/************* TabDos C++ Program Source Code File (.CPP) **************/
/* PROGRAM NAME: TABDOS */
/* ------------- */
-/* Version 4.9.2 */
+/* Version 4.9.3 */
/* */
/* COPYRIGHT: */
/* ---------- */
-/* (C) Copyright to the author Olivier BERTRAND 1998-2016 */
+/* (C) Copyright to the author Olivier BERTRAND 1998-2017 */
/* */
/* WHAT THIS PROGRAM DOES: */
/* ----------------------- */
@@ -98,6 +98,7 @@ DOSDEF::DOSDEF(void)
Ofn = NULL;
Entry = NULL;
To_Indx = NULL;
+ Pwd = NULL;
Recfm = RECFM_VAR;
Mapped = false;
Zipped = false;
@@ -131,7 +132,7 @@ bool DOSDEF::DefineAM(PGLOBAL g, LPCSTR am, int)
bool map = (am && (*am == 'M' || *am == 'm'));
LPCSTR dfm = (am && (*am == 'F' || *am == 'f')) ? "F"
: (am && (*am == 'B' || *am == 'b')) ? "B"
- : (am && !stricmp(am, "DBF")) ? "D" : "V";
+ : (am && !stricmp(am, "DBF")) ? "D" : "V";
if ((Zipped = GetBoolCatInfo("Zipped", false))) {
Entry = GetStringCatInfo(g, "Entry", NULL);
@@ -139,14 +140,15 @@ bool DOSDEF::DefineAM(PGLOBAL g, LPCSTR am, int)
: false;
Mulentries = GetBoolCatInfo("Mulentries", Mulentries);
Append = GetBoolCatInfo("Append", false);
- }
+ Pwd = GetStringCatInfo(g, "Password", NULL);
+ } // endif Zipped
Desc = Fn = GetStringCatInfo(g, "Filename", NULL);
Ofn = GetStringCatInfo(g, "Optname", Fn);
GetCharCatInfo("Recfm", (PSZ)dfm, buf, sizeof(buf));
Recfm = (toupper(*buf) == 'F') ? RECFM_FIX :
(toupper(*buf) == 'B') ? RECFM_BIN :
- (toupper(*buf) == 'D') ? RECFM_DBF : RECFM_VAR;
+ (toupper(*buf) == 'D') ? RECFM_DBF : RECFM_VAR;
Lrecl = GetIntCatInfo("Lrecl", 0);
if (Recfm != RECFM_DBF)
@@ -180,7 +182,7 @@ bool DOSDEF::DefineAM(PGLOBAL g, LPCSTR am, int)
/***********************************************************************/
bool DOSDEF::GetOptFileName(PGLOBAL g, char *filename)
{
- char *ftype;
+ PCSZ ftype;
switch (Recfm) {
case RECFM_VAR: ftype = ".dop"; break;
@@ -237,9 +239,9 @@ void DOSDEF::RemoveOptValues(PGLOBAL g)
/***********************************************************************/
bool DOSDEF::DeleteIndexFile(PGLOBAL g, PIXDEF pxdf)
{
- char *ftype;
- char filename[_MAX_PATH];
- bool sep, rc = false;
+ PCSZ ftype;
+ char filename[_MAX_PATH];
+ bool sep, rc = false;
if (!To_Indx)
return false; // No index
@@ -351,7 +353,7 @@ PTDB DOSDEF::GetTable(PGLOBAL g, MODE mode)
if (Zipped) {
#if defined(ZIP_SUPPORT)
if (Recfm == RECFM_VAR) {
- if (mode == MODE_READ || mode == MODE_ANY) {
+ if (mode == MODE_READ || mode == MODE_ANY || mode == MODE_ALTER) {
txfp = new(g) UNZFAM(this);
} else if (mode == MODE_INSERT) {
txfp = new(g) ZIPFAM(this);
@@ -362,7 +364,7 @@ PTDB DOSDEF::GetTable(PGLOBAL g, MODE mode)
tdbp = new(g) TDBDOS(this, txfp);
} else {
- if (mode == MODE_READ || mode == MODE_ANY) {
+ if (mode == MODE_READ || mode == MODE_ANY || mode == MODE_ALTER) {
txfp = new(g) UZXFAM(this);
} else if (mode == MODE_INSERT) {
txfp = new(g) ZPXFAM(this);
@@ -1307,6 +1309,7 @@ PBF TDBDOS::InitBlockFilter(PGLOBAL g, PFIL filp)
} // endif !opm
// if opm, pass thru
+ /* fall through */
case OP_IN:
if (filp->GetArgType(0) == TYPE_COLBLK &&
filp->GetArgType(1) == TYPE_ARRAY) {
@@ -1509,8 +1512,8 @@ PBF TDBDOS::CheckBlockFilari(PGLOBAL g, PXOB *arg, int op, bool *cnv)
if (n == 8 && ctype != TYPE_LIST) {
// Should never happen
strcpy(g->Message, "Block opt: bad constant");
- longjmp(g->jumper[g->jump_level], 99);
- } // endif Conv
+ throw 99;
+ } // endif Conv
if (type[0] == 1) {
// Make it always as Column-op-Value
@@ -1790,8 +1793,8 @@ err:
/* Make a dynamic index. */
/***********************************************************************/
bool TDBDOS::InitialyzeIndex(PGLOBAL g, volatile PIXDEF xdp, bool sorted)
- {
- int k, rc;
+{
+ int k;
volatile bool dynamic;
bool brc;
PCOL colp;
@@ -1861,13 +1864,7 @@ bool TDBDOS::InitialyzeIndex(PGLOBAL g, volatile PIXDEF xdp, bool sorted)
} else // Column contains same values as ROWID
kxp = new(g) XXROW(this);
- // Prepare error return
- if (g->jump_level == MAX_JUMP) {
- strcpy(g->Message, MSG(TOO_MANY_JUMPS));
- return true;
- } // endif
-
- if (!(rc = setjmp(g->jumper[++g->jump_level])) != 0) {
+ try {
if (dynamic) {
ResetBlockFilter(g);
kxp->SetDynamic(dynamic);
@@ -1892,12 +1889,17 @@ bool TDBDOS::InitialyzeIndex(PGLOBAL g, volatile PIXDEF xdp, bool sorted)
} // endif brc
- } else
- brc = true;
+ } catch (int n) {
+ if (trace)
+ htrc("Exception %d: %s\n", n, g->Message);
+ brc = true;
+ } catch (const char *msg) {
+ strcpy(g->Message, msg);
+ brc = true;
+ } // end catch
- g->jump_level--;
- return brc;
- } // end of InitialyzeIndex
+ return brc;
+} // end of InitialyzeIndex
/***********************************************************************/
/* DOS GetProgMax: get the max value for progress information. */
@@ -2118,7 +2120,8 @@ bool TDBDOS::OpenDB(PGLOBAL g)
return false;
} // endif use
- if (Mode == MODE_DELETE && !Next && Txfp->GetAmType() != TYPE_AM_DOS) {
+ if (Mode == MODE_DELETE && !Next && Txfp->GetAmType() != TYPE_AM_DOS
+ && Txfp->GetAmType() != TYPE_AM_MGO) {
// Delete all lines. Not handled in MAP or block mode
Txfp = new(g) DOSFAM((PDOSDEF)To_Def);
Txfp->SetTdbp(this);
@@ -2156,16 +2159,18 @@ bool TDBDOS::OpenDB(PGLOBAL g)
To_BlkFil = InitBlockFilter(g, To_Filter);
/*********************************************************************/
- /* Allocate the line buffer plus a null character. */
- /*********************************************************************/
- To_Line = (char*)PlugSubAlloc(g, NULL, Lrecl + 1);
+ /* Lrecl does not include line ending */
+ /*********************************************************************/
+ size_t linelen = Lrecl + ((PDOSDEF)To_Def)->Ending + 1;
+
+ To_Line = (char*)PlugSubAlloc(g, NULL, linelen);
if (Mode == MODE_INSERT) {
// Spaces between fields must be filled with blanks
memset(To_Line, ' ', Lrecl);
To_Line[Lrecl] = '\0';
} else
- memset(To_Line, 0, Lrecl + 1);
+ memset(To_Line, 0, linelen);
if (trace)
htrc("OpenDos: R%hd mode=%d To_Line=%p\n", Tdb_No, Mode, To_Line);
@@ -2304,8 +2309,8 @@ void TDBDOS::CloseDB(PGLOBAL g)
/***********************************************************************/
/* DOSCOL public constructor (also called by MAPCOL). */
/***********************************************************************/
-DOSCOL::DOSCOL(PGLOBAL g, PCOLDEF cdp, PTDB tp, PCOL cp, int i, PSZ am)
- : COLBLK(cdp, tp, i)
+DOSCOL::DOSCOL(PGLOBAL g, PCOLDEF cdp, PTDB tp, PCOL cp, int i, PCSZ am)
+ : COLBLK(cdp, tp, i)
{
char *p;
int prec = Format.Prec;
@@ -2335,7 +2340,7 @@ DOSCOL::DOSCOL(PGLOBAL g, PCOLDEF cdp, PTDB tp, PCOL cp, int i, PSZ am)
Dval = NULL;
Buf = NULL;
- if (txfp->Blocked && Opt && (cdp->GetMin() || cdp->GetDval())) {
+ if (txfp && txfp->Blocked && Opt && (cdp->GetMin() || cdp->GetDval())) {
int nblk = txfp->GetBlock();
Clustered = (cdp->GetXdb2()) ? 2 : 1;
@@ -2514,8 +2519,8 @@ void DOSCOL::ReadColumn(PGLOBAL g)
if (rc == RC_EF)
sprintf(g->Message, MSG(INV_DEF_READ), rc);
- longjmp(g->jumper[g->jump_level], 11);
- } // endif
+ throw 11;
+ } // endif
p = tdbp->To_Line + Deplac;
field = Long;
@@ -2570,8 +2575,8 @@ void DOSCOL::ReadColumn(PGLOBAL g)
break;
default:
sprintf(g->Message, MSG(BAD_RECFM), tdbp->Ftype);
- longjmp(g->jumper[g->jump_level], 34);
- } // endswitch Ftype
+ throw 34;
+ } // endswitch Ftype
// Set null when applicable
if (Nullable)
@@ -2679,8 +2684,8 @@ void DOSCOL::WriteColumn(PGLOBAL g)
break;
default:
sprintf(g->Message, "Invalid field format for column %s", Name);
- longjmp(g->jumper[g->jump_level], 31);
- } // endswitch BufType
+ throw 31;
+ } // endswitch BufType
p2 = Buf;
} else // Standard CONNECT format
@@ -2691,8 +2696,8 @@ void DOSCOL::WriteColumn(PGLOBAL g)
if ((len = strlen(p2)) > field) {
sprintf(g->Message, MSG(VALUE_TOO_LONG), p2, Name, field);
- longjmp(g->jumper[g->jump_level], 31);
- } else if (Dsp)
+ throw 31;
+ } else if (Dsp)
for (i = 0; i < len; i++)
if (p2[i] == '.')
p2[i] = Dsp;
@@ -2866,9 +2871,9 @@ bool DOSCOL::AddDistinctValue(PGLOBAL g)
/***********************************************************************/
/* Make file output of a Dos column descriptor block. */
/***********************************************************************/
-void DOSCOL::Print(PGLOBAL g, FILE *f, uint n)
+void DOSCOL::Printf(PGLOBAL g, FILE *f, uint n)
{
- COLBLK::Print(g, f, n);
+ COLBLK::Printf(g, f, n);
} // end of Print
/* ------------------------------------------------------------------- */
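In TDBDOS::OpenDB above, the record buffer is now sized as Lrecl plus the line-ending length plus a terminating NUL, since Lrecl does not include the line ending. A tiny worked example of that arithmetic, assuming a hypothetical Ending value of 2 for CR/LF files:

#include <cstddef>
#include <cstdio>
#include <cstring>
#include <vector>

int main() {
  const int lrecl  = 80;                 // logical record length
  const int ending = 2;                  // "\r\n" on DOS-style files (assumed)
  const std::size_t linelen = lrecl + ending + 1;   // + 1 for the trailing NUL

  std::vector<char> to_line(linelen);
  std::memset(to_line.data(), ' ', lrecl);          // insert mode: blank-fill
  to_line[lrecl] = '\0';

  std::printf("buffer bytes = %zu\n", linelen);     // 83 for this example
  return 0;
}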
diff --git a/storage/connect/tabdos.h b/storage/connect/tabdos.h
index 922d52ee399..9722cd3777d 100644
--- a/storage/connect/tabdos.h
+++ b/storage/connect/tabdos.h
@@ -39,9 +39,9 @@ class DllExport DOSDEF : public TABDEF { /* Logical table description */
virtual PIXDEF GetIndx(void) {return To_Indx;}
virtual void SetIndx(PIXDEF xdp) {To_Indx = xdp;}
virtual bool IsHuge(void) {return Huge;}
- PSZ GetFn(void) {return Fn;}
- PSZ GetOfn(void) {return Ofn;}
- PSZ GetEntry(void) {return Entry;}
+ PCSZ GetFn(void) {return Fn;}
+ PCSZ GetOfn(void) {return Ofn;}
+ PCSZ GetEntry(void) {return Entry;}
bool GetMul(void) {return Mulentries;}
bool GetAppend(void) {return Append;}
void SetBlock(int block) { Block = block; }
@@ -74,9 +74,10 @@ class DllExport DOSDEF : public TABDEF { /* Logical table description */
//virtual bool Erase(char *filename);
// Members
- PSZ Fn; /* Path/Name of corresponding file */
- PSZ Ofn; /* Base Path/Name of matching index files*/
- PSZ Entry; /* Zip entry name or pattern */
+ PCSZ Fn; /* Path/Name of corresponding file */
+ PCSZ Ofn; /* Base Path/Name of matching index files*/
+ PCSZ Entry; /* Zip entry name or pattern */
+ PCSZ Pwd; /* Zip password */
PIXDEF To_Indx; /* To index definitions blocks */
RECFM Recfm; /* 0:VAR, 1:FIX, 2:BIN, 3:VCT, 6:DBF */
bool Mapped; /* 0: disk file, 1: memory mapped file */
@@ -132,8 +133,8 @@ class DllExport TDBDOS : public TDBASE {
// Implementation
virtual AMT GetAmType(void) {return Txfp->GetAmType();}
- virtual PSZ GetFile(PGLOBAL) {return Txfp->To_File;}
- virtual void SetFile(PGLOBAL, PSZ fn) {Txfp->To_File = fn;}
+ virtual PCSZ GetFile(PGLOBAL) {return Txfp->To_File;}
+ virtual void SetFile(PGLOBAL, PCSZ fn) {Txfp->To_File = fn;}
virtual void SetAbort(bool b) {Abort = b;}
virtual RECFM GetFtype(void) {return Ftype;}
virtual bool SkipHeader(PGLOBAL) {return false;}
@@ -213,7 +214,7 @@ class DllExport DOSCOL : public COLBLK {
friend class TDBFIX;
public:
// Constructors
- DOSCOL(PGLOBAL g, PCOLDEF cdp, PTDB tp, PCOL cp, int i, PSZ am = "DOS");
+ DOSCOL(PGLOBAL g, PCOLDEF cdp, PTDB tp, PCOL cp, int i, PCSZ am = "DOS");
DOSCOL(DOSCOL *colp, PTDB tdbp); // Constructor used in copy process
// Implementation
@@ -231,12 +232,12 @@ class DllExport DOSCOL : public COLBLK {
virtual PVBLK GetDval(void) {return Dval;}
// Methods
- using COLBLK::Print;
+ //using COLBLK::Print;
virtual bool VarSize(void);
virtual bool SetBuffer(PGLOBAL g, PVAL value, bool ok, bool check);
virtual void ReadColumn(PGLOBAL g);
virtual void WriteColumn(PGLOBAL g);
- virtual void Print(PGLOBAL g, FILE *, uint);
+ virtual void Printf(PGLOBAL g, FILE *, uint);
protected:
virtual bool SetMinMax(PGLOBAL g);
diff --git a/storage/connect/tabext.cpp b/storage/connect/tabext.cpp
index b099321eff5..4ef88fec746 100644
--- a/storage/connect/tabext.cpp
+++ b/storage/connect/tabext.cpp
@@ -35,9 +35,9 @@
/***********************************************************************/
/* CONDFIL Constructor. */
/***********************************************************************/
-CONDFIL::CONDFIL(const Item *cond, uint idx, AMT type)
+CONDFIL::CONDFIL(uint idx, AMT type)
{
- Cond = cond;
+//Cond = cond;
Idx = idx;
Type = type;
Op = OP_XX;
@@ -61,7 +61,7 @@ int CONDFIL::Init(PGLOBAL g, PHC hc)
bool h;
if (options)
- alt = GetListOption(g, "Alias", options->oplist, NULL);
+ alt = (char*)GetListOption(g, "Alias", options->oplist, NULL);
while (alt) {
if (!(p = strchr(alt, '='))) {
@@ -267,7 +267,7 @@ TDBEXT::TDBEXT(PTDBEXT tdbp) : TDB(tdbp)
/******************************************************************/
/* Convert an UTF-8 string to latin characters. */
/******************************************************************/
-int TDBEXT::Decode(char *txt, char *buf, size_t n)
+int TDBEXT::Decode(PCSZ txt, char *buf, size_t n)
{
uint dummy_errors;
uint32 len = copy_and_convert(buf, n, &my_charset_latin1,
@@ -285,16 +285,17 @@ int TDBEXT::Decode(char *txt, char *buf, size_t n)
/***********************************************************************/
bool TDBEXT::MakeSQL(PGLOBAL g, bool cnt)
{
- char *schmp = NULL, *catp = NULL, buf[NAM_LEN * 3];
+ PCSZ schmp = NULL;
+ char *catp = NULL, buf[NAM_LEN * 3];
int len;
- bool oom = false, first = true;
+ bool first = true;
PTABLE tablep = To_Table;
PCOL colp;
if (Srcdef) {
if ((catp = strstr(Srcdef, "%s"))) {
char *fil1= 0, *fil2;
- PSZ ph = ((EXTDEF*)To_Def)->Phpos;
+ PCSZ ph = ((EXTDEF*)To_Def)->Phpos;
if (!ph)
ph = (strstr(catp + 2, "%s")) ? const_cast<char*>("WH") :
@@ -342,7 +343,7 @@ bool TDBEXT::MakeSQL(PGLOBAL g, bool cnt)
for (colp = Columns; colp; colp = colp->GetNext())
if (!colp->IsSpecial()) {
if (!first)
- oom |= Query->Append(", ");
+ Query->Append(", ");
else
first = false;
@@ -351,11 +352,11 @@ bool TDBEXT::MakeSQL(PGLOBAL g, bool cnt)
if (Quote) {
// Put column name between identifier quotes in case in contains blanks
- oom |= Query->Append(Quote);
- oom |= Query->Append(buf);
- oom |= Query->Append(Quote);
+ Query->Append(Quote);
+ Query->Append(buf);
+ Query->Append(Quote);
} else
- oom |= Query->Append(buf);
+ Query->Append(buf);
((PEXTCOL)colp)->SetRank(++Ncol);
} // endif colp
@@ -363,13 +364,13 @@ bool TDBEXT::MakeSQL(PGLOBAL g, bool cnt)
} else
// !Columns can occur for queries such that sql count(*) from...
// for which we will count the rows from sql * from...
- oom |= Query->Append('*');
+ Query->Append('*');
} else
// SQL statement used to retrieve the size of the result
- oom |= Query->Append("count(*)");
+ Query->Append("count(*)");
- oom |= Query->Append(" FROM ");
+ Query->Append(" FROM ");
if (Catalog && *Catalog)
catp = Catalog;
@@ -381,17 +382,17 @@ bool TDBEXT::MakeSQL(PGLOBAL g, bool cnt)
schmp = Schema;
if (catp) {
- oom |= Query->Append(catp);
+ Query->Append(catp);
if (schmp) {
- oom |= Query->Append('.');
- oom |= Query->Append(schmp);
+ Query->Append('.');
+ Query->Append(schmp);
} // endif schmp
- oom |= Query->Append('.');
+ Query->Append('.');
} else if (schmp) {
- oom |= Query->Append(schmp);
- oom |= Query->Append('.');
+ Query->Append(schmp);
+ Query->Append('.');
} // endif schmp
// Table name can be encoded in UTF-8
@@ -399,18 +400,18 @@ bool TDBEXT::MakeSQL(PGLOBAL g, bool cnt)
if (Quote) {
// Put table name between identifier quotes in case in contains blanks
- oom |= Query->Append(Quote);
- oom |= Query->Append(buf);
- oom |= Query->Append(Quote);
+ Query->Append(Quote);
+ Query->Append(buf);
+ Query->Append(Quote);
} else
- oom |= Query->Append(buf);
+ Query->Append(buf);
len = Query->GetLength();
if (To_CondFil) {
if (Mode == MODE_READ) {
- oom |= Query->Append(" WHERE ");
- oom |= Query->Append(To_CondFil->Body);
+ Query->Append(" WHERE ");
+ Query->Append(To_CondFil->Body);
len = Query->GetLength() + 1;
} else
len += (strlen(To_CondFil->Body) + 256);
@@ -418,10 +419,11 @@ bool TDBEXT::MakeSQL(PGLOBAL g, bool cnt)
} else
len += ((Mode == MODE_READX) ? 256 : 1);
- if (oom || Query->Resize(len)) {
+ if (Query->IsTruncated()) {
strcpy(g->Message, "MakeSQL: Out of memory");
return true;
- } // endif oom
+ } else
+ Query->Resize(len);
if (trace)
htrc("Query=%s\n", Query->GetStr());
@@ -435,15 +437,17 @@ bool TDBEXT::MakeSQL(PGLOBAL g, bool cnt)
/***********************************************************************/
bool TDBEXT::MakeCommand(PGLOBAL g)
{
- char *p, *stmt, name[68], *body = NULL;
+ PCSZ schmp = NULL;
+ char *p, *stmt, name[132], *body = NULL;
char *qrystr = (char*)PlugSubAlloc(g, NULL, strlen(Qrystr) + 1);
bool qtd = Quoted > 0;
+ char q = qtd ? *Quote : ' ';
int i = 0, k = 0;
// Make a lower case copy of the originale query and change
// back ticks to the data source identifier quoting character
do {
- qrystr[i] = (Qrystr[i] == '`') ? *Quote : tolower(Qrystr[i]);
+ qrystr[i] = (Qrystr[i] == '`') ? q : tolower(Qrystr[i]);
} while (Qrystr[i++]);
if (To_CondFil && (p = strstr(qrystr, " where "))) {
@@ -460,27 +464,50 @@ bool TDBEXT::MakeCommand(PGLOBAL g)
strlwr(strcat(strcat(strcpy(name, " "), Name), " "));
if (strstr(" update delete low_priority ignore quick from ", name)) {
- strlwr(strcat(strcat(strcpy(name, Quote), Name), Quote));
- k += 2;
+ if (Quote) {
+ strlwr(strcat(strcat(strcpy(name, Quote), Name), Quote));
+ k += 2;
+ } else {
+ strcpy(g->Message, "Quoted must be specified");
+ return true;
+ } // endif Quote
+
} else
strlwr(strcpy(name, Name)); // Not a keyword
if ((p = strstr(qrystr, name))) {
for (i = 0; i < p - qrystr; i++)
- stmt[i] = (Qrystr[i] == '`') ? *Quote : Qrystr[i];
+ stmt[i] = (Qrystr[i] == '`') ? q : Qrystr[i];
stmt[i] = 0;
+
k += i + (int)strlen(Name);
- if (qtd && *(p - 1) == ' ')
+ if (Schema && *Schema)
+ schmp = Schema;
+
+ if (qtd && *(p - 1) == ' ') {
+ if (schmp)
+ strcat(strcat(stmt, schmp), ".");
+
strcat(strcat(strcat(stmt, Quote), TableName), Quote);
- else
+ } else {
+ if (schmp) {
+ if (qtd && *(p - 1) != ' ') {
+ stmt[i - 1] = 0;
+ strcat(strcat(strcat(stmt, schmp), "."), Quote);
+ } else
+ strcat(strcat(stmt, schmp), ".");
+
+ } // endif schmp
+
strcat(stmt, TableName);
+ } // endif's
i = (int)strlen(stmt);
do {
- stmt[i++] = (Qrystr[k] == '`') ? *Quote : Qrystr[k];
+ stmt[i++] = (Qrystr[k] == '`') ? q : Qrystr[k];
} while (Qrystr[k++]);
if (body)
@@ -539,7 +566,7 @@ int TDBEXT::GetProgMax(PGLOBAL g)
/***********************************************************************/
/* EXTCOL public constructor. */
/***********************************************************************/
-EXTCOL::EXTCOL(PCOLDEF cdp, PTDB tdbp, PCOL cprec, int i, PSZ am)
+EXTCOL::EXTCOL(PCOLDEF cdp, PTDB tdbp, PCOL cprec, int i, PCSZ am)
: COLBLK(cdp, tdbp, i)
{
if (cprec) {
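The hunks above drop the per-call oom |= Query->Append(...) bookkeeping and instead ask the string object once, after the statement is built, whether anything was lost. A minimal standalone sketch of that pattern, assuming a hypothetical fixed-capacity SBuf in place of the engine's STRING class:

#include <cstdio>
#include <cstring>

// Hypothetical fixed-capacity builder: Append() never reports failure at the
// call site, it just records that the result was truncated.
class SBuf {
  char buf[32];
  size_t len;
  bool trunc;
public:
  SBuf() : len(0), trunc(false) { buf[0] = '\0'; }
  void Append(const char *s) {
    size_t n = strlen(s);
    if (len + n >= sizeof(buf)) { trunc = true; n = sizeof(buf) - 1 - len; }
    memcpy(buf + len, s, n);
    len += n;
    buf[len] = '\0';
  }
  void Append(char c) { char s[2] = {c, 0}; Append(s); }
  bool IsTruncated() const { return trunc; }
  const char *GetStr() const { return buf; }
};

int main() {
  SBuf q;
  q.Append("SELECT * FROM ");
  q.Append('`');
  q.Append("some_rather_long_table_name");
  q.Append('`');
  if (q.IsTruncated())      // one check instead of oom |= on every Append
    fprintf(stderr, "MakeSQL-style error: buffer too small\n");
  else
    printf("%s\n", q.GetStr());
  return 0;
}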
diff --git a/storage/connect/tabext.h b/storage/connect/tabext.h
index 2ef20c89f2c..497b6074d48 100644
--- a/storage/connect/tabext.h
+++ b/storage/connect/tabext.h
@@ -7,7 +7,7 @@
/***********************************************************************/
#ifndef __TABEXT_H
-#define __TABEXTF_H
+#define __TABEXT_H
#include "reldef.h"
@@ -28,14 +28,14 @@ class ALIAS : public BLOCK {
class CONDFIL : public BLOCK {
public:
// Constructor
- CONDFIL(const Item *cond, uint idx, AMT type);
+ CONDFIL(uint idx, AMT type);
// Functions
int Init(PGLOBAL g, PHC hc);
const char *Chk(const char *cln, bool *h);
// Members
- const Item *Cond;
+//const Item *Cond;
AMT Type;
uint Idx;
OPVAL Op;
@@ -60,10 +60,10 @@ public:
// Implementation
virtual const char *GetType(void) { return "EXT"; }
- inline PSZ GetTabname(void) { return Tabname; }
- inline PSZ GetTabschema(void) { return Tabschema; }
- inline PSZ GetUsername(void) { return Username; };
- inline PSZ GetPassword(void) { return Password; };
+ inline PCSZ GetTabname(void) { return Tabname; }
+ inline PCSZ GetTabschema(void) { return Tabschema; }
+ inline PCSZ GetUsername(void) { return Username; };
+ inline PCSZ GetPassword(void) { return Password; };
inline PSZ GetTabcat(void) { return Tabcat; }
inline PSZ GetSrcdef(void) { return Srcdef; }
inline char GetSep(void) { return (Sep) ? *Sep : 0; }
@@ -76,10 +76,10 @@ public:
protected:
// Members
- PSZ Tabname; /* External table name */
- PSZ Tabschema; /* External table schema */
- PSZ Username; /* User connect name */
- PSZ Password; /* Password connect info */
+ PCSZ Tabname; /* External table name */
+ PCSZ Tabschema; /* External table schema */
+ PCSZ Username; /* User connect name */
+ PCSZ Password; /* Password connect info */
PSZ Tabcat; /* External table catalog */
PSZ Tabtyp; /* Catalog table type */
PSZ Colpat; /* Catalog column pattern */
@@ -115,7 +115,7 @@ public:
virtual bool IsRemote(void) { return true; }
// Methods
- virtual PSZ GetServer(void) { return "Remote"; }
+ virtual PCSZ GetServer(void) { return "Remote"; }
virtual int GetRecpos(void);
// Database routines
@@ -127,19 +127,19 @@ protected:
virtual bool MakeSQL(PGLOBAL g, bool cnt);
//virtual bool MakeInsert(PGLOBAL g);
virtual bool MakeCommand(PGLOBAL g);
- int Decode(char *utf, char *buf, size_t n);
+ int Decode(PCSZ utf, char *buf, size_t n);
// Members
PQRYRES Qrp; // Points to storage result
PSTRG Query; // Constructed SQL query
- char *TableName; // Points to ODBC table name
- char *Schema; // Points to ODBC table Schema
- char *User; // User connect info
- char *Pwd; // Password connect info
+ PCSZ TableName; // Points to ODBC table name
+ PCSZ Schema; // Points to ODBC table Schema
+ PCSZ User; // User connect info
+ PCSZ Pwd; // Password connect info
char *Catalog; // Points to ODBC table Catalog
char *Srcdef; // The source table SQL definition
char *Count; // Points to count(*) SQL statement
- //char *Where; // Points to local where clause
+ //char *Where; // Points to local where clause
char *Quote; // The identifier quoting character
char *MulConn; // Used for multiple ODBC tables
char *DBQ; // The address part of Connect string
@@ -170,7 +170,7 @@ class DllExport EXTCOL : public COLBLK {
friend class TDBEXT;
public:
// Constructor
- EXTCOL(PCOLDEF cdp, PTDB tdbp, PCOL cprec, int i, PSZ am);
+ EXTCOL(PCOLDEF cdp, PTDB tdbp, PCOL cprec, int i, PCSZ am);
EXTCOL(PEXTCOL colp, PTDB tdbp); // Constructor used in copy process
// Implementation
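The header above migrates table name, schema, user and password members (and their getters) from PSZ to PCSZ, so read-only strings such as literals or catalog values can be stored without casts. A standalone sketch of the idea; the typedefs follow the usual CONNECT convention (PSZ writable, PCSZ read-only), but the class is hypothetical:

#include <cstdio>

typedef char *PSZ;         // writable string, as in the old declarations
typedef const char *PCSZ;  // read-only string, what the patch switches to

class ExtTab {
  PCSZ Tabname;            // may point at a literal or a catalog-owned string
public:
  explicit ExtTab(PCSZ name) : Tabname(name) {}
  PCSZ GetTabname() const { return Tabname; }   // callers cannot modify it
};

int main() {
  ExtTab t("customers");   // no char* <- string-literal conversion needed
  printf("table: %s\n", t.GetTabname());
  return 0;
}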
diff --git a/storage/connect/tabfix.cpp b/storage/connect/tabfix.cpp
index bf123cd36c8..a78d5861e53 100644
--- a/storage/connect/tabfix.cpp
+++ b/storage/connect/tabfix.cpp
@@ -1,11 +1,11 @@
/************* TabFix C++ Program Source Code File (.CPP) **************/
/* PROGRAM NAME: TABFIX */
/* ------------- */
-/* Version 4.9.1 */
+/* Version 4.9.2 */
/* */
/* COPYRIGHT: */
/* ---------- */
-/* (C) Copyright to the author Olivier BERTRAND 1998-2016 */
+/* (C) Copyright to the author Olivier BERTRAND 1998-2017 */
/* */
/* WHAT THIS PROGRAM DOES: */
/* ----------------------- */
@@ -373,7 +373,7 @@ int TDBFIX::WriteDB(PGLOBAL g)
/***********************************************************************/
/* BINCOL public constructor. */
/***********************************************************************/
-BINCOL::BINCOL(PGLOBAL g, PCOLDEF cdp, PTDB tp, PCOL cp, int i, PSZ am)
+BINCOL::BINCOL(PGLOBAL g, PCOLDEF cdp, PTDB tp, PCOL cp, int i, PCSZ am)
: DOSCOL(g, cdp, tp, cp, i, am)
{
char c, *fmt = cdp->GetFmt();
@@ -411,8 +411,8 @@ BINCOL::BINCOL(PGLOBAL g, PCOLDEF cdp, PTDB tp, PCOL cp, int i, PSZ am)
case 'D': M = sizeof(double); break;
default:
sprintf(g->Message, MSG(BAD_BIN_FMT), Fmt, Name);
- longjmp(g->jumper[g->jump_level], 11);
- } // endswitch Fmt
+ throw 11;
+ } // endswitch Fmt
} else if (IsTypeChar(Buf_Type))
Eds = 0;
@@ -486,8 +486,8 @@ void BINCOL::ReadColumn(PGLOBAL g)
if (rc == RC_EF)
sprintf(g->Message, MSG(INV_DEF_READ), rc);
- longjmp(g->jumper[g->jump_level], 11);
- } // endif
+ throw 11;
+ } // endif
p = tdbp->To_Line + Deplac;
@@ -545,8 +545,8 @@ void BINCOL::ReadColumn(PGLOBAL g)
break;
default:
sprintf(g->Message, MSG(BAD_BIN_FMT), Fmt, Name);
- longjmp(g->jumper[g->jump_level], 11);
- } // endswitch Fmt
+ throw 11;
+ } // endswitch Fmt
// Set null when applicable
if (Nullable)
@@ -595,8 +595,8 @@ void BINCOL::WriteColumn(PGLOBAL g)
} else if (Value->GetBinValue(p, Long, Status)) {
sprintf(g->Message, MSG(BIN_F_TOO_LONG),
Name, Value->GetSize(), Long);
- longjmp(g->jumper[g->jump_level], 31);
- } // endif p
+ throw 31;
+ } // endif p
break;
case 'S': // Short integer
@@ -604,8 +604,8 @@ void BINCOL::WriteColumn(PGLOBAL g)
if (n > 32767LL || n < -32768LL) {
sprintf(g->Message, MSG(VALUE_TOO_BIG), n, Name);
- longjmp(g->jumper[g->jump_level], 31);
- } else if (Status)
+ throw 31;
+ } else if (Status)
Value->GetValueNonAligned<short>(p, (short)n);
break;
@@ -614,8 +614,8 @@ void BINCOL::WriteColumn(PGLOBAL g)
if (n > 255LL || n < -256LL) {
sprintf(g->Message, MSG(VALUE_TOO_BIG), n, Name);
- longjmp(g->jumper[g->jump_level], 31);
- } else if (Status)
+ throw 31;
+ } else if (Status)
*p = (char)n;
break;
@@ -624,8 +624,8 @@ void BINCOL::WriteColumn(PGLOBAL g)
if (n > INT_MAX || n < INT_MIN) {
sprintf(g->Message, MSG(VALUE_TOO_BIG), n, Name);
- longjmp(g->jumper[g->jump_level], 31);
- } else if (Status)
+ throw 31;
+ } else if (Status)
Value->GetValueNonAligned<int>(p, (int)n);
break;
@@ -648,8 +648,8 @@ void BINCOL::WriteColumn(PGLOBAL g)
case 'C': // Characters
if ((n = (signed)strlen(Value->GetCharString(Buf))) > Long) {
sprintf(g->Message, MSG(BIN_F_TOO_LONG), Name, (int) n, Long);
- longjmp(g->jumper[g->jump_level], 31);
- } // endif n
+ throw 31;
+ } // endif n
if (Status) {
s = Value->GetCharString(Buf);
@@ -660,8 +660,8 @@ void BINCOL::WriteColumn(PGLOBAL g)
break;
default:
sprintf(g->Message, MSG(BAD_BIN_FMT), Fmt, Name);
- longjmp(g->jumper[g->jump_level], 11);
- } // endswitch Fmt
+ throw 31;
+ } // endswitch Fmt
if (Eds && Status) {
p = tdbp->To_Line + Deplac;
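Here and in the later files, longjmp(g->jumper[g->jump_level], n) becomes a plain C++ throw of the same integer code, so the error unwinds the stack (running destructors) instead of jumping over it. A minimal standalone sketch of that control flow, not the CONNECT jump-buffer machinery:

#include <cstdio>

// A column writer that signals a fatal format error with an integer code,
// the same way BINCOL::WriteColumn now does with "throw 31".
static void WriteColumn(char fmt) {
  if (fmt != 'C' && fmt != 'I')
    throw 31;                    // was: longjmp(g->jumper[g->jump_level], 31)
  printf("wrote column with format %c\n", fmt);
}

int main() {
  try {
    WriteColumn('C');
    WriteColumn('X');            // takes the error path
  } catch (int rc) {             // the caller catches the numeric code
    fprintf(stderr, "write failed, rc=%d\n", rc);
    return 1;
  }
  return 0;
}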
diff --git a/storage/connect/tabfix.h b/storage/connect/tabfix.h
index 4b9f9689992..53c0af1c422 100644
--- a/storage/connect/tabfix.h
+++ b/storage/connect/tabfix.h
@@ -65,7 +65,7 @@ class DllExport BINCOL : public DOSCOL {
friend class TDBFIX;
public:
// Constructors
- BINCOL(PGLOBAL g, PCOLDEF cdp, PTDB tp, PCOL cp, int i, PSZ am = "BIN");
+ BINCOL(PGLOBAL g, PCOLDEF cdp, PTDB tp, PCOL cp, int i, PCSZ am = "BIN");
BINCOL(BINCOL *colp, PTDB tdbp); // Constructor used in copy process
// Implementation
@@ -108,7 +108,7 @@ class TDBDCL : public TDBCAT {
{return DBFColumns(g, ((PTABDEF)To_Def)->GetPath(), Fn, false);}
// Members
- char *Fn; // The DBF file (path) name
+ PCSZ Fn; // The DBF file (path) name
}; // end of class TDBOCL
diff --git a/storage/connect/tabfmt.cpp b/storage/connect/tabfmt.cpp
index 1a75d572ecd..13fbe7d33dd 100644
--- a/storage/connect/tabfmt.cpp
+++ b/storage/connect/tabfmt.cpp
@@ -81,7 +81,7 @@ USETEMP UseTemp(void);
/* of types (TYPE_STRING < TYPE_DOUBLE < TYPE_INT) (1 < 2 < 7). */
/* If these values are changed, this will have to be revisited. */
/***********************************************************************/
-PQRYRES CSVColumns(PGLOBAL g, char *dp, PTOS topt, bool info)
+PQRYRES CSVColumns(PGLOBAL g, PCSZ dp, PTOS topt, bool info)
{
static int buftyp[] = {TYPE_STRING, TYPE_SHORT, TYPE_STRING,
TYPE_INT, TYPE_INT, TYPE_SHORT};
@@ -153,7 +153,7 @@ PQRYRES CSVColumns(PGLOBAL g, char *dp, PTOS topt, bool info)
tdp->Lrecl = 4096;
tdp->Multiple = GetIntegerTableOption(g, topt, "Multiple", 0);
- p = GetStringTableOption(g, topt, "Separator", ",");
+ p = (char*)GetStringTableOption(g, topt, "Separator", ",");
tdp->Sep = (strlen(p) == 2 && p[0] == '\\' && p[1] == 't') ? '\t' : *p;
#if defined(__WIN__)
@@ -167,7 +167,7 @@ PQRYRES CSVColumns(PGLOBAL g, char *dp, PTOS topt, bool info)
sep = tdp->Sep;
tdp->Quoted = GetIntegerTableOption(g, topt, "Quoted", -1);
- p = GetStringTableOption(g, topt, "Qchar", "");
+ p = (char*)GetStringTableOption(g, topt, "Qchar", "");
tdp->Qot = *p;
if (tdp->Qot && tdp->Quoted < 0)
@@ -517,7 +517,7 @@ PTDB CSVDEF::GetTable(PGLOBAL g, MODE mode)
/*******************************************************************/
if (Zipped) {
#if defined(ZIP_SUPPORT)
- if (mode == MODE_READ || mode == MODE_ANY) {
+ if (mode == MODE_READ || mode == MODE_ANY || mode == MODE_ALTER) {
txfp = new(g) UNZFAM(this);
} else if (mode == MODE_INSERT) {
txfp = new(g) ZIPFAM(this);
@@ -1435,8 +1435,8 @@ void CSVCOL::ReadColumn(PGLOBAL g)
if (rc == RC_EF)
sprintf(g->Message, MSG(INV_DEF_READ), rc);
- longjmp(g->jumper[g->jump_level], 34);
- } // endif
+ throw 34;
+ } // endif
if (tdbp->Mode != MODE_UPDATE) {
int colen = Long; // Column length
@@ -1453,8 +1453,8 @@ void CSVCOL::ReadColumn(PGLOBAL g)
Long = colen; // Restore column length
sprintf(g->Message, MSG(FLD_TOO_LNG_FOR),
Fldnum + 1, Name, To_Tdb->RowNumber(g), tdbp->GetFile(g));
- longjmp(g->jumper[g->jump_level], 34);
- } // endif Long
+ throw 34;
+ } // endif Long
// Now do the reading
DOSCOL::ReadColumn(g);
@@ -1516,8 +1516,8 @@ void CSVCOL::WriteColumn(PGLOBAL g)
if ((signed)strlen(p) > flen) {
sprintf(g->Message, MSG(BAD_FLD_LENGTH), Name, p, flen,
tdbp->RowNumber(g), tdbp->GetFile(g));
- longjmp(g->jumper[g->jump_level], 34);
- } else if (Dsp)
+ throw 34;
+ } else if (Dsp)
for (int i = 0; p[i]; i++)
if (p[i] == '.')
p[i] = Dsp;
@@ -1532,8 +1532,8 @@ void CSVCOL::WriteColumn(PGLOBAL g)
if (Fldnum < 0) {
// This can happen for wrong offset value in XDB files
sprintf(g->Message, MSG(BAD_FIELD_RANK), Fldnum + 1, Name);
- longjmp(g->jumper[g->jump_level], 34);
- } else
+ throw 34;
+ } else
strncpy(tdbp->Field[Fldnum], p, flen);
if (trace > 1)
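CSVColumns above reads the Separator option as a string and maps the two-character escape \t to a real tab, otherwise taking the first character. A standalone sketch of that small rule (the function name is made up):

#include <cstdio>
#include <cstring>

// Map a user-supplied separator option to one character:
// backslash followed by 't' means TAB, anything else keeps its first char.
static char GetSepChar(const char *p) {
  return (strlen(p) == 2 && p[0] == '\\' && p[1] == 't') ? '\t' : *p;
}

int main() {
  printf("[%c]\n", GetSepChar(";"));    // prints [;]
  printf("[%d]\n", GetSepChar("\\t"));  // prints [9], the TAB code
  return 0;
}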
diff --git a/storage/connect/tabfmt.h b/storage/connect/tabfmt.h
index e5655435be7..396bba568ff 100644
--- a/storage/connect/tabfmt.h
+++ b/storage/connect/tabfmt.h
@@ -13,7 +13,7 @@ typedef class TDBFMT *PTDBFMT;
/***********************************************************************/
/* Functions used externally. */
/***********************************************************************/
-PQRYRES CSVColumns(PGLOBAL g, char *dp, PTOS topt, bool info);
+PQRYRES CSVColumns(PGLOBAL g, PCSZ dp, PTOS topt, bool info);
/***********************************************************************/
/* CSV table. */
@@ -21,7 +21,7 @@ PQRYRES CSVColumns(PGLOBAL g, char *dp, PTOS topt, bool info);
class DllExport CSVDEF : public DOSDEF { /* Logical table description */
friend class TDBCSV;
friend class TDBCCL;
- friend PQRYRES CSVColumns(PGLOBAL, char *, PTOS, bool);
+ friend PQRYRES CSVColumns(PGLOBAL, PCSZ, PTOS, bool);
public:
// Constructor
CSVDEF(void);
@@ -53,7 +53,7 @@ public:
class DllExport TDBCSV : public TDBDOS {
friend class CSVCOL;
friend class MAPFAM;
- friend PQRYRES CSVColumns(PGLOBAL, char *, PTOS, bool);
+ friend PQRYRES CSVColumns(PGLOBAL, PCSZ, PTOS, bool);
public:
// Constructor
TDBCSV(PCSVDEF tdp, PTXF txfp);
diff --git a/storage/connect/tabjdbc.cpp b/storage/connect/tabjdbc.cpp
index 5431e35e0ec..7c82a2fc138 100644
--- a/storage/connect/tabjdbc.cpp
+++ b/storage/connect/tabjdbc.cpp
@@ -227,41 +227,8 @@ bool JDBCDEF::DefineAM(PGLOBAL g, LPCSTR am, int poff)
if (rc == RC_FX) // Error
return true;
-//else if (rc == RC_OK) { // Url was not a server name
-// Tabname = GetStringCatInfo(g, "Name",
-// (Catfunc & (FNC_TABLE | FNC_COL)) ? NULL : Name);
-// Tabname = GetStringCatInfo(g, "Tabname", Tabname);
-// Username = GetStringCatInfo(g, "User", NULL);
-// Password = GetStringCatInfo(g, "Password", NULL);
-//} // endif rc
-
-//if ((Srcdef = GetStringCatInfo(g, "Srcdef", NULL)))
-// Read_Only = true;
Wrapname = GetStringCatInfo(g, "Wrapper", NULL);
-//Prop = GetStringCatInfo(g, "Properties", NULL);
-//Tabcat = GetStringCatInfo(g, "Qualifier", NULL);
-//Tabcat = GetStringCatInfo(g, "Catalog", Tabcat);
-//Tabschema = GetStringCatInfo(g, "Dbname", NULL);
-//Tabschema = GetStringCatInfo(g, "Schema", Tabschema);
-
-//if (Catfunc == FNC_COL)
-// Colpat = GetStringCatInfo(g, "Colpat", NULL);
-
-//if (Catfunc == FNC_TABLE)
-// Tabtyp = GetStringCatInfo(g, "Tabtype", NULL);
-
-//Qrystr = GetStringCatInfo(g, "Query_String", "?");
-//Sep = GetStringCatInfo(g, "Separator", NULL);
-//Xsrc = GetBoolCatInfo("Execsrc", FALSE);
-//Maxerr = GetIntCatInfo("Maxerr", 0);
-//Maxres = GetIntCatInfo("Maxres", 0);
-//Quoted = GetIntCatInfo("Quoted", 0);
-// Cto= GetIntCatInfo("ConnectTimeout", DEFAULT_LOGIN_TIMEOUT);
-// Qto= GetIntCatInfo("QueryTimeout", DEFAULT_QUERY_TIMEOUT);
-//Scrollable = GetBoolCatInfo("Scrollable", false);
-//Memory = GetIntCatInfo("Memory", 0);
-//Pseudo = 2; // FILID is Ok but not ROWID
return false;
} // end of DefineAM
@@ -341,9 +308,6 @@ TDBJDBC::TDBJDBC(PJDBCDEF tdp) : TDBEXT(tdp)
WrapName = tdp->Wrapname;
Ops.User = tdp->Username;
Ops.Pwd = tdp->Password;
-// Ops.Properties = tdp->Prop;
-// Ops.Cto = tdp->Cto;
-// Ops.Qto = tdp->Qto;
Ops.Scrollable = tdp->Scrollable;
} else {
WrapName = NULL;
@@ -351,13 +315,9 @@ TDBJDBC::TDBJDBC(PJDBCDEF tdp) : TDBEXT(tdp)
Ops.Url = NULL;
Ops.User = NULL;
Ops.Pwd = NULL;
-// Ops.Properties = NULL;
-// Ops.Cto = DEFAULT_LOGIN_TIMEOUT;
-// Ops.Qto = DEFAULT_QUERY_TIMEOUT;
Ops.Scrollable = false;
} // endif tdp
-//Ncol = 0;
Prepared = false;
Werr = false;
Rerr = false;
@@ -370,7 +330,6 @@ TDBJDBC::TDBJDBC(PTDBJDBC tdbp) : TDBEXT(tdbp)
Cnp = tdbp->Cnp;
WrapName = tdbp->WrapName;
Ops = tdbp->Ops;
-//Ncol = tdbp->Ncol;
Prepared = tdbp->Prepared;
Werr = tdbp->Werr;
Rerr = tdbp->Rerr;
@@ -406,10 +365,11 @@ PCOL TDBJDBC::MakeCol(PGLOBAL g, PCOLDEF cdp, PCOL cprec, int n)
/***********************************************************************/
bool TDBJDBC::MakeInsert(PGLOBAL g)
{
- char *schmp = NULL, *catp = NULL, buf[NAM_LEN * 3];
+ PCSZ schmp = NULL;
+ char *catp = NULL, buf[NAM_LEN * 3];
int len = 0;
uint pos;
- bool b = false, oom = false;
+ bool b = false;
PTABLE tablep = To_Table;
PCOL colp;
@@ -446,32 +406,32 @@ bool TDBJDBC::MakeInsert(PGLOBAL g)
Query = new(g)STRING(g, len, "INSERT INTO ");
if (catp) {
- oom |= Query->Append(catp);
+ Query->Append(catp);
if (schmp) {
- oom |= Query->Append('.');
- oom |= Query->Append(schmp);
+ Query->Append('.');
+ Query->Append(schmp);
} // endif schmp
- oom |= Query->Append('.');
+ Query->Append('.');
} else if (schmp) {
- oom |= Query->Append(schmp);
- oom |= Query->Append('.');
+ Query->Append(schmp);
+ Query->Append('.');
} // endif schmp
if (Quote) {
// Put table name between identifier quotes in case in contains blanks
- oom |= Query->Append(Quote);
- oom |= Query->Append(buf);
- oom |= Query->Append(Quote);
+ Query->Append(Quote);
+ Query->Append(buf);
+ Query->Append(Quote);
} else
- oom |= Query->Append(buf);
+ Query->Append(buf);
- oom |= Query->Append('(');
+ Query->Append('(');
for (colp = Columns; colp; colp = colp->GetNext()) {
if (b)
- oom |= Query->Append(", ");
+ Query->Append(", ");
else
b = true;
@@ -480,15 +440,15 @@ bool TDBJDBC::MakeInsert(PGLOBAL g)
if (Quote) {
// Put column name between identifier quotes in case in contains blanks
- oom |= Query->Append(Quote);
- oom |= Query->Append(buf);
- oom |= Query->Append(Quote);
+ Query->Append(Quote);
+ Query->Append(buf);
+ Query->Append(Quote);
} else
- oom |= Query->Append(buf);
+ Query->Append(buf);
} // endfor colp
- if ((oom |= Query->Append(") VALUES ("))) {
+ if ((Query->Append(") VALUES ("))) {
strcpy(g->Message, "MakeInsert: Out of memory");
return true;
} else // in case prepared statement fails
@@ -496,9 +456,9 @@ bool TDBJDBC::MakeInsert(PGLOBAL g)
// Make prepared statement
for (int i = 0; i < Nparm; i++)
- oom |= Query->Append("?,");
+ Query->Append("?,");
- if (oom) {
+ if (Query->IsTruncated()) {
strcpy(g->Message, "MakeInsert: Out of memory");
return true;
} else
@@ -737,18 +697,12 @@ bool TDBJDBC::SetRecpos(PGLOBAL g, int recpos)
{
if (Jcp->m_Full) {
Fpos = 0;
-// CurNum = 0;
CurNum = 1;
} else if (Memory == 3) {
-// Fpos = recpos;
-// CurNum = -1;
Fpos = 0;
CurNum = recpos;
} else if (Ops.Scrollable) {
// Is new position in the current row set?
-// if (recpos >= Curpos && recpos < Curpos + Rbuf) {
-// CurNum = recpos - Curpos;
-// Fpos = 0;
if (recpos > 0 && recpos <= Rbuf) {
CurNum = recpos;
Fpos = recpos;
@@ -797,7 +751,7 @@ bool TDBJDBC::ReadKey(PGLOBAL g, OPVAL op, const key_range *kr)
To_CondFil->Body= (char*)PlugSubAlloc(g, NULL, 0);
*To_CondFil->Body= 0;
- if ((To_CondFil = hc->CheckCond(g, To_CondFil, To_CondFil->Cond)))
+ if ((To_CondFil = hc->CheckCond(g, To_CondFil, Cond)))
PlugSubAlloc(g, NULL, strlen(To_CondFil->Body) + 1);
} // endif active_index
@@ -911,7 +865,6 @@ int TDBJDBC::WriteDB(PGLOBAL g)
// an insert query for each line to insert
uint len = Query->GetLength();
char buf[64];
- bool oom = false;
// Make the Insert command value list
for (PCOL colp = Columns; colp; colp = colp->GetNext()) {
@@ -919,28 +872,28 @@ int TDBJDBC::WriteDB(PGLOBAL g)
char *s = colp->GetValue()->GetCharString(buf);
if (colp->GetResultType() == TYPE_STRING)
- oom |= Query->Append_quoted(s);
+ Query->Append_quoted(s);
else if (colp->GetResultType() == TYPE_DATE) {
DTVAL *dtv = (DTVAL*)colp->GetValue();
if (dtv->IsFormatted())
- oom |= Query->Append_quoted(s);
+ Query->Append_quoted(s);
else
- oom |= Query->Append(s);
+ Query->Append(s);
} else
- oom |= Query->Append(s);
+ Query->Append(s);
} else
- oom |= Query->Append("NULL");
+ Query->Append("NULL");
- oom |= Query->Append(',');
+ Query->Append(',');
} // endfor colp
- if (unlikely(oom)) {
+ if (unlikely(Query->IsTruncated())) {
strcpy(g->Message, "WriteDB: Out of memory");
return RC_FX;
- } // endif oom
+ } // endif Query
Query->RepLast(')');
@@ -990,11 +943,6 @@ int TDBJDBC::DeleteDB(PGLOBAL g, int irc)
/***********************************************************************/
void TDBJDBC::CloseDB(PGLOBAL g)
{
- //if (To_Kindex) {
- // To_Kindex->Close();
- // To_Kindex = NULL;
- // } // endif
-
if (Jcp)
Jcp->Close();
@@ -1019,7 +967,7 @@ void TDBJDBC::CloseDB(PGLOBAL g)
/***********************************************************************/
/* JDBCCOL public constructor. */
/***********************************************************************/
-JDBCCOL::JDBCCOL(PCOLDEF cdp, PTDB tdbp, PCOL cprec, int i, PSZ am)
+JDBCCOL::JDBCCOL(PCOLDEF cdp, PTDB tdbp, PCOL cprec, int i, PCSZ am)
: EXTCOL(cdp, tdbp, cprec, i, am)
{
} // end of JDBCCOL constructor
@@ -1039,54 +987,6 @@ JDBCCOL::JDBCCOL(JDBCCOL *col1, PTDB tdbp) : EXTCOL(col1, tdbp)
{
} // end of JDBCCOL copy constructor
-#if 0
-/***********************************************************************/
-/* SetBuffer: prepare a column block for write operation. */
-/***********************************************************************/
-bool JDBCCOL::SetBuffer(PGLOBAL g, PVAL value, bool ok, bool check)
-{
- if (!(To_Val = value)) {
- sprintf(g->Message, MSG(VALUE_ERROR), Name);
- return true;
- } else if (Buf_Type == value->GetType()) {
- // Values are of the (good) column type
- if (Buf_Type == TYPE_DATE) {
- // If any of the date values is formatted
- // output format must be set for the receiving table
- if (GetDomain() || ((DTVAL *)value)->IsFormatted())
- goto newval; // This will make a new value;
-
- } else if (Buf_Type == TYPE_DOUBLE)
- // Float values must be written with the correct (column) precision
- // Note: maybe this should be forced by ShowValue instead of this ?
- value->SetPrec(GetScale());
-
- Value = value; // Directly access the external value
- } else {
- // Values are not of the (good) column type
- if (check) {
- sprintf(g->Message, MSG(TYPE_VALUE_ERR), Name,
- GetTypeName(Buf_Type), GetTypeName(value->GetType()));
- return true;
- } // endif check
-
- newval:
- if (InitValue(g)) // Allocate the matching value block
- return true;
-
- } // endif's Value, Buf_Type
-
- // Because Colblk's have been made from a copy of the original TDB in
- // case of Update, we must reset them to point to the original one.
- if (To_Tdb->GetOrig())
- To_Tdb = (PTDB)To_Tdb->GetOrig();
-
- // Set the Column
- Status = (ok) ? BUF_EMPTY : BUF_NO;
- return false;
-} // end of SetBuffer
-#endif // 0
-
/***********************************************************************/
/* ReadColumn: when SQLFetch is used there is nothing to do as the */
/* column buffer was bind to the record set. This is also the case */
@@ -1196,26 +1096,6 @@ PCMD TDBXJDC::MakeCMD(PGLOBAL g)
return xcmd;
} // end of MakeCMD
-#if 0
-/***********************************************************************/
-/* JDBC Bind Parameter function. */
-/***********************************************************************/
-bool TDBXJDC::BindParameters(PGLOBAL g)
-{
- PJDBCCOL colp;
-
- for (colp = (PJDBCCOL)Columns; colp; colp = (PJDBCCOL)colp->Next) {
- colp->AllocateBuffers(g, 0);
-
- if (Jcp->BindParam(colp))
- return true;
-
- } // endfor colp
-
- return false;
-} // end of BindParameters
-#endif // 0
-
/***********************************************************************/
/* XDBC GetMaxSize: returns table size (not always one row). */
/***********************************************************************/
@@ -1332,8 +1212,8 @@ int TDBXJDC::DeleteDB(PGLOBAL g, int irc)
/***********************************************************************/
/* JSRCCOL public constructor. */
/***********************************************************************/
-JSRCCOL::JSRCCOL(PCOLDEF cdp, PTDB tdbp, PCOL cprec, int i, PSZ am)
- : JDBCCOL(cdp, tdbp, cprec, i, am)
+JSRCCOL::JSRCCOL(PCOLDEF cdp, PTDB tdbp, PCOL cprec, int i, PCSZ am)
+ : JDBCCOL(cdp, tdbp, cprec, i, am)
{
// Set additional JDBC access method information for column.
Flag = cdp->GetOffset();
@@ -1416,17 +1296,3 @@ PQRYRES TDBJDBCL::GetResult(PGLOBAL g)
{
return JDBCColumns(g, Schema, Tab, Colpat, Maxres, false, &Ops);
} // end of GetResult
-
-#if 0
-/* ---------------------------TDBJSRC class -------------------------- */
-
-/***********************************************************************/
-/* GetResult: Get the list of JDBC data sources. */
-/***********************************************************************/
-PQRYRES TDBJSRC::GetResult(PGLOBAL g)
-{
- return JDBCDataSources(g, Maxres, false);
-} // end of GetResult
-
-/* ------------------------ End of TabJDBC --------------------------- */
-#endif // 0
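TDBJDBC::MakeInsert above assembles INSERT INTO [catalog.][schema.]table(col, ...) VALUES (?, ...) with optional identifier quoting and one placeholder per column, then prepares it. A standalone sketch of that assembly using std::string; the quoting rules are simplified and the names are example inputs only:

#include <cstdio>
#include <string>
#include <vector>

// Build an INSERT statement with optional schema qualification, optional
// identifier quoting of table and columns, and one '?' per column.
static std::string MakeInsert(const std::string &schema,
                              const std::string &table,
                              const std::vector<std::string> &cols,
                              char quote) {
  auto quoted = [quote](const std::string &id) {
    return quote ? quote + id + quote : id;
  };
  std::string q = "INSERT INTO ";
  if (!schema.empty())
    q += schema + ".";
  q += quoted(table) + "(";
  for (size_t i = 0; i < cols.size(); i++)
    q += (i ? ", " : "") + quoted(cols[i]);
  q += ") VALUES (";
  for (size_t i = 0; i < cols.size(); i++)
    q += (i ? ",?" : "?");
  q += ")";
  return q;
}

int main() {
  std::string sql = MakeInsert("test", "t1", {"id", "name"}, '`');
  printf("%s\n", sql.c_str());   // INSERT INTO test.`t1`(`id`, `name`) VALUES (?,?)
  return 0;
}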
diff --git a/storage/connect/tabjdbc.h b/storage/connect/tabjdbc.h
index 46d2073e923..d8ec65d02d8 100644
--- a/storage/connect/tabjdbc.h
+++ b/storage/connect/tabjdbc.h
@@ -1,7 +1,7 @@
/*************** Tabjdbc H Declares Source Code File (.H) **************/
-/* Name: TABJDBC.H Version 1.0 */
+/* Name: TABJDBC.H Version 1.1 */
/* */
-/* (C) Copyright to the author Olivier BERTRAND 2016 */
+/* (C) Copyright to the author Olivier BERTRAND 2016-2017 */
/* */
/* This file contains the TDBJDBC classes declares. */
/***********************************************************************/
@@ -14,9 +14,6 @@ typedef class TDBJDBC *PTDBJDBC;
typedef class JDBCCOL *PJDBCCOL;
typedef class TDBXJDC *PTDBXJDC;
typedef class JSRCCOL *PJSRCCOL;
-//typedef class TDBOIF *PTDBOIF;
-//typedef class OIFCOL *POIFCOL;
-//typedef class TDBJSRC *PTDBJSRC;
/***********************************************************************/
/* JDBC table. */
@@ -68,20 +65,14 @@ public:
// Methods
virtual PTDB Clone(PTABS t);
-//virtual int GetRecpos(void);
virtual bool SetRecpos(PGLOBAL g, int recpos);
-//virtual PSZ GetFile(PGLOBAL g);
-//virtual void SetFile(PGLOBAL g, PSZ fn);
virtual void ResetSize(void);
-//virtual int GetAffectedRows(void) {return AftRows;}
- virtual PSZ GetServer(void) { return "JDBC"; }
+ virtual PCSZ GetServer(void) { return "JDBC"; }
virtual int Indexable(void) { return 2; }
// Database routines
virtual PCOL MakeCol(PGLOBAL g, PCOLDEF cdp, PCOL cprec, int n);
virtual int Cardinality(PGLOBAL g);
-//virtual int GetMaxSize(PGLOBAL g);
-//virtual int GetProgMax(PGLOBAL g);
virtual bool OpenDB(PGLOBAL g);
virtual int ReadDB(PGLOBAL g);
virtual int WriteDB(PGLOBAL g);
@@ -91,21 +82,14 @@ public:
protected:
// Internal functions
-//int Decode(char *utf, char *buf, size_t n);
-//bool MakeSQL(PGLOBAL g, bool cnt);
bool MakeInsert(PGLOBAL g);
-//virtual bool MakeCommand(PGLOBAL g);
-//bool MakeFilter(PGLOBAL g, bool c);
bool SetParameters(PGLOBAL g);
-//char *MakeUpdate(PGLOBAL g);
-//char *MakeDelete(PGLOBAL g);
// Members
JDBConn *Jcp; // Points to a JDBC connection class
JDBCCOL *Cnp; // Points to count(*) column
JDBCPARM Ops; // Additional parameters
char *WrapName; // Points to Java wrapper name
-//int Ncol; // The column number
bool Prepared; // True when using prepared statement
bool Werr; // Write error
bool Rerr; // Rewind error
@@ -119,7 +103,7 @@ class JDBCCOL : public EXTCOL {
friend class TDBJDBC;
public:
// Constructors
- JDBCCOL(PCOLDEF cdp, PTDB tdbp, PCOL cprec, int i, PSZ am = "JDBC");
+ JDBCCOL(PCOLDEF cdp, PTDB tdbp, PCOL cprec, int i, PCSZ am = "JDBC");
JDBCCOL(JDBCCOL *colp, PTDB tdbp); // Constructor used in copy process
// Implementation
@@ -152,12 +136,6 @@ public:
virtual AMT GetAmType(void) {return TYPE_AM_XDBC;}
// Methods
- //virtual int GetRecpos(void);
- //virtual PSZ GetFile(PGLOBAL g);
- //virtual void SetFile(PGLOBAL g, PSZ fn);
- //virtual void ResetSize(void);
- //virtual int GetAffectedRows(void) {return AftRows;}
- //virtual PSZ GetServer(void) {return "JDBC";}
// Database routines
virtual PCOL MakeCol(PGLOBAL g, PCOLDEF cdp, PCOL cprec, int n);
@@ -172,7 +150,6 @@ public:
protected:
// Internal functions
PCMD MakeCMD(PGLOBAL g);
- //bool BindParameters(PGLOBAL g);
// Members
PCMD Cmdlist; // The commands to execute
@@ -188,7 +165,7 @@ class JSRCCOL : public JDBCCOL {
friend class TDBXJDC;
public:
// Constructors
- JSRCCOL(PCOLDEF cdp, PTDB tdbp, PCOL cprec, int i, PSZ am = "JDBC");
+ JSRCCOL(PCOLDEF cdp, PTDB tdbp, PCOL cprec, int i, PCSZ am = "JDBC");
// Implementation
virtual int GetAmType(void) {return TYPE_AM_JDBC;}
@@ -196,7 +173,7 @@ public:
// Methods
virtual void ReadColumn(PGLOBAL g);
virtual void WriteColumn(PGLOBAL g);
- // void Print(PGLOBAL g, FILE *, uint);
+ // void Printf(PGLOBAL g, FILE *, uint);
protected:
// Members
@@ -233,9 +210,9 @@ protected:
virtual PQRYRES GetResult(PGLOBAL g);
// Members
- char *Schema; // Points to schema name or NULL
- char *Tab; // Points to JDBC table name or pattern
- char *Tabtype; // Points to JDBC table type
+ PCSZ Schema; // Points to schema name or NULL
+ PCSZ Tab; // Points to JDBC table name or pattern
+ PCSZ Tabtype; // Points to JDBC table type
JDBCPARM Ops; // Additional parameters
}; // end of class TDBJTB
@@ -252,24 +229,7 @@ protected:
virtual PQRYRES GetResult(PGLOBAL g);
// Members
- char *Colpat; // Points to catalog column pattern
+ PCSZ Colpat; // Points to catalog column pattern
}; // end of class TDBJDBCL
-#if 0
-/***********************************************************************/
-/* This is the class declaration for the Data Sources catalog table. */
-/***********************************************************************/
-class TDBJSRC : public TDBJDRV {
-public:
- // Constructor
- TDBJSRC(PJDBCDEF tdp) : TDBJDRV(tdp) {}
-
-protected:
- // Specific routines
- virtual PQRYRES GetResult(PGLOBAL g);
-
- // No additional Members
-}; // end of class TDBJSRC
-#endif // 0
-
#endif // !NJDBC
diff --git a/storage/connect/tabjson.cpp b/storage/connect/tabjson.cpp
index 1e11d454cfc..063115c7a60 100644
--- a/storage/connect/tabjson.cpp
+++ b/storage/connect/tabjson.cpp
@@ -1,6 +1,6 @@
/************* tabjson C++ Program Source Code File (.CPP) *************/
-/* PROGRAM NAME: tabjson Version 1.3 */
-/* (C) Copyright to the author Olivier BERTRAND 2014 - 2016 */
+/* PROGRAM NAME: tabjson Version 1.4 */
+/* (C) Copyright to the author Olivier BERTRAND 2014 - 2017 */
/* This program are the JSON class DB execution routines. */
/***********************************************************************/
@@ -117,7 +117,9 @@ PQRYRES JSONColumns(PGLOBAL g, char *db, PTOS topt, bool info)
return NULL;
} // endif Fn
- tdp->Database = SetPath(g, db);
+ if (!(tdp->Database = SetPath(g, db)))
+ return NULL;
+
tdp->Objname = GetStringTableOption(g, topt, "Object", NULL);
tdp->Base = GetIntegerTableOption(g, topt, "Base", 0) ? 1 : 0;
tdp->Pretty = GetIntegerTableOption(g, topt, "Pretty", 2);
@@ -151,7 +153,7 @@ PQRYRES JSONColumns(PGLOBAL g, char *db, PTOS topt, bool info)
if (tdp->Zipped) {
#if defined(ZIP_SUPPORT)
- tjnp = new(g)TDBJSN(tdp, new(g)UNZFAM(tdp));
+ tjnp = new(g)TDBJSN(tdp, new(g) UNZFAM(tdp));
#else // !ZIP_SUPPORT
sprintf(g->Message, MSG(NO_FEAT_SUPPORT), "ZIP");
return NULL;
@@ -168,7 +170,7 @@ PQRYRES JSONColumns(PGLOBAL g, char *db, PTOS topt, bool info)
G->Sarea_Size = tdp->Lrecl * 10;
G->Sarea = PlugSubAlloc(g, NULL, G->Sarea_Size);
PlugSubSet(G, G->Sarea, G->Sarea_Size);
- G->jump_level = -1;
+ G->jump_level = 0;
tjnp->SetG(G);
#else
tjnp->SetG(g);
@@ -262,8 +264,13 @@ PQRYRES JSONColumns(PGLOBAL g, char *db, PTOS topt, bool info)
break;
if (jcp) {
- if (jcp->Type != jcol.Type)
- jcp->Type = TYPE_STRING;
+ if (jcp->Type != jcol.Type) {
+ if (jcp->Type == TYPE_UNKNOWN)
+ jcp->Type = jcol.Type;
+ else if (jcol.Type != TYPE_UNKNOWN)
+ jcp->Type = TYPE_STRING;
+
+ } // endif Type
if (*fmt && (!jcp->Fmt || strlen(jcp->Fmt) < strlen(fmt))) {
jcp->Fmt = PlugDup(g, fmt);
@@ -336,7 +343,7 @@ PQRYRES JSONColumns(PGLOBAL g, char *db, PTOS topt, bool info)
skipit:
if (trace)
- htrc("CSVColumns: n=%d len=%d\n", n, length[0]);
+ htrc("JSONColumns: n=%d len=%d\n", n, length[0]);
/*********************************************************************/
/* Allocate the structures used to refer to the result set. */
@@ -417,7 +424,7 @@ bool JSONDEF::DefineAM(PGLOBAL g, LPCSTR, int poff)
Pretty = GetIntCatInfo("Pretty", 2);
Limit = GetIntCatInfo("Limit", 10);
Base = GetIntCatInfo("Base", 0) ? 1 : 0;
- return DOSDEF::DefineAM(g, "DOS", poff);
+ return DOSDEF::DefineAM(g, "DOS", poff);
} // end of DefineAM
/***********************************************************************/
@@ -441,7 +448,7 @@ PTDB JSONDEF::GetTable(PGLOBAL g, MODE m)
if (Zipped) {
#if defined(ZIP_SUPPORT)
- if (m == MODE_READ || m == MODE_UPDATE) {
+ if (m == MODE_READ || m == MODE_ANY || m == MODE_ALTER) {
txfp = new(g) UNZFAM(this);
} else if (m == MODE_INSERT) {
txfp = new(g) ZIPFAM(this);
@@ -463,7 +470,7 @@ PTDB JSONDEF::GetTable(PGLOBAL g, MODE m)
sprintf(g->Message, MSG(NO_FEAT_SUPPORT), "GZ");
return NULL;
#endif // !GZ_SUPPORT
- } else if (map)
+ } else if (map)
txfp = new(g) MAPFAM(this);
else
txfp = new(g) DOSFAM(this);
@@ -478,7 +485,7 @@ PTDB JSONDEF::GetTable(PGLOBAL g, MODE m)
G->Sarea_Size = Lrecl * 10;
G->Sarea = PlugSubAlloc(g, NULL, G->Sarea_Size);
PlugSubSet(G, G->Sarea, G->Sarea_Size);
- G->jump_level = -1;
+ G->jump_level = 0;
((TDBJSN*)tdbp)->G = G;
#else
((TDBJSN*)tdbp)->G = g;
@@ -486,7 +493,7 @@ PTDB JSONDEF::GetTable(PGLOBAL g, MODE m)
} else {
if (Zipped) {
#if defined(ZIP_SUPPORT)
- if (m == MODE_READ || m == MODE_UPDATE) {
+ if (m == MODE_READ || m == MODE_ANY || m == MODE_ALTER) {
txfp = new(g) UNZFAM(this);
} else if (m == MODE_INSERT) {
strcpy(g->Message, "INSERT supported only for zipped JSON when pretty=0");
@@ -535,7 +542,7 @@ TDBJSN::TDBJSN(PJDEF tdp, PTXF txfp) : TDBDOS(tdp, txfp)
} else {
Jmode = MODE_OBJECT;
Objname = NULL;
- Xcol = NULL;
+ Xcol = NULL;
Limit = 1;
Pretty = 0;
B = 0;
@@ -695,6 +702,9 @@ bool TDBJSN::OpenDB(PGLOBAL g)
return true;
} // endswitch Jmode
+ if (Xcol && Txfp->GetAmType() != TYPE_AM_MGO)
+ To_Filter = NULL; // Incompatible

+
} // endif Use
return TDBDOS::OpenDB(g);
@@ -865,24 +875,21 @@ int TDBJSN::MakeTopTree(PGLOBAL g, PJSON jsp)
} // end of PrepareWriting
- /***********************************************************************/
- /* WriteDB: Data Base write routine for DOS access method. */
- /***********************************************************************/
- int TDBJSN::WriteDB(PGLOBAL g)
+/***********************************************************************/
+/* WriteDB: Data Base write routine for DOS access method. */
+/***********************************************************************/
+int TDBJSN::WriteDB(PGLOBAL g)
{
int rc = TDBDOS::WriteDB(g);
#if USE_G
- if (rc == RC_FX)
- strcpy(g->Message, G->Message);
-
PlugSubSet(G, G->Sarea, G->Sarea_Size);
#endif
Row->Clear();
return rc;
} // end of WriteDB
- /* ---------------------------- JSONCOL ------------------------------ */
+/* ---------------------------- JSONCOL ------------------------------ */
/***********************************************************************/
/* JSONCOL public constructor. */
@@ -1147,11 +1154,61 @@ bool JSONCOL::ParseJpath(PGLOBAL g)
} // end of ParseJpath
/***********************************************************************/
+/* Get Jpath converted to Mongo path. */
+/***********************************************************************/
+char *JSONCOL::GetJpath(PGLOBAL g, bool proj)
+{
+ if (Jpath) {
+ char *p1, *p2, *mgopath;
+ int i = 0;
+
+ if (strcmp(Jpath, "*"))
+ mgopath = PlugDup(g, Jpath);
+ else
+ return NULL;
+
+ for (p1 = p2 = mgopath; *p1; p1++)
+ if (i) { // Inside []
+ if (isdigit(*p1)) {
+ if (!proj)
+ *p2++ = *p1;
+
+ i = 2;
+ } else if (*p1 == ']' && i == 2) {
+ if (proj && *(p1 + 1) == ':')
+ p1++;
+
+ i = 0;
+ } else if (proj)
+ i = 2;
+ else
+ return NULL;
+
+ } else switch (*p1) {
+ case ':': *p2++ = '.'; break;
+ case '[': i = 1; break;
+ case '*':
+ if (*(p2 - 1) == '.' && !*(p1 + 1)) {
+ p2--; // Suppress last :*
+ break;
+ } // endif p2
+
+ default: *p2++ = *p1; break;
+ } // endswitch p1;
+
+ *p2 = 0;
+ return mgopath;
+ } else
+ return NULL;
+
+} // end of GetJpath
+
+/***********************************************************************/
/* MakeJson: Serialize the json item and set value to it. */
/***********************************************************************/
PVAL JSONCOL::MakeJson(PGLOBAL g, PJSON jsp)
- {
- if (Value->IsTypeNum()) {
+{
+ if (Value->IsTypeNum()) {
strcpy(g->Message, "Cannot make Json for a numeric column");
Value->Reset();
} else
@@ -1171,7 +1228,8 @@ void JSONCOL::SetJsonValue(PGLOBAL g, PVAL vp, PJVAL val, int n)
case TYPE_INTG:
case TYPE_BINT:
case TYPE_DBL:
- vp->SetValue_pval(val->GetValue());
+ case TYPE_DATE:
+ vp->SetValue_pval(val->GetValue());
break;
case TYPE_BOOL:
if (vp->IsTypeNum())
@@ -1190,11 +1248,14 @@ void JSONCOL::SetJsonValue(PGLOBAL g, PVAL vp, PJVAL val, int n)
// } // endif Type
default:
- vp->Reset();
- } // endswitch Type
+ vp->Reset();
+ vp->SetNull(true);
+ } // endswitch Type
- } else
- vp->Reset();
+ } else {
+ vp->Reset();
+ vp->SetNull(true);
+ } // endif val
} // end of SetJsonValue
@@ -1207,8 +1268,8 @@ void JSONCOL::ReadColumn(PGLOBAL g)
Value->SetValue_pval(GetColumnValue(g, Tjp->Row, 0));
// Set null when applicable
- if (Nullable)
- Value->SetNull(Value->IsZero());
+ if (!Nullable)
+ Value->SetNull(false);
} // end of ReadColumn
@@ -1289,8 +1350,8 @@ PVAL JSONCOL::ExpandArray(PGLOBAL g, PJAR arp, int n)
if (!(jvp = arp->GetValue((Nodes[n].Rx = Nodes[n].Nx)))) {
strcpy(g->Message, "Logical error expanding array");
- longjmp(g->jumper[g->jump_level], 666);
- } // endif jvp
+ throw 666;
+ } // endif jvp
if (n < Nod - 1 && jvp->GetJson()) {
jval.SetValue(GetColumnValue(g, jvp->GetJson(), n + 1));
@@ -1475,8 +1536,8 @@ void JSONCOL::WriteColumn(PGLOBAL g)
{
if (Xpd && Tjp->Pretty < 2) {
strcpy(g->Message, "Cannot write expanded column when Pretty is not 2");
- longjmp(g->jumper[g->jump_level], 666);
- } // endif Xpd
+ throw 666;
+ } // endif Xpd
/*********************************************************************/
/* Check whether this node must be written. */
@@ -1510,8 +1571,8 @@ void JSONCOL::WriteColumn(PGLOBAL g)
if (!(jsp = ParseJson(G, s, (int)strlen(s)))) {
strcpy(g->Message, s);
- longjmp(g->jumper[g->jump_level], 666);
- } // endif jsp
+ throw 666;
+ } // endif jsp
if (arp) {
if (Nod > 1 && Nodes[Nod-2].Op == OP_EQ)
@@ -1530,9 +1591,10 @@ void JSONCOL::WriteColumn(PGLOBAL g)
break;
} // endif Op
- // Passthru
+ // fall through
case TYPE_DATE:
case TYPE_INT:
+ case TYPE_TINY:
case TYPE_SHORT:
case TYPE_BIGINT:
case TYPE_DOUBLE:
@@ -1860,8 +1922,11 @@ bool TDBJSON::OpenDB(PGLOBAL g)
return true;
} // endswitch Jmode
- Use = USE_OPEN;
- return false;
+ if (Xcol)
+ To_Filter = NULL; // Incompatible
+
+ Use = USE_OPEN;
+ return false;
} // end of OpenDB
/***********************************************************************/
@@ -1871,7 +1936,7 @@ int TDBJSON::ReadDB(PGLOBAL)
{
int rc;
- N++;
+ N++;
if (NextSame) {
SameRow = NextSame;
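The new JSONCOL::GetJpath above rewrites a CONNECT Jpath such as a:b:[2]:c into a Mongo-style dotted path, turning ':' into '.', keeping array indexes only when not projecting, and dropping a trailing :*. A simplified standalone sketch of that conversion; it does not reproduce every branch of the real routine:

#include <cctype>
#include <cstdio>
#include <string>

// Convert a CONNECT-style Jpath ("a:b:[2]:c", "a:*") into a Mongo-style
// dotted path ("a.b.2.c", "a"). Array indexes are copied only when the
// caller is not building a projection, mirroring the proj flag above.
static std::string JpathToMongo(const std::string &jp, bool proj) {
  std::string out;
  for (size_t i = 0; i < jp.size(); i++) {
    char c = jp[i];
    if (c == ':')
      out += '.';
    else if (c == '[') {                      // the [n] part
      while (i + 1 < jp.size() && jp[i + 1] != ']') {
        if (!proj && isdigit((unsigned char)jp[i + 1]))
          out += jp[i + 1];                   // keep the index digit(s)
        i++;
      }
      i++;                                    // now at ']'; the loop step passes it
      if (proj && i + 1 < jp.size() && jp[i + 1] == ':')
        i++;                                  // also drop the separator of a skipped index
    } else if (c == '*' && i + 1 == jp.size()) {
      if (!out.empty() && out.back() == '.')
        out.pop_back();                       // suppress a trailing ".*"
    } else
      out += c;
  }
  return out;
}

int main() {
  printf("%s\n", JpathToMongo("a:b:[2]:c", false).c_str());  // a.b.2.c
  printf("%s\n", JpathToMongo("a:b:[2]:c", true).c_str());   // a.b.c
  printf("%s\n", JpathToMongo("a:b:*", false).c_str());      // a.b
  return 0;
}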
diff --git a/storage/connect/tabjson.h b/storage/connect/tabjson.h
index 924ce387900..c16cf6846b6 100644
--- a/storage/connect/tabjson.h
+++ b/storage/connect/tabjson.h
@@ -1,7 +1,7 @@
/*************** tabjson H Declares Source Code File (.H) **************/
-/* Name: tabjson.h Version 1.1 */
+/* Name: tabjson.h Version 1.3 */
/* */
-/* (C) Copyright to the author Olivier BERTRAND 2014 - 2015 */
+/* (C) Copyright to the author Olivier BERTRAND 2014 - 2017 */
/* */
/* This file contains the JSON classes declares. */
/***********************************************************************/
@@ -32,12 +32,12 @@ typedef struct _jnode {
/***********************************************************************/
/* JSON table. */
/***********************************************************************/
-class JSONDEF : public DOSDEF { /* Table description */
+class DllExport JSONDEF : public DOSDEF { /* Table description */
friend class TDBJSON;
friend class TDBJSN;
friend class TDBJCL;
friend PQRYRES JSONColumns(PGLOBAL, char*, PTOS, bool);
- public:
+public:
// Constructor
JSONDEF(void);
@@ -51,12 +51,12 @@ class JSONDEF : public DOSDEF { /* Table description */
protected:
// Members
JMODE Jmode; /* MODE_OBJECT by default */
- char *Objname; /* Name of first level object */
- char *Xcol; /* Name of expandable column */
+ PCSZ Objname; /* Name of first level object */
+ PCSZ Xcol; /* Name of expandable column */
int Limit; /* Limit of multiple values */
int Pretty; /* Depends on file structure */
int Level; /* Used for catalog table */
- int Base; /* Tne array index base */
+ int Base; /* The array index base */
bool Strict; /* Strict syntax checking */
}; // end of JSONDEF
@@ -66,7 +66,7 @@ class JSONDEF : public DOSDEF { /* Table description */
/* This is the JSN Access Method class declaration. */
/* The table is a DOS file, each record being a JSON object. */
/***********************************************************************/
-class TDBJSN : public TDBDOS {
+class DllExport TDBJSN : public TDBDOS {
friend class JSONCOL;
friend class JSONDEF;
public:
@@ -87,6 +87,8 @@ public:
virtual PCOL InsertSpecialColumn(PCOL colp);
virtual int RowNumber(PGLOBAL g, bool b = FALSE)
{return (b) ? M : N;}
+ virtual bool CanBeFiltered(void)
+ {return Txfp->GetAmType() == TYPE_AM_MGO || !Xcol;}
// Database routines
virtual int Cardinality(PGLOBAL g);
@@ -107,8 +109,8 @@ public:
PJSON Val; // The value of the current row
PJCOL Colp; // The multiple column
JMODE Jmode; // MODE_OBJECT by default
- char *Objname; // The table object name
- char *Xcol; // Name of expandable column
+ PCSZ Objname; // The table object name
+ PCSZ Xcol; // Name of expandable column
int Fpos; // The current row index
int N; // The current Rownum
int M; // Index of multiple value
@@ -127,9 +129,10 @@ public:
/***********************************************************************/
/* Class JSONCOL: JSON access method column descriptor. */
/***********************************************************************/
-class JSONCOL : public DOSCOL {
+class DllExport JSONCOL : public DOSCOL {
friend class TDBJSN;
friend class TDBJSON;
+ friend class MGOFAM;
public:
// Constructors
JSONCOL(PGLOBAL g, PCOLDEF cdp, PTDB tdbp, PCOL cprec, int i);
@@ -139,20 +142,21 @@ class JSONCOL : public DOSCOL {
virtual int GetAmType(void) {return Tjp->GetAmType();}
// Methods
- virtual bool SetBuffer(PGLOBAL g, PVAL value, bool ok, bool check);
- bool ParseJpath(PGLOBAL g);
- virtual void ReadColumn(PGLOBAL g);
- virtual void WriteColumn(PGLOBAL g);
+ virtual bool SetBuffer(PGLOBAL g, PVAL value, bool ok, bool check);
+ bool ParseJpath(PGLOBAL g);
+ char *GetJpath(PGLOBAL g, bool proj);
+ virtual void ReadColumn(PGLOBAL g);
+ virtual void WriteColumn(PGLOBAL g);
protected:
- bool CheckExpand(PGLOBAL g, int i, PSZ nm, bool b);
- bool SetArrayOptions(PGLOBAL g, char *p, int i, PSZ nm);
- PVAL GetColumnValue(PGLOBAL g, PJSON row, int i);
- PVAL ExpandArray(PGLOBAL g, PJAR arp, int n);
- PVAL CalculateArray(PGLOBAL g, PJAR arp, int n);
- PVAL MakeJson(PGLOBAL g, PJSON jsp);
- void SetJsonValue(PGLOBAL g, PVAL vp, PJVAL val, int n);
- PJSON GetRow(PGLOBAL g);
+ bool CheckExpand(PGLOBAL g, int i, PSZ nm, bool b);
+ bool SetArrayOptions(PGLOBAL g, char *p, int i, PSZ nm);
+ PVAL GetColumnValue(PGLOBAL g, PJSON row, int i);
+ PVAL ExpandArray(PGLOBAL g, PJAR arp, int n);
+ PVAL CalculateArray(PGLOBAL g, PJAR arp, int n);
+ PVAL MakeJson(PGLOBAL g, PJSON jsp);
+ void SetJsonValue(PGLOBAL g, PVAL vp, PJVAL val, int n);
+ PJSON GetRow(PGLOBAL g);
// Default constructor not to be used
JSONCOL(void) {}
@@ -174,7 +178,7 @@ class JSONCOL : public DOSCOL {
/***********************************************************************/
/* This is the JSON Access Method class declaration. */
/***********************************************************************/
-class TDBJSON : public TDBJSN {
+class DllExport TDBJSON : public TDBJSN {
friend class JSONDEF;
friend class JSONCOL;
public:
@@ -221,7 +225,7 @@ class TDBJSON : public TDBJSN {
/***********************************************************************/
/* This is the class declaration for the JSON catalog table. */
/***********************************************************************/
-class TDBJCL : public TDBCAT {
+class DllExport TDBJCL : public TDBCAT {
public:
// Constructor
TDBJCL(PJDEF tdp);
@@ -233,4 +237,5 @@ class TDBJCL : public TDBCAT {
// Members
PTOS Topt;
char *Db;
+ char *Dsn;
}; // end of class TDBJCL
diff --git a/storage/connect/table.cpp b/storage/connect/table.cpp
index 916449be6c6..d39837a7b5a 100644
--- a/storage/connect/table.cpp
+++ b/storage/connect/table.cpp
@@ -47,6 +47,7 @@ TDB::TDB(PTABDEF tdp) : Tdb_No(++Tnum)
To_Orig = NULL;
To_Filter = NULL;
To_CondFil = NULL;
+ Cond = NULL;
Next = NULL;
Name = (tdp) ? tdp->GetName() : NULL;
To_Table = NULL;
@@ -68,6 +69,7 @@ TDB::TDB(PTDB tdbp) : Tdb_No(++Tnum)
To_Orig = tdbp;
To_Filter = NULL;
To_CondFil = NULL;
+ Cond = NULL;
Next = NULL;
Name = tdbp->Name;
To_Table = tdbp->To_Table;
@@ -97,7 +99,7 @@ CHARSET_INFO *TDB::data_charset(void)
/***********************************************************************/
/* Return the datapath of the DB this table belongs to. */
/***********************************************************************/
-PSZ TDB::GetPath(void)
+PCSZ TDB::GetPath(void)
{
return To_Def->GetPath();
} // end of GetPath
@@ -297,7 +299,7 @@ bool TDB::SetRecpos(PGLOBAL g, int)
return true;
} // end of SetRecpos
-void TDB::Print(PGLOBAL g, FILE *f, uint n)
+void TDB::Printf(PGLOBAL g, FILE *f, uint n)
{
PCOL cp;
char m[64];
@@ -313,13 +315,13 @@ void TDB::Print(PGLOBAL g, FILE *f, uint n)
fprintf(f, "%s Columns (deg=%d):\n", m, tp->Degree);
for (cp = tp->Columns; cp; cp = cp->GetNext())
- cp->Print(g, f, n);
+ cp->Printf(g, f, n);
} /* endfor tp */
} // end of Print
-void TDB::Print(PGLOBAL, char *ps, uint)
+void TDB::Prints(PGLOBAL, char *ps, uint)
{
sprintf(ps, "R%d.%s", Tdb_No, Name);
} // end of Print
diff --git a/storage/connect/tabmac.cpp b/storage/connect/tabmac.cpp
index bbaba591540..a28b5d7108c 100644
--- a/storage/connect/tabmac.cpp
+++ b/storage/connect/tabmac.cpp
@@ -329,7 +329,7 @@ void MACCOL::ReadColumn(PGLOBAL g)
n = 0;
break;
default:
- p = "";
+ p = PlugDup(g, "");
} // endswitch Flag
} else switch (Flag) {
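The tabmac.cpp change stops pointing a writable char *p at a string literal and duplicates the value into working storage instead (PlugDup). A tiny standalone sketch of the distinction; dupstr here is only a stand-in for the engine's allocator:

#include <cstdio>
#include <cstdlib>
#include <cstring>

// Stand-in for PlugDup: return a private, writable copy of s.
static char *dupstr(const char *s) {
  char *p = (char*)malloc(strlen(s) + 1);
  return p ? strcpy(p, s) : nullptr;
}

int main() {
  // char *p = "eth0";       // would point at read-only storage; writing *p is undefined
  char *p = dupstr("eth0");  // safe: the caller owns a writable buffer
  if (!p)
    return 1;
  p[0] = 'E';                // modifying the copy is well-defined
  printf("%s\n", p);         // Eth0
  free(p);
  return 0;
}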
diff --git a/storage/connect/tabmul.cpp b/storage/connect/tabmul.cpp
index 78adde81d12..5c41f9094ac 100644
--- a/storage/connect/tabmul.cpp
+++ b/storage/connect/tabmul.cpp
@@ -1,7 +1,7 @@
/************* TabMul C++ Program Source Code File (.CPP) **************/
/* PROGRAM NAME: TABMUL */
/* ------------- */
-/* Version 1.8 */
+/* Version 1.9 */
/* */
/* COPYRIGHT: */
/* ---------- */
@@ -44,6 +44,11 @@
#define __MFC_COMPAT__ // To define min/max as macro
#endif
//#include <windows.h>
+#if defined(PATHMATCHSPEC)
+#include "Shlwapi.h"
+//using namespace std;
+#pragma comment(lib,"shlwapi.lib")
+#endif // PATHMATCHSPEC
#else
#if defined(UNIX)
#include <fnmatch.h>
@@ -124,9 +129,10 @@ bool TDBMUL::InitFileNames(PGLOBAL g)
{
#define PFNZ 4096
#define FNSZ (_MAX_DRIVE+_MAX_DIR+_MAX_FNAME+_MAX_EXT)
- char *pfn[PFNZ];
- char *filename;
- int rc, n = 0;
+ PTDBDIR dirp;
+ PSZ pfn[PFNZ];
+ PSZ filename;
+ int rc, n = 0;
if (trace)
htrc("in InitFileName: fn[]=%d\n", FNSZ);
@@ -141,115 +147,39 @@ bool TDBMUL::InitFileNames(PGLOBAL g)
if (trace)
htrc("InitFileName: fn='%s'\n", filename);
- if (Mul == 1) {
+ if (Mul != 2) {
/*******************************************************************/
/* To_File is a multiple name with special characters */
/*******************************************************************/
-#if defined(__WIN__)
- char drive[_MAX_DRIVE], direc[_MAX_DIR];
- WIN32_FIND_DATA FileData;
- HANDLE hSearch;
-
- _splitpath(filename, drive, direc, NULL, NULL);
-
- // Start searching files in the target directory.
- hSearch = FindFirstFile(filename, &FileData);
-
- if (hSearch == INVALID_HANDLE_VALUE) {
- rc = GetLastError();
-
- if (rc != ERROR_FILE_NOT_FOUND) {
- FormatMessage(FORMAT_MESSAGE_FROM_SYSTEM |
- FORMAT_MESSAGE_IGNORE_INSERTS,
- NULL, GetLastError(), 0,
- (LPTSTR)&filename, sizeof(filename), NULL);
- sprintf(g->Message, MSG(BAD_FILE_HANDLE), filename);
- return true;
- } // endif rc
-
- goto suite;
- } // endif hSearch
+ if (Mul == 1)
+ dirp = new(g) TDBDIR(PlugDup(g, filename));
+ else // Mul == 3 (Subdir)
+ dirp = new(g) TDBSDR(PlugDup(g, filename));
- while (n < PFNZ) {
- if (!(FileData.dwFileAttributes & FILE_ATTRIBUTE_DIRECTORY)) {
- strcat(strcat(strcpy(filename, drive), direc), FileData.cFileName);
- pfn[n++] = PlugDup(g, filename);
- } // endif dwFileAttributes
-
- if (!FindNextFile(hSearch, &FileData)) {
- rc = GetLastError();
-
- if (rc != ERROR_NO_MORE_FILES) {
- sprintf(g->Message, MSG(NEXT_FILE_ERROR), rc);
- FindClose(hSearch);
- return true;
- } // endif rc
-
- break;
- } // endif FindNextFile
-
- } // endwhile n
+ if (dirp->OpenDB(g))
+ return true;
- // Close the search handle.
- if (!FindClose(hSearch)) {
- strcpy(g->Message, MSG(SRCH_CLOSE_ERR));
- return true;
- } // endif FindClose
+ if (trace && Mul == 3) {
+ int nf = ((PTDBSDR)dirp)->FindInDir(g);
+ htrc("Number of files = %d\n", nf);
+ } // endif trace
+ while (true)
+ if ((rc = dirp->ReadDB(g)) == RC_OK) {
+#if defined(__WIN__)
+ strcat(strcpy(filename, dirp->Drive), dirp->Direc);
#else // !__WIN__
- struct stat fileinfo;
- char fn[FN_REFLEN], direc[FN_REFLEN], pattern[FN_HEADLEN], ftype[FN_EXTLEN];
- DIR *dir;
- struct dirent *entry;
-
- _splitpath(filename, NULL, direc, pattern, ftype);
- strcat(pattern, ftype);
-
- if (trace)
- htrc("direc=%s pattern=%s ftype=%s\n", direc, pattern, ftype);
-
- // Start searching files in the target directory.
- if (!(dir = opendir(direc))) {
- sprintf(g->Message, MSG(BAD_DIRECTORY), direc, strerror(errno));
-
- if (trace)
- htrc("%s\n", g->Message);
-
- return true;
- } // endif dir
-
- if (trace)
- htrc("dir opened: reading files\n");
-
- while ((entry = readdir(dir)) && n < PFNZ) {
- strcat(strcpy(fn, direc), entry->d_name);
-
- if (trace)
- htrc("%s read\n", fn);
-
- if (lstat(fn, &fileinfo) < 0) {
- sprintf(g->Message, "%s: %s", fn, strerror(errno));
- return true;
- } else if (!S_ISREG(fileinfo.st_mode))
- continue; // Not a regular file (should test for links)
-
- /*******************************************************************/
- /* Test whether the file name matches the table name filter. */
- /*******************************************************************/
- if (fnmatch(pattern, entry->d_name, 0))
- continue; // Not a match
-
- strcat(strcpy(filename, direc), entry->d_name);
- pfn[n++] = PlugDup(g, filename);
-
- if (trace)
- htrc("Adding pfn[%d] %s\n", n, filename);
+ strcpy(filename, dirp->Direc);
+#endif // !__WIN__
+ strcat(strcat(filename, dirp->Fname), dirp->Ftype);
+ pfn[n++] = PlugDup(g, filename);
+ } else
+ break;
- } // endwhile readdir
+ dirp->CloseDB(g);
- // Close the dir handle.
- closedir(dir);
-#endif // !__WIN__
+ if (rc == RC_FX)
+ return true;
} else {
/*******************************************************************/
@@ -297,10 +227,6 @@ bool TDBMUL::InitFileNames(PGLOBAL g)
} // endif Mul
-#if defined(__WIN__)
- suite:
-#endif
-
if (n) {
Filenames = (char**)PlugSubAlloc(g, NULL, n * sizeof(char*));
@@ -581,7 +507,95 @@ void TDBMUL::CloseDB(PGLOBAL g)
} // end of CloseDB
-/* --------------------------- Class DIRDEF -------------------------- */
+#if 0
+/* ------------------------- Class TDBMSD ---------------------------- */
+
+ // Method
+PTDB TDBMSD::Clone(PTABS t)
+{
+ PTDBMSD tp;
+ PGLOBAL g = t->G; // Is this really useful ???
+
+ tp = new(g) TDBMSD(this);
+ tp->Tdbp = Tdbp->Clone(t);
+ tp->Columns = tp->Tdbp->GetColumns();
+ return tp;
+} // end of Clone
+
+PTDB TDBMSD::Duplicate(PGLOBAL g)
+{
+ PTDBMSD tmup = new(g) TDBMSD(this);
+
+ tmup->Tdbp = Tdbp->Duplicate(g);
+ return tmup;
+} // end of Duplicate
+
+/***********************************************************************/
+/* Initializes the table filename list. */
+/* Note: tables created by concatenating the file components without */
+/* specifying the LRECL value (that should be restricted to _MAX_PATH)*/
+/* have a LRECL that is the sum of the lengths of all components. */
+/* This is why we use a big filename array to take care of that. */
+/***********************************************************************/
+bool TDBMSD::InitFileNames(PGLOBAL g)
+{
+#define PFNZ 4096
+#define FNSZ (_MAX_DRIVE+_MAX_DIR+_MAX_FNAME+_MAX_EXT)
+ PTDBSDR dirp;
+ PSZ pfn[PFNZ];
+ PSZ filename;
+ int rc, n = 0;
+
+ if (trace)
+ htrc("in InitFileName: fn[]=%d\n", FNSZ);
+
+ filename = (char*)PlugSubAlloc(g, NULL, FNSZ);
+
+ // The sub table may need to refer to the Table original block
+ Tdbp->SetTable(To_Table); // Was not set at construction
+
+ PlugSetPath(filename, Tdbp->GetFile(g), Tdbp->GetPath());
+
+ if (trace)
+ htrc("InitFileName: fn='%s'\n", filename);
+
+ dirp = new(g) TDBSDR(filename);
+
+ if (dirp->OpenDB(g))
+ return true;
+
+ while (true)
+ if ((rc = dirp->ReadDB(g)) == RC_OK) {
+#if defined(__WIN__)
+ strcat(strcpy(filename, dirp->Drive), dirp->Direc);
+#else // !__WIN__
+ strcpy(filename, dirp->Direc);
+#endif // !__WIN__
+ strcat(strcat(filename, dirp->Fname), dirp->Ftype);
+ pfn[n++] = PlugDup(g, filename);
+ } else
+ break;
+
+ if (rc == RC_FX)
+ return true;
+
+ if (n) {
+ Filenames = (char**)PlugSubAlloc(g, NULL, n * sizeof(char*));
+
+ for (int i = 0; i < n; i++)
+ Filenames[i] = pfn[i];
+
+ } else {
+ Filenames = (char**)PlugSubAlloc(g, NULL, sizeof(char*));
+ Filenames[0] = NULL;
+ } // endif n
+
+ NumFiles = n;
+ return false;
+} // end of InitFileNames
+#endif // 0
+
+ /* --------------------------- Class DIRDEF -------------------------- */
/***********************************************************************/
/* DefineAM: define specific AM block values from XDB file. */
@@ -589,8 +603,9 @@ void TDBMUL::CloseDB(PGLOBAL g)
bool DIRDEF::DefineAM(PGLOBAL g, LPCSTR, int)
{
Desc = Fn = GetStringCatInfo(g, "Filename", NULL);
- Incl = (GetIntCatInfo("Subdir", 0) != 0);
- Huge = (GetIntCatInfo("Huge", 0) != 0);
+ Incl = GetBoolCatInfo("Subdir", false);
+ Huge = GetBoolCatInfo("Huge", false);
+ Nodir = GetBoolCatInfo("Nodir", true);
return false;
} // end of DefineAM
@@ -616,57 +631,40 @@ PTDB DIRDEF::GetTable(PGLOBAL g, MODE)
/***********************************************************************/
/* TABDIR constructors. */
/***********************************************************************/
-TDBDIR::TDBDIR(PDIRDEF tdp) : TDBASE(tdp)
- {
- To_File = tdp->Fn;
- iFile = 0;
+void TDBDIR::Init(void)
+{
+ iFile = 0;
#if defined(__WIN__)
- memset(&FileData, 0, sizeof(_finddata_t));
- Hsearch = -1;
- *Drive = '\0';
-#else // !__WIN__
- memset(&Fileinfo, 0, sizeof(struct stat));
- Entry = NULL;
- Dir = NULL;
- Done = false;
- *Pattern = '\0';
-#endif // !__WIN__
- *Fpath = '\0';
- *Direc = '\0';
- *Fname = '\0';
- *Ftype = '\0';
- } // end of TDBDIR standard constructor
-
-TDBDIR::TDBDIR(PTDBDIR tdbp) : TDBASE(tdbp)
- {
- To_File = tdbp->To_File;
- iFile = tdbp->iFile;
-#if defined(__WIN__)
- FileData = tdbp->FileData;
- Hsearch = tdbp->Hsearch;
- strcpy(Drive, tdbp->Drive);
+ Dvalp = NULL;
+ memset(&FileData, 0, sizeof(_finddata_t));
+ hSearch = INVALID_HANDLE_VALUE;
+ *Drive = '\0';
#else // !__WIN__
- Fileinfo = tdbp->Fileinfo;
- Entry = tdbp->Entry;
- Dir = tdbp->Dir;
- Done = tdbp->Done;
- strcpy(Pattern, tdbp->Pattern);
+ memset(&Fileinfo, 0, sizeof(struct stat));
+ Entry = NULL;
+ Dir = NULL;
+ Done = false;
+ *Pattern = '\0';
#endif // !__WIN__
- strcpy(Direc, tdbp->Direc);
- strcpy(Fname, tdbp->Fname);
- strcpy(Ftype, tdbp->Ftype);
- } // end of TDBDIR copy constructor
+ *Fpath = '\0';
+ *Direc = '\0';
+ *Fname = '\0';
+ *Ftype = '\0';
+} // end of Init
-// Method
-PTDB TDBDIR::Clone(PTABS t)
- {
- PTDB tp;
- PGLOBAL g = t->G; // Is this really useful ???
+TDBDIR::TDBDIR(PDIRDEF tdp) : TDBASE(tdp)
+{
+ To_File = tdp->Fn;
+ Nodir = tdp->Nodir;
+ Init();
+} // end of TDBDIR standard constructor
- tp = new(g) TDBDIR(this);
- tp->SetColumns(Columns);
- return tp;
- } // end of Clone
+TDBDIR::TDBDIR(PSZ fpat) : TDBASE((PTABDEF)NULL)
+{
+ To_File = fpat;
+ Nodir = true;
+ Init();
+} // end of TDBDIR constructor
/***********************************************************************/
/* Initialize/get the components of the search file pattern. */
@@ -674,18 +672,19 @@ PTDB TDBDIR::Clone(PTABS t)
char* TDBDIR::Path(PGLOBAL g)
{
PCATLG cat = PlgGetCatalog(g);
+ PTABDEF defp = (PTABDEF)To_Def;
#if defined(__WIN__)
if (!*Drive) {
- PlugSetPath(Fpath, To_File, ((PTABDEF)To_Def)->GetPath());
+ PlugSetPath(Fpath, To_File, defp ? defp->GetPath() : NULL);
_splitpath(Fpath, Drive, Direc, Fname, Ftype);
} else
- _makepath(Fpath, Drive, Direc, Fname, Ftype); // Usefull ???
+ _makepath(Fpath, Drive, Direc, Fname, Ftype); // Useful for TDBSDR
return Fpath;
#else // !__WIN__
if (!Done) {
- PlugSetPath(Fpath, To_File, ((PTABDEF)To_Def)->GetPath());
+ PlugSetPath(Fpath, To_File, defp ? defp->GetPath() : NULL);
_splitpath(Fpath, NULL, Direc, Fname, Ftype);
strcat(strcpy(Pattern, Fname), Ftype);
Done = true;
@@ -709,23 +708,48 @@ PCOL TDBDIR::MakeCol(PGLOBAL g, PCOLDEF cdp, PCOL cprec, int n)
int TDBDIR::GetMaxSize(PGLOBAL g)
{
if (MaxSize < 0) {
- int n = -1;
+ int rc, n = -1;
#if defined(__WIN__)
- int h;
// Start searching files in the target directory.
- h = _findfirst(Path(g), &FileData);
+ hSearch = FindFirstFile(Path(g), &FileData);
- if (h != -1) {
- for (n = 1;; n++)
- if (_findnext(h, &FileData))
- break;
+ if (hSearch == INVALID_HANDLE_VALUE) {
+ rc = GetLastError();
- // Close the search handle.
- _findclose(h);
- } else
- n = 0;
+ if (rc != ERROR_FILE_NOT_FOUND) {
+ char buf[512];
+
+ FormatMessage(FORMAT_MESSAGE_FROM_SYSTEM |
+ FORMAT_MESSAGE_IGNORE_INSERTS,
+ NULL, GetLastError(), 0, (LPTSTR)&buf, sizeof(buf), NULL);
+ sprintf(g->Message, MSG(BAD_FILE_HANDLE), buf);
+ return -1;
+ } // endif rc
+
+ return 0;
+ } // endif hSearch
+
+ while (true) {
+ if (!(FileData.dwFileAttributes & FILE_ATTRIBUTE_DIRECTORY))
+ n++;
+
+ if (!FindNextFile(hSearch, &FileData)) {
+ rc = GetLastError();
+
+ if (rc != ERROR_NO_MORE_FILES) {
+ sprintf(g->Message, MSG(NEXT_FILE_ERROR), rc);
+ FindClose(hSearch);
+ return -1;
+ } // endif rc
+
+ break;
+ } // endif Next
+
+ } // endwhile
+ // Close the search handle.
+ FindClose(hSearch);
#else // !__WIN__
Path(g);
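GetMaxSize now uses the Win32 FindFirstFile/FindNextFile API instead of the CRT _findfirst/_findnext calls, counts only non-directory entries, and distinguishes "no match" from a real search failure via GetLastError(). A standalone sketch of that counting loop (hypothetical helper name; assumes a non-UNICODE build, as the surrounding code does):

#include <windows.h>

// Returns the number of plain files matching pattern, 0 if nothing
// matches, or -1 on a genuine search error.
static int CountFiles(const char *pattern)
{
  WIN32_FIND_DATA fd;
  HANDLE h = FindFirstFile(pattern, &fd);
  int n = 0;

  if (h == INVALID_HANDLE_VALUE)
    return (GetLastError() == ERROR_FILE_NOT_FOUND) ? 0 : -1;

  do {
    if (!(fd.dwFileAttributes & FILE_ATTRIBUTE_DIRECTORY))
      n++;                                  // count regular files only
  } while (FindNextFile(h, &fd));

  DWORD rc = GetLastError();                // ERROR_NO_MORE_FILES is the normal end
  FindClose(h);
  return (rc == ERROR_NO_MORE_FILES) ? n : -1;
}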
@@ -791,30 +815,35 @@ int TDBDIR::ReadDB(PGLOBAL g)
int rc = RC_OK;
#if defined(__WIN__)
- if (Hsearch == -1) {
- /*******************************************************************/
- /* Start searching files in the target directory. The use of the */
- /* Path function is required when called from TDBSDR. */
- /*******************************************************************/
- Hsearch = _findfirst(Path(g), &FileData);
-
- if (Hsearch == -1)
- rc = RC_EF;
- else
- iFile++;
-
- } else {
- if (_findnext(Hsearch, &FileData)) {
- // Restore file name and type pattern
- _splitpath(To_File, NULL, NULL, Fname, Ftype);
- rc = RC_EF;
- } else
- iFile++;
-
- } // endif Hsearch
+ do {
+ if (hSearch == INVALID_HANDLE_VALUE) {
+ /*****************************************************************/
+ /* Start searching files in the target directory. The use of */
+ /* the Path function is required when called from TDBSDR. */
+ /*****************************************************************/
+ hSearch = FindFirstFile(Path(g), &FileData);
+
+ if (hSearch == INVALID_HANDLE_VALUE) {
+ rc = RC_EF;
+ break;
+ } else
+ iFile++;
+
+ } else {
+ if (!FindNextFile(hSearch, &FileData)) {
+ // Restore file name and type pattern
+ _splitpath(To_File, NULL, NULL, Fname, Ftype);
+ rc = RC_EF;
+ break;
+ } else
+ iFile++;
+
+ } // endif hSearch
+
+ } while (Nodir && FileData.dwFileAttributes & FILE_ATTRIBUTE_DIRECTORY);
if (rc == RC_OK)
- _splitpath(FileData.name, NULL, NULL, Fname, Ftype);
+ _splitpath(FileData.cFileName, NULL, NULL, Fname, Ftype);
#else // !Win32
rc = RC_NF;
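The new do/while in ReadDB keeps advancing the search while Nodir is set and the current entry is a directory, so directory names never reach the result set. The same advance-and-skip step in isolation (hypothetical helper, non-UNICODE build assumed):

#include <windows.h>

// Move the search to the next non-directory entry. Starts the search on
// the first call (h == INVALID_HANDLE_VALUE); returns false at end of
// listing or when nothing matches.
static bool NextDataFile(HANDLE &h, const char *pattern, WIN32_FIND_DATA &fd)
{
  do {
    if (h == INVALID_HANDLE_VALUE) {
      if ((h = FindFirstFile(pattern, &fd)) == INVALID_HANDLE_VALUE)
        return false;
    } else if (!FindNextFile(h, &fd))
      return false;
  } while (fd.dwFileAttributes & FILE_ATTRIBUTE_DIRECTORY);

  return true;
}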
@@ -878,8 +907,8 @@ void TDBDIR::CloseDB(PGLOBAL)
{
#if defined(__WIN__)
// Close the search handle.
- _findclose(Hsearch);
- Hsearch = -1;
+ FindClose(hSearch);
+ hSearch = INVALID_HANDLE_VALUE;
#else // !__WIN__
// Close the DIR handle
if (Dir) {
@@ -895,7 +924,7 @@ void TDBDIR::CloseDB(PGLOBAL)
/***********************************************************************/
/* DIRCOL public constructor. */
/***********************************************************************/
-DIRCOL::DIRCOL(PCOLDEF cdp, PTDB tdbp, PCOL cprec, int i, PSZ)
+DIRCOL::DIRCOL(PCOLDEF cdp, PTDB tdbp, PCOL cprec, int i, PCSZ)
: COLBLK(cdp, tdbp, i)
{
if (cprec) {
@@ -907,6 +936,7 @@ DIRCOL::DIRCOL(PCOLDEF cdp, PTDB tdbp, PCOL cprec, int i, PSZ)
} // endif cprec
// Set additional DIR access method information for column.
+ Tdbp = (PTDBDIR)tdbp;
N = cdp->GetOffset();
} // end of DIRCOL constructor
@@ -916,75 +946,84 @@ DIRCOL::DIRCOL(PCOLDEF cdp, PTDB tdbp, PCOL cprec, int i, PSZ)
/***********************************************************************/
DIRCOL::DIRCOL(DIRCOL *col1, PTDB tdbp) : COLBLK(col1, tdbp)
{
- N = col1->N;
+ Tdbp = (PTDBDIR)tdbp;
+ N = col1->N;
} // end of DIRCOL copy constructor
+#if defined(__WIN__)
+/***********************************************************************/
+/* Retrieve time information from FileData. */
+/***********************************************************************/
+void DIRCOL::SetTimeValue(PGLOBAL g, FILETIME& ftime)
+{
+ char tsp[24];
+ SYSTEMTIME stp;
+
+ if (FileTimeToSystemTime(&ftime, &stp)) {
+ sprintf(tsp, "%04d-%02d-%02d %02d:%02d:%02d",
+ stp.wYear, stp.wMonth, stp.wDay, stp.wHour, stp.wMinute, stp.wSecond);
+
+ if (Value->GetType() != TYPE_STRING) {
+ if (!Tdbp->Dvalp)
+ Tdbp->Dvalp = AllocateValue(g, TYPE_DATE, 20, 0, false,
+ "YYYY-MM-DD hh:mm:ss");
+
+ Tdbp->Dvalp->SetValue_psz(tsp);
+ Value->SetValue_pval(Tdbp->Dvalp);
+ } else
+ Value->SetValue_psz(tsp);
+
+ } else
+ Value->Reset();
+
+} // end of SetTimeValue
+#endif // __WIN__
+
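SetTimeValue converts the WIN32_FIND_DATA timestamps through FileTimeToSystemTime before formatting them as "YYYY-MM-DD hh:mm:ss". A self-contained sketch of the same conversion (the result is in UTC; FileTimeToLocalFileTime would be needed first if local time were wanted):

#include <windows.h>
#include <cstdio>

static bool FileTimeToText(const FILETIME &ft, char *buf, size_t len)
{
  SYSTEMTIME st;

  if (!FileTimeToSystemTime(&ft, &st))
    return false;                           // invalid FILETIME

  snprintf(buf, len, "%04d-%02d-%02d %02d:%02d:%02d",
           st.wYear, st.wMonth, st.wDay,
           st.wHour, st.wMinute, st.wSecond);
  return true;
}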
/***********************************************************************/
/* ReadColumn: what this routine does is to access the information */
/* corresponding to this column and convert it to buffer type. */
/***********************************************************************/
void DIRCOL::ReadColumn(PGLOBAL g)
- {
- PTDBDIR tdbp = (PTDBDIR)To_Tdb;
-
+ {
if (trace)
htrc("DIR ReadColumn: col %s R%d use=%.4X status=%.4X type=%d N=%d\n",
- Name, tdbp->GetTdb_No(), ColUse, Status, Buf_Type, N);
+ Name, Tdbp->GetTdb_No(), ColUse, Status, Buf_Type, N);
/*********************************************************************/
/* Retrieve the information corresponding to the column number. */
/*********************************************************************/
switch (N) {
#if defined(__WIN__)
- case 0: Value->SetValue_psz(tdbp->Drive); break;
+ case 0: Value->SetValue_psz(Tdbp->Drive); break;
#endif // __WIN__
- case 1: Value->SetValue_psz(tdbp->Direc); break;
- case 2: Value->SetValue_psz(tdbp->Fname); break;
- case 3: Value->SetValue_psz(tdbp->Ftype); break;
+ case 1: Value->SetValue_psz(Tdbp->Direc); break;
+ case 2: Value->SetValue_psz(Tdbp->Fname); break;
+ case 3: Value->SetValue_psz(Tdbp->Ftype); break;
#if defined(__WIN__)
- case 4: Value->SetValue((int)tdbp->FileData.attrib); break;
- case 5: Value->SetValue((int)tdbp->FileData.size); break;
- case 6: Value->SetValue((int)tdbp->FileData.time_write); break;
- case 7: Value->SetValue((int)tdbp->FileData.time_create); break;
- case 8: Value->SetValue((int)tdbp->FileData.time_access); break;
+ case 4: Value->SetValue((int)Tdbp->FileData.dwFileAttributes); break;
+ case 5: Value->SetValue((int)Tdbp->FileData.nFileSizeLow); break;
+ case 6: SetTimeValue(g, Tdbp->FileData.ftLastWriteTime); break;
+ case 7: SetTimeValue(g, Tdbp->FileData.ftCreationTime); break;
+ case 8: SetTimeValue(g, Tdbp->FileData.ftLastAccessTime); break;
#else // !__WIN__
- case 4: Value->SetValue((int)tdbp->Fileinfo.st_mode); break;
- case 5: Value->SetValue((int)tdbp->Fileinfo.st_size); break;
- case 6: Value->SetValue((int)tdbp->Fileinfo.st_mtime); break;
- case 7: Value->SetValue((int)tdbp->Fileinfo.st_ctime); break;
- case 8: Value->SetValue((int)tdbp->Fileinfo.st_atime); break;
- case 9: Value->SetValue((int)tdbp->Fileinfo.st_uid); break;
- case 10: Value->SetValue((int)tdbp->Fileinfo.st_gid); break;
+ case 4: Value->SetValue((int)Tdbp->Fileinfo.st_mode); break;
+ case 5: Value->SetValue((int)Tdbp->Fileinfo.st_size); break;
+ case 6: Value->SetValue((int)Tdbp->Fileinfo.st_mtime); break;
+ case 7: Value->SetValue((int)Tdbp->Fileinfo.st_ctime); break;
+ case 8: Value->SetValue((int)Tdbp->Fileinfo.st_atime); break;
+ case 9: Value->SetValue((int)Tdbp->Fileinfo.st_uid); break;
+ case 10: Value->SetValue((int)Tdbp->Fileinfo.st_gid); break;
#endif // !__WIN__
default:
sprintf(g->Message, MSG(INV_DIRCOL_OFST), N);
- longjmp(g->jumper[g->jump_level], GetAmType());
- } // endswitch N
+ throw GetAmType();
+ } // endswitch N
} // end of ReadColumn
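ReadColumn (here and in the other CONNECT table types touched by this patch) no longer unwinds with longjmp(g->jumper[...], code) on a bad column offset; it throws the code as an int, which an enclosing try/catch in the calling handler layer is expected to catch. A minimal illustration of that control flow, with made-up function names:

static int ReadOffset(int n)
{
  if (n < 0 || n > 10)
    throw 11;               // was: longjmp(g->jumper[g->jump_level], 11)

  return n;
}

static int SafeRead(int n)
{
  try {
    return ReadOffset(n);
  } catch (int rc) {        // the enclosing driver code catches the code
    return -rc;
  } // end catch
}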
/* ------------------------- Class TDBSDR ---------------------------- */
/***********************************************************************/
-/* TABSDR copy constructors. */
-/***********************************************************************/
-TDBSDR::TDBSDR(PTDBSDR tdbp) : TDBDIR(tdbp)
- {
- Sub = tdbp->Sub;
- } // end of TDBSDR copy constructor
-
-// Method
-PTDB TDBSDR::Clone(PTABS t)
- {
- PTDB tp;
- PGLOBAL g = t->G; // Is this really useful ???
-
- tp = new(g) TDBSDR(this);
- tp->SetColumns(Columns);
- return tp;
- } // end of Clone
-
-/***********************************************************************/
/* SDR GetMaxSize: returns the number of retrieved files. */
/***********************************************************************/
int TDBSDR::GetMaxSize(PGLOBAL g)
@@ -998,47 +1037,124 @@ int TDBSDR::GetMaxSize(PGLOBAL g)
} // end of GetMaxSize
/***********************************************************************/
-/* SDR GetMaxSize: returns the number of retrieved files. */
+/* SDR FindInDir: returns the number of retrieved files. */
/***********************************************************************/
int TDBSDR::FindInDir(PGLOBAL g)
{
- int n = 0;
+ int rc, n = 0;
size_t m = strlen(Direc);
// Start searching files in the target directory.
#if defined(__WIN__)
- int h = _findfirst(Path(g), &FileData);
+ HANDLE h;
- if (h != -1) {
- for (n = 1;; n++)
- if (_findnext(h, &FileData))
- break;
+#if defined(PATHMATCHSPEC)
+ if (!*Drive)
+ Path(g);
- // Close the search handle.
- _findclose(h);
- } // endif h
+ _makepath(Fpath, Drive, Direc, "*", "*");
- // Now search files in sub-directories.
- _makepath(Fpath, Drive, Direc, "*", "");
- h = _findfirst(Fpath, &FileData);
+ h = FindFirstFile(Fpath, &FileData);
- if (h != -1) {
- while (true) {
- if (FileData.attrib & _A_SUBDIR && *FileData.name != '.') {
- // Look in the name sub-directory
- strcat(strcat(Direc, FileData.name), "\\");
- n += FindInDir(g);
- Direc[m] = '\0'; // Restore path
- } // endif SUBDIR
+ if (h == INVALID_HANDLE_VALUE) {
+ rc = GetLastError();
- if (_findnext(h, &FileData))
- break;
+ if (rc != ERROR_FILE_NOT_FOUND) {
+ char buf[512];
- } // endwhile
+ FormatMessage(FORMAT_MESSAGE_FROM_SYSTEM |
+ FORMAT_MESSAGE_IGNORE_INSERTS,
+ NULL, GetLastError(), 0, (LPTSTR)&buf, sizeof(buf), NULL);
+ sprintf(g->Message, MSG(BAD_FILE_HANDLE), buf);
+ return -1;
+ } // endif rc
- // Close the search handle.
- _findclose(h);
- } // endif h
+ return 0;
+ } // endif h
+
+ while (true) {
+ if ((FileData.dwFileAttributes & FILE_ATTRIBUTE_DIRECTORY) &&
+ *FileData.cFileName != '.') {
+ // Look in the name sub-directory
+ strcat(strcat(Direc, FileData.cFileName), "/");
+ n += FindInDir(g);
+ Direc[m] = '\0'; // Restore path
+ } else if (PathMatchSpec(FileData.cFileName, Fpath))
+ n++;
+
+ if (!FindNextFile(h, &FileData)) {
+ rc = GetLastError();
+
+ if (rc != ERROR_NO_MORE_FILES) {
+ sprintf(g->Message, MSG(NEXT_FILE_ERROR), rc);
+ FindClose(h);
+ return -1;
+ } // endif rc
+
+ break;
+ } // endif Next
+
+ } // endwhile
+#else // !PATHMATCHSPEC
+ h = FindFirstFile(Path(g), &FileData);
+
+ if (h == INVALID_HANDLE_VALUE) {
+ rc = GetLastError();
+
+ if (rc != ERROR_FILE_NOT_FOUND) {
+ char buf[512];
+
+ FormatMessage(FORMAT_MESSAGE_FROM_SYSTEM |
+ FORMAT_MESSAGE_IGNORE_INSERTS,
+ NULL, GetLastError(), 0, (LPTSTR)&buf, sizeof(buf), NULL);
+ sprintf(g->Message, MSG(BAD_FILE_HANDLE), buf);
+ return -1;
+ } // endif rc
+
+ return 0;
+ } // endif hSearch
+
+ while (true) {
+ n++;
+
+ if (!FindNextFile(h, &FileData)) {
+ rc = GetLastError();
+
+ if (rc != ERROR_NO_MORE_FILES) {
+ sprintf(g->Message, MSG(NEXT_FILE_ERROR), rc);
+ FindClose(h);
+ return -1;
+ } // endif rc
+
+ break;
+ } // endif Next
+
+ } // endwhile
+
+ // Now search files in sub-directories.
+ _makepath(Fpath, Drive, Direc, "*", ".");
+ h = FindFirstFile(Fpath, &FileData);
+
+ if (h != INVALID_HANDLE_VALUE) {
+ while (true) {
+ if ((FileData.dwFileAttributes & FILE_ATTRIBUTE_DIRECTORY) &&
+ *FileData.cFileName != '.') {
+ // Look in the name sub-directory
+ strcat(strcat(Direc, FileData.cFileName), "/");
+ n += FindInDir(g);
+ Direc[m] = '\0'; // Restore path
+ } // endif SUBDIR
+
+ if (!FindNextFile(h, &FileData))
+ break;
+
+ } // endwhile
+
+ } // endif h
+#endif // !PATHMATCHSPEC
+
+ // Close the search handle.
+ FindClose(h);
#else // !__WIN__
int k;
DIR *dir = opendir(Direc);
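FindInDir now descends recursively: regular files are counted, and every sub-directory other than "." and ".." triggers a recursive call with the lengthened path, which is restored afterwards. A simplified standalone version of that walk (illustrative only; ANSI build assumed, PathMatchSpec filtering omitted):

#include <windows.h>
#include <cstring>
#include <string>

static int CountTree(const std::string &dir)
{
  WIN32_FIND_DATA fd;
  HANDLE h = FindFirstFile((dir + "\\*").c_str(), &fd);
  int n = 0;

  if (h == INVALID_HANDLE_VALUE)
    return (GetLastError() == ERROR_FILE_NOT_FOUND) ? 0 : -1;

  do {
    if (fd.dwFileAttributes & FILE_ATTRIBUTE_DIRECTORY) {
      if (strcmp(fd.cFileName, ".") && strcmp(fd.cFileName, "..")) {
        int sub = CountTree(dir + "\\" + fd.cFileName);   // recurse
        if (sub > 0)
          n += sub;
      } // endif dot entries
    } else
      n++;                                                // a plain file
  } while (FindNextFile(h, &fd));

  FindClose(h);
  return n;
}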
@@ -1090,7 +1206,7 @@ bool TDBSDR::OpenDB(PGLOBAL g)
Sub->Next = NULL;
Sub->Prev = NULL;
#if defined(__WIN__)
- Sub->H = -1;
+ Sub->H = INVALID_HANDLE_VALUE;
Sub->Len = strlen(Direc);
#else // !__WIN__
Sub->D = NULL;
@@ -1116,18 +1232,22 @@ int TDBSDR::ReadDB(PGLOBAL g)
// Are there more files in sub-directories
retry:
do {
- if (Sub->H == -1) {
- _makepath(Fpath, Drive, Direc, "*", "");
- Sub->H = _findfirst(Fpath, &FileData);
- } else if (_findnext(Sub->H, &FileData)) {
- _findclose(Sub->H);
- Sub->H = -1;
- *FileData.name = '\0';
- } // endif findnext
-
- } while(*FileData.name == '.');
-
- if (Sub->H == -1) {
+ if (Sub->H == INVALID_HANDLE_VALUE) {
+// _makepath(Fpath, Drive, Direc, "*", "."); why was this made?
+ _makepath(Fpath, Drive, Direc, "*", NULL);
+ Sub->H = FindFirstFile(Fpath, &FileData);
+ } else if (!FindNextFile(Sub->H, &FileData)) {
+ FindClose(Sub->H);
+ Sub->H = INVALID_HANDLE_VALUE;
+ *FileData.cFileName= '\0';
+ break;
+ } // endif findnext
+
+ } while(!(FileData.dwFileAttributes & FILE_ATTRIBUTE_DIRECTORY) ||
+ (*FileData.cFileName == '.' &&
+ (!FileData.cFileName[1] || FileData.cFileName[1] == '.')));
+
+ if (Sub->H == INVALID_HANDLE_VALUE) {
// No more sub-directories. Are we in a sub-directory?
if (!Sub->Prev)
return rc; // No, all is finished
@@ -1145,17 +1265,17 @@ int TDBSDR::ReadDB(PGLOBAL g)
sup = (PSUBDIR)PlugSubAlloc(g, NULL, sizeof(SUBDIR));
sup->Next = NULL;
sup->Prev = Sub;
- sup->H = -1;
+ sup->H = INVALID_HANDLE_VALUE;
Sub->Next = sup;
} // endif Next
Sub = Sub->Next;
- strcat(strcat(Direc, FileData.name), "\\");
+ strcat(strcat(Direc, FileData.cFileName), "/");
Sub->Len = strlen(Direc);
// Reset Hsearch used by TDBDIR::ReadDB
- _findclose(Hsearch);
- Hsearch = -1;
+ FindClose(hSearch);
+ hSearch = INVALID_HANDLE_VALUE;
goto again;
} // endif H
@@ -1179,7 +1299,8 @@ int TDBSDR::ReadDB(PGLOBAL g)
if (lstat(Fpath, &Fileinfo) < 0) {
sprintf(g->Message, "%s: %s", Fpath, strerror(errno));
rc = RC_FX;
- } else if (S_ISDIR(Fileinfo.st_mode) && *Entry->d_name != '.') {
+ } else if (S_ISDIR(Fileinfo.st_mode) && strcmp(Entry->d_name, ".")
+ && strcmp(Entry->d_name, "..")) {
// Look in the name sub-directory
if (!Sub->Next) {
PSUBDIR sup;
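The Unix branch gets the same correction as the Windows one: only the literal "." and ".." entries are skipped, so hidden directories (".cache", ".git", ...) are now traversed. The test, written as a hypothetical helper:

#include <cstring>

static inline bool IsDotOrDotDot(const char *name)
{
  return strcmp(name, ".") == 0 || strcmp(name, "..") == 0;
}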
@@ -1423,8 +1544,8 @@ void TDBDHR::CloseDB(PGLOBAL g)
// Close the search handle.
if (!FindClose(Hsearch)) {
strcpy(g->Message, MSG(SRCH_CLOSE_ERR));
- longjmp(g->jumper[g->jump_level], GetAmType());
- } // endif FindClose
+ throw GetAmType();
+ } // endif FindClose
iFile = 0;
Hsearch = INVALID_HANDLE_VALUE;
@@ -1435,8 +1556,8 @@ void TDBDHR::CloseDB(PGLOBAL g)
/***********************************************************************/
/* DHRCOL public constructor. */
/***********************************************************************/
-DHRCOL::DHRCOL(PCOLDEF cdp, PTDB tdbp, PCOL cprec, int i, PSZ am)
- : COLBLK(cdp, tdbp, i)
+DHRCOL::DHRCOL(PCOLDEF cdp, PTDB tdbp, PCOL cprec, int i, PCSZ am)
+ : COLBLK(cdp, tdbp, i)
{
if (cprec) {
Next = cprec->GetNext();
diff --git a/storage/connect/tabmul.h b/storage/connect/tabmul.h
index 51fa7f9000a..8a95a772c41 100644
--- a/storage/connect/tabmul.h
+++ b/storage/connect/tabmul.h
@@ -39,7 +39,7 @@ class DllExport TDBMUL : public TDBASE {
virtual void ResetDB(void);
virtual PTDB Clone(PTABS t);
virtual bool IsSame(PTDB tp) {return tp == (PTDB)Tdbp;}
- virtual PSZ GetFile(PGLOBAL g) {return Tdbp->GetFile(g);}
+ virtual PCSZ GetFile(PGLOBAL g) {return Tdbp->GetFile(g);}
virtual int GetRecpos(void) {return 0;}
virtual PCOL ColDB(PGLOBAL g, PSZ name, int num);
bool InitFileNames(PGLOBAL g);
@@ -69,6 +69,34 @@ class DllExport TDBMUL : public TDBASE {
int iFile; // Index of currently processed file
}; // end of class TDBMUL
+#if 0
+/***********************************************************************/
+/* This is the MSD Access Method class declaration for files that are */
+/* physically split in multiple files having the same format. */
+/* This sub-class also include files of the sub-directories. */
+/***********************************************************************/
+class DllExport TDBMSD : public TDBMUL {
+ //friend class MULCOL;
+public:
+ // Constructor
+ TDBMSD(PTDB tdbp) : TDBMUL(tdbp) {}
+ TDBMSD(PTDBMSD tdbp) : TDBMUL(tdbp) {}
+
+ // Implementation
+ virtual PTDB Duplicate(PGLOBAL g);
+
+ // Methods
+ virtual PTDB Clone(PTABS t);
+ bool InitFileNames(PGLOBAL g);
+
+ // Database routines
+
+protected:
+
+ // Members
+}; // end of class TDBMSD
+#endif
+
/***********************************************************************/
/* Directory listing table. */
/***********************************************************************/
@@ -90,7 +118,8 @@ class DllExport DIRDEF : public TABDEF { /* Directory listing table */
// Members
PSZ Fn; /* Path/Name of file search */
bool Incl; /* true to include sub-directories */
- bool Huge; /* true if files can be larger than 2GB */
+ bool Huge; /* true if files can be larger than 2GB */
+ bool Nodir; /* true to exclude directories */
}; // end of DIRDEF
/***********************************************************************/
@@ -101,18 +130,16 @@ class DllExport DIRDEF : public TABDEF { /* Directory listing table */
/***********************************************************************/
class TDBDIR : public TDBASE {
friend class DIRCOL;
- public:
+ friend class TDBMUL;
+public:
// Constructor
TDBDIR(PDIRDEF tdp);
- TDBDIR(PTDBDIR tdbp);
+ TDBDIR(PSZ fpat);
// Implementation
virtual AMT GetAmType(void) {return TYPE_AM_DIR;}
- virtual PTDB Duplicate(PGLOBAL g)
- {return (PTDB)new(g) TDBDIR(this);}
// Methods
- virtual PTDB Clone(PTABS t);
virtual int GetRecpos(void) {return iFile;}
// Database routines
@@ -127,14 +154,16 @@ class TDBDIR : public TDBASE {
virtual void CloseDB(PGLOBAL g);
protected:
+ void Init(void);
char *Path(PGLOBAL g);
// Members
PSZ To_File; // Points to file search pathname
int iFile; // Index of currently retrieved file
#if defined(__WIN__)
- _finddata_t FileData; // Find data structure
- intptr_t Hsearch; // Search handle
+ PVAL Dvalp; // Used to retrieve file date values
+ WIN32_FIND_DATA FileData; // Find data structure
+ HANDLE hSearch; // Search handle
char Drive[_MAX_DRIVE]; // Drive name
#else // !__WIN__
struct stat Fileinfo; // File info structure
@@ -147,6 +176,7 @@ class TDBDIR : public TDBASE {
char Direc[_MAX_DIR]; // Search path
char Fname[_MAX_FNAME]; // File name
char Ftype[_MAX_EXT]; // File extention
+ bool Nodir; // Exclude directories from file list
}; // end of class TDBDIR
/***********************************************************************/
@@ -158,17 +188,11 @@ class TDBDIR : public TDBASE {
/***********************************************************************/
class TDBSDR : public TDBDIR {
friend class DIRCOL;
+ friend class TDBMUL;
public:
// Constructors
TDBSDR(PDIRDEF tdp) : TDBDIR(tdp) {Sub = NULL;}
- TDBSDR(PTDBSDR tdbp);
-
- // Implementation
- virtual PTDB Duplicate(PGLOBAL g)
- {return (PTDB)new(g) TDBSDR(this);}
-
- // Methods
- virtual PTDB Clone(PTABS t);
+ TDBSDR(PSZ fpat) : TDBDIR(fpat) {Sub = NULL;}
// Database routines
virtual int GetMaxSize(PGLOBAL g);
@@ -184,7 +208,7 @@ class TDBSDR : public TDBDIR {
struct _Sub_Dir *Next;
struct _Sub_Dir *Prev;
#if defined(__WIN__)
- intptr_t H; // Search handle
+ HANDLE H; // Search handle
#else // !__WIN__
DIR *D;
#endif // !__WIN__
@@ -202,7 +226,7 @@ class TDBSDR : public TDBDIR {
class DIRCOL : public COLBLK {
public:
// Constructors
- DIRCOL(PCOLDEF cdp, PTDB tdbp, PCOL cprec, int i, PSZ am = "DIR");
+ DIRCOL(PCOLDEF cdp, PTDB tdbp, PCOL cprec, int i, PCSZ am = "DIR");
DIRCOL(DIRCOL *colp, PTDB tdbp); // Constructor used in copy process
// Implementation
@@ -214,7 +238,11 @@ class DIRCOL : public COLBLK {
protected:
// Default constructor not to be used
DIRCOL(void) {}
+#if defined(__WIN__)
+ void SetTimeValue(PGLOBAL g, FILETIME& ftime);
+#endif // __WIN__
// Members
+ PTDBDIR Tdbp; // To DIR table
int N; // Column number
}; // end of class DIRCOL
diff --git a/storage/connect/tabmysql.cpp b/storage/connect/tabmysql.cpp
index 1a715819fc8..bdddcf64ca8 100644
--- a/storage/connect/tabmysql.cpp
+++ b/storage/connect/tabmysql.cpp
@@ -68,8 +68,8 @@ void PrintResult(PGLOBAL, PSEM, PQRYRES);
#endif // _CONSOLE
// Used to check whether a MYSQL table is created on itself
-bool CheckSelf(PGLOBAL g, TABLE_SHARE *s, const char *host,
- const char *db, char *tab, const char *src, int port);
+bool CheckSelf(PGLOBAL g, TABLE_SHARE *s, PCSZ host, PCSZ db,
+ PCSZ tab, PCSZ src, int port);
/***********************************************************************/
/* External function. */
@@ -125,7 +125,7 @@ bool MYSQLDEF::GetServerInfo(PGLOBAL g, const char *server_name)
} // endif server
DBUG_PRINT("info", ("get_server_by_name returned server at %lx",
- (long unsigned int) server));
+ (size_t) server));
// TODO: We need to examine which of these can really be NULL
Hostname = PlugDup(g, server->host);
@@ -183,19 +183,22 @@ bool MYSQLDEF::GetServerInfo(PGLOBAL g, const char *server_name)
/***********************************************************************/
bool MYSQLDEF::ParseURL(PGLOBAL g, char *url, bool b)
{
+ char *tabn, *pwd, *schema;
+
if ((!strstr(url, "://") && (!strchr(url, '@')))) {
// No :// or @ in connection string. Must be a straight
// connection name of either "server" or "server/table"
// ok, so we do a little parsing, but not completely!
- if ((Tabname= strchr(url, '/'))) {
+ if ((tabn= strchr(url, '/'))) {
// If there is a single '/' in the connection string,
// this means the user is specifying a table name
- *Tabname++= '\0';
+ *tabn++= '\0';
// there better not be any more '/'s !
- if (strchr(Tabname, '/'))
+ if (strchr(tabn, '/'))
return true;
+ Tabname = tabn;
} else
// Otherwise, straight server name,
Tabname = (b) ? GetStringCatInfo(g, "Tabname", Name) : NULL;
@@ -223,7 +226,7 @@ bool MYSQLDEF::ParseURL(PGLOBAL g, char *url, bool b)
Username += 3;
- if (!(Hostname = strchr(Username, '@'))) {
+ if (!(Hostname = (char*)strchr(Username, '@'))) {
strcpy(g->Message, "No host specified in URL");
return true;
} else {
@@ -231,11 +234,11 @@ bool MYSQLDEF::ParseURL(PGLOBAL g, char *url, bool b)
Server = Hostname;
} // endif Hostname
- if ((Password = strchr(Username, ':'))) {
- *Password++ = 0; // End username
+ if ((pwd = (char*)strchr(Username, ':'))) {
+ *pwd++ = 0; // End username
- // Make sure there isn't an extra / or @
- if ((strchr(Password, '/') || strchr(Hostname, '@'))) {
+ // Make sure there isn't an extra /
+ if (strchr(pwd, '/')) {
strcpy(g->Message, "Syntax error in URL");
return true;
} // endif
@@ -243,8 +246,10 @@ bool MYSQLDEF::ParseURL(PGLOBAL g, char *url, bool b)
// Found that if the string is:
// user:@hostname:port/db/table
// Then password is a null string, so set to NULL
- if ((Password[0] == 0))
- Password = NULL;
+ if ((pwd[0] == 0))
+ Password = NULL;
+ else
+ Password = pwd;
} // endif password
@@ -254,21 +259,23 @@ bool MYSQLDEF::ParseURL(PGLOBAL g, char *url, bool b)
return true;
} // endif
- if ((Tabschema = strchr(Hostname, '/'))) {
- *Tabschema++ = 0;
+ if ((schema = strchr(Hostname, '/'))) {
+ *schema++ = 0;
- if ((Tabname = strchr(Tabschema, '/'))) {
- *Tabname++ = 0;
+ if ((tabn = strchr(schema, '/'))) {
+ *tabn++ = 0;
// Make sure there's not an extra /
- if ((strchr(Tabname, '/'))) {
+ if ((strchr(tabn, '/'))) {
strcpy(g->Message, "Syntax error in URL");
return true;
} // endif /
+ Tabname = tabn;
} // endif TableName
- } // endif database
+ Tabschema = schema;
+ } // endif database
if ((sport = strchr(Hostname, ':')))
*sport++ = 0;
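ParseURL splits connection strings of the form user:pwd@host:port/db/table, now writing into local pointers first and assigning the (const) members only once a piece is validated. A condensed, purely hypothetical parser using the same strchr-and-terminate technique:

#include <cstring>

struct URLPARTS {
  char *user, *pwd, *host, *port, *db, *tab;
}; // end of URLPARTS

// Splits "user:pwd@host:port/db/table" in place; returns true on error.
static bool SplitUrl(char *url, URLPARTS &p)
{
  p.user = p.pwd = p.host = p.port = p.db = p.tab = NULL;

  if (char *s = strstr(url, "://"))
    url = s + 3;                      // skip the scheme if present

  p.user = url;

  char *at = strchr(url, '@');
  if (!at)
    return true;                      // no host part

  *at = '\0';
  p.host = at + 1;

  if ((p.pwd = strchr(p.user, ':')))
    *p.pwd++ = '\0';

  if ((p.db = strchr(p.host, '/')))
    *p.db++ = '\0';

  if (p.db && (p.tab = strchr(p.db, '/')))
    *p.tab++ = '\0';

  if ((p.port = strchr(p.host, ':')))
    *p.port++ = '\0';

  return false;
}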
@@ -349,7 +356,7 @@ bool MYSQLDEF::DefineAM(PGLOBAL g, LPCSTR am, int)
Portnumber = GetIntCatInfo("Port", GetDefaultPort());
Server = Hostname;
} else {
- char *locdb = Tabschema;
+ PCSZ locdb = Tabschema;
if (ParseURL(g, url))
return true;
@@ -495,11 +502,11 @@ PCOL TDBMYSQL::MakeCol(PGLOBAL g, PCOLDEF cdp, PCOL cprec, int n)
/* filter should be removed from column list. */
/***********************************************************************/
bool TDBMYSQL::MakeSelect(PGLOBAL g, bool mx)
- {
+{
//char *tk = "`";
char tk = '`';
int len = 0, rank = 0;
- bool b = false, oom = false;
+ bool b = false;
PCOL colp;
//PDBUSER dup = PlgGetUser(g);
@@ -526,13 +533,13 @@ bool TDBMYSQL::MakeSelect(PGLOBAL g, bool mx)
for (colp = Columns; colp; colp = colp->GetNext())
if (!colp->IsSpecial()) {
if (b)
- oom |= Query->Append(", ");
+ Query->Append(", ");
else
b = true;
- oom |= Query->Append(tk);
- oom |= Query->Append(colp->GetName());
- oom |= Query->Append(tk);
+ Query->Append(tk);
+ Query->Append(colp->GetName());
+ Query->Append(tk);
((PMYCOL)colp)->Rank = rank++;
} // endif colp
@@ -542,22 +549,22 @@ bool TDBMYSQL::MakeSelect(PGLOBAL g, bool mx)
// Query '*' from...
// (the use of a char constant minimize the result storage)
if (Isview)
- oom |= Query->Append('*');
+ Query->Append('*');
else
- oom |= Query->Append("'*'");
+ Query->Append("'*'");
} // endif ncol
- oom |= Query->Append(" FROM ");
- oom |= Query->Append(tk);
- oom |= Query->Append(TableName);
- oom |= Query->Append(tk);
+ Query->Append(" FROM ");
+ Query->Append(tk);
+ Query->Append(TableName);
+ Query->Append(tk);
len = Query->GetLength();
if (To_CondFil) {
if (!mx) {
- oom |= Query->Append(" WHERE ");
- oom |= Query->Append(To_CondFil->Body);
+ Query->Append(" WHERE ");
+ Query->Append(To_CondFil->Body);
len = Query->GetLength() + 1;
} else
len += (strlen(To_CondFil->Body) + 256);
@@ -565,25 +572,25 @@ bool TDBMYSQL::MakeSelect(PGLOBAL g, bool mx)
} else
len += (mx ? 256 : 1);
- if (oom || Query->Resize(len)) {
+ if (Query->IsTruncated() || Query->Resize(len)) {
strcpy(g->Message, "MakeSelect: Out of memory");
return true;
- } // endif oom
+ } // endif Query
if (trace)
htrc("Query=%s\n", Query->GetStr());
return false;
- } // end of MakeSelect
+} // end of MakeSelect
/***********************************************************************/
/* MakeInsert: make the Insert statement used with MySQL connection. */
/***********************************************************************/
bool TDBMYSQL::MakeInsert(PGLOBAL g)
{
- char *tk = "`";
+ const char *tk = "`";
uint len = 0;
- bool b = false, oom;
+ bool oom, b = false;
PCOL colp;
if (Query)
@@ -622,38 +629,38 @@ bool TDBMYSQL::MakeInsert(PGLOBAL g)
Query = new(g) STRING(g, len);
if (Delayed)
- oom = Query->Set("INSERT DELAYED INTO ");
+ Query->Set("INSERT DELAYED INTO ");
else
- oom = Query->Set("INSERT INTO ");
+ Query->Set("INSERT INTO ");
- oom |= Query->Append(tk);
- oom |= Query->Append(TableName);
- oom |= Query->Append("` (");
+ Query->Append(tk);
+ Query->Append(TableName);
+ Query->Append("` (");
for (colp = Columns; colp; colp = colp->GetNext()) {
if (b)
- oom |= Query->Append(", ");
+ Query->Append(", ");
else
b = true;
- oom |= Query->Append(tk);
- oom |= Query->Append(colp->GetName());
- oom |= Query->Append(tk);
+ Query->Append(tk);
+ Query->Append(colp->GetName());
+ Query->Append(tk);
} // endfor colp
- oom |= Query->Append(") VALUES (");
+ Query->Append(") VALUES (");
#if defined(MYSQL_PREPARED_STATEMENTS)
if (Prep) {
for (int i = 0; i < Nparm; i++)
- oom |= Query->Append("?,");
+ Query->Append("?,");
Query->RepLast(')');
Query->Trim();
} // endif Prep
#endif // MYSQL_PREPARED_STATEMENTS
- if (oom)
+ if ((oom = Query->IsTruncated()))
strcpy(g->Message, "MakeInsert: Out of memory");
return oom;
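Throughout this patch the per-call "oom |= Query->Append(...)" bookkeeping is dropped: STRING::Append() is called unconditionally and a single IsTruncated() test at the end decides whether the built statement is usable. A usage sketch of that pattern, restricted to the STRING and column calls visible in this patch (it still assumes the CONNECT headers):

bool BuildColumnList(PGLOBAL g, STRING *qp, PCOL columns)
{
  bool b = false;

  for (PCOL colp = columns; colp; colp = colp->GetNext()) {
    if (b)
      qp->Append(", ");
    else
      b = true;

    qp->Append(colp->GetName());    // no per-call overflow check
  } // endfor colp

  if (qp->IsTruncated()) {          // one check once the list is built
    strcpy(g->Message, "BuildColumnList: Out of memory");
    return true;
  } // endif truncated

  return false;
}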
@@ -684,18 +691,18 @@ bool TDBMYSQL::MakeCommand(PGLOBAL g)
strlwr(strcpy(name, Name)); // Not a keyword
if ((p = strstr(qrystr, name))) {
- bool oom = Query->Set(Qrystr, p - qrystr);
+ Query->Set(Qrystr, p - qrystr);
if (qtd && *(p-1) == ' ') {
- oom |= Query->Append('`');
- oom |= Query->Append(TableName);
- oom |= Query->Append('`');
+ Query->Append('`');
+ Query->Append(TableName);
+ Query->Append('`');
} else
- oom |= Query->Append(TableName);
+ Query->Append(TableName);
- oom |= Query->Append(Qrystr + (p - qrystr) + strlen(name));
+ Query->Append(Qrystr + (p - qrystr) + strlen(name));
- if (oom) {
+ if (Query->IsTruncated()) {
strcpy(g->Message, "MakeCommand: Out of memory");
return true;
} else
@@ -1096,7 +1103,7 @@ bool TDBMYSQL::ReadKey(PGLOBAL g, OPVAL op, const key_range *kr)
To_CondFil->Body= (char*)PlugSubAlloc(g, NULL, 0);
*To_CondFil->Body= 0;
- if ((To_CondFil = hc->CheckCond(g, To_CondFil, To_CondFil->Cond)))
+ if ((To_CondFil = hc->CheckCond(g, To_CondFil, Cond)))
PlugSubAlloc(g, NULL, strlen(To_CondFil->Body) + 1);
} // endif active_index
@@ -1161,24 +1168,23 @@ int TDBMYSQL::WriteDB(PGLOBAL g)
int rc;
uint len = Query->GetLength();
char buf[64];
- bool oom = false;
// Make the Insert command value list
for (PCOL colp = Columns; colp; colp = colp->GetNext()) {
if (!colp->GetValue()->IsNull()) {
if (colp->GetResultType() == TYPE_STRING ||
colp->GetResultType() == TYPE_DATE)
- oom |= Query->Append_quoted(colp->GetValue()->GetCharString(buf));
+ Query->Append_quoted(colp->GetValue()->GetCharString(buf));
else
- oom |= Query->Append(colp->GetValue()->GetCharString(buf));
+ Query->Append(colp->GetValue()->GetCharString(buf));
} else
- oom |= Query->Append("NULL");
+ Query->Append("NULL");
- oom |= Query->Append(',');
+ Query->Append(',');
} // endfor colp
- if (unlikely(oom)) {
+ if (unlikely(Query->IsTruncated())) {
strcpy(g->Message, "WriteDB: Out of memory");
rc = RC_FX;
} else {
@@ -1186,7 +1192,7 @@ int TDBMYSQL::WriteDB(PGLOBAL g)
Myc.m_Rows = -1; // To execute the query
rc = Myc.ExecSQL(g, Query->GetStr());
Query->Truncate(len); // Restore query
- } // endif oom
+ } // endif Query
return (rc == RC_NF) ? RC_OK : rc; // RC_NF is Ok
} // end of WriteDB
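WriteDB also shows the companion trick: the INSERT prefix is built once, each row appends its VALUES data, the statement is executed, and Query->Truncate(len) rewinds the string to the saved prefix length for the next row. Schematically (CONNECT types assumed, row handling reduced to a single column, execution elided):

// qp already holds the constant "INSERT INTO `tab` (...) VALUES (" prefix.
bool AppendOneRow(PGLOBAL g, STRING *qp, PCOL colp, char *buf)
{
  uint len = qp->GetLength();         // remember the end of the prefix

  qp->Append_quoted(colp->GetValue()->GetCharString(buf));
  qp->Append(')');

  if (qp->IsTruncated()) {
    strcpy(g->Message, "AppendOneRow: Out of memory");
    return true;
  } // endif truncated

  // ... execute qp->GetStr() here, then rewind for the next row ...
  qp->Truncate(len);
  return false;
} // end of AppendOneRow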
@@ -1234,7 +1240,7 @@ void TDBMYSQL::CloseDB(PGLOBAL g)
/***********************************************************************/
/* MYSQLCOL public constructor. */
/***********************************************************************/
-MYSQLCOL::MYSQLCOL(PCOLDEF cdp, PTDB tdbp, PCOL cprec, int i, PSZ am)
+MYSQLCOL::MYSQLCOL(PCOLDEF cdp, PTDB tdbp, PCOL cprec, int i, PCSZ am)
: COLBLK(cdp, tdbp, i)
{
if (cprec) {
@@ -1260,7 +1266,7 @@ MYSQLCOL::MYSQLCOL(PCOLDEF cdp, PTDB tdbp, PCOL cprec, int i, PSZ am)
/***********************************************************************/
/* MYSQLCOL public constructor. */
/***********************************************************************/
-MYSQLCOL::MYSQLCOL(MYSQL_FIELD *fld, PTDB tdbp, int i, PSZ am)
+MYSQLCOL::MYSQLCOL(MYSQL_FIELD *fld, PTDB tdbp, int i, PCSZ am)
: COLBLK(NULL, tdbp, i)
{
const char *chset = get_charset_name(fld->charsetnr);
@@ -1407,8 +1413,8 @@ void MYSQLCOL::ReadColumn(PGLOBAL g)
if (rc == RC_EF)
sprintf(g->Message, MSG(INV_DEF_READ), rc);
- longjmp(g->jumper[g->jump_level], 11);
- } else
+ throw 11;
+ } else
tdbp->Fetched = true;
if ((buf = ((PTDBMY)To_Tdb)->Myc.GetCharField(Rank))) {
@@ -1669,7 +1675,7 @@ int TDBMYEXC::WriteDB(PGLOBAL g)
/***********************************************************************/
/* MYXCOL public constructor. */
/***********************************************************************/
-MYXCOL::MYXCOL(PCOLDEF cdp, PTDB tdbp, PCOL cprec, int i, PSZ am)
+MYXCOL::MYXCOL(PCOLDEF cdp, PTDB tdbp, PCOL cprec, int i, PCSZ am)
: MYSQLCOL(cdp, tdbp, cprec, i, am)
{
// Set additional EXEC MYSQL access method information for column.
@@ -1679,7 +1685,7 @@ MYXCOL::MYXCOL(PCOLDEF cdp, PTDB tdbp, PCOL cprec, int i, PSZ am)
/***********************************************************************/
/* MYSQLCOL public constructor. */
/***********************************************************************/
-MYXCOL::MYXCOL(MYSQL_FIELD *fld, PTDB tdbp, int i, PSZ am)
+MYXCOL::MYXCOL(MYSQL_FIELD *fld, PTDB tdbp, int i, PCSZ am)
: MYSQLCOL(fld, tdbp, i, am)
{
if (trace)
diff --git a/storage/connect/tabmysql.h b/storage/connect/tabmysql.h
index 050fa59259b..3c37ae5bf3b 100644
--- a/storage/connect/tabmysql.h
+++ b/storage/connect/tabmysql.h
@@ -86,7 +86,7 @@ class TDBMYSQL : public TDBEXT {
virtual void ResetDB(void) {N = 0;}
virtual int RowNumber(PGLOBAL g, bool b = false);
virtual bool IsView(void) {return Isview;}
- virtual PSZ GetServer(void) {return Server;}
+ virtual PCSZ GetServer(void) {return Server;}
void SetDatabase(LPCSTR db) {Schema = (char*)db;}
// Schema routines
@@ -109,7 +109,7 @@ class TDBMYSQL : public TDBEXT {
// Internal functions
bool MakeSelect(PGLOBAL g, bool mx);
bool MakeInsert(PGLOBAL g);
- int BindColumns(PGLOBAL g);
+ int BindColumns(PGLOBAL g __attribute__((unused)));
virtual bool MakeCommand(PGLOBAL g);
//int MakeUpdate(PGLOBAL g);
//int MakeDelete(PGLOBAL g);
@@ -146,8 +146,8 @@ class MYSQLCOL : public COLBLK {
friend class TDBMYSQL;
public:
// Constructors
- MYSQLCOL(PCOLDEF cdp, PTDB tdbp, PCOL cprec, int i, PSZ am = "MYSQL");
- MYSQLCOL(MYSQL_FIELD *fld, PTDB tdbp, int i, PSZ am = "MYSQL");
+ MYSQLCOL(PCOLDEF cdp, PTDB tdbp, PCOL cprec, int i, PCSZ am = "MYSQL");
+ MYSQLCOL(MYSQL_FIELD *fld, PTDB tdbp, int i, PCSZ am = "MYSQL");
MYSQLCOL(MYSQLCOL *colp, PTDB tdbp); // Constructor used in copy process
// Implementation
@@ -215,8 +215,8 @@ class MYXCOL : public MYSQLCOL {
friend class TDBMYEXC;
public:
// Constructors
- MYXCOL(PCOLDEF cdp, PTDB tdbp, PCOL cprec, int i, PSZ am = "MYSQL");
- MYXCOL(MYSQL_FIELD *fld, PTDB tdbp, int i, PSZ am = "MYSQL");
+ MYXCOL(PCOLDEF cdp, PTDB tdbp, PCOL cprec, int i, PCSZ am = "MYSQL");
+ MYXCOL(MYSQL_FIELD *fld, PTDB tdbp, int i, PCSZ am = "MYSQL");
MYXCOL(MYXCOL *colp, PTDB tdbp); // Constructor used in copy process
// Methods
@@ -242,10 +242,10 @@ class TDBMCL : public TDBCAT {
virtual PQRYRES GetResult(PGLOBAL g);
// Members
- PSZ Host; // Host machine to use
- PSZ Db; // Database to be used by server
- PSZ Tab; // External table name
- PSZ User; // User logon name
- PSZ Pwd; // Password logon info
- int Port; // MySQL port number (0 = default)
+ PCSZ Host; // Host machine to use
+ PCSZ Db; // Database to be used by server
+ PCSZ Tab; // External table name
+ PCSZ User; // User logon name
+ PCSZ Pwd; // Password logon info
+ int Port; // MySQL port number (0 = default)
}; // end of class TDBMCL
diff --git a/storage/connect/tabodbc.cpp b/storage/connect/tabodbc.cpp
index 488acdd330d..34711d584f1 100644
--- a/storage/connect/tabodbc.cpp
+++ b/storage/connect/tabodbc.cpp
@@ -1,11 +1,11 @@
/************* Tabodbc C++ Program Source Code File (.CPP) *************/
/* PROGRAM NAME: TABODBC */
/* ------------- */
-/* Version 3.1 */
+/* Version 3.2 */
/* */
/* COPYRIGHT: */
/* ---------- */
-/* (C) Copyright to the author Olivier BERTRAND 2000-2016 */
+/* (C) Copyright to the author Olivier BERTRAND 2000-2017 */
/* */
/* WHAT THIS PROGRAM DOES: */
/* ----------------------- */
@@ -116,47 +116,12 @@ bool ODBCDEF::DefineAM(PGLOBAL g, LPCSTR am, int poff)
if (EXTDEF::DefineAM(g, am, poff))
return true;
- // Tabname = GetStringCatInfo(g, "Name",
- // (Catfunc & (FNC_TABLE | FNC_COL)) ? NULL : Name);
- // Tabname = GetStringCatInfo(g, "Tabname", Tabname);
- // Tabschema = GetStringCatInfo(g, "Dbname", NULL);
- // Tabschema = GetStringCatInfo(g, "Schema", Tabschema);
- // Tabcat = GetStringCatInfo(g, "Qualifier", NULL);
- // Tabcat = GetStringCatInfo(g, "Catalog", Tabcat);
- //Username = GetStringCatInfo(g, "User", NULL);
- // Password = GetStringCatInfo(g, "Password", NULL);
-
- // if ((Srcdef = GetStringCatInfo(g, "Srcdef", NULL)))
- // Read_Only = true;
-
- // Qrystr = GetStringCatInfo(g, "Query_String", "?");
- // Sep = GetStringCatInfo(g, "Separator", NULL);
Catver = GetIntCatInfo("Catver", 2);
- //Xsrc = GetBoolCatInfo("Execsrc", FALSE);
- //Maxerr = GetIntCatInfo("Maxerr", 0);
- //Maxres = GetIntCatInfo("Maxres", 0);
- //Quoted = GetIntCatInfo("Quoted", 0);
Options = ODBConn::noOdbcDialog;
//Options = ODBConn::noOdbcDialog | ODBConn::useCursorLib;
Cto= GetIntCatInfo("ConnectTimeout", DEFAULT_LOGIN_TIMEOUT);
Qto= GetIntCatInfo("QueryTimeout", DEFAULT_QUERY_TIMEOUT);
-
- //if ((Scrollable = GetBoolCatInfo("Scrollable", false)) && !Elemt)
- // Elemt = 1; // Cannot merge SQLFetch and SQLExtendedFetch
-
- //if (Catfunc == FNC_COL)
- // Colpat = GetStringCatInfo(g, "Colpat", NULL);
-
- //if (Catfunc == FNC_TABLE)
- // Tabtyp = GetStringCatInfo(g, "Tabtype", NULL);
-
UseCnc = GetBoolCatInfo("UseDSN", false);
-
- // Memory was Boolean, it is now integer
- //if (!(Memory = GetIntCatInfo("Memory", 0)))
- // Memory = GetBoolCatInfo("Memory", false) ? 1 : 0;
-
- //Pseudo = 2; // FILID is Ok but not ROWID
return false;
} // end of DefineAM
@@ -210,59 +175,22 @@ TDBODBC::TDBODBC(PODEF tdp) : TDBEXT(tdp)
if (tdp) {
Connect = tdp->Connect;
- //TableName = tdp->Tabname;
- //Schema = tdp->Tabschema;
Ops.User = tdp->Username;
Ops.Pwd = tdp->Password;
- //Catalog = tdp->Tabcat;
- //Srcdef = tdp->Srcdef;
- //Qrystr = tdp->Qrystr;
- //Sep = tdp->GetSep();
- //Options = tdp->Options;
Ops.Cto = tdp->Cto;
Ops.Qto = tdp->Qto;
- //Quoted = MY_MAX(0, tdp->GetQuoted());
- //Rows = tdp->GetElemt();
Catver = tdp->Catver;
- //Memory = tdp->Memory;
- //Scrollable = tdp->Scrollable;
Ops.UseCnc = tdp->UseCnc;
} else {
Connect = NULL;
- //TableName = NULL;
- //Schema = NULL;
Ops.User = NULL;
Ops.Pwd = NULL;
- //Catalog = NULL;
- //Srcdef = NULL;
- //Qrystr = NULL;
- //Sep = 0;
- //Options = 0;
Ops.Cto = DEFAULT_LOGIN_TIMEOUT;
Ops.Qto = DEFAULT_QUERY_TIMEOUT;
- //Quoted = 0;
- //Rows = 0;
Catver = 0;
- //Memory = 0;
- //Scrollable = false;
Ops.UseCnc = false;
} // endif tdp
- //Quote = NULL;
- //Query = NULL;
- //Count = NULL;
-//Where = NULL;
- //MulConn = NULL;
- //DBQ = NULL;
- //Qrp = NULL;
- //Fpos = 0;
- //Curpos = 0;
- //AftRows = 0;
- //CurNum = 0;
- //Rbuf = 0;
- //BufSize = 0;
- //Nparm = 0;
- //Placed = false;
} // end of TDBODBC standard constructor
TDBODBC::TDBODBC(PTDBODBC tdbp) : TDBEXT(tdbp)
@@ -270,32 +198,7 @@ TDBODBC::TDBODBC(PTDBODBC tdbp) : TDBEXT(tdbp)
Ocp = tdbp->Ocp; // is that right ?
Cnp = tdbp->Cnp;
Connect = tdbp->Connect;
- //TableName = tdbp->TableName;
- //Schema = tdbp->Schema;
Ops = tdbp->Ops;
- //Catalog = tdbp->Catalog;
- //Srcdef = tdbp->Srcdef;
- //Qrystr = tdbp->Qrystr;
- //Memory = tdbp->Memory;
- //Scrollable = tdbp->Scrollable;
- //Quote = tdbp->Quote;
- //Query = tdbp->Query;
- //Count = tdbp->Count;
-//Where = tdbp->Where;
- //MulConn = tdbp->MulConn;
- //DBQ = tdbp->DBQ;
- //Options = tdbp->Options;
- //Quoted = tdbp->Quoted;
- //Rows = tdbp->Rows;
- //Fpos = 0;
- //Curpos = 0;
- //AftRows = 0;
- //CurNum = 0;
- //Rbuf = 0;
- //BufSize = tdbp->BufSize;
- //Nparm = tdbp->Nparm;
- //Qrp = tdbp->Qrp;
- //Placed = false;
} // end of TDBODBC copy constructor
// Method
@@ -328,7 +231,7 @@ PCOL TDBODBC::MakeCol(PGLOBAL g, PCOLDEF cdp, PCOL cprec, int n)
/* This used for Multiple(1) tables. Also prepare a connect string */
/* with a place holder to be used by SetFile. */
/***********************************************************************/
-PSZ TDBODBC::GetFile(PGLOBAL g)
+PCSZ TDBODBC::GetFile(PGLOBAL g)
{
if (Connect) {
char *p1, *p2;
@@ -389,152 +292,15 @@ void TDBODBC::SetFile(PGLOBAL g, PSZ fn)
DBQ = fn;
} // end of SetFile
-#if 0
-/******************************************************************/
-/* Convert an UTF-8 string to latin characters. */
-/******************************************************************/
-int TDBODBC::Decode(char *txt, char *buf, size_t n)
-{
- uint dummy_errors;
- uint32 len= copy_and_convert(buf, n, &my_charset_latin1,
- txt, strlen(txt),
- &my_charset_utf8_general_ci,
- &dummy_errors);
- buf[len]= '\0';
- return 0;
-} // end of Decode
-
-/***********************************************************************/
-/* MakeSQL: make the SQL statement use with ODBC connection. */
-/* Note: when implementing EOM filtering, column only used in local */
-/* filter should be removed from column list. */
-/***********************************************************************/
-bool TDBODBC::MakeSQL(PGLOBAL g, bool cnt)
- {
- char *schmp = NULL, *catp = NULL, buf[NAM_LEN * 3];
- int len;
- bool oom = false, first = true;
- PTABLE tablep = To_Table;
- PCOL colp;
-
- if (Srcdef) {
- if (strstr(Srcdef, "%s")) {
- char *fil;
-
- fil = (To_CondFil) ? To_CondFil->Body : PlugDup(g, "1=1");
- Query = new(g)STRING(g, strlen(Srcdef) + strlen(fil));
- Query->SetLength(sprintf(Query->GetStr(), Srcdef, fil));
- } else
- Query = new(g)STRING(g, 0, Srcdef);
-
- return false;
- } // endif Srcdef
-
- // Allocate the string used to contain the Query
- Query = new(g)STRING(g, 1023, "SELECT ");
-
- if (!cnt) {
- if (Columns) {
- // Normal SQL statement to retrieve results
- for (colp = Columns; colp; colp = colp->GetNext())
- if (!colp->IsSpecial()) {
- if (!first)
- oom |= Query->Append(", ");
- else
- first = false;
-
- // Column name can be encoded in UTF-8
- Decode(colp->GetName(), buf, sizeof(buf));
-
- if (Quote) {
- // Put column name between identifier quotes in case in contains blanks
- oom |= Query->Append(Quote);
- oom |= Query->Append(buf);
- oom |= Query->Append(Quote);
- } else
- oom |= Query->Append(buf);
-
- ((PEXTCOL)colp)->SetRank(++Ncol);
- } // endif colp
-
- } else
- // !Columns can occur for queries such that sql count(*) from...
- // for which we will count the rows from sql * from...
- oom |= Query->Append('*');
-
- } else
- // SQL statement used to retrieve the size of the result
- oom |= Query->Append("count(*)");
-
- oom |= Query->Append(" FROM ");
-
- if (Catalog && *Catalog)
- catp = Catalog;
-
- //if (tablep->GetSchema())
- // schmp = (char*)tablep->GetSchema();
- //else
- if (Schema && *Schema)
- schmp = Schema;
-
- if (catp) {
- oom |= Query->Append(catp);
-
- if (schmp) {
- oom |= Query->Append('.');
- oom |= Query->Append(schmp);
- } // endif schmp
-
- oom |= Query->Append('.');
- } else if (schmp) {
- oom |= Query->Append(schmp);
- oom |= Query->Append('.');
- } // endif schmp
-
- // Table name can be encoded in UTF-8
- Decode(TableName, buf, sizeof(buf));
-
- if (Quote) {
- // Put table name between identifier quotes in case in contains blanks
- oom |= Query->Append(Quote);
- oom |= Query->Append(buf);
- oom |= Query->Append(Quote);
- } else
- oom |= Query->Append(buf);
-
- len = Query->GetLength();
-
- if (To_CondFil) {
- if (Mode == MODE_READ) {
- oom |= Query->Append(" WHERE ");
- oom |= Query->Append(To_CondFil->Body);
- len = Query->GetLength() + 1;
- } else
- len += (strlen(To_CondFil->Body) + 256);
-
- } else
- len += ((Mode == MODE_READX) ? 256 : 1);
-
- if (oom || Query->Resize(len)) {
- strcpy(g->Message, "MakeSQL: Out of memory");
- return true;
- } // endif oom
-
- if (trace)
- htrc("Query=%s\n", Query->GetStr());
-
- return false;
- } // end of MakeSQL
-#endif // 0
-
/***********************************************************************/
/* MakeInsert: make the Insert statement used with ODBC connection. */
/***********************************************************************/
bool TDBODBC::MakeInsert(PGLOBAL g)
{
- char *schmp = NULL, *catp = NULL, buf[NAM_LEN * 3];
+ PCSZ schmp = NULL;
+ char *catp = NULL, buf[NAM_LEN * 3];
int len = 0;
- bool b = false, oom = false;
+ bool oom, b = false;
PTABLE tablep = To_Table;
PCOL colp;
@@ -571,32 +337,32 @@ bool TDBODBC::MakeInsert(PGLOBAL g)
Query = new(g) STRING(g, len, "INSERT INTO ");
if (catp) {
- oom |= Query->Append(catp);
+ Query->Append(catp);
if (schmp) {
- oom |= Query->Append('.');
- oom |= Query->Append(schmp);
+ Query->Append('.');
+ Query->Append(schmp);
} // endif schmp
- oom |= Query->Append('.');
+ Query->Append('.');
} else if (schmp) {
- oom |= Query->Append(schmp);
- oom |= Query->Append('.');
+ Query->Append(schmp);
+ Query->Append('.');
} // endif schmp
if (Quote) {
// Put table name between identifier quotes in case in contains blanks
- oom |= Query->Append(Quote);
- oom |= Query->Append(buf);
- oom |= Query->Append(Quote);
+ Query->Append(Quote);
+ Query->Append(buf);
+ Query->Append(Quote);
} else
- oom |= Query->Append(buf);
+ Query->Append(buf);
- oom |= Query->Append('(');
+ Query->Append('(');
for (colp = Columns; colp; colp = colp->GetNext()) {
if (b)
- oom |= Query->Append(", ");
+ Query->Append(", ");
else
b = true;
@@ -605,20 +371,20 @@ bool TDBODBC::MakeInsert(PGLOBAL g)
if (Quote) {
// Put column name between identifier quotes in case in contains blanks
- oom |= Query->Append(Quote);
- oom |= Query->Append(buf);
- oom |= Query->Append(Quote);
+ Query->Append(Quote);
+ Query->Append(buf);
+ Query->Append(Quote);
} else
- oom |= Query->Append(buf);
+ Query->Append(buf);
} // endfor colp
- oom |= Query->Append(") VALUES (");
+ Query->Append(") VALUES (");
for (int i = 0; i < Nparm; i++)
- oom |= Query->Append("?,");
+ Query->Append("?,");
- if (oom)
+ if ((oom = Query->IsTruncated()))
strcpy(g->Message, "MakeInsert: Out of memory");
else
Query->RepLast(')');
@@ -646,73 +412,6 @@ bool TDBODBC::BindParameters(PGLOBAL g)
#if 0
/***********************************************************************/
-/* MakeCommand: make the Update or Delete statement to send to the */
-/* MySQL server. Limited to remote values and filtering. */
-/***********************************************************************/
-bool TDBODBC::MakeCommand(PGLOBAL g)
- {
- char *p, *stmt, name[68], *body = NULL, *qc = Ocp->GetQuoteChar();
- char *qrystr = (char*)PlugSubAlloc(g, NULL, strlen(Qrystr) + 1);
- bool qtd = Quoted > 0;
- int i = 0, k = 0;
-
- // Make a lower case copy of the originale query and change
- // back ticks to the data source identifier quoting character
- do {
- qrystr[i] = (Qrystr[i] == '`') ? *qc : tolower(Qrystr[i]);
- } while (Qrystr[i++]);
-
- if (To_CondFil && (p = strstr(qrystr, " where "))) {
- p[7] = 0; // Remove where clause
- Qrystr[(p - qrystr) + 7] = 0;
- body = To_CondFil->Body;
- stmt = (char*)PlugSubAlloc(g, NULL, strlen(qrystr)
- + strlen(body) + 64);
- } else
- stmt = (char*)PlugSubAlloc(g, NULL, strlen(Qrystr) + 64);
-
- // Check whether the table name is equal to a keyword
- // If so, it must be quoted in the original query
- strlwr(strcat(strcat(strcpy(name, " "), Name), " "));
-
- if (strstr(" update delete low_priority ignore quick from ", name)) {
- strlwr(strcat(strcat(strcpy(name, qc), Name), qc));
- k += 2;
- } else
- strlwr(strcpy(name, Name)); // Not a keyword
-
- if ((p = strstr(qrystr, name))) {
- for (i = 0; i < p - qrystr; i++)
- stmt[i] = (Qrystr[i] == '`') ? *qc : Qrystr[i];
-
- stmt[i] = 0;
- k += i + (int)strlen(Name);
-
- if (qtd && *(p - 1) == ' ')
- strcat(strcat(strcat(stmt, qc), TableName), qc);
- else
- strcat(stmt, TableName);
-
- i = (int)strlen(stmt);
-
- do {
- stmt[i++] = (Qrystr[k] == '`') ? *qc : Qrystr[k];
- } while (Qrystr[k++]);
-
- if (body)
- strcat(stmt, body);
-
- } else {
- sprintf(g->Message, "Cannot use this %s command",
- (Mode == MODE_UPDATE) ? "UPDATE" : "DELETE");
- return true;
- } // endif p
-
- Query = new(g) STRING(g, 0, stmt);
- return (!Query->GetSize());
- } // end of MakeCommand
-
-/***********************************************************************/
/* MakeUpdate: make the SQL statement to send to ODBC connection. */
/***********************************************************************/
char *TDBODBC::MakeUpdate(PGLOBAL g)
@@ -829,35 +528,6 @@ int TDBODBC::Cardinality(PGLOBAL g)
return Cardinal;
} // end of Cardinality
-#if 0
-/***********************************************************************/
-/* ODBC GetMaxSize: returns table size estimate in number of lines. */
-/***********************************************************************/
-int TDBODBC::GetMaxSize(PGLOBAL g)
- {
- if (MaxSize < 0) {
- if (Mode == MODE_DELETE)
- // Return 0 in mode DELETE in case of delete all.
- MaxSize = 0;
- else if (!Cardinality(NULL))
- MaxSize = 10; // To make MySQL happy
- else if ((MaxSize = Cardinality(g)) < 0)
- MaxSize = 12; // So we can see an error occurred
-
- } // endif MaxSize
-
- return MaxSize;
- } // end of GetMaxSize
-
-/***********************************************************************/
-/* Return max size value. */
-/***********************************************************************/
-int TDBODBC::GetProgMax(PGLOBAL g)
- {
- return GetMaxSize(g);
- } // end of GetProgMax
-#endif // 0
-
/***********************************************************************/
/* ODBC Access Method opening routine. */
/* New method now that this routine is called recursively (last table */
@@ -1064,7 +734,7 @@ bool TDBODBC::ReadKey(PGLOBAL g, OPVAL op, const key_range *kr)
To_CondFil->Body= (char*)PlugSubAlloc(g, NULL, 0);
*To_CondFil->Body= 0;
- if ((To_CondFil = hc->CheckCond(g, To_CondFil, To_CondFil->Cond)))
+ if ((To_CondFil = hc->CheckCond(g, To_CondFil, Cond)))
PlugSubAlloc(g, NULL, strlen(To_CondFil->Body) + 1);
} // endif active_index
@@ -1097,8 +767,6 @@ int TDBODBC::ReadDB(PGLOBAL g)
if (trace > 1)
htrc("ODBC ReadDB: R%d Mode=%d\n", GetTdb_No(), Mode);
- //htrc("ODBC ReadDB: R%d Mode=%d key=%p link=%p Kindex=%p\n",
- // GetTdb_No(), Mode, To_Key_Col, To_Link, To_Kindex);
if (Mode == MODE_UPDATE || Mode == MODE_DELETE) {
if (!Query && MakeCommand(g))
@@ -1118,12 +786,6 @@ int TDBODBC::ReadDB(PGLOBAL g)
} // endif Mode
- //if (To_Kindex) {
- // // Direct access of ODBC tables is not implemented yet
- // strcpy(g->Message, MSG(NO_ODBC_DIRECT));
- // return RC_FX;
- // } // endif To_Kindex
-
/*********************************************************************/
/* Now start the reading process. */
/* Here is the place to fetch the line(s). */
@@ -1208,11 +870,6 @@ int TDBODBC::DeleteDB(PGLOBAL g, int irc)
/***********************************************************************/
void TDBODBC::CloseDB(PGLOBAL g)
{
-//if (To_Kindex) {
-// To_Kindex->Close();
-// To_Kindex = NULL;
-// } // endif
-
if (Ocp)
Ocp->Close();
@@ -1227,20 +884,13 @@ void TDBODBC::CloseDB(PGLOBAL g)
/***********************************************************************/
/* ODBCCOL public constructor. */
/***********************************************************************/
-ODBCCOL::ODBCCOL(PCOLDEF cdp, PTDB tdbp, PCOL cprec, int i, PSZ am)
+ODBCCOL::ODBCCOL(PCOLDEF cdp, PTDB tdbp, PCOL cprec, int i, PCSZ am)
: EXTCOL(cdp, tdbp, cprec, i, am)
{
// Set additional ODBC access method information for column.
-//Crp = NULL;
-//Long = Precision;
-//strcpy(F_Date, cdp->F_Date);
-//To_Val = NULL;
Slen = 0;
StrLen = &Slen;
Sqlbuf = NULL;
-//Bufp = NULL;
-//Blkp = NULL;
-//Rank = 0; // Not known yet
} // end of ODBCCOL constructor
/***********************************************************************/
@@ -1248,17 +898,9 @@ ODBCCOL::ODBCCOL(PCOLDEF cdp, PTDB tdbp, PCOL cprec, int i, PSZ am)
/***********************************************************************/
ODBCCOL::ODBCCOL(void) : EXTCOL()
{
-//Crp = NULL;
-//Buf_Type = TYPE_INT; // This is a count(*) column
-//// Set additional Dos access method information for column.
-//Long = sizeof(int);
-//To_Val = NULL;
Slen = 0;
StrLen = &Slen;
Sqlbuf = NULL;
-//Bufp = NULL;
-//Blkp = NULL;
-//Rank = 1;
} // end of ODBCCOL constructor
/***********************************************************************/
@@ -1267,66 +909,11 @@ ODBCCOL::ODBCCOL(void) : EXTCOL()
/***********************************************************************/
ODBCCOL::ODBCCOL(ODBCCOL *col1, PTDB tdbp) : EXTCOL(col1, tdbp)
{
-//Crp = col1->Crp;
-//Long = col1->Long;
-//strcpy(F_Date, col1->F_Date);
-//To_Val = col1->To_Val;
Slen = col1->Slen;
StrLen = col1->StrLen;
Sqlbuf = col1->Sqlbuf;
-//Bufp = col1->Bufp;
-//Blkp = col1->Blkp;
-//Rank = col1->Rank;
} // end of ODBCCOL copy constructor
-#if 0
-/***********************************************************************/
-/* SetBuffer: prepare a column block for write operation. */
-/***********************************************************************/
-bool ODBCCOL::SetBuffer(PGLOBAL g, PVAL value, bool ok, bool check)
- {
- if (!(To_Val = value)) {
- sprintf(g->Message, MSG(VALUE_ERROR), Name);
- return true;
- } else if (Buf_Type == value->GetType()) {
- // Values are of the (good) column type
- if (Buf_Type == TYPE_DATE) {
- // If any of the date values is formatted
- // output format must be set for the receiving table
- if (GetDomain() || ((DTVAL *)value)->IsFormatted())
- goto newval; // This will make a new value;
-
- } else if (Buf_Type == TYPE_DOUBLE)
- // Float values must be written with the correct (column) precision
- // Note: maybe this should be forced by ShowValue instead of this ?
- value->SetPrec(GetScale());
-
- Value = value; // Directly access the external value
- } else {
- // Values are not of the (good) column type
- if (check) {
- sprintf(g->Message, MSG(TYPE_VALUE_ERR), Name,
- GetTypeName(Buf_Type), GetTypeName(value->GetType()));
- return true;
- } // endif check
-
- newval:
- if (InitValue(g)) // Allocate the matching value block
- return true;
-
- } // endif's Value, Buf_Type
-
- // Because Colblk's have been made from a copy of the original TDB in
- // case of Update, we must reset them to point to the original one.
- if (To_Tdb->GetOrig())
- To_Tdb = (PTDB)To_Tdb->GetOrig();
-
- // Set the Column
- Status = (ok) ? BUF_EMPTY : BUF_NO;
- return false;
- } // end of SetBuffer
-#endif // 0
-
/***********************************************************************/
/* ReadColumn: when SQLFetch is used there is nothing to do as the */
/* column buffer was bind to the record set. This is also the case */
@@ -1715,7 +1302,7 @@ int TDBXDBC::DeleteDB(PGLOBAL g, int irc)
/***********************************************************************/
/* XSRCCOL public constructor. */
/***********************************************************************/
-XSRCCOL::XSRCCOL(PCOLDEF cdp, PTDB tdbp, PCOL cprec, int i, PSZ am)
+XSRCCOL::XSRCCOL(PCOLDEF cdp, PTDB tdbp, PCOL cprec, int i, PCSZ am)
: ODBCCOL(cdp, tdbp, cprec, i, am)
{
// Set additional ODBC access method information for column.
diff --git a/storage/connect/tabodbc.h b/storage/connect/tabodbc.h
index fcefad5647b..0ca88b60858 100644
--- a/storage/connect/tabodbc.h
+++ b/storage/connect/tabodbc.h
@@ -1,7 +1,7 @@
/*************** Tabodbc H Declares Source Code File (.H) **************/
-/* Name: TABODBC.H Version 1.8 */
+/* Name: TABODBC.H Version 1.9 */
/* */
-/* (C) Copyright to the author Olivier BERTRAND 2000-2015 */
+/* (C) Copyright to the author Olivier BERTRAND 2000-2017 */
/* */
/* This file contains the TDBODBC classes declares. */
/***********************************************************************/
@@ -33,14 +33,7 @@ public:
// Implementation
virtual const char *GetType(void) {return "ODBC";}
PSZ GetConnect(void) {return Connect;}
- //PSZ GetTabname(void) {return Tabname;}
- //PSZ GetTabschema(void) {return Tabschema;}
- //PSZ GetTabcat(void) {return Tabcat;}
- //PSZ GetSrcdef(void) {return Srcdef;}
- //char GetSep(void) {return (Sep) ? *Sep : 0;}
- //int GetQuoted(void) {return Quoted;}
int GetCatver(void) {return Catver;}
- //int GetOptions(void) {return Options;}
// Methods
virtual int Indexable(void) {return 2;}
@@ -50,27 +43,7 @@ public:
protected:
// Members
PSZ Connect; /* ODBC connection string */
- //PSZ Tabname; /* External table name */
- //PSZ Tabschema; /* External table schema */
- //PSZ Username; /* User connect name */
- //PSZ Password; /* Password connect info */
- //PSZ Tabcat; /* External table catalog */
- //PSZ Tabtyp; /* Catalog table type */
- //PSZ Colpat; /* Catalog column pattern */
- //PSZ Srcdef; /* The source table SQL definition */
- //PSZ Qchar; /* Identifier quoting character */
- //PSZ Qrystr; /* The original query */
- //PSZ Sep; /* Decimal separator */
int Catver; /* ODBC version for catalog functions */
- //int Options; /* Open connection options */
- //int Cto; /* Open connection timeout */
- //int Qto; /* Query (command) timeout */
- //int Quoted; /* Identifier quoting level */
- //int Maxerr; /* Maxerr for an Exec table */
- //int Maxres; /* Maxres for a catalog table */
- //int Memory; /* Put result set in memory */
- //bool Scrollable; /* Use scrollable cursor */
- //bool Xsrc; /* Execution type */
bool UseCnc; /* Use SQLConnect (!SQLDriverConnect) */
}; // end of ODBCDEF
@@ -96,20 +69,16 @@ class TDBODBC : public TDBEXT {
// Methods
virtual PTDB Clone(PTABS t);
-//virtual int GetRecpos(void);
virtual bool SetRecpos(PGLOBAL g, int recpos);
- virtual PSZ GetFile(PGLOBAL g);
+ virtual PCSZ GetFile(PGLOBAL g);
virtual void SetFile(PGLOBAL g, PSZ fn);
virtual void ResetSize(void);
-//virtual int GetAffectedRows(void) {return AftRows;}
- virtual PSZ GetServer(void) {return "ODBC";}
+ virtual PCSZ GetServer(void) {return "ODBC";}
virtual int Indexable(void) {return 2;}
// Database routines
virtual PCOL MakeCol(PGLOBAL g, PCOLDEF cdp, PCOL cprec, int n);
virtual int Cardinality(PGLOBAL g);
-//virtual int GetMaxSize(PGLOBAL g);
-//virtual int GetProgMax(PGLOBAL g);
virtual bool OpenDB(PGLOBAL g);
virtual int ReadDB(PGLOBAL g);
virtual int WriteDB(PGLOBAL g);
@@ -119,14 +88,8 @@ class TDBODBC : public TDBEXT {
protected:
// Internal functions
-//int Decode(char *utf, char *buf, size_t n);
-//bool MakeSQL(PGLOBAL g, bool cnt);
bool MakeInsert(PGLOBAL g);
-//virtual bool MakeCommand(PGLOBAL g);
-//bool MakeFilter(PGLOBAL g, bool c);
bool BindParameters(PGLOBAL g);
-//char *MakeUpdate(PGLOBAL g);
-//char *MakeDelete(PGLOBAL g);
// Members
ODBConn *Ocp; // Points to an ODBC connection class
@@ -145,15 +108,12 @@ class ODBCCOL : public EXTCOL {
friend class TDBODBC;
public:
// Constructors
- ODBCCOL(PCOLDEF cdp, PTDB tdbp, PCOL cprec, int i, PSZ am = "ODBC");
+ ODBCCOL(PCOLDEF cdp, PTDB tdbp, PCOL cprec, int i, PCSZ am = "ODBC");
ODBCCOL(ODBCCOL *colp, PTDB tdbp); // Constructor used in copy process
// Implementation
virtual int GetAmType(void) {return TYPE_AM_ODBC;}
SQLLEN *GetStrLen(void) {return StrLen;}
-// int GetRank(void) {return Rank;}
-// PVBLK GetBlkp(void) {return Blkp;}
-// void SetCrp(PCOLRES crp) {Crp = crp;}
// Methods
//virtual bool SetBuffer(PGLOBAL g, PVAL value, bool ok, bool check);
@@ -162,7 +122,6 @@ class ODBCCOL : public EXTCOL {
void AllocateBuffers(PGLOBAL g, int rows);
void *GetBuffer(DWORD rows);
SWORD GetBuflen(void);
-// void Print(PGLOBAL g, FILE *, uint);
protected:
// Constructor for count(*) column
@@ -170,14 +129,8 @@ class ODBCCOL : public EXTCOL {
// Members
TIMESTAMP_STRUCT *Sqlbuf; // To get SQL_TIMESTAMP's
-//PCOLRES Crp; // To storage result
-//void *Bufp; // To extended buffer
-//PVBLK Blkp; // To Value Block
-//char F_Date[12]; // Internal Date format
-//PVAL To_Val; // To value used for Insert
SQLLEN *StrLen; // As returned by ODBC
SQLLEN Slen; // Used with Fetch
-//int Rank; // Rank (position) number in the query
}; // end of class ODBCCOL
/***********************************************************************/
@@ -226,16 +179,15 @@ class XSRCCOL : public ODBCCOL {
friend class TDBXDBC;
public:
// Constructors
- XSRCCOL(PCOLDEF cdp, PTDB tdbp, PCOL cprec, int i, PSZ am = "ODBC");
+ XSRCCOL(PCOLDEF cdp, PTDB tdbp, PCOL cprec, int i, PCSZ am = "ODBC");
XSRCCOL(XSRCCOL *colp, PTDB tdbp); // Constructor used in copy process
// Implementation
-//virtual int GetAmType(void) {return TYPE_AM_ODBC;}
// Methods
virtual void ReadColumn(PGLOBAL g);
virtual void WriteColumn(PGLOBAL g);
-// void Print(PGLOBAL g, FILE *, uint);
+// void Printf(PGLOBAL g, FILE *, uint);
protected:
// Members
@@ -287,10 +239,10 @@ class TDBOTB : public TDBDRV {
virtual PQRYRES GetResult(PGLOBAL g);
// Members
- char *Dsn; // Points to connection string
- char *Schema; // Points to schema name or NULL
- char *Tab; // Points to ODBC table name or pattern
- char *Tabtyp; // Points to ODBC table type
+ PCSZ Dsn; // Points to connection string
+ PCSZ Schema; // Points to schema name or NULL
+ PCSZ Tab; // Points to ODBC table name or pattern
+ PCSZ Tabtyp; // Points to ODBC table type
ODBCPARM Ops; // Additional parameters
}; // end of class TDBOTB
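
Most of the header changes in this commit, including the TDBOTB members just above, follow one theme: string members and parameters that are only ever read move from PSZ to PCSZ. Assuming the usual CONNECT typedefs (PSZ as char*, PCSZ as const char*) — an assumption, since the typedefs live outside this diff — the change is plain const-correctness. A minimal self-contained sketch of what the new signatures allow:

    #include <cstdio>

    typedef char       *PSZ;   // assumed CONNECT typedef: writable string
    typedef const char *PCSZ;  // assumed CONNECT typedef: read-only string

    // Old style: forces callers to hold writable buffers or cast away const.
    static void SetDsnOld(PSZ dsn)  { std::printf("DSN=%s\n", dsn); }

    // New style: accepts string literals and other const data directly.
    static void SetDsnNew(PCSZ dsn) { std::printf("DSN=%s\n", dsn); }

    int main() {
      // SetDsnOld("dsn=test");   // ill-formed in C++11: a literal is const
      SetDsnNew("dsn=test");      // fine with the PCSZ signature
      char buf[] = "dsn=writable";
      SetDsnNew(buf);             // char* still converts to const char*
      return 0;
    }

The Dsn/Schema/Tab/Tabtyp members above are presumably exactly this case: pointers to catalog strings the class only reads.
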
diff --git a/storage/connect/tabpivot.cpp b/storage/connect/tabpivot.cpp
index c6d32884417..76a46e6899b 100644
--- a/storage/connect/tabpivot.cpp
+++ b/storage/connect/tabpivot.cpp
@@ -106,214 +106,211 @@ bool PIVAID::SkipColumn(PCOLRES crp, char *skc)
/* Make the Pivot table column list. */
/***********************************************************************/
PQRYRES PIVAID::MakePivotColumns(PGLOBAL g)
- {
+{
char *p, *query, *colname, *skc, buf[64];
- int rc, ndif, nblin, w = 0;
+ int ndif, nblin, w = 0;
bool b = false;
PVAL valp;
PQRYRES qrp;
PCOLRES *pcrp, crp, fncrp = NULL;
- // Save stack and allocation environment and prepare error return
- if (g->jump_level == MAX_JUMP) {
- strcpy(g->Message, MSG(TOO_MANY_JUMPS));
- return NULL;
- } // endif jump_level
-
- if ((rc= setjmp(g->jumper[++g->jump_level])) != 0) {
- goto err;
- } // endif rc
-
- // Are there columns to skip?
- if (Skcol) {
- uint n = strlen(Skcol);
-
- skc = (char*)PlugSubAlloc(g, NULL, n + 2);
- strcpy(skc, Skcol);
- skc[n + 1] = 0;
-
- // Replace ; by nulls in skc
- for (p = strchr(skc, ';'); p; p = strchr(p, ';'))
- *p++ = 0;
-
- } else
- skc = NULL;
-
- if (!Tabsrc && Tabname) {
- // Locate the query
- query = (char*)PlugSubAlloc(g, NULL, strlen(Tabname) + 26);
- sprintf(query, "SELECT * FROM `%s` LIMIT 1", Tabname);
- } else if (!Tabsrc) {
- strcpy(g->Message, MSG(SRC_TABLE_UNDEF));
- return NULL;
- } else
- query = Tabsrc;
-
- // Open a MySQL connection for this table
- if (!Myc.Open(g, Host, Database, User, Pwd, Port)) {
- b = true;
-
- // Returned values must be in their original character set
- if (Myc.ExecSQL(g, "SET character_set_results=NULL", &w) == RC_FX)
- goto err;
- else
- Myc.FreeResult();
-
- } else
- return NULL;
-
- // Send the source command to MySQL
- if (Myc.ExecSQL(g, query, &w) == RC_FX)
- goto err;
-
- // We must have a storage query to get pivot column values
- if (!(Qryp = Myc.GetResult(g, true)))
- goto err;
-
- if (!Fncol) {
- for (crp = Qryp->Colresp; crp; crp = crp->Next)
- if ((!Picol || stricmp(Picol, crp->Name)) && !SkipColumn(crp, skc))
- Fncol = crp->Name;
-
- if (!Fncol) {
- strcpy(g->Message, MSG(NO_DEF_FNCCOL));
- goto err;
- } // endif Fncol
-
- } // endif Fncol
-
- if (!Picol) {
- // Find default Picol as the last one not equal to Fncol
- for (crp = Qryp->Colresp; crp; crp = crp->Next)
- if (stricmp(Fncol, crp->Name) && !SkipColumn(crp, skc))
- Picol = crp->Name;
-
- if (!Picol) {
- strcpy(g->Message, MSG(NO_DEF_PIVOTCOL));
- goto err;
- } // endif Picol
-
- } // endif picol
-
- // Prepare the column list
- for (pcrp = &Qryp->Colresp; crp = *pcrp; )
- if (SkipColumn(crp, skc)) {
- // Ignore this column
- *pcrp = crp->Next;
- } else if (!stricmp(Picol, crp->Name)) {
- if (crp->Nulls) {
- sprintf(g->Message, "Pivot column %s cannot be nullable", Picol);
- goto err;
- } // endif Nulls
-
- Rblkp = crp->Kdata;
- *pcrp = crp->Next;
- } else if (!stricmp(Fncol, crp->Name)) {
- fncrp = crp;
- *pcrp = crp->Next;
- } else
- pcrp = &crp->Next;
-
- if (!Rblkp) {
- strcpy(g->Message, MSG(NO_DEF_PIVOTCOL));
- goto err;
- } else if (!fncrp) {
- strcpy(g->Message, MSG(NO_DEF_FNCCOL));
- goto err;
- } // endif
-
- if (Tabsrc) {
- Myc.Close();
- b = false;
-
- // Before calling sort, initialize all
- nblin = Qryp->Nblin;
-
- Index.Size = nblin * sizeof(int);
- Index.Sub = TRUE; // Should be small enough
-
- if (!PlgDBalloc(g, NULL, Index))
- return NULL;
-
- Offset.Size = (nblin + 1) * sizeof(int);
- Offset.Sub = TRUE; // Should be small enough
-
- if (!PlgDBalloc(g, NULL, Offset))
- return NULL;
-
- ndif = Qsort(g, nblin);
-
- if (ndif < 0) // error
- return NULL;
-
- } else {
- // The query was limited, we must get pivot column values
- // Returned values must be in their original character set
-// if (Myc.ExecSQL(g, "SET character_set_results=NULL", &w) == RC_FX)
-// goto err;
-
- query = (char*)PlugSubAlloc(g, NULL, 0);
- sprintf(query, "SELECT DISTINCT `%s` FROM `%s`", Picol, Tabname);
- PlugSubAlloc(g, NULL, strlen(query) + 1);
- Myc.FreeResult();
-
- // Send the source command to MySQL
- if (Myc.ExecSQL(g, query, &w) == RC_FX)
- goto err;
-
- // We must have a storage query to get pivot column values
- if (!(qrp = Myc.GetResult(g, true)))
- goto err;
-
- Myc.Close();
- b = false;
-
- // Get the column list
- crp = qrp->Colresp;
- Rblkp = crp->Kdata;
- ndif = qrp->Nblin;
- } // endif Tabsrc
-
- // Allocate the Value used to retrieve column names
- if (!(valp = AllocateValue(g, Rblkp->GetType(),
- Rblkp->GetVlen(),
- Rblkp->GetPrec())))
- return NULL;
-
- // Now make the functional columns
- for (int i = 0; i < ndif; i++) {
- if (i) {
- crp = (PCOLRES)PlugSubAlloc(g, NULL, sizeof(COLRES));
- memcpy(crp, fncrp, sizeof(COLRES));
- } else
- crp = fncrp;
-
- // Get the value that will be the generated column name
- if (Tabsrc)
- valp->SetValue_pvblk(Rblkp, Pex[Pof[i]]);
- else
- valp->SetValue_pvblk(Rblkp, i);
-
- colname = valp->GetCharString(buf);
- crp->Name = PlugDup(g, colname);
- crp->Flag = 1;
-
- // Add this column
- *pcrp = crp;
- crp->Next = NULL;
- pcrp = &crp->Next;
- } // endfor i
-
- // We added ndif columns and removed 2 (picol and fncol)
- Qryp->Nbcol += (ndif - 2);
- return Qryp;
+ try {
+ // Are there columns to skip?
+ if (Skcol) {
+ uint n = strlen(Skcol);
+
+ skc = (char*)PlugSubAlloc(g, NULL, n + 2);
+ strcpy(skc, Skcol);
+ skc[n + 1] = 0;
+
+ // Replace ; by nulls in skc
+ for (p = strchr(skc, ';'); p; p = strchr(p, ';'))
+ *p++ = 0;
+
+ } else
+ skc = NULL;
+
+ if (!Tabsrc && Tabname) {
+ // Locate the query
+ query = (char*)PlugSubAlloc(g, NULL, strlen(Tabname) + 26);
+ sprintf(query, "SELECT * FROM `%s` LIMIT 1", Tabname);
+ } else if (!Tabsrc) {
+ strcpy(g->Message, MSG(SRC_TABLE_UNDEF));
+ goto err;
+ } else
+ query = (char*)Tabsrc;
+
+ // Open a MySQL connection for this table
+ if (!Myc.Open(g, Host, Database, User, Pwd, Port)) {
+ b = true;
+
+ // Returned values must be in their original character set
+ if (Myc.ExecSQL(g, "SET character_set_results=NULL", &w) == RC_FX)
+ goto err;
+ else
+ Myc.FreeResult();
+
+ } else
+ goto err;
+
+ // Send the source command to MySQL
+ if (Myc.ExecSQL(g, query, &w) == RC_FX)
+ goto err;
+
+ // We must have a storage query to get pivot column values
+ if (!(Qryp = Myc.GetResult(g, true)))
+ goto err;
+
+ if (!Fncol) {
+ for (crp = Qryp->Colresp; crp; crp = crp->Next)
+ if ((!Picol || stricmp(Picol, crp->Name)) && !SkipColumn(crp, skc))
+ Fncol = crp->Name;
+
+ if (!Fncol) {
+ strcpy(g->Message, MSG(NO_DEF_FNCCOL));
+ goto err;
+ } // endif Fncol
+
+ } // endif Fncol
+
+ if (!Picol) {
+ // Find default Picol as the last one not equal to Fncol
+ for (crp = Qryp->Colresp; crp; crp = crp->Next)
+ if (stricmp(Fncol, crp->Name) && !SkipColumn(crp, skc))
+ Picol = crp->Name;
+
+ if (!Picol) {
+ strcpy(g->Message, MSG(NO_DEF_PIVOTCOL));
+ goto err;
+ } // endif Picol
+
+ } // endif picol
+
+ // Prepare the column list
+ for (pcrp = &Qryp->Colresp; crp = *pcrp; )
+ if (SkipColumn(crp, skc)) {
+ // Ignore this column
+ *pcrp = crp->Next;
+ } else if (!stricmp(Picol, crp->Name)) {
+ if (crp->Nulls) {
+ sprintf(g->Message, "Pivot column %s cannot be nullable", Picol);
+ goto err;
+ } // endif Nulls
+
+ Rblkp = crp->Kdata;
+ *pcrp = crp->Next;
+ } else if (!stricmp(Fncol, crp->Name)) {
+ fncrp = crp;
+ *pcrp = crp->Next;
+ } else
+ pcrp = &crp->Next;
+
+ if (!Rblkp) {
+ strcpy(g->Message, MSG(NO_DEF_PIVOTCOL));
+ goto err;
+ } else if (!fncrp) {
+ strcpy(g->Message, MSG(NO_DEF_FNCCOL));
+ goto err;
+ } // endif
+
+ if (Tabsrc) {
+ Myc.Close();
+ b = false;
+
+ // Before calling sort, initialize all
+ nblin = Qryp->Nblin;
+
+ Index.Size = nblin * sizeof(int);
+ Index.Sub = TRUE; // Should be small enough
+
+ if (!PlgDBalloc(g, NULL, Index))
+ goto err;
+
+ Offset.Size = (nblin + 1) * sizeof(int);
+ Offset.Sub = TRUE; // Should be small enough
+
+ if (!PlgDBalloc(g, NULL, Offset))
+ goto err;
+
+ ndif = Qsort(g, nblin);
+
+ if (ndif < 0) // error
+ goto err;
+
+ } else {
+ // The query was limited, we must get pivot column values
+ // Returned values must be in their original character set
+ // if (Myc.ExecSQL(g, "SET character_set_results=NULL", &w) == RC_FX)
+ // goto err;
+
+ query = (char*)PlugSubAlloc(g, NULL, 0);
+ sprintf(query, "SELECT DISTINCT `%s` FROM `%s`", Picol, Tabname);
+ PlugSubAlloc(g, NULL, strlen(query) + 1);
+ Myc.FreeResult();
+
+ // Send the source command to MySQL
+ if (Myc.ExecSQL(g, query, &w) == RC_FX)
+ goto err;
+
+ // We must have a storage query to get pivot column values
+ if (!(qrp = Myc.GetResult(g, true)))
+ goto err;
+
+ Myc.Close();
+ b = false;
+
+ // Get the column list
+ crp = qrp->Colresp;
+ Rblkp = crp->Kdata;
+ ndif = qrp->Nblin;
+ } // endif Tabsrc
+
+ // Allocate the Value used to retrieve column names
+ if (!(valp = AllocateValue(g, Rblkp->GetType(),
+ Rblkp->GetVlen(),
+ Rblkp->GetPrec())))
+ goto err;
+
+ // Now make the functional columns
+ for (int i = 0; i < ndif; i++) {
+ if (i) {
+ crp = (PCOLRES)PlugSubAlloc(g, NULL, sizeof(COLRES));
+ memcpy(crp, fncrp, sizeof(COLRES));
+ } else
+ crp = fncrp;
+
+ // Get the value that will be the generated column name
+ if (Tabsrc)
+ valp->SetValue_pvblk(Rblkp, Pex[Pof[i]]);
+ else
+ valp->SetValue_pvblk(Rblkp, i);
+
+ colname = valp->GetCharString(buf);
+ crp->Name = PlugDup(g, colname);
+ crp->Flag = 1;
+
+ // Add this column
+ *pcrp = crp;
+ crp->Next = NULL;
+ pcrp = &crp->Next;
+ } // endfor i
+
+ // We added ndif columns and removed 2 (picol and fncol)
+ Qryp->Nbcol += (ndif - 2);
+ return Qryp;
+ } catch (int n) {
+ if (trace)
+ htrc("Exception %d: %s\n", n, g->Message);
+ } catch (const char *msg) {
+ strcpy(g->Message, msg);
+ } // end catch
err:
if (b)
Myc.Close();
return NULL;
- } // end of MakePivotColumns
+} // end of MakePivotColumns
/***********************************************************************/
/* PIVAID: Compare routine for sorting pivot column values. */
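
The MakePivotColumns rewrite above is representative of the commit's other large change: the setjmp/longjmp error recovery driven by g->jumper / g->jump_level is replaced by C++ exceptions, with lower layers doing `throw <int code>` or `throw "message"` and the top-level routine catching both before falling through to the shared err: cleanup. A stripped-down, self-contained sketch of that control flow (Global, WriteStep and DoWork are illustrative stand-ins, not CONNECT names):

    #include <cstdio>
    #include <cstring>

    struct Global { char Message[256]; };   // stand-in for CONNECT's PGLOBAL

    // A low-level step that previously longjmp'ed back to its caller on error.
    static void WriteStep(Global *g, const char *val, std::size_t maxlen) {
      if (std::strlen(val) > maxlen) {
        std::snprintf(g->Message, sizeof(g->Message), "Value too long: %s", val);
        throw 31;                           // numeric code, like `throw 31;` above
      }
    }

    // The top-level routine catches int codes and plain C-string messages,
    // then takes the common error exit, mirroring the catch blocks above.
    static bool DoWork(Global *g, const char *val) {
      try {
        WriteStep(g, val, 8);
        return true;                        // success path
      } catch (int n) {
        std::fprintf(stderr, "Exception %d: %s\n", n, g->Message);
      } catch (const char *msg) {
        std::strncpy(g->Message, msg, sizeof(g->Message) - 1);
      }
      return false;                         // shared error exit (the err: label)
    }

    int main() {
      Global g;
      std::memset(&g, 0, sizeof(g));
      return DoWork(&g, "this string is longer than eight chars") ? 0 : 1;
    }

One practical consequence of dropping longjmp is that destructors of automatic objects now run during stack unwinding instead of being bypassed.
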
diff --git a/storage/connect/tabpivot.h b/storage/connect/tabpivot.h
index 07d5c3e456b..6c2d53e9527 100644
--- a/storage/connect/tabpivot.h
+++ b/storage/connect/tabpivot.h
@@ -32,16 +32,16 @@ class PIVAID : public CSORT {
protected:
// Members
MYSQLC Myc; // MySQL connection class
- char *Host; // Host machine to use
- char *User; // User logon info
- char *Pwd; // Password logon info
- char *Database; // Database to be used by server
+ PCSZ Host; // Host machine to use
+ PCSZ User; // User logon info
+ PCSZ Pwd; // Password logon info
+ PCSZ Database; // Database to be used by server
PQRYRES Qryp; // Points to Query result block
- char *Tabname; // Name of source table
- char *Tabsrc; // SQL of source table
- char *Picol; // Pivot column name
- char *Fncol; // Function column name
- char *Skcol; // Skipped columns
+ PCSZ Tabname; // Name of source table
+ PCSZ Tabsrc; // SQL of source table
+ PCSZ Picol; // Pivot column name
+ PCSZ Fncol; // Function column name
+ PCSZ Skcol; // Skipped columns
PVBLK Rblkp; // The value block of the pivot column
int Port; // MySQL port number
}; // end of class PIVAID
diff --git a/storage/connect/tabsys.cpp b/storage/connect/tabsys.cpp
index 2ddd1c3c753..7f0d9881298 100644
--- a/storage/connect/tabsys.cpp
+++ b/storage/connect/tabsys.cpp
@@ -1,9 +1,9 @@
/************* TabSys C++ Program Source Code File (.CPP) **************/
/* PROGRAM NAME: TABSYS */
/* ------------- */
-/* Version 2.3 */
+/* Version 2.4 */
/* */
-/* Author Olivier BERTRAND 2004-2015 */
+/* Author Olivier BERTRAND 2004-2017 */
/* */
/* This program are the INI/CFG tables classes. */
/***********************************************************************/
@@ -355,7 +355,7 @@ void TDBINI::CloseDB(PGLOBAL)
/***********************************************************************/
/* INICOL public constructor. */
/***********************************************************************/
-INICOL::INICOL(PCOLDEF cdp, PTDB tdbp, PCOL cprec, int i, PSZ)
+INICOL::INICOL(PCOLDEF cdp, PTDB tdbp, PCOL cprec, int i, PCSZ)
: COLBLK(cdp, tdbp, i)
{
if (cprec) {
@@ -511,12 +511,12 @@ void INICOL::WriteColumn(PGLOBAL g)
if (strlen(p) > (unsigned)Long) {
sprintf(g->Message, MSG(VALUE_TOO_LONG), p, Name, Long);
- longjmp(g->jumper[g->jump_level], 31);
- } else if (Flag == 1) {
+ throw 31;
+ } else if (Flag == 1) {
if (tdbp->Mode == MODE_UPDATE) {
strcpy(g->Message, MSG(NO_SEC_UPDATE));
- longjmp(g->jumper[g->jump_level], 31);
- } else if (*p) {
+ throw 31;
+ } else if (*p) {
tdbp->Section = p;
} else
tdbp->Section = NULL;
@@ -524,8 +524,8 @@ void INICOL::WriteColumn(PGLOBAL g)
return;
} else if (!tdbp->Section) {
strcpy(g->Message, MSG(SEC_NAME_FIRST));
- longjmp(g->jumper[g->jump_level], 31);
- } // endif's
+ throw 31;
+ } // endif's
/*********************************************************************/
/* Updating must be done only when not in checking pass. */
@@ -536,8 +536,8 @@ void INICOL::WriteColumn(PGLOBAL g)
if (!rc) {
sprintf(g->Message, "Error %d writing to %s",
GetLastError(), tdbp->Ifile);
- longjmp(g->jumper[g->jump_level], 31);
- } // endif rc
+ throw 31;
+ } // endif rc
} // endif Status
@@ -769,7 +769,7 @@ int TDBXIN::DeleteDB(PGLOBAL g, int irc)
/***********************************************************************/
/* XINCOL public constructor. */
/***********************************************************************/
-XINCOL::XINCOL(PCOLDEF cdp, PTDB tdbp, PCOL cprec, int i, PSZ am)
+XINCOL::XINCOL(PCOLDEF cdp, PTDB tdbp, PCOL cprec, int i, PCSZ am)
: INICOL(cdp, tdbp, cprec, i, am)
{
} // end of XINCOL constructor
@@ -837,12 +837,12 @@ void XINCOL::WriteColumn(PGLOBAL g)
if (strlen(p) > (unsigned)Long) {
sprintf(g->Message, MSG(VALUE_TOO_LONG), p, Name, Long);
- longjmp(g->jumper[g->jump_level], 31);
- } else if (Flag == 1) {
+ throw 31;
+ } else if (Flag == 1) {
if (tdbp->Mode == MODE_UPDATE) {
strcpy(g->Message, MSG(NO_SEC_UPDATE));
- longjmp(g->jumper[g->jump_level], 31);
- } else if (*p) {
+ throw 31;
+ } else if (*p) {
tdbp->Section = p;
} else
tdbp->Section = NULL;
@@ -851,8 +851,8 @@ void XINCOL::WriteColumn(PGLOBAL g)
} else if (Flag == 2) {
if (tdbp->Mode == MODE_UPDATE) {
strcpy(g->Message, MSG(NO_KEY_UPDATE));
- longjmp(g->jumper[g->jump_level], 31);
- } else if (*p) {
+ throw 31;
+ } else if (*p) {
tdbp->Keycur = p;
} else
tdbp->Keycur = NULL;
@@ -860,8 +860,8 @@ void XINCOL::WriteColumn(PGLOBAL g)
return;
} else if (!tdbp->Section || !tdbp->Keycur) {
strcpy(g->Message, MSG(SEC_KEY_FIRST));
- longjmp(g->jumper[g->jump_level], 31);
- } // endif's
+ throw 31;
+ } // endif's
/*********************************************************************/
/* Updating must be done only when not in checking pass. */
@@ -872,8 +872,8 @@ void XINCOL::WriteColumn(PGLOBAL g)
if (!rc) {
sprintf(g->Message, "Error %d writing to %s",
GetLastError(), tdbp->Ifile);
- longjmp(g->jumper[g->jump_level], 31);
- } // endif rc
+ throw 31;
+ } // endif rc
} // endif Status
diff --git a/storage/connect/tabsys.h b/storage/connect/tabsys.h
index ff1b8335690..0c6017af177 100644
--- a/storage/connect/tabsys.h
+++ b/storage/connect/tabsys.h
@@ -61,8 +61,8 @@ class TDBINI : public TDBASE {
virtual int GetRecpos(void) {return N;}
virtual int GetProgCur(void) {return N;}
//virtual int GetAffectedRows(void) {return 0;}
- virtual PSZ GetFile(PGLOBAL g) {return Ifile;}
- virtual void SetFile(PGLOBAL g, PSZ fn) {Ifile = fn;}
+ virtual PCSZ GetFile(PGLOBAL g) {return Ifile;}
+ virtual void SetFile(PGLOBAL g, PCSZ fn) {Ifile = fn;}
virtual void ResetDB(void) {Seclist = Section = NULL; N = 0;}
virtual void ResetSize(void) {MaxSize = -1; Seclist = NULL;}
virtual int RowNumber(PGLOBAL g, bool b = false) {return N;}
@@ -80,7 +80,7 @@ class TDBINI : public TDBASE {
protected:
// Members
- char *Ifile; // The INI file
+ PCSZ Ifile; // The INI file
char *Seclist; // The section list
char *Section; // The current section
int Seclen; // Length of seclist buffer
@@ -93,7 +93,7 @@ class TDBINI : public TDBASE {
class INICOL : public COLBLK {
public:
// Constructors
- INICOL(PCOLDEF cdp, PTDB tdbp, PCOL cprec, int i, PSZ am = "INI");
+ INICOL(PCOLDEF cdp, PTDB tdbp, PCOL cprec, int i, PCSZ am = "INI");
INICOL(INICOL *colp, PTDB tdbp); // Constructor used in copy process
// Implementation
@@ -165,7 +165,7 @@ class TDBXIN : public TDBINI {
class XINCOL : public INICOL {
public:
// Constructors
- XINCOL(PCOLDEF cdp, PTDB tdbp, PCOL cprec, int i, PSZ am = "INI");
+ XINCOL(PCOLDEF cdp, PTDB tdbp, PCOL cprec, int i, PCSZ am = "INI");
XINCOL(XINCOL *colp, PTDB tdbp); // Constructor used in copy process
// Implementation
diff --git a/storage/connect/tabutil.cpp b/storage/connect/tabutil.cpp
index ad939db86be..158cf744a4a 100644
--- a/storage/connect/tabutil.cpp
+++ b/storage/connect/tabutil.cpp
@@ -119,7 +119,8 @@ PQRYRES TabColumns(PGLOBAL g, THD *thd, const char *db,
FLD_LENGTH, FLD_SCALE, FLD_RADIX, FLD_NULL,
FLD_REM, FLD_NO, FLD_CHARSET};
unsigned int length[] = {0, 4, 16, 4, 4, 4, 4, 4, 0, 32, 32};
- char *pn, *tn, *fld, *colname, *chset, *fmt, v;
+ PCSZ fmt;
+ char *pn, *tn, *fld, *colname, *chset, v;
int i, n, ncol = sizeof(buftyp) / sizeof(int);
int prec, len, type, scale;
int zconv = GetConvSize();
@@ -227,7 +228,7 @@ PQRYRES TabColumns(PGLOBAL g, THD *thd, const char *db,
fmt = MyDateFmt(fp->type());
prec = len = strlen(fmt);
} else {
- fmt = (char*)fp->option_struct->dateformat;
+ fmt = (PCSZ)fp->option_struct->dateformat;
prec = len = fp->field_length;
} // endif mysql
@@ -314,7 +315,7 @@ bool PRXDEF::DefineAM(PGLOBAL g, LPCSTR, int)
strcpy(g->Message, "Missing object table definition");
return true;
} else
- tab = "Noname";
+ tab = PlugDup(g, "Noname");
} else
// Analyze the table name, it may have the format: [dbname.]tabname
@@ -626,7 +627,7 @@ void TDBPRX::RemoveNext(PTABLE tp)
/***********************************************************************/
/* PRXCOL public constructor. */
/***********************************************************************/
-PRXCOL::PRXCOL(PCOLDEF cdp, PTDB tdbp, PCOL cprec, int i, PSZ am)
+PRXCOL::PRXCOL(PCOLDEF cdp, PTDB tdbp, PCOL cprec, int i, PCSZ am)
: COLBLK(cdp, tdbp, i)
{
if (cprec) {
@@ -741,7 +742,14 @@ void PRXCOL::ReadColumn(PGLOBAL g)
if (Nullable)
Value->SetNull(Value->IsNull());
- } // endif Colp
+ } else {
+ Value->Reset();
+
+ // Set null when applicable
+ if (Nullable)
+ Value->SetNull(true);
+
+ } // endif Colp
} // end of ReadColumn
diff --git a/storage/connect/tabutil.h b/storage/connect/tabutil.h
index 8e56aecff86..62678508ca1 100644
--- a/storage/connect/tabutil.h
+++ b/storage/connect/tabutil.h
@@ -71,7 +71,7 @@ class DllExport TDBPRX : public TDBASE {
virtual int GetRecpos(void) {return Tdbp->GetRecpos();}
virtual void ResetDB(void) {Tdbp->ResetDB();}
virtual int RowNumber(PGLOBAL g, bool b = FALSE);
- virtual PSZ GetServer(void) {return (Tdbp) ? Tdbp->GetServer() : (PSZ)"?";}
+ virtual PCSZ GetServer(void) {return (Tdbp) ? Tdbp->GetServer() : (PSZ)"?";}
// Database routines
virtual PCOL MakeCol(PGLOBAL g, PCOLDEF cdp, PCOL cprec, int n);
@@ -101,7 +101,7 @@ class DllExport PRXCOL : public COLBLK {
friend class TDBOCCUR;
public:
// Constructors
- PRXCOL(PCOLDEF cdp, PTDB tdbp, PCOL cprec, int i, PSZ am = "PRX");
+ PRXCOL(PCOLDEF cdp, PTDB tdbp, PCOL cprec, int i, PCSZ am = "PRX");
PRXCOL(PRXCOL *colp, PTDB tdbp); // Constructor used in copy process
// Implementation
diff --git a/storage/connect/tabvct.cpp b/storage/connect/tabvct.cpp
index 282fb55a43c..533986e44da 100644
--- a/storage/connect/tabvct.cpp
+++ b/storage/connect/tabvct.cpp
@@ -1,11 +1,11 @@
/************* TabVct C++ Program Source Code File (.CPP) **************/
/* PROGRAM NAME: TABVCT */
/* ------------- */
-/* Version 3.8 */
+/* Version 3.9 */
/* */
/* COPYRIGHT: */
/* ---------- */
-/* (C) Copyright to the author Olivier BERTRAND 1999-2015 */
+/* (C) Copyright to the author Olivier BERTRAND 1999-2017 */
/* */
/* WHAT THIS PROGRAM DOES: */
/* ----------------------- */
@@ -174,7 +174,7 @@ bool VCTDEF::Erase(char *filename)
/***********************************************************************/
int VCTDEF::MakeFnPattern(char *fpat)
{
- char pat[8];
+ char pat[16];
#if defined(__WIN__)
char drive[_MAX_DRIVE];
#else
@@ -490,15 +490,15 @@ void VCTCOL::ReadBlock(PGLOBAL g)
#if defined(_DEBUG)
if (!Blk) {
strcpy(g->Message, MSG(TO_BLK_IS_NULL));
- longjmp(g->jumper[g->jump_level], 58);
- } // endif
+ throw 58;
+ } // endif
#endif
/*********************************************************************/
/* Read column block according to used access method. */
/*********************************************************************/
if (txfp->ReadBlock(g, this))
- longjmp(g->jumper[g->jump_level], 6);
+ throw 6;
ColBlk = txfp->CurBlk;
ColPos = -1; // Any invalid position
@@ -518,15 +518,15 @@ void VCTCOL::WriteBlock(PGLOBAL g)
#if defined(_DEBUG)
if (!Blk) {
strcpy(g->Message, MSG(BLK_IS_NULL));
- longjmp(g->jumper[g->jump_level], 56);
- } // endif
+ throw 56;
+ } // endif
#endif
/*******************************************************************/
/* Write column block according to used access method. */
/*******************************************************************/
if (txfp->WriteBlock(g, this))
- longjmp(g->jumper[g->jump_level], 6);
+ throw 6;
Modif = 0;
} // endif Modif
diff --git a/storage/connect/tabvir.cpp b/storage/connect/tabvir.cpp
index 155c71fe268..84b3dd1787b 100644
--- a/storage/connect/tabvir.cpp
+++ b/storage/connect/tabvir.cpp
@@ -1,6 +1,6 @@
/************* tdbvir C++ Program Source Code File (.CPP) **************/
-/* PROGRAM NAME: tdbvir.cpp Version 1.1 */
-/* (C) Copyright to the author Olivier BERTRAND 2014 */
+/* PROGRAM NAME: tdbvir.cpp Version 1.2 */
+/* (C) Copyright to the author Olivier BERTRAND 2014-2017 */
/* This program are the VIR classes DB execution routines. */
/***********************************************************************/
@@ -269,7 +269,7 @@ int TDBVIR::DeleteDB(PGLOBAL g, int)
/***********************************************************************/
/* VIRCOL public constructor. */
/***********************************************************************/
-VIRCOL::VIRCOL(PCOLDEF cdp, PTDB tdbp, PCOL cprec, int i, PSZ)
+VIRCOL::VIRCOL(PCOLDEF cdp, PTDB tdbp, PCOL cprec, int i, PCSZ)
: COLBLK(cdp, tdbp, i)
{
if (cprec) {
@@ -289,8 +289,8 @@ void VIRCOL::ReadColumn(PGLOBAL g)
{
// This should never be called
sprintf(g->Message, "ReadColumn: Column %s is not virtual", Name);
- longjmp(g->jumper[g->jump_level], TYPE_COLBLK);
- } // end of ReadColumn
+ throw TYPE_COLBLK;
+} // end of ReadColumn
/* ---------------------------TDBVICL class -------------------------- */
diff --git a/storage/connect/tabvir.h b/storage/connect/tabvir.h
index a53aceaeceb..e7313bbae67 100644
--- a/storage/connect/tabvir.h
+++ b/storage/connect/tabvir.h
@@ -76,7 +76,7 @@ class VIRCOL : public COLBLK {
friend class TDBVIR;
public:
// Constructors
- VIRCOL(PCOLDEF cdp, PTDB tdbp, PCOL cprec, int i, PSZ am = "VIRTUAL");
+ VIRCOL(PCOLDEF cdp, PTDB tdbp, PCOL cprec, int i, PCSZ am = "VIRTUAL");
// Implementation
virtual int GetAmType(void) {return TYPE_AM_VIR;}
diff --git a/storage/connect/tabwmi.cpp b/storage/connect/tabwmi.cpp
index 4871a1d66dc..335ffce5d7f 100644
--- a/storage/connect/tabwmi.cpp
+++ b/storage/connect/tabwmi.cpp
@@ -27,7 +27,7 @@
/***********************************************************************/
/* Initialize WMI operations. */
/***********************************************************************/
-PWMIUT InitWMI(PGLOBAL g, char *nsp, char *classname)
+PWMIUT InitWMI(PGLOBAL g, PCSZ nsp, PCSZ classname)
{
IWbemLocator *loc;
char *p;
@@ -132,7 +132,7 @@ PWMIUT InitWMI(PGLOBAL g, char *nsp, char *classname)
/* WMIColumns: constructs the result blocks containing the description */
/* of all the columns of a WMI table of a specified class. */
/***********************************************************************/
-PQRYRES WMIColumns(PGLOBAL g, char *nsp, char *cls, bool info)
+PQRYRES WMIColumns(PGLOBAL g, PCSZ nsp, PCSZ cls, bool info)
{
static int buftyp[] = {TYPE_STRING, TYPE_SHORT, TYPE_STRING,
TYPE_INT, TYPE_INT, TYPE_SHORT};
diff --git a/storage/connect/tabwmi.h b/storage/connect/tabwmi.h
index 6abb85453a1..7a18453374e 100644
--- a/storage/connect/tabwmi.h
+++ b/storage/connect/tabwmi.h
@@ -27,7 +27,7 @@ typedef struct _WMIutil {
/***********************************************************************/
/* Functions used externally. */
/***********************************************************************/
-PQRYRES WMIColumns(PGLOBAL g, char *nsp, char *cls, bool info);
+PQRYRES WMIColumns(PGLOBAL g, PCSZ nsp, PCSZ cls, bool info);
/* -------------------------- WMI classes ---------------------------- */
diff --git a/storage/connect/tabxml.cpp b/storage/connect/tabxml.cpp
index 52cf3d3812f..80d4395058e 100644
--- a/storage/connect/tabxml.cpp
+++ b/storage/connect/tabxml.cpp
@@ -1,9 +1,9 @@
/************* Tabxml C++ Program Source Code File (.CPP) **************/
/* PROGRAM NAME: TABXML */
/* ------------- */
-/* Version 2.9 */
+/* Version 3.0 */
/* */
-/* Author Olivier BERTRAND 2007 - 2016 */
+/* Author Olivier BERTRAND 2007 - 2017 */
/* */
/* This program are the XML tables classes using MS-DOM or libxml2. */
/***********************************************************************/
@@ -118,10 +118,11 @@ PQRYRES XMLColumns(PGLOBAL g, char *db, char *tab, PTOS topt, bool info)
static XFLD fldtyp[] = {FLD_NAME, FLD_TYPE, FLD_TYPENAME, FLD_PREC,
FLD_LENGTH, FLD_SCALE, FLD_NULL, FLD_FORMAT};
static unsigned int length[] = {0, 6, 8, 10, 10, 6, 6, 0};
- char *fn, *op, colname[65], fmt[129], buf[512];
+ char colname[65], fmt[129], buf[512];
int i, j, lvl, n = 0;
int ncol = sizeof(buftyp) / sizeof(int);
bool ok = true;
+ PCSZ fn, op;
PXCL xcol, xcp, fxcp = NULL, pxcp = NULL;
PLVL *lvlp, vp;
PXNODE node = NULL;
@@ -157,7 +158,10 @@ PQRYRES XMLColumns(PGLOBAL g, char *db, char *tab, PTOS topt, bool info)
tdp = new(g) XMLDEF;
tdp->Fn = fn;
- tdp->Database = SetPath(g, db);
+
+ if (!(tdp->Database = SetPath(g, db)))
+ return NULL;
+
tdp->Tabname = tab;
tdp->Zipped = GetBooleanTableOption(g, topt, "Zipped", false);
tdp->Entry = GetStringTableOption(g, topt, "Entry", NULL);
@@ -359,7 +363,7 @@ PQRYRES XMLColumns(PGLOBAL g, char *db, char *tab, PTOS topt, bool info)
skipit:
if (trace)
- htrc("CSVColumns: n=%d len=%d\n", n, length[0]);
+ htrc("XMLColumns: n=%d len=%d\n", n, length[0]);
/*********************************************************************/
/* Allocate the structures used to refer to the result set. */
@@ -448,7 +452,8 @@ XMLDEF::XMLDEF(void)
/***********************************************************************/
bool XMLDEF::DefineAM(PGLOBAL g, LPCSTR am, int poff)
{
- char *defrow, *defcol, buf[10];
+ PCSZ defrow, defcol;
+ char buf[10];
Fn = GetStringCatInfo(g, "Filename", NULL);
Encoding = GetStringCatInfo(g, "Encoding", "UTF-8");
@@ -1314,8 +1319,8 @@ void TDBXML::CloseDB(PGLOBAL g)
Docp->CloseDoc(g, To_Xb);
// This causes a crash in Diagnostics_area::set_error_status
-// longjmp(g->jumper[g->jump_level], TYPE_AM_XML);
- } // endif DumpDoc
+// throw TYPE_AM_XML;
+ } // endif DumpDoc
} // endif Changed
@@ -1357,8 +1362,8 @@ void TDBXML::CloseDB(PGLOBAL g)
/***********************************************************************/
/* XMLCOL public constructor. */
/***********************************************************************/
-XMLCOL::XMLCOL(PCOLDEF cdp, PTDB tdbp, PCOL cprec, int i, PSZ am)
- : COLBLK(cdp, tdbp, i)
+XMLCOL::XMLCOL(PCOLDEF cdp, PTDB tdbp, PCOL cprec, int i, PCSZ am)
+ : COLBLK(cdp, tdbp, i)
{
if (cprec) {
Next = cprec->GetNext();
@@ -1637,8 +1642,8 @@ void XMLCOL::ReadColumn(PGLOBAL g)
if (ValNode->GetType() != XML_ELEMENT_NODE &&
ValNode->GetType() != XML_ATTRIBUTE_NODE) {
sprintf(g->Message, MSG(BAD_VALNODE), ValNode->GetType(), Name);
- longjmp(g->jumper[g->jump_level], TYPE_AM_XML);
- } // endif type
+ throw TYPE_AM_XML;
+ } // endif type
// Get the Xname value from the XML file
switch (ValNode->GetContent(g, Valbuf, Long + 1)) {
@@ -1648,8 +1653,8 @@ void XMLCOL::ReadColumn(PGLOBAL g)
PushWarning(g, Tdbp);
break;
default:
- longjmp(g->jumper[g->jump_level], TYPE_AM_XML);
- } // endswitch
+ throw TYPE_AM_XML;
+ } // endswitch
Value->SetValue_psz(Valbuf);
} else {
@@ -1699,7 +1704,7 @@ void XMLCOL::WriteColumn(PGLOBAL g)
/* For columns having an Xpath, the Clist must be updated. */
/*********************************************************************/
if (Tdbp->CheckRow(g, Nod || Tdbp->Colname))
- longjmp(g->jumper[g->jump_level], TYPE_AM_XML);
+ throw TYPE_AM_XML;
/*********************************************************************/
/* Null values are represented by no node. */
@@ -1771,8 +1776,8 @@ void XMLCOL::WriteColumn(PGLOBAL g)
if (ColNode == NULL) {
strcpy(g->Message, MSG(COL_ALLOC_ERR));
- longjmp(g->jumper[g->jump_level], TYPE_AM_XML);
- } // endif ColNode
+ throw TYPE_AM_XML;
+ } // endif ColNode
} // endif ColNode
@@ -1800,8 +1805,8 @@ void XMLCOL::WriteColumn(PGLOBAL g)
if (strlen(p) > (unsigned)Long) {
sprintf(g->Message, MSG(VALUE_TOO_LONG), p, Name, Long);
- longjmp(g->jumper[g->jump_level], TYPE_AM_XML);
- } else
+ throw TYPE_AM_XML;
+ } else
strcpy(Valbuf, p);
/*********************************************************************/
@@ -1850,8 +1855,8 @@ void XMULCOL::ReadColumn(PGLOBAL g)
if (ValNode->GetType() != XML_ELEMENT_NODE &&
ValNode->GetType() != XML_ATTRIBUTE_NODE) {
sprintf(g->Message, MSG(BAD_VALNODE), ValNode->GetType(), Name);
- longjmp(g->jumper[g->jump_level], TYPE_AM_XML);
- } // endif type
+ throw TYPE_AM_XML;
+ } // endif type
// Get the Xname value from the XML file
switch (ValNode->GetContent(g, p, (b ? Long : len))) {
@@ -1936,7 +1941,7 @@ void XMULCOL::WriteColumn(PGLOBAL g)
/* For columns having an Xpath, the Clist must be updated. */
/*********************************************************************/
if (Tdbp->CheckRow(g, Nod))
- longjmp(g->jumper[g->jump_level], TYPE_AM_XML);
+ throw TYPE_AM_XML;
/*********************************************************************/
/* Find the column and value nodes to update or insert. */
@@ -1985,8 +1990,8 @@ void XMULCOL::WriteColumn(PGLOBAL g)
if (len > 1 && !Tdbp->Xpand) {
sprintf(g->Message, MSG(BAD_VAL_UPDATE), Name);
- longjmp(g->jumper[g->jump_level], TYPE_AM_XML);
- } else
+ throw TYPE_AM_XML;
+ } else
ValNode = Nlx->GetItem(g, Tdbp->Nsub, Vxnp);
} else // Inod != Nod
@@ -2027,8 +2032,8 @@ void XMULCOL::WriteColumn(PGLOBAL g)
if (ColNode == NULL) {
strcpy(g->Message, MSG(COL_ALLOC_ERR));
- longjmp(g->jumper[g->jump_level], TYPE_AM_XML);
- } // endif ColNode
+ throw TYPE_AM_XML;
+ } // endif ColNode
} // endif ColNode
@@ -2056,8 +2061,8 @@ void XMULCOL::WriteColumn(PGLOBAL g)
if (strlen(p) > (unsigned)Long) {
sprintf(g->Message, MSG(VALUE_TOO_LONG), p, Name, Long);
- longjmp(g->jumper[g->jump_level], TYPE_AM_XML);
- } else
+ throw TYPE_AM_XML;
+ } else
strcpy(Valbuf, p);
/*********************************************************************/
@@ -2088,8 +2093,8 @@ void XPOSCOL::ReadColumn(PGLOBAL g)
if (Tdbp->Clist == NULL) {
strcpy(g->Message, MSG(MIS_TAG_LIST));
- longjmp(g->jumper[g->jump_level], TYPE_AM_XML);
- } // endif Clist
+ throw TYPE_AM_XML;
+ } // endif Clist
if ((ValNode = Tdbp->Clist->GetItem(g, Rank, Vxnp))) {
// Get the column value from the XML file
@@ -2100,8 +2105,8 @@ void XPOSCOL::ReadColumn(PGLOBAL g)
PushWarning(g, Tdbp);
break;
default:
- longjmp(g->jumper[g->jump_level], TYPE_AM_XML);
- } // endswitch
+ throw TYPE_AM_XML;
+ } // endswitch
Value->SetValue_psz(Valbuf);
} else {
@@ -2151,15 +2156,15 @@ void XPOSCOL::WriteColumn(PGLOBAL g)
/* For all columns the Clist must be updated. */
/*********************************************************************/
if (Tdbp->CheckRow(g, true))
- longjmp(g->jumper[g->jump_level], TYPE_AM_XML);
+ throw TYPE_AM_XML;
/*********************************************************************/
/* Find the column and value nodes to update or insert. */
/*********************************************************************/
if (Tdbp->Clist == NULL) {
strcpy(g->Message, MSG(MIS_TAG_LIST));
- longjmp(g->jumper[g->jump_level], TYPE_AM_XML);
- } // endif Clist
+ throw TYPE_AM_XML;
+ } // endif Clist
n = Tdbp->Clist->GetLength();
k = Rank;
@@ -2183,8 +2188,8 @@ void XPOSCOL::WriteColumn(PGLOBAL g)
if (strlen(p) > (unsigned)Long) {
sprintf(g->Message, MSG(VALUE_TOO_LONG), p, Name, Long);
- longjmp(g->jumper[g->jump_level], TYPE_AM_XML);
- } else
+ throw TYPE_AM_XML;
+ } else
strcpy(Valbuf, p);
/*********************************************************************/
diff --git a/storage/connect/tabxml.h b/storage/connect/tabxml.h
index 65b353072cb..813f62dde52 100644
--- a/storage/connect/tabxml.h
+++ b/storage/connect/tabxml.h
@@ -31,7 +31,7 @@ class DllExport XMLDEF : public TABDEF { /* Logical table description */
protected:
// Members
- char *Fn; /* Path/Name of corresponding file */
+ PCSZ Fn; /* Path/Name of corresponding file */
char *Encoding; /* New XML table file encoding */
char *Tabname; /* Name of Table node */
char *Rowname; /* Name of first level nodes */
@@ -42,7 +42,7 @@ class DllExport XMLDEF : public TABDEF { /* Logical table description */
char *DefNs; /* Dummy name of default namespace */
char *Attrib; /* Table node attributes */
char *Hdattr; /* Header node attributes */
- char *Entry; /* Zip entry name or pattern */
+ PCSZ Entry; /* Zip entry name or pattern */
int Coltype; /* Default column type */
int Limit; /* Limit of multiple values */
int Header; /* n first rows are header rows */
@@ -74,8 +74,8 @@ class DllExport TDBXML : public TDBASE {
virtual PTDB Clone(PTABS t);
virtual int GetRecpos(void);
virtual int GetProgCur(void) {return N;}
- virtual PSZ GetFile(PGLOBAL g) {return Xfile;}
- virtual void SetFile(PGLOBAL g, PSZ fn) {Xfile = fn;}
+ virtual PCSZ GetFile(PGLOBAL g) {return Xfile;}
+ virtual void SetFile(PGLOBAL g, PCSZ fn) {Xfile = fn;}
virtual void ResetDB(void) {N = 0;}
virtual void ResetSize(void) {MaxSize = -1;}
virtual int RowNumber(PGLOBAL g, bool b = false);
@@ -127,7 +127,7 @@ class DllExport TDBXML : public TDBASE {
bool Void; // True if the file does not exist
bool Zipped; // True if Zipped XML file(s)
bool Mulentries; // True if multiple entries in zip file
- char *Xfile; // The XML file
+ PCSZ Xfile; // The XML file
char *Enc; // New XML table file encoding
char *Tabname; // Name of Table node
char *Rowname; // Name of first level nodes
@@ -138,7 +138,7 @@ class DllExport TDBXML : public TDBASE {
char *DefNs; // Dummy name of default namespace
char *Attrib; // Table node attribut(s)
char *Hdattr; // Header node attribut(s)
- char *Entry; // Zip entry name or pattern
+ PCSZ Entry; // Zip entry name or pattern
int Coltype; // Default column type
int Limit; // Limit of multiple values
int Header; // n first rows are header rows
@@ -155,7 +155,7 @@ class DllExport TDBXML : public TDBASE {
class XMLCOL : public COLBLK {
public:
// Constructors
- XMLCOL(PCOLDEF cdp, PTDB tdbp, PCOL cprec, int i, PSZ am = "XML");
+ XMLCOL(PCOLDEF cdp, PTDB tdbp, PCOL cprec, int i, PCSZ am = "XML");
XMLCOL(XMLCOL *colp, PTDB tdbp); // Constructor used in copy process
// Implementation
diff --git a/storage/connect/tabzip.cpp b/storage/connect/tabzip.cpp
index b91059a3843..c026744dba8 100644
--- a/storage/connect/tabzip.cpp
+++ b/storage/connect/tabzip.cpp
@@ -195,8 +195,8 @@ void TDBZIP::CloseDB(PGLOBAL g)
/***********************************************************************/
/* ZIPCOL public constructor. */
/***********************************************************************/
-ZIPCOL::ZIPCOL(PCOLDEF cdp, PTDB tdbp, PCOL cprec, int i, PSZ am)
- : COLBLK(cdp, tdbp, i)
+ZIPCOL::ZIPCOL(PCOLDEF cdp, PTDB tdbp, PCOL cprec, int i, PCSZ am)
+ : COLBLK(cdp, tdbp, i)
{
if (cprec) {
Next = cprec->GetNext();
diff --git a/storage/connect/tabzip.h b/storage/connect/tabzip.h
index dcec3475371..32b15281f81 100644
--- a/storage/connect/tabzip.h
+++ b/storage/connect/tabzip.h
@@ -34,7 +34,7 @@ public:
protected:
// Members
- PSZ target; // The inside file to query
+ PCSZ target; // The inside file to query
}; // end of ZIPDEF
/***********************************************************************/
@@ -68,7 +68,7 @@ protected:
// Members
unzFile zipfile; // The ZIP container file
- PSZ zfn; // The ZIP file name
+ PCSZ zfn; // The ZIP file name
//PSZ target;
unz_file_info64 finfo; // The current file info
char fn[FILENAME_MAX]; // The current file name
@@ -82,7 +82,7 @@ class DllExport ZIPCOL : public COLBLK {
friend class TDBZIP;
public:
// Constructors
- ZIPCOL(PCOLDEF cdp, PTDB tdbp, PCOL cprec, int i, PSZ am = "ZIP");
+ ZIPCOL(PCOLDEF cdp, PTDB tdbp, PCOL cprec, int i, PCSZ am = "ZIP");
// Implementation
virtual int GetAmType(void) { return TYPE_AM_ZIP; }
diff --git a/storage/connect/valblk.cpp b/storage/connect/valblk.cpp
index 5fefcba5856..5b98f3eb425 100644
--- a/storage/connect/valblk.cpp
+++ b/storage/connect/valblk.cpp
@@ -1,7 +1,7 @@
/************ Valblk C++ Functions Source Code File (.CPP) *************/
-/* Name: VALBLK.CPP Version 2.1 */
+/* Name: VALBLK.CPP Version 2.3 */
/* */
-/* (C) Copyright to the author Olivier BERTRAND 2005-2015 */
+/* (C) Copyright to the author Olivier BERTRAND 2005-2017 */
/* */
/* This file contains the VALBLK and derived classes functions. */
/* Second family is VALBLK, representing simple suballocated arrays */
@@ -138,14 +138,14 @@ PSZ VALBLK::GetCharValue(int)
assert(g);
sprintf(g->Message, MSG(NO_CHAR_FROM), Type);
- longjmp(g->jumper[g->jump_level], Type);
- return NULL;
+ throw Type;
+ return NULL;
} // end of GetCharValue
/***********************************************************************/
/* Set format so formatted dates can be converted on input. */
/***********************************************************************/
-bool VALBLK::SetFormat(PGLOBAL g, PSZ, int, int)
+bool VALBLK::SetFormat(PGLOBAL g, PCSZ, int, int)
{
sprintf(g->Message, MSG(NO_DATE_FMT), Type);
return true;
@@ -206,8 +206,8 @@ void VALBLK::ChkIndx(int n)
if (n < 0 || n >= Nval) {
PGLOBAL& g = Global;
strcpy(g->Message, MSG(BAD_VALBLK_INDX));
- longjmp(g->jumper[g->jump_level], Type);
- } // endif n
+ throw Type;
+ } // endif n
} // end of ChkIndx
@@ -216,8 +216,8 @@ void VALBLK::ChkTyp(PVAL v)
if (Check && (Type != v->GetType() || Unsigned != v->IsUnsigned())) {
PGLOBAL& g = Global;
strcpy(g->Message, MSG(VALTYPE_NOMATCH));
- longjmp(g->jumper[g->jump_level], Type);
- } // endif Type
+ throw Type;
+ } // endif Type
} // end of ChkTyp
@@ -226,8 +226,8 @@ void VALBLK::ChkTyp(PVBLK vb)
if (Check && (Type != vb->GetType() || Unsigned != vb->IsUnsigned())) {
PGLOBAL& g = Global;
strcpy(g->Message, MSG(VALTYPE_NOMATCH));
- longjmp(g->jumper[g->jump_level], Type);
- } // endif Type
+ throw Type;
+ } // endif Type
} // end of ChkTyp
@@ -335,15 +335,15 @@ uchar TYPBLK<uchar>::GetTypedValue(PVAL valp)
/* Set one value in a block from a zero terminated string. */
/***********************************************************************/
template <class TYPE>
-void TYPBLK<TYPE>::SetValue(PSZ p, int n)
+void TYPBLK<TYPE>::SetValue(PCSZ p, int n)
{
ChkIndx(n);
if (Check) {
PGLOBAL& g = Global;
strcpy(g->Message, MSG(BAD_SET_STRING));
- longjmp(g->jumper[g->jump_level], Type);
- } // endif Check
+ throw Type;
+ } // endif Check
bool minus;
ulonglong maxval = MaxVal();
@@ -385,15 +385,15 @@ template <>
ulonglong TYPBLK<ulonglong>::MaxVal(void) {return ULONGLONG_MAX;}
template <>
-void TYPBLK<double>::SetValue(PSZ p, int n)
+void TYPBLK<double>::SetValue(PCSZ p, int n)
{
ChkIndx(n);
if (Check) {
PGLOBAL& g = Global;
strcpy(g->Message, MSG(BAD_SET_STRING));
- longjmp(g->jumper[g->jump_level], Type);
- } // endif Check
+ throw Type;
+ } // endif Check
Typp[n] = atof(p);
SetNull(n, false);
@@ -403,7 +403,7 @@ void TYPBLK<double>::SetValue(PSZ p, int n)
/* Set one value in a block from an array of characters. */
/***********************************************************************/
template <class TYPE>
-void TYPBLK<TYPE>::SetValue(char *sp, uint len, int n)
+void TYPBLK<TYPE>::SetValue(PCSZ sp, uint len, int n)
{
PGLOBAL& g = Global;
PSZ spz = (PSZ)PlugSubAlloc(g, NULL, 0); // Temporary
@@ -778,7 +778,7 @@ void CHRBLK::SetValue(PVAL valp, int n)
/***********************************************************************/
/* Set one value in a block from a zero terminated string. */
/***********************************************************************/
-void CHRBLK::SetValue(PSZ sp, int n)
+void CHRBLK::SetValue(PCSZ sp, int n)
{
uint len = (sp) ? strlen(sp) : 0;
SetValue(sp, len, n);
@@ -787,7 +787,7 @@ void CHRBLK::SetValue(PSZ sp, int n)
/***********************************************************************/
/* Set one value in a block from an array of characters. */
/***********************************************************************/
-void CHRBLK::SetValue(char *sp, uint len, int n)
+void CHRBLK::SetValue(const char *sp, uint len, int n)
{
char *p = Chrp + n * Long;
@@ -795,8 +795,8 @@ void CHRBLK::SetValue(char *sp, uint len, int n)
if (Check && (signed)len > Long) {
PGLOBAL& g = Global;
strcpy(g->Message, MSG(SET_STR_TRUNC));
- longjmp(g->jumper[g->jump_level], Type);
- } // endif Check
+ throw Type;
+ } // endif Check
#endif // _DEBUG
if (sp)
@@ -823,8 +823,8 @@ void CHRBLK::SetValue(PVBLK pv, int n1, int n2)
if (Type != pv->GetType() || Long != ((CHRBLK*)pv)->Long) {
PGLOBAL& g = Global;
strcpy(g->Message, MSG(BLKTYPLEN_MISM));
- longjmp(g->jumper[g->jump_level], Type);
- } // endif Type
+ throw Type;
+ } // endif Type
if (!(b = pv->IsNull(n2)))
memcpy(Chrp + n1 * Long, ((CHRBLK*)pv)->Chrp + n2 * Long, Long);
@@ -874,8 +874,8 @@ void CHRBLK::SetValues(PVBLK pv, int k, int n)
if (Type != pv->GetType() || Long != ((CHRBLK*)pv)->Long) {
PGLOBAL& g = Global;
strcpy(g->Message, MSG(BLKTYPLEN_MISM));
- longjmp(g->jumper[g->jump_level], Type);
- } // endif Type
+ throw Type;
+ } // endif Type
#endif // _DEBUG
char *p = ((CHRBLK*)pv)->Chrp;
@@ -1152,7 +1152,7 @@ void STRBLK::SetValue(PVAL valp, int n)
/***********************************************************************/
/* Set one value in a block from a zero terminated string. */
/***********************************************************************/
-void STRBLK::SetValue(PSZ p, int n)
+void STRBLK::SetValue(PCSZ p, int n)
{
if (p) {
if (!Sorted || !n || !Strp[n-1] || strcmp(p, Strp[n-1]))
@@ -1168,7 +1168,7 @@ void STRBLK::SetValue(PSZ p, int n)
/***********************************************************************/
/* Set one value in a block from an array of characters. */
/***********************************************************************/
-void STRBLK::SetValue(char *sp, uint len, int n)
+void STRBLK::SetValue(const char *sp, uint len, int n)
{
PSZ p;
@@ -1316,7 +1316,7 @@ DATBLK::DATBLK(void *mp, int nval) : TYPBLK<int>(mp, nval, TYPE_INT)
/***********************************************************************/
/* Set format so formatted dates can be converted on input. */
/***********************************************************************/
-bool DATBLK::SetFormat(PGLOBAL g, PSZ fmt, int len, int year)
+bool DATBLK::SetFormat(PGLOBAL g, PCSZ fmt, int len, int year)
{
if (!(Dvalp = AllocateValue(g, TYPE_DATE, len, year, false, fmt)))
return true;
@@ -1343,7 +1343,7 @@ char *DATBLK::GetCharString(char *p, int n)
/***********************************************************************/
/* Set one value in a block from a char string. */
/***********************************************************************/
-void DATBLK::SetValue(PSZ p, int n)
+void DATBLK::SetValue(PCSZ p, int n)
{
if (Dvalp) {
// Decode the string according to format
diff --git a/storage/connect/valblk.h b/storage/connect/valblk.h
index c3cad79b234..38a73424985 100644
--- a/storage/connect/valblk.h
+++ b/storage/connect/valblk.h
@@ -91,7 +91,7 @@ class VALBLK : public BLOCK {
virtual char *GetCharString(char *p, int n) = 0;
virtual void ReAlloc(void *mp, int n) {Blkp = mp; Nval = n;}
virtual void Reset(int n) = 0;
- virtual bool SetFormat(PGLOBAL g, PSZ fmt, int len, int year = 0);
+ virtual bool SetFormat(PGLOBAL g, PCSZ fmt, int len, int year = 0);
virtual void SetPrec(int p) {}
virtual bool IsCi(void) {return false;}
@@ -105,8 +105,8 @@ class VALBLK : public BLOCK {
virtual void SetValue(double, int) {assert(false);}
virtual void SetValue(char, int) {assert(false);}
virtual void SetValue(uchar, int) {assert(false);}
- virtual void SetValue(PSZ, int) {assert(false);}
- virtual void SetValue(char *, uint, int) {assert(false);}
+ virtual void SetValue(PCSZ, int) {assert(false);}
+ virtual void SetValue(const char *, uint, int) {assert(false);}
virtual void SetValue(PVAL valp, int n) = 0;
virtual void SetValue(PVBLK pv, int n1, int n2) = 0;
virtual void SetMin(PVAL valp, int n) = 0;
@@ -165,8 +165,8 @@ class TYPBLK : public VALBLK {
// Methods
using VALBLK::SetValue;
- virtual void SetValue(PSZ sp, int n);
- virtual void SetValue(char *sp, uint len, int n);
+ virtual void SetValue(PCSZ sp, int n);
+ virtual void SetValue(const char *sp, uint len, int n);
virtual void SetValue(short sval, int n)
{Typp[n] = (TYPE)sval; SetNull(n, false);}
virtual void SetValue(ushort sval, int n)
@@ -236,8 +236,8 @@ class CHRBLK : public VALBLK {
// Methods
using VALBLK::SetValue;
- virtual void SetValue(PSZ sp, int n);
- virtual void SetValue(char *sp, uint len, int n);
+ virtual void SetValue(PCSZ sp, int n);
+ virtual void SetValue(const char *sp, uint len, int n);
virtual void SetValue(PVAL valp, int n);
virtual void SetValue(PVBLK pv, int n1, int n2);
virtual void SetMin(PVAL valp, int n);
@@ -290,8 +290,8 @@ class STRBLK : public VALBLK {
// Methods
using VALBLK::SetValue;
- virtual void SetValue(PSZ sp, int n);
- virtual void SetValue(char *sp, uint len, int n);
+ virtual void SetValue(PCSZ sp, int n);
+ virtual void SetValue(const char *sp, uint len, int n);
virtual void SetValue(PVAL valp, int n);
virtual void SetValue(PVBLK pv, int n1, int n2);
virtual void SetMin(PVAL valp, int n);
@@ -322,12 +322,12 @@ class DATBLK : public TYPBLK<int> {
DATBLK(void *mp, int size);
// Implementation
- virtual bool SetFormat(PGLOBAL g, PSZ fmt, int len, int year = 0);
+ virtual bool SetFormat(PGLOBAL g, PCSZ fmt, int len, int year = 0);
virtual char *GetCharString(char *p, int n);
// Methods
using TYPBLK<int>::SetValue;
- virtual void SetValue(PSZ sp, int n);
+ virtual void SetValue(PCSZ sp, int n);
protected:
// Members
@@ -352,7 +352,7 @@ class PTRBLK : public STRBLK {
// Methods
using STRBLK::SetValue;
using STRBLK::CompVal;
- virtual void SetValue(PSZ p, int n) {Strp[n] = p;}
+ virtual void SetValue(PCSZ p, int n) {Strp[n] = (char*)p;}
virtual int CompVal(int i1, int i2);
protected:
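
The valblk.h hunks above widen the string-setting overloads from PSZ / char* to PCSZ / const char*; only PTRBLK, which stores the pointer itself rather than copying, keeps a cast ((char*)p). A hedged sketch with simplified stand-in classes (not CONNECT's real CHRBLK/PTRBLK) showing why the copying overloads want const while the pointer-keeping one needs the cast:

    #include <cstring>

    typedef const char *PCSZ;               // assumed CONNECT typedef

    // Copying block: fixed-width slots, the source string is never written to.
    struct CharBlock {
      char *Chrp; int Long;
      void SetValue(const char *sp, unsigned len, int n) {
        char *p = Chrp + n * Long;
        unsigned k = len < (unsigned)Long ? len : (unsigned)Long;
        std::memcpy(p, sp, k);
        if ((int)k < Long) std::memset(p + k, ' ', Long - k);  // blank pad
      }
      void SetValue(PCSZ sp, int n) {
        SetValue(sp, sp ? (unsigned)std::strlen(sp) : 0, n);
      }
    };

    // Pointer block: stores the pointer into a char*[] slot, so the const
    // qualifier has to be cast away, as the (char*)p in PTRBLK does above.
    struct PtrBlock {
      char **Strp;
      void SetValue(PCSZ p, int n) { Strp[n] = const_cast<char *>(p); }
    };

    int main() {
      char store[16] = {0};
      CharBlock cb{store, 8};
      cb.SetValue("abc", 0);                // literals accepted with PCSZ
      return store[0] == 'a' ? 0 : 1;
    }
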
diff --git a/storage/connect/value.cpp b/storage/connect/value.cpp
index ced690e77c0..b6c63bdadd3 100644
--- a/storage/connect/value.cpp
+++ b/storage/connect/value.cpp
@@ -1,7 +1,7 @@
/************* Value C++ Functions Source Code File (.CPP) *************/
-/* Name: VALUE.CPP Version 2.6 */
+/* Name: VALUE.CPP Version 2.8 */
/* */
-/* (C) Copyright to the author Olivier BERTRAND 2001-2016 */
+/* (C) Copyright to the author Olivier BERTRAND 2001-2017 */
/* */
/* This file contains the VALUE and derived classes family functions. */
/* These classes contain values of different types. They are used so */
@@ -60,7 +60,7 @@
#define CheckType(V) if (Type != V->GetType()) { \
PGLOBAL& g = Global; \
strcpy(g->Message, MSG(VALTYPE_NOMATCH)); \
- longjmp(g->jumper[g->jump_level], Type); }
+ throw Type;
#else
#define CheckType(V)
#endif
@@ -94,12 +94,12 @@ PSZ strlwr(PSZ s);
/* OUT minus: Set to true if the number is negative */
/* Returned val: The resulting number */
/***********************************************************************/
-ulonglong CharToNumber(char *p, int n, ulonglong maxval,
+ulonglong CharToNumber(const char *p, int n, ulonglong maxval,
bool un, bool *minus, bool *rc)
{
- char *p2;
- uchar c;
- ulonglong val;
+ const char *p2;
+ uchar c;
+ ulonglong val;
if (minus) *minus = false;
if (rc) *rc = false;
@@ -118,7 +118,7 @@ ulonglong CharToNumber(char *p, int n, ulonglong maxval,
maxval++;
if (minus) *minus = true;
} // endif Unsigned
-
+ /* fall through */
case '+':
p++;
break;
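
The `/* fall through */` comment added above in CharToNumber makes the intent explicit: the '-' case is supposed to continue into the '+' case so both skip the sign character. Compilers that warn on implicit fallthrough (e.g. GCC's -Wimplicit-fallthrough) typically accept such a comment, or the C++17 [[fallthrough]] attribute, as a suppression marker. A minimal illustration of the same pattern:

    #include <cstdio>

    // Skip an optional leading sign; '-' intentionally shares the
    // "advance one character" handling of '+'.
    static void SkipSign(const char *&p, bool &minus) {
      minus = false;
      switch (*p) {
        case '-':
          minus = true;
          /* fall through */              // or: [[fallthrough]]; (C++17)
        case '+':
          p++;
          break;
        default:
          break;
      }
    }

    int main() {
      const char *s = "-42";
      bool neg;
      SkipSign(s, neg);
      std::printf("minus=%d rest=%s\n", neg, s);   // minus=1 rest=42
      return 0;
    }
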
@@ -138,9 +138,9 @@ ulonglong CharToNumber(char *p, int n, ulonglong maxval,
/***********************************************************************/
/* GetTypeName: returns the PlugDB internal type name. */
/***********************************************************************/
-PSZ GetTypeName(int type)
+PCSZ GetTypeName(int type)
{
- PSZ name;
+ PCSZ name;
switch (type) {
case TYPE_STRING: name = "CHAR"; break;
@@ -184,9 +184,9 @@ int GetTypeSize(int type, int len)
/***********************************************************************/
/* GetFormatType: returns the FORMAT character(s) according to type. */
/***********************************************************************/
-char *GetFormatType(int type)
+const char *GetFormatType(int type)
{
- char *c = "X";
+ const char *c = "X";
switch (type) {
case TYPE_STRING: c = "C"; break;
@@ -370,7 +370,7 @@ PVAL AllocateValue(PGLOBAL g, void *value, short type, short prec)
/* Allocate a variable Value according to type, length and precision. */
/***********************************************************************/
PVAL AllocateValue(PGLOBAL g, int type, int len, int prec,
- bool uns, PSZ fmt)
+ bool uns, PCSZ fmt)
{
PVAL valp;
@@ -558,6 +558,38 @@ bool VALUE::Compute(PGLOBAL g, PVAL *, int, OPVAL)
return true;
} // end of Compute
+/***********************************************************************/
+/* Make file output of an object value. */
+/***********************************************************************/
+void VALUE::Printf(PGLOBAL g, FILE *f, uint n)
+{
+ char m[64], buf[64];
+
+ memset(m, ' ', n); /* Make margin string */
+ m[n] = '\0';
+
+ if (Null)
+ fprintf(f, "%s<null>\n", m);
+ else
+ fprintf(f, "%s%s%s", GetCharString(buf), "\n", m);
+
+} /* end of Print */
+
+/***********************************************************************/
+/* Make string output of an object value. */
+/***********************************************************************/
+void VALUE::Prints(PGLOBAL g, char *ps, uint z)
+{
+ char *p, buf[64];
+
+ if (Null)
+ p = strcpy(buf, "<null>");
+ else
+ p = GetCharString(buf);
+
+ strncpy(ps, p, z);
+} // end of Print
+
/* -------------------------- Class TYPVAL ---------------------------- */
/***********************************************************************/
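
The new VALUE::Printf and VALUE::Prints above replace the former overloaded Print(FILE*, uint) / Print(char*, uint) debug helpers with distinctly named file and bounded-string variants. A hedged usage sketch built on a mock class with the same shape (not the real VALUE hierarchy; the unused PGLOBAL argument is dropped here), assuming only the signatures visible in this hunk:

    #include <cstdio>
    #include <cstring>

    // Mock mirroring the split above: Printf -> FILE*, Prints -> caller buffer.
    struct MockValue {
      const char *val; bool null;
      void Printf(std::FILE *f, unsigned n) const {   // n = margin width
        for (unsigned i = 0; i < n; i++) std::fputc(' ', f);
        std::fprintf(f, "%s\n", null ? "<null>" : val);
      }
      void Prints(char *ps, unsigned z) const {       // z = buffer size
        std::strncpy(ps, null ? "<null>" : val, z - 1);
        ps[z - 1] = '\0';
      }
    };

    int main() {
      MockValue v{"12.5", false};
      char buf[16];
      v.Printf(stdout, 2);                  // prints "  12.5"
      v.Prints(buf, sizeof(buf));           // buf now holds "12.5"
      return 0;
    }
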
@@ -682,7 +714,7 @@ uchar TYPVAL<uchar>::GetTypedValue(PVAL valp)
/* TYPVAL SetValue: convert chars extracted from a line to TYPE value.*/
/***********************************************************************/
template <class TYPE>
-bool TYPVAL<TYPE>::SetValue_char(char *p, int n)
+bool TYPVAL<TYPE>::SetValue_char(const char *p, int n)
{
bool rc, minus;
ulonglong maxval = MaxVal();
@@ -704,7 +736,7 @@ bool TYPVAL<TYPE>::SetValue_char(char *p, int n)
} // end of SetValue
template <>
-bool TYPVAL<double>::SetValue_char(char *p, int n)
+bool TYPVAL<double>::SetValue_char(const char *p, int n)
{
if (p && n > 0) {
char buf[64];
@@ -732,7 +764,7 @@ bool TYPVAL<double>::SetValue_char(char *p, int n)
/* TYPVAL SetValue: fill a typed value from a string. */
/***********************************************************************/
template <class TYPE>
-void TYPVAL<TYPE>::SetValue_psz(PSZ s)
+void TYPVAL<TYPE>::SetValue_psz(PCSZ s)
{
if (s) {
SetValue_char(s, (int)strlen(s));
@@ -1019,12 +1051,12 @@ TYPE TYPVAL<TYPE>::SafeAdd(TYPE n1, TYPE n2)
if ((n2 > 0) && (n < n1)) {
// Overflow
strcpy(g->Message, MSG(FIX_OVFLW_ADD));
- longjmp(g->jumper[g->jump_level], 138);
- } else if ((n2 < 0) && (n > n1)) {
+ throw 138;
+ } else if ((n2 < 0) && (n > n1)) {
// Underflow
strcpy(g->Message, MSG(FIX_UNFLW_ADD));
- longjmp(g->jumper[g->jump_level], 138);
- } // endif's n2
+ throw 138;
+ } // endif's n2
return n;
} // end of SafeAdd
@@ -1047,12 +1079,12 @@ TYPE TYPVAL<TYPE>::SafeMult(TYPE n1, TYPE n2)
if (n > MinMaxVal(true)) {
// Overflow
strcpy(g->Message, MSG(FIX_OVFLW_TIMES));
- longjmp(g->jumper[g->jump_level], 138);
- } else if (n < MinMaxVal(false)) {
+ throw 138;
+ } else if (n < MinMaxVal(false)) {
// Underflow
strcpy(g->Message, MSG(FIX_UNFLW_TIMES));
- longjmp(g->jumper[g->jump_level], 138);
- } // endif's n2
+ throw 138;
+ } // endif's n2
return (TYPE)n;
} // end of SafeMult
@@ -1170,7 +1202,7 @@ bool TYPVAL<TYPE>::Compall(PGLOBAL g, PVAL *vp, int np, OPVAL op)
/* This function assumes that the format matches the value type. */
/***********************************************************************/
template <class TYPE>
-bool TYPVAL<TYPE>::FormatValue(PVAL vp, char *fmt)
+bool TYPVAL<TYPE>::FormatValue(PVAL vp, PCSZ fmt)
{
char *buf = (char*)vp->GetTo_Val(); // Should be big enough
int n = sprintf(buf, fmt, Tval);
@@ -1192,37 +1224,6 @@ bool TYPVAL<TYPE>::SetConstFormat(PGLOBAL g, FORMAT& fmt)
return false;
} // end of SetConstFormat
-/***********************************************************************/
-/* Make file output of a typed object. */
-/***********************************************************************/
-template <class TYPE>
-void TYPVAL<TYPE>::Print(PGLOBAL g, FILE *f, uint n)
- {
- char m[64], buf[12];
-
- memset(m, ' ', n); /* Make margin string */
- m[n] = '\0';
-
- if (Null)
- fprintf(f, "%s<null>\n", m);
- else
- fprintf(f, strcat(strcat(strcpy(buf, "%s"), Fmt), "\n"), m, Tval);
-
- } /* end of Print */
-
-/***********************************************************************/
-/* Make string output of a int object. */
-/***********************************************************************/
-template <class TYPE>
-void TYPVAL<TYPE>::Print(PGLOBAL g, char *ps, uint z)
- {
- if (Null)
- strcpy(ps, "<null>");
- else
- sprintf(ps, Fmt, Tval);
-
- } /* end of Print */
-
/* -------------------------- Class STRING --------------------------- */
/***********************************************************************/
@@ -1361,25 +1362,25 @@ bool TYPVAL<PSZ>::SetValue_pval(PVAL valp, bool chktype)
/***********************************************************************/
/* STRING SetValue: fill string with chars extracted from a line. */
/***********************************************************************/
-bool TYPVAL<PSZ>::SetValue_char(char *p, int n)
+bool TYPVAL<PSZ>::SetValue_char(const char *cp, int n)
{
bool rc = false;
- if (!p || n == 0) {
+ if (!cp || n == 0) {
Reset();
Null = Nullable;
- } else if (p != Strp) {
- rc = n > Len;
+ } else if (cp != Strp) {
+ const char *p = cp + n - 1;
- if ((n = MY_MIN(n, Len))) {
- strncpy(Strp, p, n);
+ for (p; p >= cp; p--, n--)
+ if (*p && *p != ' ')
+ break;
-// for (p = Strp + n - 1; p >= Strp && (*p == ' ' || *p == '\0'); p--) ;
- for (p = Strp + n - 1; p >= Strp; p--)
- if (*p && *p != ' ')
- break;
+ rc = n > Len;
- *(++p) = '\0';
+ if ((n = MY_MIN(n, Len))) {
+ strncpy(Strp, cp, n);
+ Strp[n] = '\0';
if (trace > 1)
htrc(" Setting string to: '%s'\n", Strp);
@@ -1396,7 +1397,7 @@ bool TYPVAL<PSZ>::SetValue_char(char *p, int n)
/***********************************************************************/
/* STRING SetValue: fill string with another string. */
/***********************************************************************/
-void TYPVAL<PSZ>::SetValue_psz(PSZ s)
+void TYPVAL<PSZ>::SetValue_psz(PCSZ s)
{
if (!s) {
Reset();
@@ -1432,8 +1433,8 @@ void TYPVAL<PSZ>::SetValue(int n)
if (k > Len) {
sprintf(g->Message, MSG(VALSTR_TOO_LONG), buf, Len);
- longjmp(g->jumper[g->jump_level], 138);
- } else
+ throw 138;
+ } else
SetValue_psz(buf);
Null = false;
@@ -1486,8 +1487,8 @@ void TYPVAL<PSZ>::SetValue(longlong n)
if (k > Len) {
sprintf(g->Message, MSG(VALSTR_TOO_LONG), buf, Len);
- longjmp(g->jumper[g->jump_level], 138);
- } else
+ throw 138;
+ } else
SetValue_psz(buf);
Null = false;
@@ -1529,8 +1530,8 @@ void TYPVAL<PSZ>::SetValue(double f)
if (k > Len) {
sprintf(g->Message, MSG(VALSTR_TOO_LONG), buf, Len);
- longjmp(g->jumper[g->jump_level], 138);
- } else
+ throw 138;
+ } else
SetValue_psz(buf);
Null = false;
@@ -1559,7 +1560,7 @@ void TYPVAL<PSZ>::SetValue(uchar c)
/***********************************************************************/
void TYPVAL<PSZ>::SetBinValue(void *p)
{
- SetValue_char((char *)p, Len);
+ SetValue_char((const char *)p, Len);
} // end of SetBinValue
/***********************************************************************/
@@ -1689,7 +1690,7 @@ bool TYPVAL<PSZ>::Compute(PGLOBAL g, PVAL *vp, int np, OPVAL op)
/* constructed from its own value formatted using the fmt format. */
/* This function assumes that the format matches the value type. */
/***********************************************************************/
-bool TYPVAL<PSZ>::FormatValue(PVAL vp, char *fmt)
+bool TYPVAL<PSZ>::FormatValue(PVAL vp, PCSZ fmt)
{
char *buf = (char*)vp->GetTo_Val(); // Should be big enough
int n = sprintf(buf, fmt, Strp);
@@ -1708,6 +1709,18 @@ bool TYPVAL<PSZ>::SetConstFormat(PGLOBAL, FORMAT& fmt)
return false;
} // end of SetConstFormat
+/***********************************************************************/
+/* Make string output of an object value. */
+/***********************************************************************/
+void TYPVAL<PSZ>::Prints(PGLOBAL g, char *ps, uint z)
+{
+ if (Null)
+ strncpy(ps, "null", z);
+ else
+ strcat(strncat(strncpy(ps, "\"", z), Strp, z-2), "\"");
+
+} // end of Prints
+
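The new TYPVAL<PSZ>::Prints above builds the quoted output with nested strncpy/strncat calls. For readers, an equivalent bounded formulation with snprintf (illustrative only, not what the tree uses):

#include <cstdio>

// Emit either null or the value wrapped in double quotes, never writing
// more than z bytes including the terminating NUL.
static void print_string_value(char *ps, unsigned z, const char *strp, bool is_null)
{
  if (is_null)
    snprintf(ps, z, "null");
  else
    snprintf(ps, z, "\"%s\"", strp);
}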
/* -------------------------- Class DECIMAL -------------------------- */
/***********************************************************************/
@@ -1797,102 +1810,6 @@ bool DECVAL::GetBinValue(void *buf, int buflen, bool go)
return false;
} // end of GetBinValue
-#if 0
-/***********************************************************************/
-/* DECIMAL SetValue: copy the value of another Value object. */
-/***********************************************************************/
-bool DECVAL::SetValue_pval(PVAL valp, bool chktype)
- {
- if (chktype && (valp->GetType() != Type || valp->GetSize() > Len))
- return true;
-
- char buf[64];
-
- if (!(Null = valp->IsNull() && Nullable))
- strncpy(Strp, valp->GetCharString(buf), Len);
- else
- Reset();
-
- return false;
- } // end of SetValue_pval
-
-/***********************************************************************/
-/* DECIMAL SetValue: fill string with chars extracted from a line. */
-/***********************************************************************/
-bool DECVAL::SetValue_char(char *p, int n)
- {
- bool rc;
-
- if (p && n > 0) {
- rc = n > Len;
-
- if ((n = MY_MIN(n, Len))) {
- strncpy(Strp, p, n);
-
-// for (p = Strp + n - 1; p >= Strp && (*p == ' ' || *p == '\0'); p--) ;
- for (p = Strp + n - 1; p >= Strp; p--)
- if (*p && *p != ' ')
- break;
-
- *(++p) = '\0';
-
- if (trace > 1)
- htrc(" Setting string to: '%s'\n", Strp);
-
- } else
- Reset();
-
- Null = false;
- } else {
- rc = false;
- Reset();
- Null = Nullable;
- } // endif p
-
- return rc;
- } // end of SetValue_char
-
-/***********************************************************************/
-/* DECIMAL SetValue: fill string with another string. */
-/***********************************************************************/
-void DECVAL::SetValue_psz(PSZ s)
- {
- if (s) {
- strncpy(Strp, s, Len);
- Null = false;
- } else {
- Reset();
- Null = Nullable;
- } // endif s
-
- } // end of SetValue_psz
-
-/***********************************************************************/
-/* DECIMAL SetValue: fill string with a string extracted from a block.*/
-/***********************************************************************/
-void DECVAL::SetValue_pvblk(PVBLK blk, int n)
- {
- // STRBLK's can return a NULL pointer
- SetValue_psz(blk->GetCharValue(n));
- } // end of SetValue_pvblk
-
-/***********************************************************************/
-/* DECIMAL SetBinValue: fill string with chars extracted from a line. */
-/***********************************************************************/
-void DECVAL::SetBinValue(void *p)
- {
- SetValue_char((char *)p, Len);
- } // end of SetBinValue
-
-/***********************************************************************/
-/* DECIMAL GetCharString: get string representation of a char value. */
-/***********************************************************************/
-char *DECVAL::GetCharString(char *p)
- {
- return Strp;
- } // end of GetCharString
-#endif // 0
-
/***********************************************************************/
/* DECIMAL compare value with another Value. */
/***********************************************************************/
@@ -1927,32 +1844,6 @@ int DECVAL::CompareValue(PVAL vp)
return (f > n) ? 1 : (f < n) ? (-1) : 0;
} // end of CompareValue
-#if 0
-/***********************************************************************/
-/* FormatValue: This function set vp (a STRING value) to the string */
-/* constructed from its own value formated using the fmt format. */
-/* This function assumes that the format matches the value type. */
-/***********************************************************************/
-bool DECVAL::FormatValue(PVAL vp, char *fmt)
- {
- char *buf = (char*)vp->GetTo_Val(); // Should be big enough
- int n = sprintf(buf, fmt, Strp);
-
- return (n > vp->GetValLen());
- } // end of FormatValue
-
-/***********************************************************************/
-/* DECIMAL SetFormat function (used to set SELECT output format). */
-/***********************************************************************/
-bool DECVAL::SetConstFormat(PGLOBAL g, FORMAT& fmt)
- {
- fmt.Type[0] = 'C';
- fmt.Length = Len;
- fmt.Prec = 0;
- return false;
- } // end of SetConstFormat
-#endif // 0
-
/* -------------------------- Class BINVAL --------------------------- */
/***********************************************************************/
@@ -2110,7 +2001,7 @@ bool BINVAL::SetValue_pval(PVAL valp, bool chktype)
/***********************************************************************/
/* BINVAL SetValue: fill value with chars extracted from a line. */
/***********************************************************************/
-bool BINVAL::SetValue_char(char *p, int n)
+bool BINVAL::SetValue_char(const char *p, int n)
{
bool rc;
@@ -2131,7 +2022,7 @@ bool BINVAL::SetValue_char(char *p, int n)
/***********************************************************************/
/* BINVAL SetValue: fill value with another string. */
/***********************************************************************/
-void BINVAL::SetValue_psz(PSZ s)
+void BINVAL::SetValue_psz(PCSZ s)
{
if (s) {
Len = MY_MIN(Clen, (signed)strlen(s));
@@ -2357,7 +2248,7 @@ bool BINVAL::IsEqual(PVAL vp, bool chktype)
/* constructed from its own value formatted using the fmt format. */
/* This function assumes that the format matches the value type. */
/***********************************************************************/
-bool BINVAL::FormatValue(PVAL vp, char *fmt)
+bool BINVAL::FormatValue(PVAL vp, PCSZ fmt)
{
char *buf = (char*)vp->GetTo_Val(); // Should be big enough
int n = sprintf(buf, fmt, Len, Binp);
@@ -2381,7 +2272,7 @@ bool BINVAL::SetConstFormat(PGLOBAL, FORMAT& fmt)
/***********************************************************************/
/* DTVAL public constructor for new void values. */
/***********************************************************************/
-DTVAL::DTVAL(PGLOBAL g, int n, int prec, PSZ fmt)
+DTVAL::DTVAL(PGLOBAL g, int n, int prec, PCSZ fmt)
: TYPVAL<int>((int)0, TYPE_DATE)
{
if (!fmt) {
@@ -2410,7 +2301,7 @@ DTVAL::DTVAL(int n) : TYPVAL<int>(n, TYPE_DATE)
/***********************************************************************/
/* Set format so formatted dates can be converted on input/output. */
/***********************************************************************/
-bool DTVAL::SetFormat(PGLOBAL g, PSZ fmt, int len, int year)
+bool DTVAL::SetFormat(PGLOBAL g, PCSZ fmt, int len, int year)
{
Pdtp = MakeDateFormat(g, fmt, true, true, (year > 9999) ? 1 : 0);
Sdate = (char*)PlugSubAlloc(g, NULL, len + 1);
@@ -2668,7 +2559,11 @@ bool DTVAL::SetValue_pval(PVAL valp, bool chktype)
ndv = ExtractDate(valp->GetCharValue(), Pdtp, DefYear, dval);
MakeDate(NULL, dval, ndv);
- } else
+ } else if (valp->GetType() == TYPE_BIGINT &&
+ !(valp->GetBigintValue() % 1000)) {
+ // Assuming that this timestamp is in milliseconds
+ Tval = valp->GetBigintValue() / 1000;
+ } else
Tval = valp->GetIntValue();
} else
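The new DTVAL branch above applies a heuristic: a BIGINT source value that is an exact multiple of 1000 is assumed to be a millisecond timestamp and is scaled down to seconds; anything else is used as-is. A genuine seconds value that happens to end in 000 would also be scaled, which is the inherent trade-off of the heuristic. In isolation (illustrative):

// 1517212800000 % 1000 == 0, so it is treated as milliseconds and becomes
// 1517212800 (2018-01-29 08:00:00 UTC); 1517212800 itself ends in 800 and
// is therefore passed through unchanged.
long long to_unix_seconds(long long v)
{
  return (v % 1000 == 0) ? v / 1000 : v;
}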
@@ -2682,14 +2577,14 @@ bool DTVAL::SetValue_pval(PVAL valp, bool chktype)
/***********************************************************************/
/* SetValue: convert chars extracted from a line to date value. */
/***********************************************************************/
-bool DTVAL::SetValue_char(char *p, int n)
+bool DTVAL::SetValue_char(const char *p, int n)
{
bool rc= 0;
if (Pdtp) {
- char *p2;
- int ndv;
- int dval[6];
+ const char *p2;
+ int ndv;
+ int dval[6];
if (n > 0) {
// Trim trailing blanks
@@ -2721,11 +2616,11 @@ bool DTVAL::SetValue_char(char *p, int n)
/***********************************************************************/
/* SetValue: convert a char string to date value. */
/***********************************************************************/
-void DTVAL::SetValue_psz(PSZ p)
+void DTVAL::SetValue_psz(PCSZ p)
{
if (Pdtp) {
- int ndv;
- int dval[6];
+ int ndv;
+ int dval[6];
strncpy(Sdate, p, Len);
Sdate[Len] = '\0';
@@ -2815,8 +2710,10 @@ char *DTVAL::ShowValue(char *buf, int len)
strncat(p, "Error", m);
} // endif n
- } else
- p = ""; // DEFAULT VALUE ???
+ } else {
+ p = buf;
+ *p = '\0'; // DEFAULT VALUE ???
+ } // endif Null
return p;
} else
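The ShowValue fix above stops returning an empty string literal and instead returns the caller-supplied buffer with a NUL written into it. The difference matters because callers may append to or otherwise modify the returned pointer; in isolation (illustrative names):

// Returning a literal hands the caller read-only storage; writing through
// that pointer is undefined behaviour. Returning the caller's buffer keeps
// the result writable.
char *default_value_bad()           { return (char*)""; }  // must never be modified
char *default_value_good(char *buf) { *buf = '\0'; return buf; }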
@@ -2881,7 +2778,7 @@ bool DTVAL::WeekNum(PGLOBAL g, int& nval)
/* constructed from its own value formatted using the fmt format. */
/* This function assumes that the format matches the value type. */
/***********************************************************************/
-bool DTVAL::FormatValue(PVAL vp, char *fmt)
+bool DTVAL::FormatValue(PVAL vp, PCSZ fmt)
{
char *buf = (char*)vp->GetTo_Val(); // Should be big enough
struct tm tm, *ptm = GetGmTime(&tm);
diff --git a/storage/connect/value.h b/storage/connect/value.h
index 14a568c3549..2754c761815 100644
--- a/storage/connect/value.h
+++ b/storage/connect/value.h
@@ -1,7 +1,7 @@
/**************** Value H Declares Source Code File (.H) ***************/
-/* Name: VALUE.H Version 2.2 */
+/* Name: VALUE.H Version 2.3 */
/* */
-/* (C) Copyright to the author Olivier BERTRAND 2001-2016 */
+/* (C) Copyright to the author Olivier BERTRAND 2001-2017 */
/* */
/* This file contains the VALUE and derived classes declares. */
/***********************************************************************/
@@ -40,14 +40,14 @@ typedef struct _datpar *PDTP; // For DTVAL
/* Utilities used to test types and to allocated values. */
/***********************************************************************/
// Exported functions
-DllExport PSZ GetTypeName(int);
+DllExport PCSZ GetTypeName(int);
DllExport int GetTypeSize(int, int);
#ifdef ODBC_SUPPORT
/* This function is exported for use in OEM table type DLLs */
DllExport int TranslateSQLType(int stp, int prec,
int& len, char& v, bool& w);
#endif
-DllExport char *GetFormatType(int);
+DllExport const char *GetFormatType(int);
DllExport int GetFormatType(char);
DllExport bool IsTypeChar(int type);
DllExport bool IsTypeNum(int type);
@@ -55,8 +55,8 @@ DllExport int ConvertType(int, int, CONV, bool match = false);
DllExport PVAL AllocateValue(PGLOBAL, void *, short, short = 2);
DllExport PVAL AllocateValue(PGLOBAL, PVAL, int = TYPE_VOID, int = 0);
DllExport PVAL AllocateValue(PGLOBAL, int, int len = 0, int prec = 0,
- bool uns = false, PSZ fmt = NULL);
-DllExport ulonglong CharToNumber(char *, int, ulonglong, bool,
+ bool uns = false, PCSZ fmt = NULL);
+DllExport ulonglong CharToNumber(PCSZ, int, ulonglong, bool,
bool *minus = NULL, bool *rc = NULL);
DllExport BYTE OpBmp(PGLOBAL g, OPVAL opc);
@@ -100,8 +100,8 @@ class DllExport VALUE : public BLOCK {
// Methods
virtual bool SetValue_pval(PVAL valp, bool chktype = false) = 0;
- virtual bool SetValue_char(char *p, int n) = 0;
- virtual void SetValue_psz(PSZ s) = 0;
+ virtual bool SetValue_char(const char *p, int n) = 0;
+ virtual void SetValue_psz(PCSZ s) = 0;
virtual void SetValue_bool(bool) {assert(FALSE);}
virtual int CompareValue(PVAL vp) = 0;
virtual BYTE TestValue(PVAL vp);
@@ -121,7 +121,9 @@ class DllExport VALUE : public BLOCK {
virtual char *GetCharString(char *p) = 0;
virtual bool IsEqual(PVAL vp, bool chktype) = 0;
virtual bool Compute(PGLOBAL g, PVAL *vp, int np, OPVAL op);
- virtual bool FormatValue(PVAL vp, char *fmt) = 0;
+ virtual bool FormatValue(PVAL vp, PCSZ fmt) = 0;
+ virtual void Printf(PGLOBAL g, FILE *, uint);
+ virtual void Prints(PGLOBAL g, char *ps, uint z);
/**
Set value from a non-aligned in-memory value in the machine byte order.
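Throughout this patch the Print(PGLOBAL, FILE*, uint) and Print(PGLOBAL, char*, uint) overloads are renamed to Printf and Prints. The diff does not state the motivation, but one practical benefit of distinct names is sketched below with illustrative types: overriding only one of two same-named virtual overloads hides the other in the derived class.

#include <cstdio>

struct Base {
  virtual ~Base() {}
  virtual void Print(FILE *f)  { fputs("file form\n", f); }
  virtual void Print(char *ps) { ps[0] = '\0'; }
};

struct Derived : Base {
  // Overriding only one overload hides the other for lookups through
  // Derived: d.Print(stdout) fails to compile unless 'using Base::Print;'
  // is added. Distinct names such as Printf/Prints cannot hide each other.
  virtual void Print(char *ps) { ps[0] = 'D'; ps[1] = '\0'; }
};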
@@ -211,8 +213,8 @@ class DllExport TYPVAL : public VALUE {
// Methods
virtual bool SetValue_pval(PVAL valp, bool chktype);
- virtual bool SetValue_char(char *p, int n);
- virtual void SetValue_psz(PSZ s);
+ virtual bool SetValue_char(const char *p, int n);
+ virtual void SetValue_psz(PCSZ s);
virtual void SetValue_bool(bool b) {Tval = (b) ? 1 : 0;}
virtual int CompareValue(PVAL vp);
virtual void SetValue(char c) {Tval = (TYPE)c; Null = false;}
@@ -232,9 +234,7 @@ class DllExport TYPVAL : public VALUE {
virtual bool IsEqual(PVAL vp, bool chktype);
virtual bool Compute(PGLOBAL g, PVAL *vp, int np, OPVAL op);
virtual bool SetConstFormat(PGLOBAL, FORMAT&);
- virtual bool FormatValue(PVAL vp, char *fmt);
- virtual void Print(PGLOBAL g, FILE *, uint);
- virtual void Print(PGLOBAL g, char *, uint);
+ virtual bool FormatValue(PVAL vp, PCSZ fmt);
protected:
static TYPE MinMaxVal(bool b);
@@ -287,8 +287,8 @@ class DllExport TYPVAL<PSZ>: public VALUE {
// Methods
virtual bool SetValue_pval(PVAL valp, bool chktype);
- virtual bool SetValue_char(char *p, int n);
- virtual void SetValue_psz(PSZ s);
+ virtual bool SetValue_char(const char *p, int n);
+ virtual void SetValue_psz(PCSZ s);
virtual void SetValue_pvblk(PVBLK blk, int n);
virtual void SetValue(char c);
virtual void SetValue(uchar c);
@@ -306,8 +306,9 @@ class DllExport TYPVAL<PSZ>: public VALUE {
virtual char *GetCharString(char *p);
virtual bool IsEqual(PVAL vp, bool chktype);
virtual bool Compute(PGLOBAL g, PVAL *vp, int np, OPVAL op);
- virtual bool FormatValue(PVAL vp, char *fmt);
+ virtual bool FormatValue(PVAL vp, PCSZ fmt);
virtual bool SetConstFormat(PGLOBAL, FORMAT&);
+ virtual void Prints(PGLOBAL g, char *ps, uint z);
protected:
// Members
@@ -371,8 +372,8 @@ class DllExport BINVAL: public VALUE {
// Methods
virtual bool SetValue_pval(PVAL valp, bool chktype);
- virtual bool SetValue_char(char *p, int n);
- virtual void SetValue_psz(PSZ s);
+ virtual bool SetValue_char(const char *p, int n);
+ virtual void SetValue_psz(PCSZ s);
virtual void SetValue_pvblk(PVBLK blk, int n);
virtual void SetValue(char c);
virtual void SetValue(uchar c);
@@ -389,7 +390,7 @@ class DllExport BINVAL: public VALUE {
virtual char *ShowValue(char *buf, int);
virtual char *GetCharString(char *p);
virtual bool IsEqual(PVAL vp, bool chktype);
- virtual bool FormatValue(PVAL vp, char *fmt);
+ virtual bool FormatValue(PVAL vp, PCSZ fmt);
virtual bool SetConstFormat(PGLOBAL, FORMAT&);
protected:
@@ -405,18 +406,18 @@ class DllExport BINVAL: public VALUE {
class DllExport DTVAL : public TYPVAL<int> {
public:
// Constructors
- DTVAL(PGLOBAL g, int n, int p, PSZ fmt);
+ DTVAL(PGLOBAL g, int n, int p, PCSZ fmt);
DTVAL(int n);
// Implementation
virtual bool SetValue_pval(PVAL valp, bool chktype);
- virtual bool SetValue_char(char *p, int n);
- virtual void SetValue_psz(PSZ s);
+ virtual bool SetValue_char(const char *p, int n);
+ virtual void SetValue_psz(PCSZ s);
virtual void SetValue_pvblk(PVBLK blk, int n);
virtual char *GetCharString(char *p);
virtual char *ShowValue(char *buf, int);
- virtual bool FormatValue(PVAL vp, char *fmt);
- bool SetFormat(PGLOBAL g, PSZ fmt, int len, int year = 0);
+ virtual bool FormatValue(PVAL vp, PCSZ fmt);
+ bool SetFormat(PGLOBAL g, PCSZ fmt, int len, int year = 0);
bool SetFormat(PGLOBAL g, PVAL valp);
bool IsFormatted(void) {return Pdtp != NULL;}
bool MakeTime(struct tm *ptm);
diff --git a/storage/connect/xindex.cpp b/storage/connect/xindex.cpp
index 15fb71ab88a..3e4db8080ae 100755
--- a/storage/connect/xindex.cpp
+++ b/storage/connect/xindex.cpp
@@ -1,7 +1,7 @@
/***************** Xindex C++ Class Xindex Code (.CPP) *****************/
-/* Name: XINDEX.CPP Version 2.9 */
+/* Name: XINDEX.CPP Version 3.0 */
/* */
-/* (C) Copyright to the author Olivier BERTRAND 2004-2015 */
+/* (C) Copyright to the author Olivier BERTRAND 2004-2017 */
/* */
/* This file contains the class XINDEX implementation code. */
/***********************************************************************/
@@ -181,7 +181,7 @@ XXBASE::XXBASE(PTDBDOS tbxp, bool b) : CSORT(b),
/***********************************************************************/
/* Make file output of XINDEX contents. */
/***********************************************************************/
-void XXBASE::Print(PGLOBAL, FILE *f, uint n)
+void XXBASE::Printf(PGLOBAL, FILE *f, uint n)
{
char m[64];
@@ -193,7 +193,7 @@ void XXBASE::Print(PGLOBAL, FILE *f, uint n)
/***********************************************************************/
/* Make string output of XINDEX contents. */
/***********************************************************************/
-void XXBASE::Print(PGLOBAL, char *ps, uint z)
+void XXBASE::Prints(PGLOBAL, char *ps, uint z)
{
*ps = '\0';
strncat(ps, "Xindex", z);
@@ -446,8 +446,8 @@ bool XINDEX::Make(PGLOBAL g, PIXDEF sxp)
#if 0
if (!dup->Step) {
strcpy(g->Message, MSG(QUERY_CANCELLED));
- longjmp(g->jumper[g->jump_level], 99);
- } // endif Step
+ throw 99;
+ } // endif Step
#endif // 0
/*******************************************************************/
@@ -464,7 +464,7 @@ bool XINDEX::Make(PGLOBAL g, PIXDEF sxp)
if (ApplyFilter(g, filp))
break;
- // passthru
+ // fall through
case RC_NF:
continue;
case RC_EF:
@@ -819,7 +819,7 @@ bool XINDEX::Reorder(PGLOBAL g __attribute__((unused)))
/***********************************************************************/
bool XINDEX::SaveIndex(PGLOBAL g, PIXDEF sxp)
{
- char *ftype;
+ PCSZ ftype;
char fn[_MAX_PATH];
int n[NZ], nof = (Mul) ? (Ndif + 1) : 0;
int id = -1, size = 0;
@@ -948,7 +948,7 @@ bool XINDEX::Init(PGLOBAL g)
/* Table will be accessed through an index table. */
/* If sorting is required, this will be done later. */
/*********************************************************************/
- char *ftype;
+ PCSZ ftype;
char fn[_MAX_PATH];
int k, n, nv[NZ], id = -1;
bool estim = false;
@@ -965,7 +965,7 @@ bool XINDEX::Init(PGLOBAL g)
// For DBF tables, Cardinality includes bad or soft deleted lines
// that are not included in the index, and can be larger than the
// index size.
- estim = (Tdbp->Ftype == RECFM_DBF);
+ estim = (Tdbp->Ftype == RECFM_DBF || Tdbp->Txfp->GetAmType() == TYPE_AM_ZIP);
n = Tdbp->Cardinality(g); // n is exact table size
} else {
// Variable table not optimized
@@ -1412,7 +1412,7 @@ err:
/***********************************************************************/
bool XINDEX::GetAllSizes(PGLOBAL g,/* int &ndif,*/ int &numk)
{
- char *ftype;
+ PCSZ ftype;
char fn[_MAX_PATH];
int nv[NZ], id = -1; // n
//bool estim = false;
@@ -2320,9 +2320,9 @@ XFILE::XFILE(void) : XLOAD()
/***********************************************************************/
bool XFILE::Open(PGLOBAL g, char *filename, int id, MODE mode)
{
- char *pmod;
- bool rc;
- IOFF noff[MAX_INDX];
+ PCSZ pmod;
+ bool rc;
+ IOFF noff[MAX_INDX];
/*********************************************************************/
/* Open the index file according to mode. */
@@ -3032,7 +3032,7 @@ bool KXYCOL::Init(PGLOBAL g, PCOL colp, int n, bool sm, int kln)
return true;
Klen = Valp->GetClen();
- Keys.Size = n * Klen;
+ Keys.Size = (size_t)n * (size_t)Klen;
if (!PlgDBalloc(g, NULL, Keys)) {
sprintf(g->Message, MSG(KEY_ALLOC_ERROR), Klen, n);
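The cast above widens both operands before the multiplication, so the key-array size is computed in size_t rather than int. With illustrative numbers on an LP64 build (n is an int parameter of KXYCOL::Init and Klen an int member):

#include <cstddef>

// Without the casts the product is computed in int and can overflow before
// it is ever stored into the size_t Keys.Size.
size_t keys_size_narrow(int n, int klen) { return n * klen; }                  // int product, may overflow
size_t keys_size_wide(int n, int klen)   { return (size_t)n * (size_t)klen; }  // 64-bit product

// keys_size_wide(40000000, 64) == 2560000000, which does not fit in a
// 32-bit int, so keys_size_narrow() would already have overflowed.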
diff --git a/storage/connect/xindex.h b/storage/connect/xindex.h
index 2d10d72722e..339d7e68b75 100644
--- a/storage/connect/xindex.h
+++ b/storage/connect/xindex.h
@@ -200,8 +200,8 @@ class DllExport XXBASE : public CSORT, public BLOCK {
void FreeIndex(void) {PlgDBfree(Index);}
// Methods
- virtual void Print(PGLOBAL g, FILE *f, uint n);
- virtual void Print(PGLOBAL g, char *ps, uint z);
+ virtual void Printf(PGLOBAL g, FILE *f, uint n);
+ virtual void Prints(PGLOBAL g, char *ps, uint z);
virtual bool Init(PGLOBAL g) = 0;
virtual bool Make(PGLOBAL g, PIXDEF sxp) = 0;
#if defined(XMAP)
diff --git a/storage/connect/xobject.cpp b/storage/connect/xobject.cpp
index a0b7849543d..205edc12d0c 100644
--- a/storage/connect/xobject.cpp
+++ b/storage/connect/xobject.cpp
@@ -1,7 +1,7 @@
/************ Xobject C++ Functions Source Code File (.CPP) ************/
-/* Name: XOBJECT.CPP Version 2.4 */
+/* Name: XOBJECT.CPP Version 2.5 */
/* */
-/* (C) Copyright to the author Olivier BERTRAND 1998-2014 */
+/* (C) Copyright to the author Olivier BERTRAND 1998-2017 */
/* */
/* This file contains base XOBJECT class functions. */
/* Also here is the implementation of the CONSTANT class. */
@@ -84,7 +84,7 @@ double XOBJECT::GetFloatValue(void)
CONSTANT::CONSTANT(PGLOBAL g, void *value, short type)
{
if (!(Value = AllocateValue(g, value, (int)type)))
- longjmp(g->jumper[g->jump_level], TYPE_CONST);
+ throw TYPE_CONST;
Constant = true;
} // end of CONSTANT constructor
@@ -95,7 +95,7 @@ CONSTANT::CONSTANT(PGLOBAL g, void *value, short type)
CONSTANT::CONSTANT(PGLOBAL g, int n)
{
if (!(Value = AllocateValue(g, &n, TYPE_INT)))
- longjmp(g->jumper[g->jump_level], TYPE_CONST);
+ throw TYPE_CONST;
Constant = true;
} // end of CONSTANT constructor
@@ -117,7 +117,7 @@ void CONSTANT::Convert(PGLOBAL g, int newtype)
{
if (Value->GetType() != newtype)
if (!(Value = AllocateValue(g, Value, newtype)))
- longjmp(g->jumper[g->jump_level], TYPE_CONST);
+ throw TYPE_CONST;
} // end of Convert
@@ -173,17 +173,17 @@ bool CONSTANT::Rephrase(PGLOBAL g, PSZ work)
/***********************************************************************/
/* Make file output of a constant object. */
/***********************************************************************/
-void CONSTANT::Print(PGLOBAL g, FILE *f, uint n)
+void CONSTANT::Printf(PGLOBAL g, FILE *f, uint n)
{
- Value->Print(g, f, n);
+ Value->Printf(g, f, n);
} /* end of Print */
/***********************************************************************/
/* Make string output of a constant object. */
/***********************************************************************/
-void CONSTANT::Print(PGLOBAL g, char *ps, uint z)
+void CONSTANT::Prints(PGLOBAL g, char *ps, uint z)
{
- Value->Print(g, ps, z);
+ Value->Prints(g, ps, z);
} /* end of Print */
/* -------------------------- Class STRING --------------------------- */
@@ -192,7 +192,7 @@ void CONSTANT::Print(PGLOBAL g, char *ps, uint z)
/* STRING public constructor for new char values. Alloc Size must be */
/* calculated because PlugSubAlloc rounds up size to multiple of 8. */
/***********************************************************************/
-STRING::STRING(PGLOBAL g, uint n, char *str)
+STRING::STRING(PGLOBAL g, uint n, PCSZ str)
{
G = g;
Length = (str) ? strlen(str) : 0;
@@ -205,10 +205,12 @@ STRING::STRING(PGLOBAL g, uint n, char *str)
Next = GetNext();
Size = Next - Strp;
+ Trc = false;
} else {
// This should normally never happen
Next = NULL;
Size = 0;
+ Trc = true;
} // endif Strp
} // end of STRING constructor
@@ -229,6 +231,7 @@ char *STRING::Realloc(uint len)
if (!p) {
// No more room in Sarea; this is very unlikely
strcpy(G->Message, "No more room in work area");
+ Trc = true;
return NULL;
} // endif p
@@ -243,7 +246,7 @@ char *STRING::Realloc(uint len)
/***********************************************************************/
/* Set a STRING new PSZ value. */
/***********************************************************************/
-bool STRING::Set(PSZ s)
+bool STRING::Set(PCSZ s)
{
if (!s)
return false;
@@ -333,9 +336,9 @@ bool STRING::Append(const char *s, uint ln, bool nq)
} // end of Append
/***********************************************************************/
-/* Append a PSZ to a STRING. */
+/* Append a PCSZ to a STRING. */
/***********************************************************************/
-bool STRING::Append(PSZ s)
+bool STRING::Append(PCSZ s)
{
if (!s)
return false;
@@ -392,11 +395,11 @@ bool STRING::Append(char c)
/***********************************************************************/
/* Append a quoted PSZ to a STRING. */
/***********************************************************************/
-bool STRING::Append_quoted(PSZ s)
+bool STRING::Append_quoted(PCSZ s)
{
bool b = Append('\'');
- if (s) for (char *p = s; !b && *p; p++)
+ if (s) for (const char *p = s; !b && *p; p++)
switch (*p) {
case '\'':
case '\\':
@@ -405,7 +408,7 @@ bool STRING::Append_quoted(PSZ s)
case '\r':
case '\b':
case '\f': b |= Append('\\');
- // passthru
+ // fall through
default:
b |= Append(*p);
break;
diff --git a/storage/connect/xobject.h b/storage/connect/xobject.h
index 8f6c23c4aeb..bc5912d3054 100644
--- a/storage/connect/xobject.h
+++ b/storage/connect/xobject.h
@@ -112,8 +112,8 @@ class DllExport CONSTANT : public XOBJECT {
{return Value->SetConstFormat(g, fmt);}
void Convert(PGLOBAL g, int newtype);
void SetValue(PVAL vp) {Value = vp;}
- virtual void Print(PGLOBAL g, FILE *, uint);
- virtual void Print(PGLOBAL g, char *, uint);
+ virtual void Printf(PGLOBAL g, FILE *, uint);
+ virtual void Prints(PGLOBAL g, char *, uint);
}; // end of class CONSTANT
/***********************************************************************/
@@ -123,24 +123,25 @@ class DllExport CONSTANT : public XOBJECT {
class DllExport STRING : public BLOCK {
public:
// Constructor
- STRING(PGLOBAL g, uint n, PSZ str = NULL);
+ STRING(PGLOBAL g, uint n, PCSZ str = NULL);
// Implementation
inline int GetLength(void) {return (int)Length;}
inline void SetLength(uint n) {Length = n;}
inline PSZ GetStr(void) {return Strp;}
inline uint32 GetSize(void) {return Size;}
+ inline bool IsTruncated(void) {return Trc;}
// Methods
inline void Reset(void) {*Strp = 0;}
- bool Set(PSZ s);
+ bool Set(PCSZ s);
bool Set(char *s, uint n);
bool Append(const char *s, uint ln, bool nq = false);
- bool Append(PSZ s);
+ bool Append(PCSZ s);
bool Append(STRING &str);
bool Append(char c);
bool Resize(uint n);
- bool Append_quoted(PSZ s);
+ bool Append_quoted(PCSZ s);
inline void Trim(void) {(void)Resize(Length + 1);}
inline void Chop(void) {if (Length) Strp[--Length] = 0;}
inline void RepLast(char c) {if (Length) Strp[Length-1] = c;}
@@ -156,6 +157,7 @@ class DllExport STRING : public BLOCK {
PSZ Strp; // The char string
uint Length; // String length
uint Size; // Allocated size
+ bool Trc; // When truncated
char *Next; // Next alloc position
}; // end of class STRING
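The new Trc member and IsTruncated() accessor let callers of the STRING work-area builder detect that an allocation failure left the string shorter than intended, instead of silently emitting a shortened statement. A usage sketch inside the CONNECT engine (the query text and error handling here are made up for illustration; g is the usual PGLOBAL):

static bool build_where(PGLOBAL g)
{
  STRING stmt(g, 1024, "SELECT * FROM t1 WHERE ");
  bool err = stmt.Append("id = 10");      // true means the append failed

  if (err || stmt.IsTruncated()) {
    strcpy(g->Message, "Generated statement was truncated");
    return true;                          // error convention: true = failure
  }

  /* ... use stmt.GetStr() ... */
  return false;
}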
diff --git a/storage/connect/xtable.h b/storage/connect/xtable.h
index 4aeea05946a..ebef7a2549a 100644
--- a/storage/connect/xtable.h
+++ b/storage/connect/xtable.h
@@ -33,29 +33,6 @@ class CMD : public BLOCK {
char *Cmd;
}; // end of class CMD
-#if 0
-// Condition filter structure
-class CONDFIL : public BLOCK {
- public:
- // Constructor
- CONDFIL(const Item *cond, uint idx, AMT type)
- {
- Cond = cond; Idx = idx; Type = type; Op = OP_XX;
- Cmds = NULL; All = true; Body = NULL, Having = NULL;
- }
-
- // Members
- const Item *Cond;
- AMT Type;
- uint Idx;
- OPVAL Op;
- PCMD Cmds;
- bool All;
- char *Body;
- char *Having;
-}; // end of class CONDFIL
-#endif // 0
-
typedef class EXTCOL *PEXTCOL;
typedef class CONDFIL *PCFIL;
typedef class TDBCAT *PTDBCAT;
@@ -84,7 +61,6 @@ class DllExport TDB: public BLOCK { // Table Descriptor Block.
inline PFIL GetFilter(void) {return To_Filter;}
inline PCOL GetSetCols(void) {return To_SetCols;}
inline void SetSetCols(PCOL colp) {To_SetCols = colp;}
- inline void SetFilter(PFIL fp) {To_Filter = fp;}
inline void SetOrig(PTDB txp) {To_Orig = txp;}
inline void SetUse(TUSE n) {Use = n;}
inline void SetCondFil(PCFIL cfp) {To_CondFil = cfp;}
@@ -94,11 +70,14 @@ class DllExport TDB: public BLOCK { // Table Descriptor Block.
inline void SetColumns(PCOL colp) {Columns = colp;}
inline void SetDegree(int degree) {Degree = degree;}
inline void SetMode(MODE mode) {Mode = mode;}
+ inline const Item *GetCond(void) {return Cond;}
+ inline void SetCond(const Item *cond) {Cond = cond;}
// Properties
virtual AMT GetAmType(void) {return TYPE_AM_ERROR;}
virtual bool IsRemote(void) {return false;}
virtual bool IsIndexed(void) {return false;}
+ virtual void SetFilter(PFIL fp) {To_Filter = fp;}
virtual int GetTdb_No(void) {return Tdb_No;}
virtual PTDB GetNext(void) {return Next;}
virtual PCATLG GetCat(void) {return NULL;}
@@ -110,7 +89,7 @@ class DllExport TDB: public BLOCK { // Table Descriptor Block.
virtual bool IsSpecial(PSZ name);
virtual bool IsReadOnly(void) {return Read_Only;}
virtual bool IsView(void) {return FALSE;}
- virtual PSZ GetPath(void);
+ virtual PCSZ GetPath(void);
virtual RECFM GetFtype(void) {return RECFM_NAF;}
virtual bool GetBlockValues(PGLOBAL) { return false; }
virtual int Cardinality(PGLOBAL) {return 0;}
@@ -119,19 +98,20 @@ class DllExport TDB: public BLOCK { // Table Descriptor Block.
virtual int GetMaxSize(PGLOBAL) = 0;
virtual int GetProgMax(PGLOBAL) = 0;
virtual int GetProgCur(void) {return GetRecpos();}
- virtual PSZ GetFile(PGLOBAL) {return "Not a file";}
- virtual void SetFile(PGLOBAL, PSZ) {}
+ virtual PCSZ GetFile(PGLOBAL) {return "Not a file";}
+ virtual void SetFile(PGLOBAL, PCSZ) {}
virtual void ResetDB(void) {}
virtual void ResetSize(void) {MaxSize = -1;}
virtual int RowNumber(PGLOBAL g, bool b = false);
+ virtual bool CanBeFiltered(void) {return true;}
virtual PTDB Duplicate(PGLOBAL) {return NULL;}
virtual PTDB Clone(PTABS) {return this;}
virtual PTDB Copy(PTABS t);
virtual void PrintAM(FILE *f, char *m)
{fprintf(f, "%s AM(%d)\n", m, GetAmType());}
- virtual void Print(PGLOBAL g, FILE *f, uint n);
- virtual void Print(PGLOBAL g, char *ps, uint z);
- virtual PSZ GetServer(void) = 0;
+ virtual void Printf(PGLOBAL g, FILE *f, uint n);
+ virtual void Prints(PGLOBAL g, char *ps, uint z);
+ virtual PCSZ GetServer(void) = 0;
virtual int GetBadLines(void) {return 0;}
virtual CHARSET_INFO *data_charset(void);
@@ -157,6 +137,7 @@ class DllExport TDB: public BLOCK { // Table Descriptor Block.
TUSE Use;
PFIL To_Filter;
PCFIL To_CondFil; // To condition filter structure
+ const Item *Cond; // The condition used to make filters
static int Tnum; // Used to generate Tdb_no's
const int Tdb_No; // GetTdb_No() is always 0 for OPJOIN
PTDB Next; // Next in linearized queries
@@ -187,9 +168,6 @@ class DllExport TDBASE : public TDB {
// Implementation
inline int GetKnum(void) {return Knum;}
-//inline PTABDEF GetDef(void) {return To_Def;}
-//inline PCOL GetSetCols(void) {return To_SetCols;}
-//inline void SetSetCols(PCOL colp) {To_SetCols = colp;}
inline void SetKey_Col(PCOL *cpp) {To_Key_Col = cpp;}
inline void SetXdp(PIXDEF xdp) {To_Xdp = xdp;}
inline void SetKindex(PKXBASE kxp) {To_Kindex = kxp;}
@@ -201,36 +179,14 @@ class DllExport TDBASE : public TDB {
// Methods
virtual bool IsUsingTemp(PGLOBAL) {return false;}
-//virtual bool IsIndexed(void) {return false;}
-//virtual bool IsSpecial(PSZ name);
virtual PCATLG GetCat(void);
-//virtual PSZ GetPath(void);
virtual void PrintAM(FILE *f, char *m);
-//virtual RECFM GetFtype(void) {return RECFM_NAF;}
-//virtual int GetAffectedRows(void) {return -1;}
-//virtual int GetRecpos(void) = 0;
-//virtual bool SetRecpos(PGLOBAL g, int recpos);
-//virtual bool IsReadOnly(void) {return Read_Only;}
-//virtual bool IsView(void) {return FALSE;}
-//virtual CHARSET_INFO *data_charset(void);
virtual int GetProgMax(PGLOBAL g) {return GetMaxSize(g);}
-//virtual int GetProgCur(void) {return GetRecpos();}
-//virtual PSZ GetFile(PGLOBAL) {return "Not a file";}
-//virtual int GetRemote(void) {return 0;}
-//virtual void SetFile(PGLOBAL, PSZ) {}
-//virtual void ResetDB(void) {}
-//virtual void ResetSize(void) {MaxSize = -1;}
virtual void RestoreNrec(void) {}
virtual int ResetTableOpt(PGLOBAL g, bool dop, bool dox);
- virtual PSZ GetServer(void) {return "Current";}
+ virtual PCSZ GetServer(void) {return "Current";}
// Database routines
-//virtual PCOL ColDB(PGLOBAL g, PSZ name, int num);
-//virtual PCOL MakeCol(PGLOBAL, PCOLDEF, PCOL, int)
-// {assert(false); return NULL;}
-//virtual PCOL InsertSpecialColumn(PCOL colp);
-//virtual PCOL InsertSpcBlk(PGLOBAL g, PCOLDEF cdp);
-//virtual void MarkDB(PGLOBAL g, PTDB tdb2);
virtual int MakeIndex(PGLOBAL g, PIXDEF, bool)
{strcpy(g->Message, "Remote index"); return RC_INFO;}
virtual bool ReadKey(PGLOBAL, OPVAL, const key_range *)
@@ -241,18 +197,12 @@ class DllExport TDBASE : public TDB {
"This function should not be called for this table"); return true;}
// Members
-//PTABDEF To_Def; // Points to catalog description block
PXOB *To_Link; // Points to column of previous relations
PCOL *To_Key_Col; // Points to key columns in current file
PKXBASE To_Kindex; // Points to table key index
PIXDEF To_Xdp; // To the index definition block
-//PCOL To_SetCols; // Points to updated columns
RECFM Ftype; // File type: 0-var 1-fixed 2-binary (VCT)
-//int MaxSize; // Max size in number of lines
int Knum; // Size of key arrays
-//bool Read_Only; // True for read only tables
-//const CHARSET_INFO *m_data_charset;
-//const char *csname; // Table charset name
}; // end of class TDBASE
/***********************************************************************/
diff --git a/storage/csv/ha_tina.cc b/storage/csv/ha_tina.cc
index 363fb993fe7..f00fe0e201f 100644
--- a/storage/csv/ha_tina.cc
+++ b/storage/csv/ha_tina.cc
@@ -300,7 +300,7 @@ static int read_meta_file(File meta_file, ha_rows *rows)
mysql_file_seek(meta_file, 0, MY_SEEK_SET, MYF(0));
if (mysql_file_read(meta_file, (uchar*)meta_buffer, META_BUFFER_SIZE, 0)
!= META_BUFFER_SIZE)
- DBUG_RETURN(HA_ERR_CRASHED_ON_USAGE);
+ DBUG_RETURN(my_errno= HA_ERR_CRASHED_ON_USAGE);
/*
Parse out the meta data, we ignore version at the moment
@@ -429,10 +429,13 @@ static int free_share(TINA_SHARE *share)
int result_code= 0;
if (!--share->use_count){
/* Write the meta file. Mark it as crashed if needed. */
- (void)write_meta_file(share->meta_file, share->rows_recorded,
- share->crashed ? TRUE :FALSE);
- if (mysql_file_close(share->meta_file, MYF(0)))
- result_code= 1;
+ if (share->meta_file != -1)
+ {
+ (void)write_meta_file(share->meta_file, share->rows_recorded,
+ share->crashed ? TRUE :FALSE);
+ if (mysql_file_close(share->meta_file, MYF(0)))
+ result_code= 1;
+ }
if (share->tina_write_opened)
{
if (mysql_file_close(share->tina_write_filedes, MYF(0)))
@@ -954,7 +957,7 @@ int ha_tina::open(const char *name, int mode, uint open_options)
if (share->crashed && !(open_options & HA_OPEN_FOR_REPAIR))
{
free_share(share);
- DBUG_RETURN(HA_ERR_CRASHED_ON_USAGE);
+ DBUG_RETURN(my_errno ? my_errno : HA_ERR_CRASHED_ON_USAGE);
}
local_data_file_version= share->data_file_version;
@@ -1505,13 +1508,13 @@ int ha_tina::repair(THD* thd, HA_CHECK_OPT* check_opt)
/* Don't assert in field::val() functions */
table->use_all_columns();
- if (!(buf= (uchar*) my_malloc(table->s->reclength, MYF(MY_WME))))
- DBUG_RETURN(HA_ERR_OUT_OF_MEM);
/* position buffer to the start of the file */
if (init_data_file())
DBUG_RETURN(HA_ERR_CRASHED_ON_REPAIR);
+ if (!(buf= (uchar*) my_malloc(table->s->reclength, MYF(MY_WME))))
+ DBUG_RETURN(HA_ERR_OUT_OF_MEM);
/*
Local_saved_data_file_length is initialized during the lock phase.
Sometimes this is not getting executed before ::repair (e.g. for
@@ -1595,9 +1598,9 @@ int ha_tina::repair(THD* thd, HA_CHECK_OPT* check_opt)
DBUG_RETURN(my_errno ? my_errno : -1);
share->tina_write_opened= FALSE;
}
- if (mysql_file_close(data_file, MYF(0)) ||
- mysql_file_close(repair_file, MYF(0)) ||
- mysql_file_rename(csv_key_file_data,
+ mysql_file_close(data_file, MYF(0));
+ mysql_file_close(repair_file, MYF(0));
+ if (mysql_file_rename(csv_key_file_data,
repaired_fname, share->data_file_name, MYF(0)))
DBUG_RETURN(-1);
@@ -1719,13 +1722,14 @@ int ha_tina::check(THD* thd, HA_CHECK_OPT* check_opt)
DBUG_ENTER("ha_tina::check");
old_proc_info= thd_proc_info(thd, "Checking table");
- if (!(buf= (uchar*) my_malloc(table->s->reclength, MYF(MY_WME))))
- DBUG_RETURN(HA_ERR_OUT_OF_MEM);
/* position buffer to the start of the file */
if (init_data_file())
DBUG_RETURN(HA_ERR_CRASHED);
+ if (!(buf= (uchar*) my_malloc(table->s->reclength, MYF(MY_WME))))
+ DBUG_RETURN(HA_ERR_OUT_OF_MEM);
+
/*
Local_saved_data_file_length is initialized during the lock phase.
Check does not use store_lock in certain cases. So, we set it
diff --git a/storage/federated/ha_federated.cc b/storage/federated/ha_federated.cc
index f22bb6cb758..a3121f56d7c 100644
--- a/storage/federated/ha_federated.cc
+++ b/storage/federated/ha_federated.cc
@@ -1,4 +1,5 @@
/* Copyright (c) 2004, 2015, Oracle and/or its affiliates.
+ Copyright (c) 2017, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@@ -2943,6 +2944,7 @@ int ha_federated::extra(ha_extra_function operation)
break;
case HA_EXTRA_PREPARE_FOR_DROP:
table_will_be_deleted = TRUE;
+ break;
default:
/* do nothing */
DBUG_PRINT("info",("unhandled operation: %d", (uint) operation));
diff --git a/storage/federatedx/ha_federatedx.h b/storage/federatedx/ha_federatedx.h
index 759ddfdfe71..3345bbc2180 100644
--- a/storage/federatedx/ha_federatedx.h
+++ b/storage/federatedx/ha_federatedx.h
@@ -330,7 +330,7 @@ public:
return (HA_PRIMARY_KEY_IN_READ_INDEX | HA_FILE_BASED
| HA_REC_NOT_IN_SEQ | HA_AUTO_PART_KEY | HA_CAN_INDEX_BLOBS |
HA_BINLOG_ROW_CAPABLE | HA_BINLOG_STMT_CAPABLE | HA_CAN_REPAIR |
- HA_NO_PREFIX_CHAR_KEYS | HA_PRIMARY_KEY_REQUIRED_FOR_DELETE |
+ HA_PRIMARY_KEY_REQUIRED_FOR_DELETE |
HA_PARTIAL_COLUMN_READ | HA_NULL_IN_KEY);
}
/*
diff --git a/storage/heap/hp_create.c b/storage/heap/hp_create.c
index bb5537c9363..431e992e75b 100644
--- a/storage/heap/hp_create.c
+++ b/storage/heap/hp_create.c
@@ -1,5 +1,5 @@
/* Copyright (c) 2000, 2011, Oracle and/or its affiliates.
- Copyright (c) 2010, 2014, SkySQL Ab.
+ Copyright (c) 2010, 2017, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@@ -94,7 +94,7 @@ int heap_create(const char *name, HP_CREATE_INFO *create_info,
case HA_KEYTYPE_VARBINARY1:
/* Case-insensitiveness is handled in coll->hash_sort */
keyinfo->seg[j].type= HA_KEYTYPE_VARTEXT1;
- /* fall_through */
+ /* fall through */
case HA_KEYTYPE_VARTEXT1:
keyinfo->flag|= HA_VAR_LENGTH_KEY;
length+= 2;
diff --git a/storage/innobase/btr/btr0btr.cc b/storage/innobase/btr/btr0btr.cc
index 3d778c49012..d0fcd68c677 100644
--- a/storage/innobase/btr/btr0btr.cc
+++ b/storage/innobase/btr/btr0btr.cc
@@ -1133,9 +1133,7 @@ btr_create(
const btr_create_t* btr_redo_create_info,
mtr_t* mtr)
{
- ulint page_no;
buf_block_t* block;
- buf_frame_t* frame;
page_t* page;
page_zip_des_t* page_zip;
@@ -1170,33 +1168,28 @@ btr_create(
+ IBUF_HEADER + IBUF_TREE_SEG_HEADER,
IBUF_TREE_ROOT_PAGE_NO,
FSP_UP, mtr);
- ut_ad(block->page.id.page_no() == IBUF_TREE_ROOT_PAGE_NO);
- } else {
- block = fseg_create(space, 0,
- PAGE_HEADER + PAGE_BTR_SEG_TOP, mtr);
- }
- if (block == NULL) {
-
- return(FIL_NULL);
- }
+ if (block == NULL) {
+ return(FIL_NULL);
+ }
- page_no = block->page.id.page_no();
- frame = buf_block_get_frame(block);
+ ut_ad(block->page.id.page_no() == IBUF_TREE_ROOT_PAGE_NO);
- if (type & DICT_IBUF) {
- /* It is an insert buffer tree: initialize the free list */
buf_block_dbg_add_level(block, SYNC_IBUF_TREE_NODE_NEW);
- ut_ad(page_no == IBUF_TREE_ROOT_PAGE_NO);
-
- flst_init(frame + PAGE_HEADER + PAGE_BTR_IBUF_FREE_LIST, mtr);
+ flst_init(block->frame + PAGE_HEADER + PAGE_BTR_IBUF_FREE_LIST,
+ mtr);
} else {
- /* It is a non-ibuf tree: create a file segment for leaf
- pages */
+ block = fseg_create(space, 0,
+ PAGE_HEADER + PAGE_BTR_SEG_TOP, mtr);
+
+ if (block == NULL) {
+ return(FIL_NULL);
+ }
+
buf_block_dbg_add_level(block, SYNC_TREE_NODE_NEW);
- if (!fseg_create(space, page_no,
+ if (!fseg_create(space, block->page.id.page_no(),
PAGE_HEADER + PAGE_BTR_SEG_LEAF, mtr)) {
/* Not enough space for new segment, free root
segment before return. */
@@ -1287,7 +1280,7 @@ btr_create(
ut_ad(page_get_max_insert_size(page, 2) > 2 * BTR_PAGE_MAX_REC_SIZE);
- return(page_no);
+ return(block->page.id.page_no());
}
/** Free a B-tree except the root page. The root page MUST be freed after
diff --git a/storage/innobase/btr/btr0cur.cc b/storage/innobase/btr/btr0cur.cc
index aa24cb5d423..3bef25e945c 100644
--- a/storage/innobase/btr/btr0cur.cc
+++ b/storage/innobase/btr/btr0cur.cc
@@ -2813,18 +2813,21 @@ btr_cur_ins_lock_and_undo(
}
if (err != DB_SUCCESS
+ || !(~flags | (BTR_NO_UNDO_LOG_FLAG | BTR_KEEP_SYS_FLAG))
|| !dict_index_is_clust(index) || dict_index_is_ibuf(index)) {
return(err);
}
- err = trx_undo_report_row_operation(flags, TRX_UNDO_INSERT_OP,
- thr, index, entry,
- NULL, 0, NULL, NULL,
- &roll_ptr);
- if (err != DB_SUCCESS) {
-
- return(err);
+ if (flags & BTR_NO_UNDO_LOG_FLAG) {
+ roll_ptr = 0;
+ } else {
+ err = trx_undo_report_row_operation(thr, index, entry,
+ NULL, 0, NULL, NULL,
+ &roll_ptr);
+ if (err != DB_SUCCESS) {
+ return(err);
+ }
}
/* Now we can fill in the roll ptr field in entry */
@@ -2884,15 +2887,17 @@ btr_cur_optimistic_insert(
btr_cur_t* cursor, /*!< in: cursor on page after which to insert;
cursor stays valid */
ulint** offsets,/*!< out: offsets on *rec */
- mem_heap_t** heap, /*!< in/out: pointer to memory heap, or NULL */
+ mem_heap_t** heap, /*!< in/out: pointer to memory heap */
dtuple_t* entry, /*!< in/out: entry to insert */
rec_t** rec, /*!< out: pointer to inserted record if
succeed */
big_rec_t** big_rec,/*!< out: big rec vector whose fields have to
- be stored externally by the caller, or
- NULL */
+ be stored externally by the caller */
ulint n_ext, /*!< in: number of externally stored columns */
- que_thr_t* thr, /*!< in: query thread or NULL */
+ que_thr_t* thr, /*!< in/out: query thread; can be NULL if
+ !(~flags
+ & (BTR_NO_LOCKING_FLAG
+ | BTR_NO_UNDO_LOG_FLAG)) */
mtr_t* mtr) /*!< in/out: mini-transaction;
if this function returns DB_SUCCESS on
a leaf page of a secondary index in a
@@ -2912,6 +2917,7 @@ btr_cur_optimistic_insert(
ulint rec_size;
dberr_t err;
+ ut_ad(thr || !(~flags & (BTR_NO_LOCKING_FLAG | BTR_NO_UNDO_LOG_FLAG)));
*big_rec = NULL;
block = btr_cur_get_block(cursor);
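The new assertion ut_ad(thr || !(~flags & (BTR_NO_LOCKING_FLAG | BTR_NO_UNDO_LOG_FLAG))) encodes "thr may only be NULL when flags contains both bits": ~flags has a zero bit exactly where flags has a one, so the AND is zero only if every bit of the mask is present in flags. A sketch with illustrative flag values (the real BTR_* constants differ):

const unsigned NO_LOCKING  = 1u << 0;   // stand-in for BTR_NO_LOCKING_FLAG
const unsigned NO_UNDO_LOG = 1u << 1;   // stand-in for BTR_NO_UNDO_LOG_FLAG

bool thr_may_be_null(unsigned flags)
{
  return !(~flags & (NO_LOCKING | NO_UNDO_LOG));   // true iff both bits are set
}

// thr_may_be_null(NO_LOCKING | NO_UNDO_LOG) == true
// thr_may_be_null(NO_LOCKING)               == false  (undo logging still needed)
// thr_may_be_null(0)                        == false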
@@ -3147,15 +3153,17 @@ btr_cur_pessimistic_insert(
cursor stays valid */
ulint** offsets,/*!< out: offsets on *rec */
mem_heap_t** heap, /*!< in/out: pointer to memory heap
- that can be emptied, or NULL */
+ that can be emptied */
dtuple_t* entry, /*!< in/out: entry to insert */
rec_t** rec, /*!< out: pointer to inserted record if
succeed */
big_rec_t** big_rec,/*!< out: big rec vector whose fields have to
- be stored externally by the caller, or
- NULL */
+ be stored externally by the caller */
ulint n_ext, /*!< in: number of externally stored columns */
- que_thr_t* thr, /*!< in: query thread or NULL */
+ que_thr_t* thr, /*!< in/out: query thread; can be NULL if
+ !(~flags
+ & (BTR_NO_LOCKING_FLAG
+ | BTR_NO_UNDO_LOG_FLAG)) */
mtr_t* mtr) /*!< in/out: mini-transaction */
{
dict_index_t* index = cursor->index;
@@ -3166,6 +3174,7 @@ btr_cur_pessimistic_insert(
ulint n_reserved = 0;
ut_ad(dtuple_check_typed(entry));
+ ut_ad(thr || !(~flags & (BTR_NO_LOCKING_FLAG | BTR_NO_UNDO_LOG_FLAG)));
*big_rec = NULL;
@@ -3353,9 +3362,10 @@ btr_cur_upd_lock_and_undo(
/* Append the info about the update in the undo log */
- return(trx_undo_report_row_operation(
- flags, TRX_UNDO_MODIFY_OP, thr,
- index, NULL, update,
+ return((flags & BTR_NO_UNDO_LOG_FLAG)
+ ? DB_SUCCESS
+ : trx_undo_report_row_operation(
+ thr, index, NULL, update,
cmpl_info, rec, offsets, roll_ptr));
}
@@ -4069,12 +4079,12 @@ btr_cur_pessimistic_update(
ulint** offsets,/*!< out: offsets on cursor->page_cur.rec */
mem_heap_t** offsets_heap,
/*!< in/out: pointer to memory heap
- that can be emptied, or NULL */
+ that can be emptied */
mem_heap_t* entry_heap,
/*!< in/out: memory heap for allocating
big_rec and the index tuple */
big_rec_t** big_rec,/*!< out: big rec vector whose fields have to
- be stored externally by the caller, or NULL */
+ be stored externally by the caller */
upd_t* update, /*!< in/out: update vector; this is allowed to
also contain trx id and roll ptr fields.
Non-updated columns that are moved offpage will
@@ -4601,7 +4611,6 @@ undo log record created.
dberr_t
btr_cur_del_mark_set_clust_rec(
/*===========================*/
- ulint flags, /*!< in: undo logging and locking flags */
buf_block_t* block, /*!< in/out: buffer block of the record */
rec_t* rec, /*!< in/out: record */
dict_index_t* index, /*!< in: clustered index of the record */
@@ -4637,8 +4646,8 @@ btr_cur_del_mark_set_clust_rec(
return(err);
}
- err = trx_undo_report_row_operation(flags, TRX_UNDO_MODIFY_OP, thr,
- index, entry, NULL, 0, rec, offsets,
+ err = trx_undo_report_row_operation(thr, index,
+ entry, NULL, 0, rec, offsets,
&roll_ptr);
if (err != DB_SUCCESS) {
diff --git a/storage/innobase/btr/btr0defragment.cc b/storage/innobase/btr/btr0defragment.cc
index 6913124cea1..d4b83930191 100644
--- a/storage/innobase/btr/btr0defragment.cc
+++ b/storage/innobase/btr/btr0defragment.cc
@@ -154,7 +154,6 @@ btr_defragment_init()
srv_defragment_interval = ut_microseconds_to_timer(
(ulonglong) (1000000.0 / srv_defragment_frequency));
mutex_create(LATCH_ID_BTR_DEFRAGMENT_MUTEX, &btr_defragment_mutex);
- os_thread_create(btr_defragment_thread, NULL, NULL);
}
/******************************************************************//**
@@ -736,14 +735,13 @@ btr_defragment_n_pages(
return current_block;
}
-/******************************************************************//**
-Thread that merges consecutive b-tree pages into fewer pages to defragment
-the index. */
+/** Whether btr_defragment_thread is active */
+bool btr_defragment_thread_active;
+
+/** Merge consecutive b-tree pages into fewer pages to defragment indexes */
extern "C" UNIV_INTERN
os_thread_ret_t
-DECLARE_THREAD(btr_defragment_thread)(
-/*==========================================*/
- void* arg) /*!< in: work queue */
+DECLARE_THREAD(btr_defragment_thread)(void*)
{
btr_pcur_t* pcur;
btr_cur_t* cursor;
@@ -753,6 +751,8 @@ DECLARE_THREAD(btr_defragment_thread)(
buf_block_t* last_block;
while (srv_shutdown_state == SRV_SHUTDOWN_NONE) {
+ ut_ad(btr_defragment_thread_active);
+
/* If defragmentation is disabled, sleep before
checking whether it's enabled. */
if (!srv_defragment) {
@@ -844,7 +844,8 @@ DECLARE_THREAD(btr_defragment_thread)(
btr_defragment_remove_item(item);
}
}
- btr_defragment_shutdown();
+
+ btr_defragment_thread_active = false;
os_thread_exit();
OS_THREAD_DUMMY_RETURN;
}
diff --git a/storage/innobase/buf/buf0buf.cc b/storage/innobase/buf/buf0buf.cc
index e63c234b2f6..8d6d95a020e 100644
--- a/storage/innobase/buf/buf0buf.cc
+++ b/storage/innobase/buf/buf0buf.cc
@@ -79,6 +79,10 @@ Created 11/5/1995 Heikki Tuuri
#include "ut0byte.h"
#include <new>
+#ifdef UNIV_LINUX
+#include <stdlib.h>
+#endif
+
#ifdef HAVE_LZO
#include "lzo/lzo1x.h"
#endif
@@ -126,6 +130,30 @@ struct set_numa_interleave_t
#define NUMA_MEMPOLICY_INTERLEAVE_IN_SCOPE
#endif /* HAVE_LIBNUMA */
+#ifdef HAVE_SNAPPY
+#include "snappy-c.h"
+#endif
+
+inline void* aligned_malloc(size_t size, size_t align) {
+ void *result;
+#ifdef _MSC_VER
+ result = _aligned_malloc(size, align);
+#else
+ if(posix_memalign(&result, align, size)) {
+ result = 0;
+ }
+#endif
+ return result;
+}
+
+inline void aligned_free(void *ptr) {
+#ifdef _MSC_VER
+ _aligned_free(ptr);
+#else
+ free(ptr);
+#endif
+}
+
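A usage sketch for the helpers added above, matching how the buffer-pool slots use them later in this patch (the sketch assumes the surrounding buf0buf.cc context for byte, UNIV_PAGE_SIZE and memset; the buffer size equals the page size here only for illustration):

static void slot_buffer_example()
{
  byte* buf = static_cast<byte*>(aligned_malloc(UNIV_PAGE_SIZE, UNIV_PAGE_SIZE));

  if (buf != NULL) {
    memset(buf, 0, UNIV_PAGE_SIZE);
    /* ... hand the page-aligned buffer to encryption or compression I/O ... */
    aligned_free(buf);   // pairs with aligned_malloc (_aligned_free on Windows)
  }
}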
/*
IMPLEMENTATION OF THE BUFFER POOL
=================================
@@ -1943,20 +1971,14 @@ buf_pool_free_instance(
if (buf_pool->tmp_arr) {
for(ulint i = 0; i < buf_pool->tmp_arr->n_slots; i++) {
buf_tmp_buffer_t* slot = &(buf_pool->tmp_arr->slots[i]);
-#ifdef HAVE_LZO
- if (slot && slot->lzo_mem) {
- ut_free(slot->lzo_mem);
- slot->lzo_mem = NULL;
- }
-#endif
- if (slot && slot->crypt_buf_free) {
- ut_free(slot->crypt_buf_free);
- slot->crypt_buf_free = NULL;
+ if (slot && slot->crypt_buf) {
+ aligned_free(slot->crypt_buf);
+ slot->crypt_buf = NULL;
}
- if (slot && slot->comp_buf_free) {
- ut_free(slot->comp_buf_free);
- slot->comp_buf_free = NULL;
+ if (slot && slot->comp_buf) {
+ aligned_free(slot->comp_buf);
+ slot->comp_buf = NULL;
}
}
@@ -3850,14 +3872,25 @@ buf_zip_decompress(
{
const byte* frame = block->page.zip.data;
ulint size = page_zip_get_size(&block->page.zip);
+ /* The tablespace will not be found if this function is called
+ during IMPORT. */
+ fil_space_t* space = fil_space_acquire_for_io(block->page.id.space());
+ const unsigned key_version = mach_read_from_4(
+ frame + FIL_PAGE_FILE_FLUSH_LSN_OR_KEY_VERSION);
+ fil_space_crypt_t* crypt_data = space ? space->crypt_data : NULL;
+ const bool encrypted = crypt_data
+ && crypt_data->type != CRYPT_SCHEME_UNENCRYPTED
+ && (!crypt_data->is_default_encryption()
+ || srv_encrypt_tables);
ut_ad(block->page.size.is_compressed());
ut_a(block->page.id.space() != 0);
if (UNIV_UNLIKELY(check && !page_zip_verify_checksum(frame, size))) {
- ib::error() << "Compressed page checksum mismatch "
- << block->page.id << "): stored: "
+ ib::error() << "Compressed page checksum mismatch for "
+ << (space ? space->chain.start->name : "")
+ << block->page.id << ": stored: "
<< mach_read_from_4(frame + FIL_PAGE_SPACE_OR_CHKSUM)
<< ", crc32: "
<< page_zip_calc_checksum(
@@ -3873,7 +3906,7 @@ buf_zip_decompress(
<< page_zip_calc_checksum(
frame, size, SRV_CHECKSUM_ALGORITHM_NONE);
- return(FALSE);
+ goto err_exit;
}
switch (fil_page_get_type(frame)) {
@@ -3881,15 +3914,16 @@ buf_zip_decompress(
case FIL_PAGE_RTREE:
if (page_zip_decompress(&block->page.zip,
block->frame, TRUE)) {
+ if (space) {
+ fil_space_release_for_io(space);
+ }
return(TRUE);
}
- ib::error() << "Unable to decompress space "
- << block->page.id.space()
- << " page " << block->page.id.page_no();
-
- return(FALSE);
-
+ ib::error() << "Unable to decompress "
+ << (space ? space->chain.start->name : "")
+ << block->page.id;
+ goto err_exit;
case FIL_PAGE_TYPE_ALLOCATED:
case FIL_PAGE_INODE:
case FIL_PAGE_IBUF_BITMAP:
@@ -3899,11 +3933,31 @@ buf_zip_decompress(
case FIL_PAGE_TYPE_ZBLOB2:
/* Copy to uncompressed storage. */
memcpy(block->frame, frame, block->page.size.physical());
+ if (space) {
+ fil_space_release_for_io(space);
+ }
+
return(TRUE);
}
ib::error() << "Unknown compressed page type "
- << fil_page_get_type(frame);
+ << fil_page_get_type(frame)
+ << " in " << (space ? space->chain.start->name : "")
+ << block->page.id;
+
+err_exit:
+ if (encrypted) {
+ ib::info() << "Row compressed page could be encrypted"
+ " with key_version " << key_version;
+ block->page.encrypted = true;
+ dict_set_encrypted_by_space(block->page.id.space());
+ } else {
+ dict_set_corrupted_by_space(block->page.id.space());
+ }
+
+ if (space) {
+ fil_space_release_for_io(space);
+ }
return(FALSE);
}
@@ -4504,12 +4558,21 @@ got_block:
/* Decompress the page while not holding
buf_pool->mutex or block->mutex. */
- /* Page checksum verification is already done when
- the page is read from disk. Hence page checksum
- verification is not necessary when decompressing the page. */
{
- bool success = buf_zip_decompress(block, FALSE);
- ut_a(success);
+ bool success = buf_zip_decompress(block, TRUE);
+
+ if (!success) {
+ buf_pool_mutex_enter(buf_pool);
+ buf_page_mutex_enter(fix_block);
+ buf_block_set_io_fix(fix_block, BUF_IO_NONE);
+ buf_page_mutex_exit(fix_block);
+
+ --buf_pool->n_pend_unzip;
+ buf_block_unfix(fix_block);
+ buf_pool_mutex_exit(buf_pool);
+ rw_lock_x_unlock(&fix_block->lock);
+ return NULL;
+ }
}
if (!recv_no_ibuf_operations) {
@@ -4612,19 +4675,12 @@ got_block:
goto loop;
}
- ib::info() << "innodb_change_buffering_debug evict "
- << page_id;
-
return(NULL);
}
buf_page_mutex_enter(fix_block);
if (buf_flush_page_try(buf_pool, fix_block)) {
-
- ib::info() << "innodb_change_buffering_debug flush "
- << page_id;
-
guess = fix_block;
goto loop;
@@ -5541,15 +5597,11 @@ buf_page_create(
memset(frame + FIL_PAGE_NEXT, 0xff, 4);
mach_write_to_2(frame + FIL_PAGE_TYPE, FIL_PAGE_TYPE_ALLOCATED);
- /* These 8 bytes are also repurposed for PageIO compression and must
- be reset when the frame is assigned to a new page id. See fil0fil.h.
-
-
- FIL_PAGE_FILE_FLUSH_LSN is used on the following pages:
+ /* FIL_PAGE_FILE_FLUSH_LSN_OR_KEY_VERSION is only used on the
+ following pages:
(1) The first page of the InnoDB system tablespace (page 0:0)
- (2) FIL_RTREE_SPLIT_SEQ_NUM on R-tree pages .
-
- Therefore we don't transparently compress such pages. */
+ (2) FIL_RTREE_SPLIT_SEQ_NUM on R-tree pages
+ (3) key_version on encrypted pages (not page 0:0) */
memset(frame + FIL_PAGE_FILE_FLUSH_LSN_OR_KEY_VERSION, 0, 8);
@@ -7220,22 +7272,27 @@ buf_pool_reserve_tmp_slot(
buf_pool_mutex_exit(buf_pool);
/* Allocate temporary memory for encryption/decryption */
- if (free_slot->crypt_buf_free == NULL) {
- free_slot->crypt_buf_free = static_cast<byte *>(ut_malloc_nokey(UNIV_PAGE_SIZE*2));
- free_slot->crypt_buf = static_cast<byte *>(ut_align(free_slot->crypt_buf_free, UNIV_PAGE_SIZE));
- memset(free_slot->crypt_buf_free, 0, UNIV_PAGE_SIZE *2);
+ if (free_slot->crypt_buf == NULL) {
+ free_slot->crypt_buf = static_cast<byte*>(aligned_malloc(UNIV_PAGE_SIZE, UNIV_PAGE_SIZE));
+ memset(free_slot->crypt_buf, 0, UNIV_PAGE_SIZE);
}
/* For page compressed tables allocate temporary memory for
compression/decompression */
- if (compressed && free_slot->comp_buf_free == NULL) {
- free_slot->comp_buf_free = static_cast<byte *>(ut_malloc_nokey(UNIV_PAGE_SIZE*2));
- free_slot->comp_buf = static_cast<byte *>(ut_align(free_slot->comp_buf_free, UNIV_PAGE_SIZE));
- memset(free_slot->comp_buf_free, 0, UNIV_PAGE_SIZE *2);
-#ifdef HAVE_LZO
- free_slot->lzo_mem = static_cast<byte *>(ut_malloc_nokey(LZO1X_1_15_MEM_COMPRESS));
- memset(free_slot->lzo_mem, 0, LZO1X_1_15_MEM_COMPRESS);
+ if (compressed && free_slot->comp_buf == NULL) {
+ ulint size = UNIV_PAGE_SIZE;
+
+	/* Both snappy and lzo compression methods require that the
+	output buffer used for compression be bigger than the input
+	buffer. Increase the allocated buffer size accordingly. */
+#if HAVE_SNAPPY
+ size = snappy_max_compressed_length(size);
+#endif
+#if HAVE_LZO
+ size += LZO1X_1_15_MEM_COMPRESS;
#endif
+ free_slot->comp_buf = static_cast<byte*>(aligned_malloc(size, UNIV_PAGE_SIZE));
+ memset(free_slot->comp_buf, 0, size);
}
return (free_slot);
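The sizing logic above can be read in isolation as follows, under the same HAVE_SNAPPY/HAVE_LZO conditionals (snappy_max_compressed_length() is snappy's documented worst-case output bound, and LZO1X_1_15_MEM_COMPRESS is the extra working memory LZO needs; this is a sketch assuming the surrounding buf0buf.cc includes):

static ulint comp_buf_size(ulint page_size)
{
  ulint size = page_size;
#if HAVE_SNAPPY
  size = snappy_max_compressed_length(size);   // incompressible pages grow slightly
#endif
#if HAVE_LZO
  size += LZO1X_1_15_MEM_COMPRESS;             // room for LZO's work memory as well
#endif
  return size;
}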
@@ -7320,8 +7377,7 @@ buf_page_encrypt_before_write(
fsp_flags_get_page_compression_level(space->flags),
fil_space_get_block_size(space, bpage->id.page_no()),
encrypted,
- &out_len,
- IF_LZO(slot->lzo_mem, NULL));
+ &out_len);
bpage->real_size = out_len;
diff --git a/storage/innobase/buf/buf0dblwr.cc b/storage/innobase/buf/buf0dblwr.cc
index f99fc6434de..b770e8483d9 100644
--- a/storage/innobase/buf/buf0dblwr.cc
+++ b/storage/innobase/buf/buf0dblwr.cc
@@ -1,6 +1,6 @@
/*****************************************************************************
-Copyright (c) 1995, 2015, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 1995, 2017, Oracle and/or its affiliates. All Rights Reserved.
Copyright (c) 2013, 2017, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify it under
@@ -162,11 +162,11 @@ buf_dblwr_init(
ut_zalloc_nokey(buf_size * sizeof(void*)));
}
-/****************************************************************//**
-Creates the doublewrite buffer to a new InnoDB installation. The header of the
-doublewrite buffer is placed on the trx system header page.
-@return true if successful, false if not. */
-MY_ATTRIBUTE((warn_unused_result))
+/** Create the doublewrite buffer if the doublewrite buffer header
+is not present in the TRX_SYS page.
+@return whether the operation succeeded
+@retval true if the doublewrite buffer exists or was created
+@retval false if the creation failed (too small first data file) */
bool
buf_dblwr_create()
{
@@ -181,12 +181,11 @@ buf_dblwr_create()
if (buf_dblwr) {
/* Already inited */
-
return(true);
}
start_again:
- mtr_start(&mtr);
+ mtr.start();
buf_dblwr_being_created = TRUE;
doublewrite = buf_dblwr_get(&mtr);
@@ -198,33 +197,49 @@ start_again:
buf_dblwr_init(doublewrite);
- mtr_commit(&mtr);
+ mtr.commit();
buf_dblwr_being_created = FALSE;
return(true);
- }
+ } else {
+ fil_space_t* space = fil_space_acquire(TRX_SYS_SPACE);
+ const bool fail = UT_LIST_GET_FIRST(space->chain)->size
+ < 3 * FSP_EXTENT_SIZE;
+ fil_space_release(space);
- ib::info() << "Doublewrite buffer not found: creating new";
+ if (fail) {
+ goto too_small;
+ }
+ }
block2 = fseg_create(TRX_SYS_SPACE, TRX_SYS_PAGE_NO,
TRX_SYS_DOUBLEWRITE
+ TRX_SYS_DOUBLEWRITE_FSEG, &mtr);
- /* fseg_create acquires a second latch on the page,
- therefore we must declare it: */
-
- buf_block_dbg_add_level(block2, SYNC_NO_ORDER_CHECK);
-
if (block2 == NULL) {
- ib::error() << "Cannot create doublewrite buffer: you must"
- " increase your tablespace size."
- " Cannot continue operation.";
-
- /* The mini-transaction did not write anything yet;
- we merely failed to allocate a page. */
+too_small:
+ ib::error()
+ << "Cannot create doublewrite buffer: "
+ "the first file in innodb_data_file_path"
+ " must be at least "
+ << (3 * (FSP_EXTENT_SIZE * UNIV_PAGE_SIZE) >> 20)
+ << "M.";
mtr.commit();
return(false);
}
+ ib::info() << "Doublewrite buffer not found: creating new";
+
+ /* FIXME: After this point, the doublewrite buffer creation
+ is not atomic. The doublewrite buffer should not exist in
+ the InnoDB system tablespace file in the first place.
+ It could be located in separate optional file(s) in a
+ user-specified location. */
+
+ /* fseg_create acquires a second latch on the page,
+ therefore we must declare it: */
+
+ buf_block_dbg_add_level(block2, SYNC_NO_ORDER_CHECK);
+
fseg_header = doublewrite + TRX_SYS_DOUBLEWRITE_FSEG;
prev_page_no = 0;
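
The size check and error message in the hunk above boil down to a simple bound: the first system tablespace file must hold at least three extents before the doublewrite buffer can be created. A stand-alone illustration of the arithmetic, assuming the default 16KiB page size (FSP_EXTENT_SIZE is then 64 pages, i.e. 1MiB per extent):

    #include <cstdio>

    int main()
    {
        const unsigned long UNIV_PAGE_SIZE  = 16384;                    /* assumed default */
        const unsigned long FSP_EXTENT_SIZE = 1048576 / UNIV_PAGE_SIZE; /* 64 pages = 1MiB */

        /* Same expression as the ib::error() message above: prints "3M". */
        printf("first data file must be at least %luM\n",
               (3 * (FSP_EXTENT_SIZE * UNIV_PAGE_SIZE)) >> 20);
        return 0;
    }
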
@@ -338,7 +353,7 @@ recovery, this function loads the pages from double write buffer into memory.
@return DB_SUCCESS or error code */
dberr_t
buf_dblwr_init_or_load_pages(
- os_file_t file,
+ pfs_os_file_t file,
const char* path)
{
byte* buf;
@@ -516,6 +531,10 @@ buf_dblwr_process()
byte* unaligned_read_buf;
recv_dblwr_t& recv_dblwr = recv_sys->dblwr;
+ if (!buf_dblwr) {
+ return;
+ }
+
unaligned_read_buf = static_cast<byte*>(
ut_malloc_nokey(2 * UNIV_PAGE_SIZE));
diff --git a/storage/innobase/buf/buf0dump.cc b/storage/innobase/buf/buf0dump.cc
index ce7488e3d1f..b318d1e9a3a 100644
--- a/storage/innobase/buf/buf0dump.cc
+++ b/storage/innobase/buf/buf0dump.cc
@@ -394,7 +394,7 @@ buf_dump(
buf_dump_status(
STATUS_VERBOSE,
"Dumping buffer pool"
- " " ULINTPF "/" ULINTPF ","
+ " " ULINTPF "/%lu,"
" page " ULINTPF "/" ULINTPF,
i + 1, srv_buf_pool_instances,
j + 1, n_pages);
@@ -595,8 +595,8 @@ buf_load()
if (dump == NULL) {
fclose(f);
buf_load_status(STATUS_ERR,
- "Cannot allocate %lu bytes: %s",
- (ulint) (dump_n * sizeof(*dump)),
+ "Cannot allocate " ULINTPF " bytes: %s",
+ dump_n * sizeof(*dump),
strerror(errno));
return;
}
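
The buf0dump.cc hunks above swap "%lu" for the ULINTPF macro because ulint does not map to unsigned long on every platform (notably 64-bit Windows). A minimal sketch of the idea; the typedef and macro value below are assumptions for illustration, not the real definitions:

    #include <cstdio>

    typedef unsigned long ulint;   /* assumed; a 64-bit integer type on Windows */
    #define ULINTPF "%lu"          /* would be a wider conversion where ulint is wider than long */

    int main()
    {
        ulint i = 1, n = 8;
        /* Adjacent string literals concatenate around the macro. */
        printf("Dumping buffer pool " ULINTPF "/" ULINTPF "\n", i, n);
        return 0;
    }
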
diff --git a/storage/innobase/buf/buf0flu.cc b/storage/innobase/buf/buf0flu.cc
index 2a9acc8c298..05ab07229e3 100644
--- a/storage/innobase/buf/buf0flu.cc
+++ b/storage/innobase/buf/buf0flu.cc
@@ -1,6 +1,6 @@
/*****************************************************************************
-Copyright (c) 1995, 2016, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 1995, 2017, Oracle and/or its affiliates. All Rights Reserved.
Copyright (c) 2013, 2017, MariaDB Corporation.
Copyright (c) 2013, 2014, Fusion-io
@@ -70,7 +70,7 @@ is set to TRUE by the page_cleaner thread when it is spawned and is set
back to FALSE at shutdown by the page_cleaner as well. Therefore no
need to protect it by a mutex. It is only ever read by the thread
doing the shutdown */
-bool buf_page_cleaner_is_active = false;
+bool buf_page_cleaner_is_active;
/** Factor for scan length to determine n_pages for intended oldest LSN
progress */
@@ -269,6 +269,7 @@ buf_flush_insert_in_flush_rbt(
buf_page_t* prev = NULL;
buf_pool_t* buf_pool = buf_pool_from_bpage(bpage);
+ ut_ad(srv_shutdown_state != SRV_SHUTDOWN_FLUSH_PHASE);
ut_ad(buf_flush_list_mutex_own(buf_pool));
/* Insert this buffer into the rbt. */
@@ -480,6 +481,7 @@ buf_flush_insert_sorted_into_flush_list(
buf_page_t* prev_b;
buf_page_t* b;
+ ut_ad(srv_shutdown_state != SRV_SHUTDOWN_FLUSH_PHASE);
ut_ad(!buf_pool_mutex_own(buf_pool));
ut_ad(log_flush_order_mutex_own());
ut_ad(buf_page_mutex_own(block));
@@ -789,6 +791,7 @@ buf_flush_write_complete(
flush_type = buf_page_get_flush_type(bpage);
buf_pool->n_flush[flush_type]--;
+ ut_ad(buf_pool->n_flush[flush_type] != ULINT_MAX);
ut_ad(buf_pool_mutex_own(buf_pool));
@@ -1214,6 +1217,7 @@ buf_flush_page(
}
++buf_pool->n_flush[flush_type];
+ ut_ad(buf_pool->n_flush[flush_type] != 0);
mutex_exit(block_mutex);
@@ -2697,6 +2701,11 @@ pc_sleep_if_needed(
ulint next_loop_time,
int64_t sig_count)
{
+ /* No sleep if we are cleaning the buffer pool during the shutdown
+ with everything else finished */
+ if (srv_shutdown_state == SRV_SHUTDOWN_FLUSH_PHASE)
+ return OS_SYNC_TIME_EXCEEDED;
+
ulint cur_time = ut_time_ms();
if (next_loop_time > cur_time) {
@@ -3121,6 +3130,7 @@ DECLARE_THREAD(buf_flush_page_cleaner_coordinator)(
/*!< in: a dummy parameter required by
os_thread_create */
{
+ my_thread_init();
ulint next_loop_time = ut_time_ms() + 1000;
ulint n_flushed = 0;
ulint last_activity = srv_get_activity_count();
@@ -3150,8 +3160,6 @@ DECLARE_THREAD(buf_flush_page_cleaner_coordinator)(
}
#endif /* UNIV_LINUX */
- buf_page_cleaner_is_active = true;
-
while (!srv_read_only_mode
&& srv_shutdown_state == SRV_SHUTDOWN_NONE
&& recv_sys->heap != NULL) {
@@ -3479,7 +3487,6 @@ thread_exit:
buf_page_cleaner_is_active = false;
my_thread_end();
-
/* We count the number of threads in os_thread_exit(). A created
thread should always use that to exit and not use return() to exit. */
os_thread_exit();
diff --git a/storage/innobase/buf/buf0rea.cc b/storage/innobase/buf/buf0rea.cc
index 12775c74daf..20603021072 100644
--- a/storage/innobase/buf/buf0rea.cc
+++ b/storage/innobase/buf/buf0rea.cc
@@ -752,14 +752,9 @@ buf_read_ahead_linear(
switch (err) {
case DB_SUCCESS:
case DB_TABLESPACE_TRUNCATED:
+ case DB_TABLESPACE_DELETED:
case DB_ERROR:
break;
- case DB_TABLESPACE_DELETED:
- ib::info() << "linear readahead trying to"
- " access page "
- << page_id_t(page_id.space(), i)
- << " in nonexisting or being-dropped"
- " tablespace";
case DB_DECRYPTION_FAILED:
ib::error() << "linear readahead failed to"
" decrypt page "
@@ -778,11 +773,11 @@ buf_read_ahead_linear(
os_aio_simulated_wake_handler_threads();
if (count) {
- DBUG_PRINT("ib_buf", ("linear read-ahead %lu pages, "
- "%lu:%lu",
+ DBUG_PRINT("ib_buf", ("linear read-ahead " ULINTPF " pages, "
+ "%u:%u",
count,
- (ulint)page_id.space(),
- (ulint)page_id.page_no()));
+ page_id.space(),
+ page_id.page_no()));
}
/* Read ahead is considered one I/O operation for the purpose of
diff --git a/storage/innobase/data/data0type.cc b/storage/innobase/data/data0type.cc
index 315b12e135c..a40724b303d 100644
--- a/storage/innobase/data/data0type.cc
+++ b/storage/innobase/data/data0type.cc
@@ -199,6 +199,7 @@ dtype_print(const dtype_t* type)
case DATA_VAR_POINT:
fputs("DATA_VAR_POINT", stderr);
+ break;
case DATA_GEOMETRY:
fputs("DATA_GEOMETRY", stderr);
diff --git a/storage/innobase/dict/dict0crea.cc b/storage/innobase/dict/dict0crea.cc
index 1b5b6ff3850..128f661b618 100644
--- a/storage/innobase/dict/dict0crea.cc
+++ b/storage/innobase/dict/dict0crea.cc
@@ -460,14 +460,9 @@ dict_build_tablespace_for_table(
mtr_start(&mtr);
mtr.set_named_space(table->space);
- bool ret = fsp_header_init(table->space,
- FIL_IBD_FILE_INITIAL_SIZE,
- &mtr);
+ fsp_header_init(table->space, FIL_IBD_FILE_INITIAL_SIZE, &mtr);
mtr_commit(&mtr);
- if (!ret) {
- return(DB_ERROR);
- }
} else {
ut_ad(dict_tf_get_rec_format(table->flags)
!= REC_FORMAT_COMPRESSED);
diff --git a/storage/innobase/dict/dict0dict.cc b/storage/innobase/dict/dict0dict.cc
index b733ac580d9..63507e0a2c9 100644
--- a/storage/innobase/dict/dict0dict.cc
+++ b/storage/innobase/dict/dict0dict.cc
@@ -4367,7 +4367,7 @@ dict_table_get_highest_foreign_id(
}
DBUG_PRINT("dict_table_get_highest_foreign_id",
- ("id: %lu", biggest_id));
+ ("id: " ULINTPF, biggest_id));
DBUG_RETURN(biggest_id);
}
@@ -6612,7 +6612,8 @@ dict_table_schema_check(
if ((ulint) table->n_def - n_sys_cols != req_schema->n_cols) {
/* the table has a different number of columns than required */
ut_snprintf(errstr, errstr_sz,
- "%s has %lu columns but should have " ULINTPF ".",
+ "%s has " ULINTPF " columns but should have "
+ ULINTPF ".",
ut_format_name(req_schema->table_name,
buf, sizeof(buf)),
table->n_def - n_sys_cols,
diff --git a/storage/innobase/dict/dict0load.cc b/storage/innobase/dict/dict0load.cc
index ba1fbc5eb5d..0ca9ebfc622 100644
--- a/storage/innobase/dict/dict0load.cc
+++ b/storage/innobase/dict/dict0load.cc
@@ -185,17 +185,6 @@ dict_load_field_low(
for temporary storage */
const rec_t* rec); /*!< in: SYS_FIELDS record */
-/** Load a table definition from a SYS_TABLES record to dict_table_t.
-Do not load any columns or indexes.
-@param[in] name Table name
-@param[in] rec SYS_TABLES record
-@param[out,own] table table, or NULL
-@return error message
-@retval NULL on success */
-static
-const char*
-dict_load_table_low(table_name_t& name, const rec_t* rec, dict_table_t** table);
-
/* If this flag is TRUE, then we will load the cluster index's (and tables')
metadata even if it is marked as "corrupted". */
my_bool srv_load_corrupted;
@@ -1149,6 +1138,7 @@ dict_sys_tablespaces_rec_read(
@param[out] flags Pointer to table flags
@param[out] flags2 Pointer to table flags2
@return true if the record was read correctly, false if not. */
+MY_ATTRIBUTE((warn_unused_result))
static
bool
dict_sys_tables_rec_read(
@@ -1164,8 +1154,6 @@ dict_sys_tables_rec_read(
ulint len;
ulint type;
- *flags2 = 0;
-
field = rec_get_nth_field_old(
rec, DICT_FLD__SYS_TABLES__ID, &len);
ut_ad(len == 8);
@@ -1202,22 +1190,40 @@ dict_sys_tables_rec_read(
" data dictionary contains invalid flags."
" SYS_TABLES.TYPE=" << type <<
" SYS_TABLES.N_COLS=" << *n_cols;
- *flags = ULINT_UNDEFINED;
return(false);
}
*flags = dict_sys_tables_type_to_tf(type, *n_cols);
- /* Get flags2 from SYS_TABLES.MIX_LEN */
- field = rec_get_nth_field_old(
- rec, DICT_FLD__SYS_TABLES__MIX_LEN, &len);
- *flags2 = mach_read_from_4(field);
+ /* For tables created before MySQL 4.1, there may be
+ garbage in SYS_TABLES.MIX_LEN where flags2 are found. Such tables
+ would always be in ROW_FORMAT=REDUNDANT which do not have the
+ high bit set in n_cols, and flags would be zero.
+ MySQL 4.1 was the first version to support innodb_file_per_table,
+ that is, *space_id != 0. */
+ if (*flags != 0 || *space_id != 0 || *n_cols & DICT_N_COLS_COMPACT) {
- /* DICT_TF2_FTS will be set when indexes are being loaded */
- *flags2 &= ~DICT_TF2_FTS;
+ /* Get flags2 from SYS_TABLES.MIX_LEN */
+ field = rec_get_nth_field_old(
+ rec, DICT_FLD__SYS_TABLES__MIX_LEN, &len);
+ *flags2 = mach_read_from_4(field);
+
+ if (!dict_tf2_is_valid(*flags, *flags2)) {
+ ib::error() << "Table " << table_name << " in InnoDB"
+ " data dictionary contains invalid flags."
+ " SYS_TABLES.MIX_LEN=" << *flags2;
+ return(false);
+ }
+
+ /* DICT_TF2_FTS will be set when indexes are being loaded */
+ *flags2 &= ~DICT_TF2_FTS;
+
+ /* Now that we have used this bit, unset it. */
+ *n_cols &= ~DICT_N_COLS_COMPACT;
+ } else {
+ *flags2 = 0;
+ }
- /* Now that we have used this bit, unset it. */
- *n_cols &= ~DICT_N_COLS_COMPACT;
return(true);
}
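
The rewritten dict_sys_tables_rec_read() logic above only trusts SYS_TABLES.MIX_LEN (flags2) when the record cannot come from a pre-MySQL-4.1 data file: such old tables are always ROW_FORMAT=REDUNDANT (compact bit clear in N_COLS), live in the system tablespace, and have zero flags. A reduced sketch of that predicate; the bit constant is an assumption for illustration:

    typedef unsigned long ulint;

    static const ulint DICT_N_COLS_COMPACT = 0x80000000UL;  /* assumed value of the high bit */

    /* Return true when SYS_TABLES.MIX_LEN may be read as flags2,
    false when it must be treated as pre-4.1 garbage. */
    bool trust_mix_len(ulint flags, ulint space_id, ulint n_cols)
    {
        return flags != 0
            || space_id != 0
            || (n_cols & DICT_N_COLS_COMPACT) != 0;
    }
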
@@ -1280,11 +1286,10 @@ dict_check_sys_tables(
("name: %p, '%s'", table_name.m_name,
table_name.m_name));
- dict_sys_tables_rec_read(rec, table_name,
- &table_id, &space_id,
- &n_cols, &flags, &flags2);
- if (flags == ULINT_UNDEFINED
- || is_system_tablespace(space_id)) {
+ if (!dict_sys_tables_rec_read(rec, table_name,
+ &table_id, &space_id,
+ &n_cols, &flags, &flags2)
+ || space_id == TRX_SYS_SPACE) {
ut_free(table_name.m_name);
continue;
}
@@ -2091,6 +2096,8 @@ func_exit:
static const char* dict_load_index_del = "delete-marked record in SYS_INDEXES";
/** Error message for table->id mismatch in dict_load_index_low() */
static const char* dict_load_index_id_err = "SYS_INDEXES.TABLE_ID mismatch";
+/** Error message for SYS_TABLES flags mismatch in dict_load_table_low() */
+static const char* dict_load_table_flags = "incorrect flags in SYS_TABLES";
/** Load an index definition from a SYS_INDEXES record to dict_index_t.
If allocate=TRUE, we will create a dict_index_t structure and fill it
@@ -2539,11 +2546,9 @@ dict_load_table_low(table_name_t& name, const rec_t* rec, dict_table_t** table)
return(error_text);
}
- dict_sys_tables_rec_read(rec, name, &table_id, &space_id,
- &t_num, &flags, &flags2);
-
- if (flags == ULINT_UNDEFINED) {
- return("incorrect flags in SYS_TABLES");
+ if (!dict_sys_tables_rec_read(rec, name, &table_id, &space_id,
+ &t_num, &flags, &flags2)) {
+ return(dict_load_table_flags);
}
dict_table_decode_n_col(t_num, &n_cols, &n_v_col);
@@ -2857,8 +2862,9 @@ err_exit:
err_msg = dict_load_table_low(name, rec, &table);
if (err_msg) {
-
- ib::error() << err_msg;
+ if (err_msg != dict_load_table_flags) {
+ ib::error() << err_msg;
+ }
goto err_exit;
}
@@ -2914,21 +2920,6 @@ err_exit:
}
}
- /* We don't trust the table->flags2(retrieved from SYS_TABLES.MIX_LEN
- field) if the datafiles are from 3.23.52 version. To identify this
- version, we do the below check and reset the flags. */
- if (!DICT_TF2_FLAG_IS_SET(table, DICT_TF2_FTS_HAS_DOC_ID)
- && table->space == srv_sys_space.space_id()
- && table->flags == 0) {
- table->flags2 = 0;
- }
-
- DBUG_EXECUTE_IF("ib_table_invalid_flags",
- if(strcmp(table->name.m_name, "test/t1") == 0) {
- table->flags2 = 255;
- table->flags = 255;
- });
-
if (!dict_tf2_is_valid(table->flags, table->flags2)) {
ib::error() << "Table " << table->name << " in InnoDB"
" data dictionary contains invalid flags."
diff --git a/storage/innobase/dict/dict0stats.cc b/storage/innobase/dict/dict0stats.cc
index 54988b910d8..9350b5d400d 100644
--- a/storage/innobase/dict/dict0stats.cc
+++ b/storage/innobase/dict/dict0stats.cc
@@ -1159,10 +1159,11 @@ dict_stats_analyze_index_level(
leaf-level delete marks because delete marks on
non-leaf level do not make sense. */
- if (level == 0 && (srv_stats_include_delete_marked ? 0:
- rec_get_deleted_flag(
+ if (level == 0
+ && !srv_stats_include_delete_marked
+ && rec_get_deleted_flag(
rec,
- page_is_comp(btr_pcur_get_page(&pcur))))) {
+ page_is_comp(btr_pcur_get_page(&pcur)))) {
if (rec_is_last_on_page
&& !prev_rec_is_copied
@@ -1336,16 +1337,11 @@ dict_stats_analyze_index_level(
/* aux enum for controlling the behavior of dict_stats_scan_page() @{ */
enum page_scan_method_t {
- COUNT_ALL_NON_BORING_AND_SKIP_DEL_MARKED,/* scan all records on
- the given page and count the number of
- distinct ones, also ignore delete marked
- records */
- QUIT_ON_FIRST_NON_BORING,/* quit when the first record that differs
- from its right neighbor is found */
- COUNT_ALL_NON_BORING_INCLUDE_DEL_MARKED/* scan all records on
- the given page and count the number of
- distinct ones, include delete marked
- records */
+ /** scan the records on the given page, counting the number
+ of distinct ones; @see srv_stats_include_delete_marked */
+ COUNT_ALL_NON_BORING,
+ /** quit on the first record that differs from its right neighbor */
+ QUIT_ON_FIRST_NON_BORING
};
/* @} */
@@ -1392,13 +1388,10 @@ dict_stats_scan_page(
Because offsets1,offsets2 should be big enough,
this memory heap should never be used. */
mem_heap_t* heap = NULL;
- const rec_t* (*get_next)(const rec_t*);
-
- if (scan_method == COUNT_ALL_NON_BORING_AND_SKIP_DEL_MARKED) {
- get_next = page_rec_get_next_non_del_marked;
- } else {
- get_next = page_rec_get_next_const;
- }
+ const rec_t* (*get_next)(const rec_t*)
+ = srv_stats_include_delete_marked
+ ? page_rec_get_next_const
+ : page_rec_get_next_non_del_marked;
const bool should_count_external_pages = n_external_pages != NULL;
@@ -1618,9 +1611,7 @@ dict_stats_analyze_index_below_cur(
offsets_rec = dict_stats_scan_page(
&rec, offsets1, offsets2, index, page, n_prefix,
- srv_stats_include_delete_marked ?
- COUNT_ALL_NON_BORING_INCLUDE_DEL_MARKED:
- COUNT_ALL_NON_BORING_AND_SKIP_DEL_MARKED, n_diff,
+ COUNT_ALL_NON_BORING, n_diff,
n_external_pages);
#if 0
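
After the dict0stats.cc hunks above, the two COUNT_ALL_NON_BORING_* enum values collapse into one, and whether delete-marked records are skipped is decided once by picking a record iterator from srv_stats_include_delete_marked. A tiny stand-alone sketch of that selection; the record type and iterators are stubs:

    struct rec_t { const rec_t* next; bool delete_marked; };

    static const rec_t* get_next_any(const rec_t* r)
    {
        return r->next;
    }

    static const rec_t* get_next_not_delete_marked(const rec_t* r)
    {
        const rec_t* n = r->next;
        while (n && n->delete_marked) {
            n = n->next;
        }
        return n;
    }

    /* Chosen once per scan, like the function pointer in the hunk above. */
    const rec_t* (*pick_get_next(bool include_delete_marked))(const rec_t*)
    {
        return include_delete_marked ? get_next_any : get_next_not_delete_marked;
    }
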
diff --git a/storage/innobase/dict/dict0stats_bg.cc b/storage/innobase/dict/dict0stats_bg.cc
index 876d1bcb342..f2ef18b116d 100644
--- a/storage/innobase/dict/dict0stats_bg.cc
+++ b/storage/innobase/dict/dict0stats_bg.cc
@@ -38,8 +38,6 @@ Created Apr 25, 2012 Vasil Dimov
/** Minimum time interval between stats recalc for a given table */
#define MIN_RECALC_INTERVAL 10 /* seconds */
-#define SHUTTING_DOWN() (srv_shutdown_state != SRV_SHUTDOWN_NONE)
-
/** Event to wake up dict_stats_thread on dict_stats_recalc_pool_add()
or shutdown. Not protected by any mutex. */
os_event_t dict_stats_event;
@@ -120,6 +118,7 @@ background stats gathering thread. Only the table id is added to the
list, so the table can be closed after being enqueued and it will be
opened when needed. If the table does not exist later (has been DROPped),
then it will be removed from the pool and skipped. */
+static
void
dict_stats_recalc_pool_add(
/*=======================*/
@@ -147,6 +146,44 @@ dict_stats_recalc_pool_add(
os_event_set(dict_stats_event);
}
+/** Update the table modification counter and if necessary,
+schedule new estimates for table and index statistics to be calculated.
+@param[in,out] table persistent or temporary table */
+void
+dict_stats_update_if_needed(dict_table_t* table)
+{
+ ut_ad(table->stat_initialized);
+ ut_ad(!mutex_own(&dict_sys->mutex));
+
+ ulonglong counter = table->stat_modified_counter++;
+ ulonglong n_rows = dict_table_get_n_rows(table);
+
+ if (dict_stats_is_persistent_enabled(table)) {
+ if (counter > n_rows / 10 /* 10% */
+ && dict_stats_auto_recalc_is_enabled(table)) {
+
+ dict_stats_recalc_pool_add(table);
+ table->stat_modified_counter = 0;
+ }
+ return;
+ }
+
+ /* Calculate new statistics if 1 / 16 of table has been modified
+ since the last time a statistics batch was run.
+ We calculate statistics at most every 16th round, since we may have
+ a counter table which is very small and updated very often. */
+ ulonglong threshold = 16 + n_rows / 16; /* 6.25% */
+
+ if (srv_stats_modified_counter) {
+ threshold = std::min(srv_stats_modified_counter, threshold);
+ }
+
+ if (counter > threshold) {
+ /* this will reset table->stat_modified_counter to 0 */
+ dict_stats_update(table, DICT_STATS_RECALC_TRANSIENT);
+ }
+}
+
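
The dict_stats_update_if_needed() function added above encodes two thresholds: persistent statistics are re-queued once more than 10% of the rows have been modified, while transient statistics are recalculated after 16 + n_rows/16 modifications (about 6.25%, never fewer than 16), optionally capped by srv_stats_modified_counter. The arithmetic, as a stand-alone sketch:

    #include <algorithm>

    bool should_recalc_persistent(unsigned long long counter, unsigned long long n_rows)
    {
        return counter > n_rows / 10;                     /* 10% */
    }

    bool should_recalc_transient(unsigned long long counter, unsigned long long n_rows,
                                 unsigned long long modified_counter_cap /* 0 = unset */)
    {
        unsigned long long threshold = 16 + n_rows / 16;  /* ~6.25%, never below 16 */
        if (modified_counter_cap) {
            threshold = std::min(modified_counter_cap, threshold);
        }
        return counter > threshold;
    }
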
/*****************************************************************//**
Get a table from the auto recalc pool. The returned table id is removed
from the pool.
@@ -231,7 +268,6 @@ Initialize global variables needed for the operation of dict_stats_thread()
Must be called before dict_stats_thread() is started. */
void
dict_stats_thread_init()
-/*====================*/
{
ut_a(!srv_read_only_mode);
@@ -276,15 +312,9 @@ dict_stats_thread_deinit()
mutex_free(&recalc_pool_mutex);
-#ifdef UNIV_DEBUG
- os_event_destroy(dict_stats_disabled_event);
- dict_stats_disabled_event = NULL;
-#endif /* UNIV_DEBUG */
-
+ ut_d(os_event_destroy(dict_stats_disabled_event));
os_event_destroy(dict_stats_event);
os_event_destroy(dict_stats_shutdown_event);
- dict_stats_event = NULL;
- dict_stats_shutdown_event = NULL;
dict_stats_start_shutdown = false;
}
@@ -401,6 +431,7 @@ extern "C"
os_thread_ret_t
DECLARE_THREAD(dict_stats_thread)(void*)
{
+ my_thread_init();
ut_a(!srv_read_only_mode);
#ifdef UNIV_PFS_THREAD
@@ -452,7 +483,7 @@ DECLARE_THREAD(dict_stats_thread)(void*)
OS_THREAD_DUMMY_RETURN;
}
-/** Shutdown the dict stats thread. */
+/** Shut down the dict_stats_thread. */
void
dict_stats_shutdown()
{
diff --git a/storage/innobase/fil/fil0crypt.cc b/storage/innobase/fil/fil0crypt.cc
index 3216e6ef99e..7917cbb528b 100644
--- a/storage/innobase/fil/fil0crypt.cc
+++ b/storage/innobase/fil/fil0crypt.cc
@@ -831,7 +831,7 @@ fil_space_decrypt(
Calculate post encryption checksum
@param[in] page_size page size
@param[in] dst_frame Block where checksum is calculated
-@return page checksum or BUF_NO_CHECKSUM_MAGIC
+@return page checksum
not needed. */
UNIV_INTERN
uint32_t
@@ -839,34 +839,11 @@ fil_crypt_calculate_checksum(
const page_size_t& page_size,
const byte* dst_frame)
{
- uint32_t checksum = 0;
- srv_checksum_algorithm_t algorithm =
- static_cast<srv_checksum_algorithm_t>(srv_checksum_algorithm);
-
- if (!page_size.is_compressed()) {
- switch (algorithm) {
- case SRV_CHECKSUM_ALGORITHM_CRC32:
- case SRV_CHECKSUM_ALGORITHM_STRICT_CRC32:
- checksum = buf_calc_page_crc32(dst_frame);
- break;
- case SRV_CHECKSUM_ALGORITHM_INNODB:
- case SRV_CHECKSUM_ALGORITHM_STRICT_INNODB:
- checksum = (ib_uint32_t) buf_calc_page_new_checksum(
- dst_frame);
- break;
- case SRV_CHECKSUM_ALGORITHM_NONE:
- case SRV_CHECKSUM_ALGORITHM_STRICT_NONE:
- checksum = BUF_NO_CHECKSUM_MAGIC;
- break;
- /* no default so the compiler will emit a warning
- * if new enum is added and not handled here */
- }
- } else {
- checksum = page_zip_calc_checksum(dst_frame, page_size.physical(),
- algorithm);
- }
-
- return checksum;
+ /* For encrypted tables we use only crc32 and strict_crc32 */
+ return page_size.is_compressed()
+ ? page_zip_calc_checksum(dst_frame, page_size.physical(),
+ SRV_CHECKSUM_ALGORITHM_CRC32)
+ : buf_calc_page_crc32(dst_frame);
}
/***********************************************************************/
@@ -945,6 +922,34 @@ fil_crypt_needs_rotation(
return false;
}
+/** Read page 0 and possible crypt data from there.
+@param[in,out] space Tablespace */
+static inline
+void
+fil_crypt_read_crypt_data(fil_space_t* space)
+{
+ if (space->crypt_data || space->size) {
+ /* The encryption metadata has already been read, or
+ the tablespace is not encrypted and the file has been
+ opened already. */
+ return;
+ }
+
+ const page_size_t page_size(space->flags);
+ mtr_t mtr;
+ mtr.start();
+ if (buf_block_t* block = buf_page_get(page_id_t(space->id, 0),
+ page_size, RW_S_LATCH, &mtr)) {
+ mutex_enter(&fil_system->mutex);
+ if (!space->crypt_data) {
+ space->crypt_data = fil_space_read_crypt_data(
+ page_size, block->frame);
+ }
+ mutex_exit(&fil_system->mutex);
+ }
+ mtr.commit();
+}
+
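
fil_crypt_read_crypt_data() above is a lazy initialisation: bail out cheaply if the metadata is already present (or the file is already open and unencrypted), otherwise latch page 0 and publish the parsed crypt data under fil_system->mutex, so two rotation threads cannot both install it. A simplified sketch of that shape using standard mutexes and stand-in types (in the real hunk the frame is parsed while the mutex is held):

    #include <mutex>

    struct fil_space_crypt_t {};

    struct fil_space_t {
        fil_space_crypt_t* crypt_data = nullptr;
        unsigned long      size = 0;
    };

    static std::mutex fil_system_mutex;        /* stands in for fil_system->mutex */

    static fil_space_crypt_t* read_crypt_data_from_page0()
    {
        return new fil_space_crypt_t();        /* stands in for buf_page_get() + parse */
    }

    void lazy_read_crypt_data(fil_space_t* space)
    {
        if (space->crypt_data || space->size) {
            return;   /* already read, or file already open and not encrypted */
        }

        fil_space_crypt_t* fresh = read_crypt_data_from_page0();

        std::lock_guard<std::mutex> guard(fil_system_mutex);
        if (!space->crypt_data) {
            space->crypt_data = fresh;         /* publish once */
        } else {
            delete fresh;                      /* lost the race; discard */
        }
    }
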
/***********************************************************************
Start encrypting a space
@param[in,out] space Tablespace
@@ -955,6 +960,7 @@ fil_crypt_start_encrypting_space(
fil_space_t* space)
{
bool recheck = false;
+
mutex_enter(&fil_crypt_threads_mutex);
fil_space_crypt_t *crypt_data = space->crypt_data;
@@ -1097,12 +1103,12 @@ struct rotate_thread_t {
bool should_shutdown() const {
switch (srv_shutdown_state) {
case SRV_SHUTDOWN_NONE:
- case SRV_SHUTDOWN_CLEANUP:
return thread_no >= srv_n_fil_crypt_threads;
case SRV_SHUTDOWN_EXIT_THREADS:
/* srv_init_abort() must have been invoked */
- case SRV_SHUTDOWN_FLUSH_PHASE:
+ case SRV_SHUTDOWN_CLEANUP:
return true;
+ case SRV_SHUTDOWN_FLUSH_PHASE:
case SRV_SHUTDOWN_LAST_PHASE:
break;
}
@@ -1451,6 +1457,13 @@ fil_crypt_find_space_to_rotate(
}
while (!state->should_shutdown() && state->space) {
+ /* If there is no crypt data and we have not yet read
+ page 0 for this tablespace, we need to read it before
+ we can continue. */
+ if (!state->space->crypt_data) {
+ fil_crypt_read_crypt_data(state->space);
+ }
+
if (fil_crypt_space_needs_rotation(state, key_state, recheck)) {
ut_ad(key_state->key_id);
/* init state->min_key_version_found before
@@ -2152,8 +2165,10 @@ DECLARE_THREAD(fil_crypt_thread)(
while (!thr.should_shutdown() &&
fil_crypt_find_page_to_rotate(&new_state, &thr)) {
- /* rotate a (set) of pages */
- fil_crypt_rotate_pages(&new_state, &thr);
+ if (!thr.space->is_stopping()) {
+ /* rotate a (set) of pages */
+ fil_crypt_rotate_pages(&new_state, &thr);
+ }
/* If space is marked as stopping, release
space and stop rotation. */
@@ -2380,6 +2395,14 @@ fil_space_crypt_get_status(
memset(status, 0, sizeof(*status));
ut_ad(space->n_pending_ops > 0);
+
+ /* If there is no crypt data and we have not yet read
+ page 0 for this tablespace, we need to read it before
+ we can continue. */
+ if (!space->crypt_data) {
+ fil_crypt_read_crypt_data(const_cast<fil_space_t*>(space));
+ }
+
fil_space_crypt_t* crypt_data = space->crypt_data;
status->space = space->id;
@@ -2492,15 +2515,8 @@ fil_space_verify_crypt_checksum(
return false;
}
- srv_checksum_algorithm_t algorithm =
- static_cast<srv_checksum_algorithm_t>(srv_checksum_algorithm);
- /* If no checksum is used, can't continue checking. */
- if (algorithm == SRV_CHECKSUM_ALGORITHM_NONE) {
- return(true);
- }
-
/* Read stored post encryption checksum. */
- ib_uint32_t checksum = mach_read_from_4(
+ uint32_t checksum = mach_read_from_4(
page + FIL_PAGE_FILE_FLUSH_LSN_OR_KEY_VERSION + 4);
/* Declare empty pages non-corrupted */
diff --git a/storage/innobase/fil/fil0fil.cc b/storage/innobase/fil/fil0fil.cc
index ce417b1e511..2cbc863102c 100644
--- a/storage/innobase/fil/fil0fil.cc
+++ b/storage/innobase/fil/fil0fil.cc
@@ -167,13 +167,13 @@ UNIV_INTERN extern ib_mutex_t fil_crypt_threads_mutex;
/** Determine if the space id is a user tablespace id or not.
@param[in] space_id Space ID to check
@return true if it is a user tablespace ID */
-UNIV_INLINE
+inline
bool
-fil_is_user_tablespace_id(
- ulint space_id)
+fil_is_user_tablespace_id(ulint space_id)
{
- return(space_id > srv_undo_tablespaces_open
- && space_id != SRV_TMP_SPACE_ID);
+ return(space_id != TRX_SYS_SPACE
+ && space_id != SRV_TMP_SPACE_ID
+ && !srv_is_undo_tablespace(space_id));
}
#ifdef UNIV_DEBUG
@@ -655,12 +655,8 @@ retry:
FSP_HEADER_OFFSET + FSP_FREE + page);
/* Try to read crypt_data from page 0 if it is not yet
- read. FIXME: Remove page_0_crypt_read, and simply ensure in
- fil_space_t object creation that node->size==0 if and only
- if the crypt_data is not known and must be read. */
- if (!space->page_0_crypt_read) {
- space->page_0_crypt_read = true;
- ut_ad(space->crypt_data == NULL);
+ read. */
+ if (!space->crypt_data) {
space->crypt_data = fil_space_read_crypt_data(
page_size_t(space->flags), page);
}
@@ -673,7 +669,7 @@ retry:
if (cflags == ULINT_UNDEFINED) {
ib::error()
<< "Expected tablespace flags "
- << ib::hex(flags)
+ << ib::hex(space->flags)
<< " but found " << ib::hex(flags)
<< " in the file " << node->name;
return(false);
@@ -1591,7 +1587,6 @@ Error messages are issued to the server log.
@param[in] flags tablespace flags
@param[in] purpose tablespace purpose
@param[in,out] crypt_data encryption information
-@param[in] create_table whether this is CREATE TABLE
@param[in] mode encryption mode
@return pointer to created tablespace, to be filled in with fil_node_create()
@retval NULL on failure (such as when the same tablespace exists) */
@@ -1602,7 +1597,6 @@ fil_space_create(
ulint flags,
fil_type_t purpose,
fil_space_crypt_t* crypt_data,
- bool create_table,
fil_encryption_t mode)
{
fil_space_t* space;
@@ -1667,16 +1661,8 @@ fil_space_create(
space->magic_n = FIL_SPACE_MAGIC_N;
space->crypt_data = crypt_data;
- /* In create table we write page 0 so we have already
- "read" it and for system tablespaces we have read
- crypt data at startup. */
- if (create_table || crypt_data != NULL) {
- space->page_0_crypt_read = true;
- }
-
DBUG_LOG("tablespace",
- "Tablespace for space " << id << " name " << name
- << (create_table ? " created" : " opened"));
+ "Created metadata for " << id << " name " << name);
if (crypt_data) {
DBUG_LOG("crypt",
"Tablespace " << id << " name " << name
@@ -3796,7 +3782,7 @@ fil_ibd_create(
fil_encryption_t mode,
uint32_t key_id)
{
- os_file_t file;
+ pfs_os_file_t file;
dberr_t err;
byte* buf2;
byte* page;
@@ -4004,7 +3990,7 @@ fil_ibd_create(
}
space = fil_space_create(name, space_id, flags, FIL_TYPE_TABLESPACE,
- crypt_data, true, mode);
+ crypt_data, mode);
fil_node_t* node = NULL;
@@ -4282,12 +4268,14 @@ fil_ibd_open(
df_default.close();
tablespaces_found--;
}
+
if (df_dict.is_open() && !df_dict.is_valid()) {
df_dict.close();
/* Leave dict.filepath so that SYS_DATAFILES
can be corrected below. */
tablespaces_found--;
}
+
if (df_remote.is_open() && !df_remote.is_valid()) {
df_remote.close();
tablespaces_found--;
@@ -4370,7 +4358,7 @@ skip_validate:
space_name, id, flags, purpose,
df_remote.is_open() ? df_remote.get_crypt_info() :
df_dict.is_open() ? df_dict.get_crypt_info() :
- df_default.get_crypt_info(), false);
+ df_default.get_crypt_info());
/* We do not measure the size of the file, that is why
we pass the 0 below */
@@ -4690,7 +4678,7 @@ fil_ibd_load(
space = fil_space_create(
file.name(), space_id, flags, FIL_TYPE_TABLESPACE,
- file.get_crypt_info(), false);
+ file.get_crypt_info());
if (space == NULL) {
return(FIL_LOAD_INVALID);
@@ -5151,23 +5139,16 @@ fil_report_invalid_page_access(
ulint len, /*!< in: I/O length */
bool is_read) /*!< in: I/O type */
{
- ib::error()
- << "Trying to access page number " << block_offset << " in"
+ ib::fatal()
+ << "Trying to " << (is_read ? "read" : "write")
+ << " page number " << block_offset << " in"
" space " << space_id << ", space name " << space_name << ","
" which is outside the tablespace bounds. Byte offset "
- << byte_offset << ", len " << len << ", i/o type " <<
- (is_read ? "read" : "write")
- << ". If you get this error at mysqld startup, please check"
- " that your my.cnf matches the ibdata files that you have in"
- " the MySQL server.";
-
- ib::error() << "Server exits"
-#ifdef UNIV_DEBUG
- << " at " << __FILE__ << "[" << __LINE__ << "]"
-#endif
- << ".";
-
- _exit(1);
+ << byte_offset << ", len " << len <<
+ (space_id == 0 && !srv_was_started
+ ? "Please check that the configuration matches"
+ " the InnoDB system tablespace location (ibdata files)"
+ : "");
}
/** Reads or writes data. This operation could be asynchronous (aio).
@@ -5833,7 +5814,7 @@ fil_buf_block_init(
}
struct fil_iterator_t {
- os_file_t file; /*!< File handle */
+ pfs_os_file_t file; /*!< File handle */
const char* filepath; /*!< File path name */
os_offset_t start; /*!< From where to start */
os_offset_t end; /*!< Where to stop */
@@ -5875,18 +5856,15 @@ fil_iterate(
ut_ad(!srv_read_only_mode);
- /* For old style compressed tables we do a lot of useless copying
- for non-index pages. Unfortunately, it is required by
- buf_zip_decompress() */
-
- ulint read_type = IORequest::READ;
- ulint write_type = IORequest::WRITE;
+ /* TODO: For compressed tables we do a lot of useless
+ copying for non-index pages. Unfortunately, it is
+ required by buf_zip_decompress() */
+ const bool row_compressed
+ = callback.get_page_size().is_compressed();
for (offset = iter.start; offset < iter.end; offset += n_bytes) {
byte* io_buffer = iter.io_buffer;
- const bool row_compressed
- = callback.get_page_size().is_compressed();
block->frame = io_buffer;
@@ -5906,8 +5884,6 @@ fil_iterate(
/* Zip IO is done in the compressed page buffer. */
io_buffer = block->page.zip.data;
- } else {
- io_buffer = iter.io_buffer;
}
/* We have to read the exact number of bytes. Otherwise the
@@ -5920,22 +5896,14 @@ fil_iterate(
ut_ad(n_bytes > 0);
ut_ad(!(n_bytes % iter.page_size));
- dberr_t err = DB_SUCCESS;
- IORequest read_request(read_type);
-
- byte* readptr = io_buffer;
- byte* writeptr = io_buffer;
- bool encrypted = false;
-
+ const bool encrypted = iter.crypt_data != NULL
+ && iter.crypt_data->should_encrypt();
/* Use additional crypt io buffer if tablespace is encrypted */
- if (iter.crypt_data != NULL && iter.crypt_data->should_encrypt()) {
-
- encrypted = true;
- readptr = iter.crypt_io_buffer;
- writeptr = iter.crypt_io_buffer;
- }
-
- err = os_file_read(
+ byte* const readptr = encrypted
+ ? iter.crypt_io_buffer : io_buffer;
+ byte* const writeptr = readptr;
+ IORequest read_request(IORequest::READ);
+ dberr_t err = os_file_read(
read_request, iter.file, readptr, offset,
(ulint) n_bytes);
@@ -5960,9 +5928,9 @@ fil_iterate(
ulint page_type = mach_read_from_2(src+FIL_PAGE_TYPE);
- bool page_compressed =
- (page_type == FIL_PAGE_PAGE_COMPRESSED_ENCRYPTED
- || page_type == FIL_PAGE_PAGE_COMPRESSED);
+ const bool page_compressed
+ = page_type == FIL_PAGE_PAGE_COMPRESSED_ENCRYPTED
+ || page_type == FIL_PAGE_PAGE_COMPRESSED;
/* If tablespace is encrypted, we need to decrypt
the page. Note that tablespaces are not in
@@ -6064,8 +6032,7 @@ fil_iterate(
dict_table_page_compression_level(iter.table),
512,/* FIXME: use proper block size */
encrypted,
- &len,
- NULL);
+ &len);
if (len != size) {
memset(res+len, 0, size-len);
@@ -6107,7 +6074,7 @@ fil_iterate(
block->frame += iter.page_size;
}
- IORequest write_request(write_type);
+ IORequest write_request(IORequest::WRITE);
/* A page was updated in the set, write back to disk.
Note: We don't have the compression algorithm, we write
@@ -6144,7 +6111,7 @@ fil_tablespace_iterate(
PageCallback& callback)
{
dberr_t err;
- os_file_t file;
+ pfs_os_file_t file;
char* filepath;
bool success;
diff --git a/storage/innobase/fil/fil0pagecompress.cc b/storage/innobase/fil/fil0pagecompress.cc
index cb3e50745a7..0346a676a1f 100644
--- a/storage/innobase/fil/fil0pagecompress.cc
+++ b/storage/innobase/fil/fil0pagecompress.cc
@@ -93,17 +93,16 @@ fil_compress_page(
ulint level, /* in: compression level */
ulint block_size, /*!< in: block size */
bool encrypted, /*!< in: is page also encrypted */
- ulint* out_len, /*!< out: actual length of compressed
+ ulint* out_len) /*!< out: actual length of compressed
page */
- byte* lzo_mem) /*!< in: temporal memory used by LZO */
{
int err = Z_OK;
int comp_level = int(level);
ulint header_len = FIL_PAGE_DATA + FIL_PAGE_COMPRESSED_SIZE;
- ulint write_size=0;
+ ulint write_size = 0;
/* Cache to avoid change during function execution */
ulint comp_method = innodb_compression_algorithm;
- bool allocated=false;
+ bool allocated = false;
/* page_compression does not apply to tables or tablespaces
that use ROW_FORMAT=COMPRESSED */
@@ -115,13 +114,23 @@ fil_compress_page(
if (!out_buf) {
allocated = true;
- out_buf = static_cast<byte *>(ut_malloc_nokey(UNIV_PAGE_SIZE));
-#ifdef HAVE_LZO
+ ulint size = UNIV_PAGE_SIZE;
+
+ /* Both snappy and lzo compression methods require that
+ output buffer used for compression is bigger than input
+ buffer. Increase the allocated buffer size accordingly. */
+#if HAVE_SNAPPY
+ if (comp_method == PAGE_SNAPPY_ALGORITHM) {
+ size = snappy_max_compressed_length(size);
+ }
+#endif
+#if HAVE_LZO
if (comp_method == PAGE_LZO_ALGORITHM) {
- lzo_mem = static_cast<byte *>(ut_malloc_nokey(LZO1X_1_15_MEM_COMPRESS));
- memset(lzo_mem, 0, LZO1X_1_15_MEM_COMPRESS);
+ size += LZO1X_1_15_MEM_COMPRESS;
}
#endif
+
+ out_buf = static_cast<byte *>(ut_malloc_nokey(size));
}
ut_ad(buf);
@@ -173,7 +182,7 @@ fil_compress_page(
#ifdef HAVE_LZO
case PAGE_LZO_ALGORITHM:
err = lzo1x_1_15_compress(
- buf, len, out_buf+header_len, &write_size, lzo_mem);
+ buf, len, out_buf+header_len, &write_size, out_buf+UNIV_PAGE_SIZE);
if (err != LZO_E_OK || write_size > UNIV_PAGE_SIZE-header_len) {
goto err_exit;
@@ -229,6 +238,7 @@ fil_compress_page(
case PAGE_SNAPPY_ALGORITHM:
{
snappy_status cstatus;
+ write_size = snappy_max_compressed_length(UNIV_PAGE_SIZE);
cstatus = snappy_compress(
(const char *)buf,
@@ -380,11 +390,6 @@ err_exit:
exit_free:
if (allocated) {
ut_free(out_buf);
-#ifdef HAVE_LZO
- if (comp_method == PAGE_LZO_ALGORITHM) {
- ut_free(lzo_mem);
- }
-#endif
}
return (buf);
@@ -443,13 +448,14 @@ fil_decompress_page(
/* Before actual decompress, make sure that page type is correct */
- if (mach_read_from_4(buf+FIL_PAGE_SPACE_OR_CHKSUM) != BUF_NO_CHECKSUM_MAGIC) {
- ib::error() << "Corruption: We try to uncompress corrupted page:"
- << " CRC "
- << mach_read_from_4(buf+FIL_PAGE_SPACE_OR_CHKSUM)
- << " page_type "
- << mach_read_from_2(buf+FIL_PAGE_TYPE)
- << " page len " << len << ".";
+ if (mach_read_from_4(buf+FIL_PAGE_SPACE_OR_CHKSUM)
+ != BUF_NO_CHECKSUM_MAGIC
+ || (ptype != FIL_PAGE_PAGE_COMPRESSED
+ && ptype != FIL_PAGE_PAGE_COMPRESSED_ENCRYPTED)) {
+ ib::error() << "Corruption: We try to uncompress corrupted "
+ "page CRC "
+ << mach_read_from_4(buf+FIL_PAGE_SPACE_OR_CHKSUM)
+ << " type " << ptype << " len " << len << ".";
if (return_error) {
goto error_return;
@@ -515,7 +521,7 @@ fil_decompress_page(
#endif /* HAVE_LZ4 */
#ifdef HAVE_LZO
case PAGE_LZO_ALGORITHM: {
- ulint olen=0;
+ ulint olen = 0;
err = lzo1x_decompress((const unsigned char *)buf+header_len,
actual_size,(unsigned char *)in_buf, &olen, NULL);
@@ -586,7 +592,7 @@ fil_decompress_page(
case PAGE_SNAPPY_ALGORITHM:
{
snappy_status cstatus;
- ulint olen = 0;
+ ulint olen = UNIV_PAGE_SIZE;
cstatus = snappy_uncompress(
(const char *)(buf+header_len),
@@ -602,6 +608,7 @@ fil_decompress_page(
goto error_return;
}
}
+
break;
}
#endif /* HAVE_SNAPPY */
@@ -620,8 +627,7 @@ fil_decompress_page(
memcpy(buf, in_buf, len);
error_return:
- // Need to free temporal buffer if no buffer was given
- if (page_buf == NULL) {
+ if (page_buf != in_buf) {
ut_free(in_buf);
}
diff --git a/storage/innobase/fsp/fsp0fsp.cc b/storage/innobase/fsp/fsp0fsp.cc
index 57b6c8de825..bf86d83a8c7 100644
--- a/storage/innobase/fsp/fsp0fsp.cc
+++ b/storage/innobase/fsp/fsp0fsp.cc
@@ -690,17 +690,12 @@ fsp_header_init_fields(
flags);
}
-/** Initializes the space header of a new created space and creates also the
-insert buffer tree root if space == 0.
+/** Initialize a tablespace header.
@param[in] space_id space id
@param[in] size current size in blocks
-@param[in,out] mtr min-transaction
-@return true on success, otherwise false. */
-bool
-fsp_header_init(
- ulint space_id,
- ulint size,
- mtr_t* mtr)
+@param[in,out] mtr mini-transaction */
+void
+fsp_header_init(ulint space_id, ulint size, mtr_t* mtr)
{
fsp_header_t* header;
buf_block_t* block;
@@ -752,19 +747,9 @@ fsp_header_init(
fsp_fill_free_list(!is_system_tablespace(space_id),
space, header, mtr);
- if (space_id == srv_sys_space.space_id()) {
- if (btr_create(DICT_CLUSTERED | DICT_UNIVERSAL | DICT_IBUF,
- 0, univ_page_size, DICT_IBUF_ID_MIN + space_id,
- dict_ind_redundant, NULL, mtr) == FIL_NULL) {
- return(false);
- }
- }
-
if (space->crypt_data) {
space->crypt_data->write_page0(space, page, mtr);
}
-
- return(true);
}
/**********************************************************************//**
@@ -889,14 +874,12 @@ fsp_try_extend_data_file_with_pages(
@param[in,out] space tablespace
@param[in,out] header tablespace header
@param[in,out] mtr mini-transaction
-@return whether the tablespace was extended */
-static UNIV_COLD MY_ATTRIBUTE((nonnull))
+@return number of pages added
+@retval 0 if the tablespace was not extended */
+UNIV_COLD MY_ATTRIBUTE((nonnull))
+static
ulint
-fsp_try_extend_data_file(
- fil_space_t* space,
- fsp_header_t* header,
- mtr_t* mtr,
- ulint* n_pages_added)
+fsp_try_extend_data_file(fil_space_t* space, fsp_header_t* header, mtr_t* mtr)
{
ulint size; /* current number of pages in the datafile */
ulint size_increase; /* number of pages to extend this file */
@@ -919,7 +902,7 @@ fsp_try_extend_data_file(
<< " innodb_data_file_path.";
srv_sys_space.set_tablespace_full_status(true);
}
- return(false);
+ return(0);
} else if (fsp_is_system_temporary(space->id)
&& !srv_tmp_space.can_auto_extend_last_file()) {
@@ -933,7 +916,7 @@ fsp_try_extend_data_file(
<< " innodb_temp_data_file_path.";
srv_tmp_space.set_tablespace_full_status(true);
}
- return(false);
+ return(0);
}
size = mach_read_from_4(header + FSP_SIZE);
@@ -956,7 +939,7 @@ fsp_try_extend_data_file(
/* Let us first extend the file to extent_size */
if (!fsp_try_extend_data_file_with_pages(
space, extent_pages - 1, header, mtr)) {
- return(false);
+ return(0);
}
size = extent_pages;
@@ -966,16 +949,13 @@ fsp_try_extend_data_file(
}
if (size_increase == 0) {
-
- return(false);
+ return(0);
}
if (!fil_space_extend(space, size + size_increase)) {
- return(false);
+ return(0);
}
- *n_pages_added = size_increase;
-
/* We ignore any fragments of a full megabyte when storing the size
to the space header */
@@ -985,7 +965,7 @@ fsp_try_extend_data_file(
mlog_write_ulint(
header + FSP_SIZE, space->size_in_header, MLOG_4BYTES, mtr);
- return(true);
+ return(size_increase);
}
/** Calculate the number of pages to extend a datafile.
@@ -1077,8 +1057,7 @@ fsp_fill_free_list(
}
if (!skip_resize) {
- ulint n_pages = 0;
- fsp_try_extend_data_file(space, header, mtr, &n_pages);
+ fsp_try_extend_data_file(space, header, mtr);
size = space->size_in_header;
}
}
@@ -2096,7 +2075,6 @@ fseg_create_general(
inode = fsp_alloc_seg_inode(space, space_header, mtr);
if (inode == NULL) {
-
goto funct_exit;
}
@@ -2800,8 +2778,6 @@ fsp_reserve_free_extents(
ulint n_free_up;
ulint reserve;
size_t total_reserved = 0;
- ulint rounds = 0;
- ulint n_pages_added = 0;
ut_ad(mtr);
*n_reserved = n_ext;
@@ -2882,23 +2858,8 @@ try_again:
return(true);
}
try_to_extend:
- n_pages_added = 0;
-
- if (fsp_try_extend_data_file(space, space_header, mtr, &n_pages_added)) {
-
- rounds++;
- total_reserved += n_pages_added;
-
- if (rounds > 10) {
- ib::info() << "Space id: "
- << space << " trying to reserve: "
- << n_ext << " extents actually reserved: "
- << n_pages_added << " reserve: "
- << reserve << " free: " << n_free
- << " size: " << size
- << " rounds: " << rounds
- << " total_reserved: " << total_reserved << ".";
- }
+ if (ulint n = fsp_try_extend_data_file(space, space_header, mtr)) {
+ total_reserved += n;
goto try_again;
}
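
fsp_try_extend_data_file() now reports how many pages it added, with 0 doubling as the failure value, which lets the reservation loop above drop its rounds and n_pages_added bookkeeping. A small sketch of the calling convention (the extend function is a stub here):

    typedef unsigned long ulint;

    /* Stub standing in for fsp_try_extend_data_file(): returns the number
    of pages added, or 0 when the file could not be extended. */
    static ulint try_extend_data_file()
    {
        return 0;
    }

    ulint reserve_free_extents()
    {
        ulint total_reserved = 0;

        /* Mirrors "if (ulint n = fsp_try_extend_data_file(...))" above:
        a zero return ends the retry, any other value is accumulated. */
        while (ulint n = try_extend_data_file()) {
            total_reserved += n;
        }
        return total_reserved;
    }
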
diff --git a/storage/innobase/fsp/fsp0space.cc b/storage/innobase/fsp/fsp0space.cc
index 76269a749f9..7ca81898f70 100644
--- a/storage/innobase/fsp/fsp0space.cc
+++ b/storage/innobase/fsp/fsp0space.cc
@@ -132,8 +132,7 @@ Tablespace::open_or_create(bool is_temp)
m_name, m_space_id, FSP_FLAGS_PAGE_SSIZE(),
is_temp
? FIL_TYPE_TEMPORARY : FIL_TYPE_TABLESPACE,
- it->m_crypt_info,
- false);
+ it->m_crypt_info);
}
ut_a(fil_validate());
diff --git a/storage/innobase/fsp/fsp0sysspace.cc b/storage/innobase/fsp/fsp0sysspace.cc
index 974140fe565..e4bb11c9a22 100644
--- a/storage/innobase/fsp/fsp0sysspace.cc
+++ b/storage/innobase/fsp/fsp0sysspace.cc
@@ -834,17 +834,6 @@ SysTablespace::check_file_spec(
}
}
- /* We assume doublewirte blocks in the first data file. */
- if (err == DB_SUCCESS && *create_new_db
- && begin->m_size < TRX_SYS_DOUBLEWRITE_BLOCK_SIZE * 3) {
- ib::error() << "The " << name() << " data file "
- << "'" << begin->name() << "' must be at least "
- << TRX_SYS_DOUBLEWRITE_BLOCK_SIZE * 3 * UNIV_PAGE_SIZE
- / (1024 * 1024) << " MB";
-
- err = DB_ERROR;
- }
-
return(err);
}
@@ -941,8 +930,8 @@ SysTablespace::open_or_create(
space = fil_space_create(
name(), space_id(), flags(), is_temp
- ? FIL_TYPE_TEMPORARY : FIL_TYPE_TABLESPACE, m_crypt_info,
- false);
+ ? FIL_TYPE_TEMPORARY : FIL_TYPE_TABLESPACE,
+ m_crypt_info);
}
ut_a(fil_validate());
diff --git a/storage/innobase/fts/fts0fts.cc b/storage/innobase/fts/fts0fts.cc
index 79020fb4442..60cc3c91fef 100644
--- a/storage/innobase/fts/fts0fts.cc
+++ b/storage/innobase/fts/fts0fts.cc
@@ -1200,7 +1200,6 @@ fts_tokenizer_word_get(
/* If it is a stopword, do not index it */
if (!fts_check_token(text,
cache->stopword_info.cached_stopword,
- index_cache->index->is_ngram,
index_cache->charset)) {
return(NULL);
@@ -1716,21 +1715,6 @@ fts_drop_tables(
return(error);
}
-/** Extract only the required flags from table->flags2 for FTS Aux
-tables.
-@param[in] in_flags2 Table flags2
-@return extracted flags2 for FTS aux tables */
-static inline
-ulint
-fts_get_table_flags2_for_aux_tables(
- ulint flags2)
-{
- /* Extract the file_per_table flag & temporary file flag
- from the main FTS table flags2 */
- return((flags2 & DICT_TF2_USE_FILE_PER_TABLE) |
- (flags2 & DICT_TF2_TEMPORARY));
-}
-
/** Create dict_table_t object for FTS Aux tables.
@param[in] aux_table_name FTS Aux table name
@param[in] table table object of FTS Index
@@ -1745,7 +1729,9 @@ fts_create_in_mem_aux_table(
{
dict_table_t* new_table = dict_mem_table_create(
aux_table_name, table->space, n_cols, 0, table->flags,
- fts_get_table_flags2_for_aux_tables(table->flags2));
+ table->space == TRX_SYS_SPACE
+ ? 0 : table->space == SRV_TMP_SPACE_ID
+ ? DICT_TF2_TEMPORARY : DICT_TF2_USE_FILE_PER_TABLE);
if (DICT_TF_HAS_DATA_DIR(table->flags)) {
ut_ad(table->data_dir_path != NULL);
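
The nested conditional above replaces the dropped fts_get_table_flags2_for_aux_tables() helper: the auxiliary table takes its flags2 from the kind of tablespace its parent lives in. A sketch of the mapping; the constants below are illustrative assumptions, not the real values:

    typedef unsigned long ulint;

    static const ulint TRX_SYS_SPACE               = 0;
    static const ulint SRV_TMP_SPACE_ID            = 0xFFFFFFFEUL;  /* assumed */
    static const ulint DICT_TF2_TEMPORARY          = 1;             /* assumed */
    static const ulint DICT_TF2_USE_FILE_PER_TABLE = 2;             /* assumed */

    ulint fts_aux_table_flags2(ulint parent_space)
    {
        return parent_space == TRX_SYS_SPACE    ? 0
             : parent_space == SRV_TMP_SPACE_ID ? DICT_TF2_TEMPORARY
                                                : DICT_TF2_USE_FILE_PER_TABLE;
    }
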
@@ -3241,7 +3227,6 @@ fts_query_expansion_fetch_doc(
}
doc.charset = doc_charset;
- doc.is_ngram = result_doc->is_ngram;
if (dfield_is_ext(dfield)) {
/* We ignore columns that are stored externally, this
@@ -3347,7 +3332,6 @@ fts_fetch_doc_from_rec(
doc->found = TRUE;
doc->charset = get_doc->index_cache->charset;
- doc->is_ngram = index->is_ngram;
/* Null Field */
if (doc->text.f_len == UNIV_SQL_NULL || doc->text.f_len == 0) {
@@ -4379,13 +4363,10 @@ fts_sync_table(
return(err);
}
-/** Check fts token
-1. for ngram token, check whether the token contains any words in stopwords
-2. for non-ngram token, check if it's stopword or less than fts_min_token_size
+/** Check if a fts token is a stopword or less than fts_min_token_size
or greater than fts_max_token_size.
@param[in] token token string
@param[in] stopwords stopwords rb tree
-@param[in] is_ngram is ngram parser
@param[in] cs token charset
@retval true if it is not stopword and length in range
@retval false if it is stopword or lenght not in range */
@@ -4393,96 +4374,16 @@ bool
fts_check_token(
const fts_string_t* token,
const ib_rbt_t* stopwords,
- bool is_ngram,
const CHARSET_INFO* cs)
{
ut_ad(cs != NULL || stopwords == NULL);
- if (!is_ngram) {
- ib_rbt_bound_t parent;
-
- if (token->f_n_char < fts_min_token_size
- || token->f_n_char > fts_max_token_size
- || (stopwords != NULL
- && rbt_search(stopwords, &parent, token) == 0)) {
- return(false);
- } else {
- return(true);
- }
- }
-
- /* Check token for ngram. */
- DBUG_EXECUTE_IF(
- "fts_instrument_ignore_ngram_check",
- return(true);
- );
-
- /* We ignore fts_min_token_size when ngram */
- ut_ad(token->f_n_char > 0
- && token->f_n_char <= fts_max_token_size);
-
- if (stopwords == NULL) {
- return(true);
- }
-
- /*Ngram checks whether the token contains any words in stopwords.
- We can't simply use CONTAIN to search in stopwords, because it's
- built on COMPARE. So we need to tokenize the token into words
- from unigram to f_n_char, and check them separately. */
- for (ulint ngram_token_size = 1; ngram_token_size <= token->f_n_char;
- ngram_token_size ++) {
- const char* start;
- const char* next;
- const char* end;
- ulint char_len;
- ulint n_chars;
+ ib_rbt_bound_t parent;
- start = reinterpret_cast<char*>(token->f_str);
- next = start;
- end = start + token->f_len;
- n_chars = 0;
-
- while (next < end) {
- char_len = my_charlen(cs, next, end);
-
- if (next + char_len > end || char_len == 0) {
- break;
- } else {
- /* Skip SPACE */
- if (char_len == 1 && *next == ' ') {
- start = next + 1;
- next = start;
- n_chars = 0;
-
- continue;
- }
-
- next += char_len;
- n_chars++;
- }
-
- if (n_chars == ngram_token_size) {
- fts_string_t ngram_token;
- ngram_token.f_str =
- reinterpret_cast<byte*>(
- const_cast<char*>(start));
- ngram_token.f_len = next - start;
- ngram_token.f_n_char = ngram_token_size;
-
- ib_rbt_bound_t parent;
- if (rbt_search(stopwords, &parent,
- &ngram_token) == 0) {
- return(false);
- }
-
- /* Move a char forward */
- start += my_charlen(cs, start, end);
- n_chars = ngram_token_size - 1;
- }
- }
- }
-
- return(true);
+ return(token->f_n_char >= fts_min_token_size
+ && token->f_n_char <= fts_max_token_size
+ && (stopwords == NULL
+ || rbt_search(stopwords, &parent, token) != 0));
}
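
With the ngram special case removed, fts_check_token() above reduces to a length-range check plus a stopword lookup. A stand-alone sketch of the same predicate, with the rb-tree replaced by std::set:

    #include <set>
    #include <string>

    bool check_token(const std::string& token, std::size_t n_chars,
                     const std::set<std::string>* stopwords,
                     std::size_t min_size, std::size_t max_size)
    {
        return n_chars >= min_size
            && n_chars <= max_size
            && (stopwords == nullptr || stopwords->count(token) == 0);
    }
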
/** Add the token and its start position to the token's list of positions.
@@ -4499,8 +4400,7 @@ fts_add_token(
/* Ignore string whose character number is less than
"fts_min_token_size" or more than "fts_max_token_size" */
- if (fts_check_token(&str, NULL, result_doc->is_ngram,
- result_doc->charset)) {
+ if (fts_check_token(&str, NULL, result_doc->charset)) {
mem_heap_t* heap;
fts_string_t t_str;
@@ -7487,7 +7387,6 @@ fts_init_recover_doc(
}
doc.charset = get_doc->index_cache->charset;
- doc.is_ngram = get_doc->index_cache->index->is_ngram;
if (dfield_is_ext(dfield)) {
dict_table_t* table = cache->sync->table;
diff --git a/storage/innobase/fts/fts0plugin.cc b/storage/innobase/fts/fts0plugin.cc
index e78dcdacfb9..b7a05deeb34 100644
--- a/storage/innobase/fts/fts0plugin.cc
+++ b/storage/innobase/fts/fts0plugin.cc
@@ -130,6 +130,7 @@ fts_query_add_word_for_parser(
if (cur_node->type != FTS_AST_PARSER_PHRASE_LIST) {
break;
}
+ /* fall through */
case FT_TOKEN_WORD:
term_node = fts_ast_create_node_term_for_parser(
diff --git a/storage/innobase/fts/fts0que.cc b/storage/innobase/fts/fts0que.cc
index bc1d173cc29..594f337c978 100644
--- a/storage/innobase/fts/fts0que.cc
+++ b/storage/innobase/fts/fts0que.cc
@@ -2430,7 +2430,8 @@ fts_query_terms_in_document(
/*****************************************************************//**
Retrieve the document and match the phrase tokens.
@return DB_SUCCESS or error code */
-static MY_ATTRIBUTE((nonnull, warn_unused_result))
+MY_ATTRIBUTE((nonnull(1,2,3,6), warn_unused_result))
+static
dberr_t
fts_query_match_document(
/*=====================*/
@@ -2692,7 +2693,6 @@ fts_query_phrase_split(
if (fts_check_token(
&result_str,
cache->stopword_info.cached_stopword,
- query->index->is_ngram,
query->fts_index_table.charset)) {
/* Add the word to the RB tree so that we can
calculate it's frequencey within a document. */
@@ -4277,7 +4277,6 @@ fts_expand_query(
result_doc.charset = index_cache->charset;
result_doc.parser = index_cache->index->parser;
- result_doc.is_ngram = index_cache->index->is_ngram;
query->total_size += SIZEOF_RBT_CREATE;
diff --git a/storage/innobase/handler/ha_innodb.cc b/storage/innobase/handler/ha_innodb.cc
index c69075eac2c..767c2865528 100644
--- a/storage/innobase/handler/ha_innodb.cc
+++ b/storage/innobase/handler/ha_innodb.cc
@@ -1,6 +1,6 @@
/*****************************************************************************
-Copyright (c) 2000, 2016, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 2000, 2017, Oracle and/or its affiliates. All Rights Reserved.
Copyright (c) 2013, 2017, MariaDB Corporation.
Copyright (c) 2008, 2009 Google Inc.
Copyright (c) 2009, Percona Inc.
@@ -349,12 +349,12 @@ thd_destructor_proxy(void *)
while (trx_sys_any_active_transactions()) {
os_thread_sleep(1000);
}
-
- /* Some background threads might generate undo pages that will
- need to be purged, so they have to be shut down before purge
- threads if slow shutdown is requested. */
- srv_shutdown_bg_undo_sources();
}
+
+ /* Some background threads might generate undo pages that will
+ need to be purged, so they have to be shut down before purge
+ threads if slow shutdown is requested. */
+ srv_shutdown_bg_undo_sources();
srv_purge_wakeup();
destroy_thd(thd);
@@ -1448,14 +1448,11 @@ innobase_drop_database(
handlerton* hton,
char* path);
-/*******************************************************************//**
-Closes an InnoDB database. */
+/** Shut down the InnoDB storage engine.
+@return 0 */
static
int
-innobase_end(
-/*=========*/
- handlerton* hton, /* in: InnoDB handlerton */
- ha_panic_function type);
+innobase_end(handlerton*, ha_panic_function);
/*****************************************************************//**
Creates an InnoDB transaction struct for the thd if it does not yet have one.
@@ -2479,25 +2476,16 @@ Thread unsafe, can only be called from the thread owning the THD.
@return SQL statement string */
const char*
innobase_get_stmt_unsafe(
-/*=====================*/
THD* thd,
size_t* length)
{
- LEX_STRING* stmt;
- const char* query=NULL;
-
- stmt = thd ? thd_query_string(thd) : NULL;
- // MySQL 5.7
- //stmt = thd_query_unsafe(thd);
-
- if (stmt && stmt->str) {
+ if (const LEX_STRING *stmt = thd_query_string(thd)) {
*length = stmt->length;
- query = stmt->str;
- } else {
- *length = 0;
+ return stmt->str;
}
- return(query);
+ *length = 0;
+ return NULL;
}
/** Determines the current SQL statement.
@@ -2509,7 +2497,6 @@ into the provided buffer.
@return Length of the SQL statement */
size_t
innobase_get_stmt_safe(
-/*===================*/
THD* thd,
char* buf,
size_t buflen)
@@ -3761,10 +3748,7 @@ innobase_space_shutdown()
srv_tmp_space.shutdown();
#ifdef WITH_INNODB_DISALLOW_WRITES
- if (srv_allow_writes_event) {
- os_event_destroy(srv_allow_writes_event);
- srv_allow_writes_event = NULL;
- }
+ os_event_destroy(srv_allow_writes_event);
#endif /* WITH_INNODB_DISALLOW_WRITES */
DBUG_VOID_RETURN;
@@ -4587,21 +4571,13 @@ error:
DBUG_RETURN(1);
}
-/*******************************************************************//**
-Closes an InnoDB database.
-@return TRUE if error */
+/** Shut down the InnoDB storage engine.
+@return 0 */
static
int
-innobase_end(
-/*=========*/
- handlerton* hton, /*!< in/out: InnoDB handlerton */
- ha_panic_function type MY_ATTRIBUTE((unused)))
- /*!< in: ha_panic() parameter */
+innobase_end(handlerton*, ha_panic_function)
{
- int err= 0;
-
DBUG_ENTER("innobase_end");
- DBUG_ASSERT(hton == innodb_hton_ptr);
if (srv_was_started) {
THD *thd= current_thd;
@@ -4634,7 +4610,7 @@ innobase_end(
mysql_mutex_destroy(&pending_checkpoint_mutex);
}
- DBUG_RETURN(err);
+ DBUG_RETURN(0);
}
/*****************************************************************//**
@@ -6927,11 +6903,6 @@ ha_innobase::open(
static_cast<st_mysql_ftparser *>(
plugin_decl(parser)->info);
- index->is_ngram = strncmp(
- plugin_name(parser)->str,
- FTS_NGRAM_PARSER_NAME,
- plugin_name(parser)->length) == 0;
-
DBUG_EXECUTE_IF("fts_instrument_use_default_parser",
index->parser = &fts_default_parser;);
}
@@ -8525,8 +8496,8 @@ ha_innobase::innobase_lock_autoinc(void)
break;
}
}
- /* Fall through to old style locking. */
-
+ /* Use old style locking. */
+ /* fall through */
case AUTOINC_OLD_STYLE_LOCKING:
DBUG_EXECUTE_IF("die_if_autoinc_old_lock_style_used",
ut_ad(0););
@@ -9196,8 +9167,8 @@ calc_row_difference(
}
}
- if (o_len != n_len || (o_len != UNIV_SQL_NULL &&
- 0 != memcmp(o_ptr, n_ptr, o_len))) {
+ if (o_len != n_len || (o_len != 0 && o_len != UNIV_SQL_NULL
+ && 0 != memcmp(o_ptr, n_ptr, o_len))) {
/* The field has changed */
ufield = uvect->fields + n_changed;
@@ -12387,7 +12358,8 @@ create_table_info_t::create_options_are_invalid()
get_row_format_name(row_format));
ret = "ROW_FORMAT";
}
- /* FALLTRHOUGH */
+ /* ROW_FORMAT=DYNAMIC also shuns KEY_BLOCK_SIZE */
+ /* fall through */
case ROW_TYPE_COMPACT:
case ROW_TYPE_REDUNDANT:
if (has_key_block_size) {
@@ -12904,7 +12876,8 @@ index_bad:
break;
}
zip_allowed = false;
- /* fall through to set row_type = DYNAMIC */
+ /* Set ROW_FORMAT = COMPACT */
+ /* fall through */
case ROW_TYPE_NOT_USED:
case ROW_TYPE_FIXED:
case ROW_TYPE_PAGE:
@@ -12912,6 +12885,7 @@ index_bad:
m_thd, Sql_condition::WARN_LEVEL_WARN,
ER_ILLEGAL_HA_CREATE_OPTION,
"InnoDB: assuming ROW_FORMAT=DYNAMIC.");
+ /* fall through */
case ROW_TYPE_DYNAMIC:
innodb_row_format = REC_FORMAT_DYNAMIC;
break;
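
Several of the ha_innodb.cc hunks above only add explicit /* fall through */ comments so that intentional case fall-through is not flagged by -Wimplicit-fallthrough style warnings. A minimal illustration of the idiom (C++17 code could use the [[fallthrough]] attribute instead):

    int row_format_penalty(int row_type)
    {
        int penalty = 0;

        switch (row_type) {
        case 0:                /* e.g. a row format whose option is rejected */
            penalty += 10;
            /* fall through */ /* comment form recognised by GCC and Clang */
        case 1:                /* handling shared with case 0 */
            penalty += 1;
            break;
        default:
            break;
        }
        return penalty;
    }
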
diff --git a/storage/innobase/handler/ha_innodb.h b/storage/innobase/handler/ha_innodb.h
index 64147291338..717781fd65c 100644
--- a/storage/innobase/handler/ha_innodb.h
+++ b/storage/innobase/handler/ha_innodb.h
@@ -619,8 +619,6 @@ extern "C" void wsrep_thd_set_wsrep_last_query_id(THD *thd, query_id_t id);
extern const struct _ft_vft ft_vft_result;
-#define FTS_NGRAM_PARSER_NAME "ngram"
-
/** Structure Returned by ha_innobase::ft_init_ext() */
typedef struct new_ft_info
{
@@ -656,14 +654,11 @@ and returns true.
@return true if the index name matches the reserved name */
bool
innobase_index_name_is_reserved(
- THD* thd, /*!< in/out: MySQL connection */
- const KEY* key_info, /*!< in: Indexes to be
- created */
- ulint num_of_keys) /*!< in: Number of indexes to
- be created. */
- MY_ATTRIBUTE((warn_unused_result));
-
-extern const char reserved_file_per_table_space_name[];
+ THD* thd, /*!< in/out: MySQL connection */
+ const KEY* key_info, /*!< in: Indexes to be created */
+ ulint num_of_keys) /*!< in: Number of indexes to
+ be created. */
+ MY_ATTRIBUTE((nonnull(1), warn_unused_result));
#ifdef WITH_WSREP
//extern "C" int wsrep_trx_is_aborting(void *thd_ptr);
diff --git a/storage/innobase/handler/handler0alter.cc b/storage/innobase/handler/handler0alter.cc
index 60e742f27f7..416dd231e10 100644
--- a/storage/innobase/handler/handler0alter.cc
+++ b/storage/innobase/handler/handler0alter.cc
@@ -2203,7 +2203,6 @@ innobase_create_index_def(
memset(index->fields, 0, n_fields * sizeof *index->fields);
index->parser = NULL;
- index->is_ngram = false;
index->key_number = key_number;
index->n_fields = n_fields;
index->name = mem_heap_strdup(heap, key->name);
@@ -2237,12 +2236,6 @@ innobase_create_index_def(
static_cast<st_mysql_ftparser*>(
plugin_decl(parser)->info);
- index->is_ngram = strncmp(
- plugin_name(parser)->str,
- FTS_NGRAM_PARSER_NAME,
- plugin_name(parser)->length)
- == 0;
-
break;
}
}
@@ -2830,10 +2823,10 @@ online_retry_drop_indexes_with_trx(
@param drop_fk constraints being dropped
@param n_drop_fk number of constraints that are being dropped
@return whether the constraint is being dropped */
-inline MY_ATTRIBUTE((warn_unused_result))
+MY_ATTRIBUTE((pure, nonnull(1), warn_unused_result))
+inline
bool
innobase_dropping_foreign(
-/*======================*/
const dict_foreign_t* foreign,
dict_foreign_t** drop_fk,
ulint n_drop_fk)
@@ -2857,10 +2850,10 @@ column that is being dropped or modified to NOT NULL.
@retval true Not allowed (will call my_error())
@retval false Allowed
*/
-static MY_ATTRIBUTE((warn_unused_result))
+MY_ATTRIBUTE((pure, nonnull(1,4), warn_unused_result))
+static
bool
innobase_check_foreigns_low(
-/*========================*/
const dict_table_t* user_table,
dict_foreign_t** drop_fk,
ulint n_drop_fk,
@@ -2957,10 +2950,10 @@ column that is being dropped or modified to NOT NULL.
@retval true Not allowed (will call my_error())
@retval false Allowed
*/
-static MY_ATTRIBUTE((warn_unused_result))
+MY_ATTRIBUTE((pure, nonnull(1,2,3,4), warn_unused_result))
+static
bool
innobase_check_foreigns(
-/*====================*/
Alter_inplace_info* ha_alter_info,
const TABLE* altered_table,
const TABLE* old_table,
diff --git a/storage/innobase/handler/i_s.cc b/storage/innobase/handler/i_s.cc
index ced45b96ea2..36ecc0b8a29 100644
--- a/storage/innobase/handler/i_s.cc
+++ b/storage/innobase/handler/i_s.cc
@@ -1,7 +1,7 @@
/*****************************************************************************
-Copyright (c) 2007, 2016, Oracle and/or its affiliates.
-Copyrigth (c) 2014, 2017, MariaDB Corporation
+Copyright (c) 2007, 2016, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 2014, 2017, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -2971,14 +2971,16 @@ i_s_fts_deleted_generic_fill(
fields = table->field;
+ int ret = 0;
+
for (ulint j = 0; j < ib_vector_size(deleted->doc_ids); ++j) {
doc_id_t doc_id;
doc_id = *(doc_id_t*) ib_vector_get_const(deleted->doc_ids, j);
- OK(fields[I_S_FTS_DOC_ID]->store(doc_id, true));
+ BREAK_IF(ret = fields[I_S_FTS_DOC_ID]->store(doc_id, true));
- OK(schema_table_store_record(thd, table));
+ BREAK_IF(ret = schema_table_store_record(thd, table));
}
trx_free_for_background(trx);
@@ -2989,7 +2991,7 @@ i_s_fts_deleted_generic_fill(
rw_lock_s_unlock(dict_operation_lock);
- DBUG_RETURN(0);
+ DBUG_RETURN(ret);
}
/*******************************************************************//**
@@ -3229,13 +3231,13 @@ i_s_fts_index_cache_fill_one_index(
/*===============================*/
fts_index_cache_t* index_cache, /*!< in: FTS index cache */
THD* thd, /*!< in: thread */
+ fts_string_t* conv_str, /*!< in/out: buffer */
TABLE_LIST* tables) /*!< in/out: tables to fill */
{
TABLE* table = (TABLE*) tables->table;
Field** fields;
CHARSET_INFO* index_charset;
const ib_rbt_node_t* rbt_node;
- fts_string_t conv_str;
uint dummy_errors;
char* word_str;
@@ -3244,10 +3246,9 @@ i_s_fts_index_cache_fill_one_index(
fields = table->field;
index_charset = index_cache->charset;
- conv_str.f_len = system_charset_info->mbmaxlen
- * FTS_MAX_WORD_LEN_IN_CHAR;
- conv_str.f_str = static_cast<byte*>(ut_malloc_nokey(conv_str.f_len));
- conv_str.f_n_char = 0;
+ conv_str->f_n_char = 0;
+
+ int ret = 0;
/* Go through each word in the index cache */
for (rbt_node = rbt_first(index_cache->words);
@@ -3259,16 +3260,16 @@ i_s_fts_index_cache_fill_one_index(
/* Convert word from index charset to system_charset_info */
if (index_charset->cset != system_charset_info->cset) {
- conv_str.f_n_char = my_convert(
- reinterpret_cast<char*>(conv_str.f_str),
- static_cast<uint32>(conv_str.f_len),
+ conv_str->f_n_char = my_convert(
+ reinterpret_cast<char*>(conv_str->f_str),
+ static_cast<uint32>(conv_str->f_len),
system_charset_info,
reinterpret_cast<char*>(word->text.f_str),
static_cast<uint32>(word->text.f_len),
index_charset, &dummy_errors);
- ut_ad(conv_str.f_n_char <= conv_str.f_len);
- conv_str.f_str[conv_str.f_n_char] = 0;
- word_str = reinterpret_cast<char*>(conv_str.f_str);
+ ut_ad(conv_str->f_n_char <= conv_str->f_len);
+ conv_str->f_str[conv_str->f_n_char] = 0;
+ word_str = reinterpret_cast<char*>(conv_str->f_str);
} else {
word_str = reinterpret_cast<char*>(word->text.f_str);
}
@@ -3326,9 +3327,7 @@ i_s_fts_index_cache_fill_one_index(
}
}
- ut_free(conv_str.f_str);
-
- DBUG_RETURN(0);
+ DBUG_RETURN(ret);
}
/*******************************************************************//**
Fill the dynamic table INFORMATION_SCHEMA.INNODB_FT_INDEX_CACHED
@@ -3372,18 +3371,27 @@ i_s_fts_index_cache_fill(
ut_a(cache);
+ int ret = 0;
+ fts_string_t conv_str;
+ conv_str.f_len = system_charset_info->mbmaxlen
+ * FTS_MAX_WORD_LEN_IN_CHAR;
+ conv_str.f_str = static_cast<byte*>(ut_malloc_nokey(conv_str.f_len));
+
for (ulint i = 0; i < ib_vector_size(cache->indexes); i++) {
fts_index_cache_t* index_cache;
index_cache = static_cast<fts_index_cache_t*> (
ib_vector_get(cache->indexes, i));
- i_s_fts_index_cache_fill_one_index(index_cache, thd, tables);
+ BREAK_IF(ret = i_s_fts_index_cache_fill_one_index(
+ index_cache, thd, &conv_str, tables));
}
+ ut_free(conv_str.f_str);
+
dict_table_close(user_table, FALSE, FALSE);
- DBUG_RETURN(0);
+ DBUG_RETURN(ret);
}
/*******************************************************************//**
@@ -3685,8 +3693,6 @@ i_s_fts_index_table_fill_one_fetch(
}
}
- i_s_fts_index_table_free_one_fetch(words);
-
DBUG_RETURN(ret);
}
@@ -3700,12 +3706,12 @@ i_s_fts_index_table_fill_one_index(
/*===============================*/
dict_index_t* index, /*!< in: FTS index */
THD* thd, /*!< in: thread */
+ fts_string_t* conv_str, /*!< in/out: buffer */
TABLE_LIST* tables) /*!< in/out: tables to fill */
{
ib_vector_t* words;
mem_heap_t* heap;
CHARSET_INFO* index_charset;
- fts_string_t conv_str;
dberr_t error;
int ret = 0;
@@ -3718,10 +3724,6 @@ i_s_fts_index_table_fill_one_index(
sizeof(fts_word_t), 256);
index_charset = fts_index_get_charset(index);
- conv_str.f_len = system_charset_info->mbmaxlen
- * FTS_MAX_WORD_LEN_IN_CHAR;
- conv_str.f_str = static_cast<byte*>(ut_malloc_nokey(conv_str.f_len));
- conv_str.f_n_char = 0;
/* Iterate through each auxiliary table as described in
fts_index_selector */
@@ -3759,17 +3761,17 @@ i_s_fts_index_table_fill_one_index(
/* Fill into tables */
ret = i_s_fts_index_table_fill_one_fetch(
- index_charset, thd, tables, words, &conv_str, has_more);
+ index_charset, thd, tables, words, conv_str,
+ has_more);
+ i_s_fts_index_table_free_one_fetch(words);
if (ret != 0) {
- i_s_fts_index_table_free_one_fetch(words);
goto func_exit;
}
} while (has_more);
}
func_exit:
- ut_free(conv_str.f_str);
mem_heap_free(heap);
DBUG_RETURN(ret);
@@ -3811,10 +3813,17 @@ i_s_fts_index_table_fill(
DBUG_RETURN(0);
}
+ int ret = 0;
+ fts_string_t conv_str;
+ conv_str.f_len = system_charset_info->mbmaxlen
+ * FTS_MAX_WORD_LEN_IN_CHAR;
+ conv_str.f_str = static_cast<byte*>(ut_malloc_nokey(conv_str.f_len));
+
for (index = dict_table_get_first_index(user_table);
index; index = dict_table_get_next_index(index)) {
if (index->type & DICT_FTS) {
- i_s_fts_index_table_fill_one_index(index, thd, tables);
+ BREAK_IF(ret = i_s_fts_index_table_fill_one_index(
+ index, thd, &conv_str, tables));
}
}
@@ -3822,7 +3831,9 @@ i_s_fts_index_table_fill(
rw_lock_s_unlock(dict_operation_lock);
- DBUG_RETURN(0);
+ ut_free(conv_str.f_str);
+
+ DBUG_RETURN(ret);
}
/*******************************************************************//**
@@ -3988,6 +3999,8 @@ i_s_fts_config_fill(
DBUG_ASSERT(!dict_index_is_online_ddl(index));
}
+ int ret = 0;
+
while (fts_config_key[i]) {
fts_string_t value;
char* key_name;
@@ -4012,13 +4025,14 @@ i_s_fts_config_fill(
ut_free(key_name);
}
- OK(field_store_string(
- fields[FTS_CONFIG_KEY], fts_config_key[i]));
+ BREAK_IF(ret = field_store_string(
+ fields[FTS_CONFIG_KEY], fts_config_key[i]));
- OK(field_store_string(
- fields[FTS_CONFIG_VALUE], (const char*) value.f_str));
+ BREAK_IF(ret = field_store_string(
+ fields[FTS_CONFIG_VALUE],
+ reinterpret_cast<const char*>(value.f_str)));
- OK(schema_table_store_record(thd, table));
+ BREAK_IF(ret = schema_table_store_record(thd, table));
i++;
}
@@ -4031,7 +4045,7 @@ i_s_fts_config_fill(
rw_lock_s_unlock(dict_operation_lock);
- DBUG_RETURN(0);
+ DBUG_RETURN(ret);
}
/*******************************************************************//**
@@ -4889,15 +4903,14 @@ i_s_innodb_buffer_page_fill(
i_s_page_type[page_info->page_type].type_str));
OK(fields[IDX_BUFFER_PAGE_FLUSH_TYPE]->store(
- page_info->flush_type));
+ page_info->flush_type, true));
OK(fields[IDX_BUFFER_PAGE_FIX_COUNT]->store(
- page_info->fix_count));
+ page_info->fix_count, true));
#ifdef BTR_CUR_HASH_ADAPT
- OK(field_store_string(
- fields[IDX_BUFFER_PAGE_HASHED],
- page_info->hashed ? "YES" : "NO"));
+ OK(field_store_string(fields[IDX_BUFFER_PAGE_HASHED],
+ page_info->hashed ? "YES" : "NO"));
#endif /* BTR_CUR_HASH_ADAPT */
OK(fields[IDX_BUFFER_PAGE_NEWEST_MOD]->store(
@@ -4907,7 +4920,7 @@ i_s_innodb_buffer_page_fill(
page_info->oldest_mod, true));
OK(fields[IDX_BUFFER_PAGE_ACCESS_TIME]->store(
- page_info->access_time));
+ page_info->access_time, true));
fields[IDX_BUFFER_PAGE_TABLE_NAME]->set_null();
@@ -4916,32 +4929,36 @@ i_s_innodb_buffer_page_fill(
/* If this is an index page, fetch the index name
and table name */
if (page_info->page_type == I_S_PAGE_TYPE_INDEX) {
- const dict_index_t* index;
+ bool ret = false;
mutex_enter(&dict_sys->mutex);
- index = dict_index_get_if_in_cache_low(
- page_info->index_id);
-
- if (index) {
+ if (const dict_index_t* index =
+ dict_index_get_if_in_cache_low(
+ page_info->index_id)) {
table_name_end = innobase_convert_name(
table_name, sizeof(table_name),
index->table_name,
strlen(index->table_name),
thd);
- OK(fields[IDX_BUFFER_PAGE_TABLE_NAME]->store(
- table_name,
- uint(table_name_end - table_name),
- system_charset_info));
- fields[IDX_BUFFER_PAGE_TABLE_NAME]->set_notnull();
-
- OK(field_store_index_name(
- fields[IDX_BUFFER_PAGE_INDEX_NAME],
- index->name));
+ ret = fields[IDX_BUFFER_PAGE_TABLE_NAME]
+ ->store(table_name,
+ static_cast<uint>(
+ table_name_end
+ - table_name),
+ system_charset_info)
+ || field_store_index_name(
+ fields
+ [IDX_BUFFER_PAGE_INDEX_NAME],
+ index->name);
}
mutex_exit(&dict_sys->mutex);
+
+ OK(ret);
+
+ fields[IDX_BUFFER_PAGE_TABLE_NAME]->set_notnull();
}
OK(fields[IDX_BUFFER_PAGE_NUM_RECS]->store(
@@ -4991,32 +5008,29 @@ i_s_innodb_buffer_page_fill(
switch (page_info->io_fix) {
case BUF_IO_NONE:
- OK(field_store_string(fields[IDX_BUFFER_PAGE_IO_FIX],
- "IO_NONE"));
+ state_str = "IO_NONE";
break;
case BUF_IO_READ:
- OK(field_store_string(fields[IDX_BUFFER_PAGE_IO_FIX],
- "IO_READ"));
+ state_str = "IO_READ";
break;
case BUF_IO_WRITE:
- OK(field_store_string(fields[IDX_BUFFER_PAGE_IO_FIX],
- "IO_WRITE"));
+ state_str = "IO_WRITE";
break;
case BUF_IO_PIN:
- OK(field_store_string(fields[IDX_BUFFER_PAGE_IO_FIX],
- "IO_PIN"));
+ state_str = "IO_PIN";
break;
}
+ OK(field_store_string(fields[IDX_BUFFER_PAGE_IO_FIX],
+ state_str));
+
OK(field_store_string(fields[IDX_BUFFER_PAGE_IS_OLD],
(page_info->is_old) ? "YES" : "NO"));
OK(fields[IDX_BUFFER_PAGE_FREE_CLOCK]->store(
page_info->freed_page_clock, true));
- if (schema_table_store_record(thd, table)) {
- DBUG_RETURN(1);
- }
+ OK(schema_table_store_record(thd, table));
}
DBUG_RETURN(0);
@@ -5572,17 +5586,10 @@ i_s_innodb_buf_page_lru_fill(
ulint num_page) /*!< in: number of page info
cached */
{
- TABLE* table;
- Field** fields;
- mem_heap_t* heap;
-
DBUG_ENTER("i_s_innodb_buf_page_lru_fill");
- table = tables->table;
-
- fields = table->field;
-
- heap = mem_heap_create(1000);
+ TABLE* table = tables->table;
+ Field** fields = table->field;
/* Iterate through the cached array and fill the I_S table rows */
for (ulint i = 0; i < num_page; i++) {
@@ -5619,9 +5626,8 @@ i_s_innodb_buf_page_lru_fill(
page_info->fix_count, true));
#ifdef BTR_CUR_HASH_ADAPT
- OK(field_store_string(
- fields[IDX_BUF_LRU_PAGE_HASHED],
- page_info->hashed ? "YES" : "NO"));
+ OK(field_store_string(fields[IDX_BUF_LRU_PAGE_HASHED],
+ page_info->hashed ? "YES" : "NO"));
#endif /* BTR_CUR_HASH_ADAPT */
OK(fields[IDX_BUF_LRU_PAGE_NEWEST_MOD]->store(
@@ -5640,32 +5646,36 @@ i_s_innodb_buf_page_lru_fill(
/* If this is an index page, fetch the index name
and table name */
if (page_info->page_type == I_S_PAGE_TYPE_INDEX) {
- const dict_index_t* index;
+ bool ret = false;
mutex_enter(&dict_sys->mutex);
- index = dict_index_get_if_in_cache_low(
- page_info->index_id);
-
- if (index) {
+ if (const dict_index_t* index =
+ dict_index_get_if_in_cache_low(
+ page_info->index_id)) {
table_name_end = innobase_convert_name(
table_name, sizeof(table_name),
index->table_name,
strlen(index->table_name),
thd);
- OK(fields[IDX_BUF_LRU_PAGE_TABLE_NAME]->store(
- table_name,
- uint(table_name_end - table_name),
- system_charset_info));
- fields[IDX_BUF_LRU_PAGE_TABLE_NAME]->set_notnull();
-
- OK(field_store_index_name(
- fields[IDX_BUF_LRU_PAGE_INDEX_NAME],
- index->name));
+ ret = fields[IDX_BUF_LRU_PAGE_TABLE_NAME]
+ ->store(table_name,
+ static_cast<uint>(
+ table_name_end
+ - table_name),
+ system_charset_info)
+ || field_store_index_name(
+ fields
+ [IDX_BUF_LRU_PAGE_INDEX_NAME],
+ index->name);
}
mutex_exit(&dict_sys->mutex);
+
+ OK(ret);
+
+ fields[IDX_BUF_LRU_PAGE_TABLE_NAME]->set_notnull();
}
OK(fields[IDX_BUF_LRU_PAGE_NUM_RECS]->store(
@@ -5675,8 +5685,8 @@ i_s_innodb_buf_page_lru_fill(
page_info->data_size, true));
OK(fields[IDX_BUF_LRU_PAGE_ZIP_SIZE]->store(
- page_info->zip_ssize ?
- 512 << page_info->zip_ssize : 0, true));
+ page_info->zip_ssize
+ ? 512 << page_info->zip_ssize : 0, true));
state = static_cast<enum buf_page_state>(page_info->page_state);
@@ -5705,35 +5715,31 @@ i_s_innodb_buf_page_lru_fill(
switch (page_info->io_fix) {
case BUF_IO_NONE:
- OK(field_store_string(fields[IDX_BUF_LRU_PAGE_IO_FIX],
- "IO_NONE"));
+ state_str = "IO_NONE";
break;
case BUF_IO_READ:
- OK(field_store_string(fields[IDX_BUF_LRU_PAGE_IO_FIX],
- "IO_READ"));
+ state_str = "IO_READ";
break;
case BUF_IO_WRITE:
- OK(field_store_string(fields[IDX_BUF_LRU_PAGE_IO_FIX],
- "IO_WRITE"));
+ state_str = "IO_WRITE";
+ break;
+ case BUF_IO_PIN:
+ state_str = "IO_PIN";
break;
}
+ OK(field_store_string(fields[IDX_BUF_LRU_PAGE_IO_FIX],
+ state_str));
+
OK(field_store_string(fields[IDX_BUF_LRU_PAGE_IS_OLD],
- (page_info->is_old) ? "YES" : "NO"));
+ page_info->is_old ? "YES" : "NO"));
OK(fields[IDX_BUF_LRU_PAGE_FREE_CLOCK]->store(
page_info->freed_page_clock, true));
- if (schema_table_store_record(thd, table)) {
- mem_heap_free(heap);
- DBUG_RETURN(1);
- }
-
- mem_heap_empty(heap);
+ OK(schema_table_store_record(thd, table));
}
- mem_heap_free(heap);
-
DBUG_RETURN(0);
}
@@ -6082,10 +6088,10 @@ i_s_dict_fill_sys_tables(
OK(field_store_string(fields[SYS_TABLES_ROW_FORMAT], row_format));
- OK(fields[SYS_TABLES_ZIP_PAGE_SIZE]->store(static_cast<double>(
+ OK(fields[SYS_TABLES_ZIP_PAGE_SIZE]->store(
page_size.is_compressed()
? page_size.physical()
- : 0)));
+ : 0, true));
OK(field_store_string(fields[SYS_TABLES_SPACE_TYPE], space_type));
@@ -6385,7 +6391,7 @@ i_s_dict_fill_sys_tablestats(
OK(fields[SYS_TABLESTATS_AUTONINC]->store(table->autoinc, true));
- OK(fields[SYS_TABLESTATS_TABLE_REF_COUNT]->store(static_cast<double>(ref_count)));
+ OK(fields[SYS_TABLESTATS_TABLE_REF_COUNT]->store(ref_count, true));
OK(schema_table_store_record(thd, table_to_fill));
@@ -7321,11 +7327,11 @@ i_s_dict_fill_sys_fields(
fields = table_to_fill->field;
- OK(fields[SYS_FIELD_INDEX_ID]->store((longlong) index_id, TRUE));
+ OK(fields[SYS_FIELD_INDEX_ID]->store(index_id, true));
OK(field_store_string(fields[SYS_FIELD_NAME], field->name));
- OK(fields[SYS_FIELD_POS]->store(static_cast<double>(pos)));
+ OK(fields[SYS_FIELD_POS]->store(pos, true));
OK(schema_table_store_record(thd, table_to_fill));
diff --git a/storage/innobase/handler/i_s.h b/storage/innobase/handler/i_s.h
index 8d34fbf8fbb..e07fe49f7fa 100644
--- a/storage/innobase/handler/i_s.h
+++ b/storage/innobase/handler/i_s.h
@@ -1,7 +1,7 @@
/*****************************************************************************
Copyright (c) 2007, 2015, Oracle and/or its affiliates. All Rights Reserved.
-Copyrigth (c) 2014, 2017, MariaDB Corporation
+Copyright (c) 2014, 2017, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -73,6 +73,8 @@ extern struct st_maria_plugin i_s_innodb_sys_semaphore_waits;
DBUG_RETURN(1); \
}
+#define BREAK_IF(expr) if ((expr)) break
+
#define RETURN_IF_INNODB_NOT_STARTED(plugin_name) \
do { \
if (!srv_was_started) { \
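
[Editorial note] The new BREAK_IF() macro lets the INFORMATION_SCHEMA fill loops above record a failure in ret and leave the loop, so locks and buffers are still released on the single exit path, instead of returning 1 immediately as OK() does. A minimal standalone sketch of the control flow (store_row() is a hypothetical stand-in for schema_table_store_record()):

#include <cstdio>

#define BREAK_IF(expr) if ((expr)) break	/* as defined in i_s.h */

/* hypothetical row store that fails on the 4th row */
static int store_row(int i)
{
	return i == 3;
}

static int fill_table()
{
	int	ret = 0;

	for (int i = 0; i < 10; i++) {
		BREAK_IF(ret = store_row(i));
	}

	/* cleanup (ut_free(), rw_lock_s_unlock(), ...) would go here,
	   reached on success and on failure alike */
	return ret;
}

int main()
{
	std::printf("fill_table() = %d\n", fill_table());	/* prints 1 */
	return 0;
}
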
diff --git a/storage/innobase/ibuf/ibuf0ibuf.cc b/storage/innobase/ibuf/ibuf0ibuf.cc
index 014d9f1a1a1..b4cbe7d4480 100644
--- a/storage/innobase/ibuf/ibuf0ibuf.cc
+++ b/storage/innobase/ibuf/ibuf0ibuf.cc
@@ -3601,7 +3601,7 @@ fail_exit:
if (mode == BTR_MODIFY_PREV) {
err = btr_cur_optimistic_insert(
- BTR_NO_LOCKING_FLAG,
+ BTR_NO_LOCKING_FLAG | BTR_NO_UNDO_LOG_FLAG,
cursor, &offsets, &offsets_heap,
ibuf_entry, &ins_rec,
&dummy_big_rec, 0, thr, &mtr);
diff --git a/storage/innobase/include/btr0cur.h b/storage/innobase/include/btr0cur.h
index e1f5286e122..e62a5e90ce2 100644
--- a/storage/innobase/include/btr0cur.h
+++ b/storage/innobase/include/btr0cur.h
@@ -249,15 +249,17 @@ btr_cur_optimistic_insert(
btr_cur_t* cursor, /*!< in: cursor on page after which to insert;
cursor stays valid */
ulint** offsets,/*!< out: offsets on *rec */
- mem_heap_t** heap, /*!< in/out: pointer to memory heap, or NULL */
+ mem_heap_t** heap, /*!< in/out: pointer to memory heap */
dtuple_t* entry, /*!< in/out: entry to insert */
rec_t** rec, /*!< out: pointer to inserted record if
succeed */
big_rec_t** big_rec,/*!< out: big rec vector whose fields have to
- be stored externally by the caller, or
- NULL */
+ be stored externally by the caller */
ulint n_ext, /*!< in: number of externally stored columns */
- que_thr_t* thr, /*!< in: query thread or NULL */
+ que_thr_t* thr, /*!< in/out: query thread; can be NULL if
+ !(~flags
+ & (BTR_NO_LOCKING_FLAG
+ | BTR_NO_UNDO_LOG_FLAG)) */
mtr_t* mtr) /*!< in/out: mini-transaction;
if this function returns DB_SUCCESS on
a leaf page of a secondary index in a
@@ -284,15 +286,17 @@ btr_cur_pessimistic_insert(
cursor stays valid */
ulint** offsets,/*!< out: offsets on *rec */
mem_heap_t** heap, /*!< in/out: pointer to memory heap
- that can be emptied, or NULL */
+ that can be emptied */
dtuple_t* entry, /*!< in/out: entry to insert */
rec_t** rec, /*!< out: pointer to inserted record if
succeed */
big_rec_t** big_rec,/*!< out: big rec vector whose fields have to
- be stored externally by the caller, or
- NULL */
+ be stored externally by the caller */
ulint n_ext, /*!< in: number of externally stored columns */
- que_thr_t* thr, /*!< in: query thread or NULL */
+ que_thr_t* thr, /*!< in/out: query thread; can be NULL if
+ !(~flags
+ & (BTR_NO_LOCKING_FLAG
+ | BTR_NO_UNDO_LOG_FLAG)) */
mtr_t* mtr) /*!< in/out: mini-transaction */
MY_ATTRIBUTE((nonnull(2,3,4,5,6,7,10), warn_unused_result));
/*************************************************************//**
@@ -413,12 +417,12 @@ btr_cur_pessimistic_update(
ulint** offsets,/*!< out: offsets on cursor->page_cur.rec */
mem_heap_t** offsets_heap,
/*!< in/out: pointer to memory heap
- that can be emptied, or NULL */
+ that can be emptied */
mem_heap_t* entry_heap,
/*!< in/out: memory heap for allocating
big_rec and the index tuple */
big_rec_t** big_rec,/*!< out: big rec vector whose fields have to
- be stored externally by the caller, or NULL */
+ be stored externally by the caller */
upd_t* update, /*!< in/out: update vector; this is allowed to
also contain trx id and roll ptr fields.
Non-updated columns that are moved offpage will
@@ -439,7 +443,6 @@ undo log record created.
dberr_t
btr_cur_del_mark_set_clust_rec(
/*===========================*/
- ulint flags, /*!< in: undo logging and locking flags */
buf_block_t* block, /*!< in/out: buffer block of the record */
rec_t* rec, /*!< in/out: record */
dict_index_t* index, /*!< in: clustered index of the record */
diff --git a/storage/innobase/include/btr0defragment.h b/storage/innobase/include/btr0defragment.h
index 21ba6d9f426..9c78ec412a2 100644
--- a/storage/innobase/include/btr0defragment.h
+++ b/storage/innobase/include/btr0defragment.h
@@ -1,7 +1,7 @@
/*****************************************************************************
Copyright (C) 2013, 2014 Facebook, Inc. All Rights Reserved.
-Copyright (C) 2014, 2015, MariaDB Corporation.
+Copyright (C) 2014, 2017, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -85,13 +85,13 @@ UNIV_INTERN
void
btr_defragment_save_defrag_stats_if_needed(
dict_index_t* index); /*!< in: index */
-/******************************************************************//**
-Thread that merges consecutive b-tree pages into fewer pages to defragment
-the index. */
+
+/** Merge consecutive b-tree pages into fewer pages to defragment indexes */
extern "C" UNIV_INTERN
os_thread_ret_t
-DECLARE_THREAD(btr_defragment_thread)(
-/*==========================================*/
- void* arg); /*!< in: a dummy parameter required by
- os_thread_create */
+DECLARE_THREAD(btr_defragment_thread)(void*);
+
+/** Whether btr_defragment_thread is active */
+extern bool btr_defragment_thread_active;
+
#endif
diff --git a/storage/innobase/include/buf0buf.h b/storage/innobase/include/buf0buf.h
index 928521e789e..9d5f373f5de 100644
--- a/storage/innobase/include/buf0buf.h
+++ b/storage/innobase/include/buf0buf.h
@@ -1519,20 +1519,13 @@ directory (buf) to see it. Do not use from outside! */
typedef struct {
bool reserved; /*!< true if this slot is reserved
*/
-#ifdef HAVE_LZO
- byte* lzo_mem; /*!< Temporal memory used by LZO */
-#endif
byte* crypt_buf; /*!< for encryption the data needs to be
copied to a separate buffer before it's
encrypted&written. this as a page can be
read while it's being flushed */
- byte* crypt_buf_free; /*!< for encryption, allocated buffer
- that is then alligned */
byte* comp_buf; /*!< for compression we need
temporal buffer because page
can be read while it's being flushed */
- byte* comp_buf_free; /*!< for compression, allocated
- buffer that is then alligned */
byte* out_buf; /*!< resulting buffer after
encryption/compression. This is a
pointer and not allocated. */
diff --git a/storage/innobase/include/buf0buf.ic b/storage/innobase/include/buf0buf.ic
index f22dcc48a01..38c52d5e608 100644
--- a/storage/innobase/include/buf0buf.ic
+++ b/storage/innobase/include/buf0buf.ic
@@ -1286,9 +1286,8 @@ buf_page_release_zip(
rw_lock_s_unlock(&block->debug_latch);
}
}
- /* Fall through */
#endif /* UNIV_DEBUG */
-
+ /* Fall through */
case BUF_BLOCK_ZIP_PAGE:
case BUF_BLOCK_ZIP_DIRTY:
buf_block_unfix(reinterpret_cast<buf_block_t*>(bpage));
diff --git a/storage/innobase/include/buf0dblwr.h b/storage/innobase/include/buf0dblwr.h
index e1ecb6baf56..598609e2be4 100644
--- a/storage/innobase/include/buf0dblwr.h
+++ b/storage/innobase/include/buf0dblwr.h
@@ -1,6 +1,6 @@
/*****************************************************************************
-Copyright (c) 1995, 2016, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 1995, 2017, Oracle and/or its affiliates. All Rights Reserved.
Copyright (c) 2017, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify it under
@@ -38,24 +38,27 @@ extern buf_dblwr_t* buf_dblwr;
/** Set to TRUE when the doublewrite buffer is being created */
extern ibool buf_dblwr_being_created;
-/****************************************************************//**
-Creates the doublewrite buffer to a new InnoDB installation. The header of the
-doublewrite buffer is placed on the trx system header page.
-@return true if successful, false if not. */
+/** Create the doublewrite buffer if the doublewrite buffer header
+is not present in the TRX_SYS page.
+@return whether the operation succeeded
+@retval true if the doublewrite buffer exists or was created
+@retval false if the creation failed (too small first data file) */
MY_ATTRIBUTE((warn_unused_result))
bool
buf_dblwr_create();
-/****************************************************************//**
-At a database startup initializes the doublewrite buffer memory structure if
+/**
+At database startup initializes the doublewrite buffer memory structure if
we already have a doublewrite buffer created in the data files. If we are
upgrading to an InnoDB version which supports multiple tablespaces, then this
function performs the necessary update operations. If we are in a crash
recovery, this function loads the pages from double write buffer into memory.
+@param[in] file File handle
+@param[in] path Path name of file
@return DB_SUCCESS or error code */
dberr_t
buf_dblwr_init_or_load_pages(
- os_file_t file,
+ pfs_os_file_t file,
const char* path);
/** Process and remove the double write buffer pages for all tablespaces. */
diff --git a/storage/innobase/include/data0type.ic b/storage/innobase/include/data0type.ic
index 9ec85dfaf50..b5a467455b9 100644
--- a/storage/innobase/include/data0type.ic
+++ b/storage/innobase/include/data0type.ic
@@ -527,7 +527,8 @@ dtype_get_fixed_size_low(
return(len);
}
}
- /* fall through for variable-length charsets */
+ /* Treat as variable-length. */
+ /* Fall through */
case DATA_VARCHAR:
case DATA_BINARY:
case DATA_DECIMAL:
diff --git a/storage/innobase/include/dict0mem.h b/storage/innobase/include/dict0mem.h
index f35a7810736..093a036b331 100644
--- a/storage/innobase/include/dict0mem.h
+++ b/storage/innobase/include/dict0mem.h
@@ -300,9 +300,8 @@ ROW_FORMAT=REDUNDANT. InnoDB engines do not check these flags
for unknown bits in order to protect backward incompatibility. */
/* @{ */
/** Total number of bits in table->flags2. */
-#define DICT_TF2_BITS 9
-#define DICT_TF2_UNUSED_BIT_MASK (~0U << DICT_TF2_BITS | \
- 1U << DICT_TF_POS_SHARED_SPACE)
+#define DICT_TF2_BITS 7
+#define DICT_TF2_UNUSED_BIT_MASK (~0U << DICT_TF2_BITS)
#define DICT_TF2_BIT_MASK ~DICT_TF2_UNUSED_BIT_MASK
/** TEMPORARY; TRUE for tables from CREATE TEMPORARY TABLE. */
@@ -924,8 +923,6 @@ struct dict_index_t{
dict_field_t* fields; /*!< array of field descriptions */
st_mysql_ftparser*
parser; /*!< fulltext parser plugin */
- bool is_ngram;
- /*!< true if it's ngram parser */
bool has_new_v_col;
/*!< whether it has a newly added virtual
column in ALTER */
@@ -1426,8 +1423,6 @@ struct dict_table_t {
5 whether the table is being created its own tablespace,
6 whether the table has been DISCARDed,
7 whether the aux FTS tables names are in hex.
- 8 whether the table is instinc table.
- 9 whether the table has encryption setting.
Use DICT_TF2_FLAG_IS_SET() to parse this flag. */
unsigned flags2:DICT_TF2_BITS;
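
[Editorial note] With DICT_TF2_BITS cut from 9 to 7 and the shared-tablespace bit gone, the unused-bit mask reduces to the complement of the low seven bits. A small standalone check of that arithmetic (assuming 32-bit unsigned int):

#include <cassert>
#include <cstdio>

#define DICT_TF2_BITS			7
#define DICT_TF2_UNUSED_BIT_MASK	(~0U << DICT_TF2_BITS)
#define DICT_TF2_BIT_MASK		~DICT_TF2_UNUSED_BIT_MASK

int main()
{
	/* bits 0..6 are valid flags2 bits, bits 7..31 must stay clear */
	assert(DICT_TF2_UNUSED_BIT_MASK == 0xffffff80U);
	assert((DICT_TF2_BIT_MASK & 0xffU) == 0x7fU);
	std::printf("flags2 uses 0x%02x\n", DICT_TF2_BIT_MASK & 0xffU);
	return 0;
}
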
diff --git a/storage/innobase/include/dict0stats.h b/storage/innobase/include/dict0stats.h
index 752c197f8c3..8846aeda7fd 100644
--- a/storage/innobase/include/dict0stats.h
+++ b/storage/innobase/include/dict0stats.h
@@ -110,6 +110,13 @@ dict_stats_deinit(
dict_table_t* table) /*!< in/out: table */
MY_ATTRIBUTE((nonnull));
+/** Update the table modification counter and if necessary,
+schedule new estimates for table and index statistics to be calculated.
+@param[in,out] table persistent or temporary table */
+void
+dict_stats_update_if_needed(dict_table_t* table)
+ MY_ATTRIBUTE((nonnull));
+
/*********************************************************************//**
Calculates new estimates for table and index statistics. The statistics
are used in query optimization.
diff --git a/storage/innobase/include/dict0stats_bg.h b/storage/innobase/include/dict0stats_bg.h
index b7bf1b0c170..f99cbeed780 100644
--- a/storage/innobase/include/dict0stats_bg.h
+++ b/storage/innobase/include/dict0stats_bg.h
@@ -47,17 +47,6 @@ extern my_bool innodb_dict_stats_disabled_debug;
#endif /* UNIV_DEBUG */
/*****************************************************************//**
-Add a table to the recalc pool, which is processed by the
-background stats gathering thread. Only the table id is added to the
-list, so the table can be closed after being enqueued and it will be
-opened when needed. If the table does not exist later (has been DROPped),
-then it will be removed from the pool and skipped. */
-void
-dict_stats_recalc_pool_add(
-/*=======================*/
- const dict_table_t* table); /*!< in: table to add */
-
-/*****************************************************************//**
Delete a given table from the auto recalc pool.
dict_stats_recalc_pool_del() */
void
@@ -151,7 +140,7 @@ DECLARE_THREAD(dict_stats_thread)(
void* arg); /*!< in: a dummy parameter
required by os_thread_create */
-/** Shutdown the dict stats thread. */
+/** Shut down the dict_stats_thread. */
void
dict_stats_shutdown();
diff --git a/storage/innobase/include/fil0fil.h b/storage/innobase/include/fil0fil.h
index d8b6cf33675..7d9327c598b 100644
--- a/storage/innobase/include/fil0fil.h
+++ b/storage/innobase/include/fil0fil.h
@@ -180,9 +180,6 @@ struct fil_space_t {
/** MariaDB encryption data */
fil_space_crypt_t* crypt_data;
- /** tablespace crypt data has been read */
- bool page_0_crypt_read;
-
/** True if we have already printed compression failure */
bool printed_compression_failure;
@@ -217,7 +214,7 @@ struct fil_node_t {
/** file name; protected by fil_system->mutex and log_sys->mutex. */
char* name;
/** file handle (valid if is_open) */
- os_file_t handle;
+ pfs_os_file_t handle;
/** event that groups and serializes calls to fsync;
os_event_set() and os_event_reset() are protected by
fil_system_t::mutex */
@@ -586,7 +583,6 @@ Error messages are issued to the server log.
@param[in] flags tablespace flags
@param[in] purpose tablespace purpose
@param[in,out] crypt_data encryption information
-@param[in] create_table whether this is CREATE TABLE
@param[in] mode encryption mode
@return pointer to created tablespace, to be filled in with fil_node_create()
@retval NULL on failure (such as when the same tablespace exists) */
@@ -597,7 +593,6 @@ fil_space_create(
ulint flags,
fil_type_t purpose,
fil_space_crypt_t* crypt_data,
- bool create_table,
fil_encryption_t mode = FIL_ENCRYPTION_DEFAULT)
MY_ATTRIBUTE((warn_unused_result));
@@ -1064,7 +1059,7 @@ fil_ibd_create(
ulint size,
fil_encryption_t mode,
uint32_t key_id)
- MY_ATTRIBUTE((warn_unused_result));
+ MY_ATTRIBUTE((nonnull(2), warn_unused_result));
/** Try to adjust FSP_SPACE_FLAGS if they differ from the expectations.
(Typically when upgrading from MariaDB 10.1.0..10.1.20.)
@@ -1404,19 +1399,19 @@ struct PageCallback {
/** Called for every page in the tablespace. If the page was not
updated then its state must be set to BUF_PAGE_NOT_USED. For
compressed tables the page descriptor memory will be at offset:
- block->frame + UNIV_PAGE_SIZE;
+ block->frame + UNIV_PAGE_SIZE;
@param offset physical offset within the file
@param block block read from file, note it is not from the buffer pool
@retval DB_SUCCESS or error code. */
virtual dberr_t operator()(
- os_offset_t offset,
+ os_offset_t offset,
buf_block_t* block) UNIV_NOTHROW = 0;
/** Set the name of the physical file and the file handle that is used
to open it for the file that is being iterated over.
- @param filename then physical name of the tablespace file.
+ @param filename the name of the tablespace file
@param file OS file handle */
- void set_file(const char* filename, os_file_t file) UNIV_NOTHROW
+ void set_file(const char* filename, pfs_os_file_t file) UNIV_NOTHROW
{
m_file = file;
m_filepath = filename;
@@ -1441,7 +1436,7 @@ struct PageCallback {
page_size_t m_page_size;
/** File handle to the tablespace */
- os_file_t m_file;
+ pfs_os_file_t m_file;
/** Physical file path. */
const char* m_filepath;
diff --git a/storage/innobase/include/fil0pagecompress.h b/storage/innobase/include/fil0pagecompress.h
index 67ff7895b02..be10f99d0f0 100644
--- a/storage/innobase/include/fil0pagecompress.h
+++ b/storage/innobase/include/fil0pagecompress.h
@@ -46,9 +46,8 @@ fil_compress_page(
ulint level, /* in: compression level */
ulint block_size, /*!< in: block size */
bool encrypted, /*!< in: is page also encrypted */
- ulint* out_len, /*!< out: actual length of compressed
+ ulint* out_len); /*!< out: actual length of compressed
page */
- byte* lzo_mem); /*!< in: temporal memory used by LZO */
/****************************************************************//**
For page compressed pages decompress the page after actual read
diff --git a/storage/innobase/include/fsp0file.h b/storage/innobase/include/fsp0file.h
index 1c4ac8152c9..974de9c75ed 100644
--- a/storage/innobase/include/fsp0file.h
+++ b/storage/innobase/include/fsp0file.h
@@ -54,7 +54,7 @@ public:
m_name(),
m_filepath(),
m_filename(),
- m_handle(OS_FILE_CLOSED),
+ m_handle(),
m_open_flags(OS_FILE_OPEN),
m_size(),
m_order(),
@@ -77,7 +77,7 @@ public:
m_name(mem_strdup(name)),
m_filepath(),
m_filename(),
- m_handle(OS_FILE_CLOSED),
+ m_handle(),
m_open_flags(OS_FILE_OPEN),
m_size(size),
m_order(order),
@@ -274,7 +274,7 @@ public:
/** Get Datafile::m_handle.
@return m_handle */
- os_file_t handle() const
+ pfs_os_file_t handle() const
{
return(m_handle);
}
@@ -416,7 +416,7 @@ private:
char* m_filename;
/** Open file handle */
- os_file_t m_handle;
+ pfs_os_file_t m_handle;
/** Flags to use for opening the data file */
os_file_create_t m_open_flags;
diff --git a/storage/innobase/include/fsp0fsp.h b/storage/innobase/include/fsp0fsp.h
index f6fee72300c..63fc211c88d 100644
--- a/storage/innobase/include/fsp0fsp.h
+++ b/storage/innobase/include/fsp0fsp.h
@@ -397,18 +397,12 @@ fsp_header_init_fields(
ulint space_id, /*!< in: space id */
ulint flags); /*!< in: tablespace flags (FSP_SPACE_FLAGS):
0, or table->flags if newer than COMPACT */
-
-/** Initializes the space header of a new created space and creates also the
-insert buffer tree root if space == 0.
+/** Initialize a tablespace header.
@param[in] space_id space id
@param[in] size current size in blocks
-@param[in,out] mtr min-transaction
-@return true on success, otherwise false. */
-bool
-fsp_header_init(
- ulint space_id,
- ulint size,
- mtr_t* mtr);
+@param[in,out] mtr mini-transaction */
+void
+fsp_header_init(ulint space_id, ulint size, mtr_t* mtr);
/**********************************************************************//**
Increases the space size field of a space. */
diff --git a/storage/innobase/include/fts0priv.h b/storage/innobase/include/fts0priv.h
index 80ebcf09d6d..f9d5d07a44c 100644
--- a/storage/innobase/include/fts0priv.h
+++ b/storage/innobase/include/fts0priv.h
@@ -1,6 +1,7 @@
/*****************************************************************************
Copyright (c) 2011, 2016, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 2017, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -215,13 +216,10 @@ fts_write_node(
fts_node_t* node) /*!< in: node columns */
MY_ATTRIBUTE((warn_unused_result));
-/** Check fts token
-1. for ngram token, check whether the token contains any words in stopwords
-2. for non-ngram token, check if it's stopword or less than fts_min_token_size
+/** Check if a fts token is a stopword or less than fts_min_token_size
or greater than fts_max_token_size.
@param[in] token token string
@param[in] stopwords stopwords rb tree
-@param[in] is_ngram is ngram parser
@param[in] cs token charset
@retval true if it is not stopword and length in range
@retval false if it is stopword or length not in range */
@@ -229,7 +227,6 @@ bool
fts_check_token(
const fts_string_t* token,
const ib_rbt_t* stopwords,
- bool is_ngram,
const CHARSET_INFO* cs);
/******************************************************************//**
diff --git a/storage/innobase/include/fts0types.h b/storage/innobase/include/fts0types.h
index c1db160602f..55a698e8b66 100644
--- a/storage/innobase/include/fts0types.h
+++ b/storage/innobase/include/fts0types.h
@@ -1,7 +1,7 @@
/*****************************************************************************
Copyright (c) 2007, 2016, Oracle and/or its affiliates. All Rights Reserved.
-Copyright (c) 2017, MariaDB Corporation. All Rights Reserved.
+Copyright (c) 2017, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -277,8 +277,6 @@ struct fts_doc_t {
st_mysql_ftparser* parser; /*!< fts plugin parser */
- bool is_ngram; /*!< Whether it is a ngram parser */
-
ib_rbt_t* stopwords; /*!< Stopwords */
};
diff --git a/storage/innobase/include/fts0types.ic b/storage/innobase/include/fts0types.ic
index 417a1010919..a8712751412 100644
--- a/storage/innobase/include/fts0types.ic
+++ b/storage/innobase/include/fts0types.ic
@@ -1,6 +1,7 @@
/*****************************************************************************
Copyright (c) 2007, 2015, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 2017, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -115,19 +116,14 @@ bool
fts_is_charset_cjk(
const CHARSET_INFO* cs)
{
- if (strcmp(cs->name, "gb2312_chinese_ci") == 0
- || strcmp(cs->name, "gbk_chinese_ci") == 0
- || strcmp(cs->name, "big5_chinese_ci") == 0
- || strcmp(cs->name, "gb18030_chinese_ci") == 0
- || strcmp(cs->name, "ujis_japanese_ci") == 0
- || strcmp(cs->name, "sjis_japanese_ci") == 0
- || strcmp(cs->name, "cp932_japanese_ci") == 0
- || strcmp(cs->name, "eucjpms_japanese_ci") == 0
- || strcmp(cs->name, "euckr_korean_ci") == 0) {
- return(true);
- } else {
- return(false);
- }
+ return cs == &my_charset_gb2312_chinese_ci
+ || cs == &my_charset_gbk_chinese_ci
+ || cs == &my_charset_big5_chinese_ci
+ || cs == &my_charset_ujis_japanese_ci
+ || cs == &my_charset_sjis_japanese_ci
+ || cs == &my_charset_cp932_japanese_ci
+ || cs == &my_charset_eucjpms_japanese_ci
+ || cs == &my_charset_euckr_korean_ci;
}
/** Select the FTS auxiliary index for the given character by range.
diff --git a/storage/innobase/include/lock0lock.h b/storage/innobase/include/lock0lock.h
index e718189062d..b3de1bf27f2 100644
--- a/storage/innobase/include/lock0lock.h
+++ b/storage/innobase/include/lock0lock.h
@@ -303,32 +303,6 @@ lock_rec_insert_check_and_lock(
MY_ATTRIBUTE((warn_unused_result));
/*********************************************************************//**
-Enqueues a waiting request for a lock which cannot be granted immediately.
-Checks for deadlocks.
-@return DB_LOCK_WAIT, DB_DEADLOCK, or DB_QUE_THR_SUSPENDED, or
-DB_SUCCESS_LOCKED_REC; DB_SUCCESS_LOCKED_REC means that
-there was a deadlock, but another transaction was chosen as a victim,
-and we got the lock immediately: no need to wait then */
-dberr_t
-lock_rec_enqueue_waiting(
-/*=====================*/
- ulint type_mode,/*!< in: lock mode this
- transaction is requesting:
- LOCK_S or LOCK_X, possibly
- ORed with LOCK_GAP or
- LOCK_REC_NOT_GAP, ORed with
- LOCK_INSERT_INTENTION if this
- waiting lock request is set
- when performing an insert of
- an index record */
- const buf_block_t* block, /*!< in: buffer block containing
- the record */
- ulint heap_no,/*!< in: heap number of the record */
- dict_index_t* index, /*!< in: index of record */
- que_thr_t* thr, /*!< in: query thread */
- lock_prdt_t* prdt); /*!< in: Minimum Bounding Box */
-
-/*********************************************************************//**
Checks if locks of other transactions prevent an immediate modify (update,
delete mark, or delete unmark) of a clustered index record. If they do,
first tests if the query thread should anyway be suspended for some
diff --git a/storage/innobase/include/log0log.h b/storage/innobase/include/log0log.h
index d1aae64227e..05e53e23f28 100644
--- a/storage/innobase/include/log0log.h
+++ b/storage/innobase/include/log0log.h
@@ -151,24 +151,24 @@ UNIV_INLINE
lsn_t
log_get_max_modified_age_async(void);
/*================================*/
-/******************************************************//**
-Initializes the log. */
+/** Initializes the redo logging subsystem. */
void
-log_init(void);
-/*==========*/
-/******************************************************************//**
-Inits a log group to the log system.
-@return true if success, false if not */
-MY_ATTRIBUTE((warn_unused_result))
+log_sys_init();
+
+/** Initialize the redo log.
+@param[in] n_files number of files
+@param[in] file_size file size in bytes */
+void
+log_init(ulint n_files, lsn_t file_size);
+/** Calculate the recommended highest values for lsn - last_checkpoint_lsn
+and lsn - buf_get_oldest_modification().
+@retval true on success
+@retval false if the smallest log group is too small to
+accommodate the number of OS threads in the database server */
bool
-log_group_init(
-/*===========*/
- ulint id, /*!< in: group id */
- ulint n_files, /*!< in: number of log files */
- lsn_t file_size, /*!< in: log file size in bytes */
- ulint space_id); /*!< in: space id of the file space
- which contains the log files of this
- group */
+log_set_capacity()
+ MY_ATTRIBUTE((warn_unused_result));
+
/******************************************************//**
Completes an i/o to a log file. */
void
@@ -552,16 +552,12 @@ Currently, this is only protected by log_sys->mutex. However, in the case
of log_write_up_to(), we will access some members only with the protection
of log_sys->write_mutex, which should affect nothing for now. */
struct log_group_t{
- /** log group identifier (always 0) */
- ulint id;
/** number of files in the group */
ulint n_files;
/** format of the redo log: e.g., LOG_HEADER_FORMAT_CURRENT */
ulint format;
/** individual log file size in bytes, including the header */
- lsn_t file_size
- /** file space which implements the log group */;
- ulint space_id;
+ lsn_t file_size;
/** corruption status */
log_group_state_t state;
/** lsn used to fix coordinates within the log group */
@@ -580,8 +576,6 @@ struct log_group_t{
byte* checkpoint_buf_ptr;
/** buffer for writing a checkpoint header */
byte* checkpoint_buf;
- /** list of log groups */
- UT_LIST_NODE_T(log_group_t) log_groups;
/** @return whether the redo log is encrypted */
bool is_encrypted() const
@@ -639,8 +633,8 @@ struct log_t{
max_checkpoint_age; this flag is
peeked at by log_free_check(), which
does not reserve the log mutex */
- UT_LIST_BASE_NODE_T(log_group_t)
- log_groups; /*!< log groups */
+ /** the redo log */
+ log_group_t log;
/** The fields involved in the log buffer flush @{ */
@@ -729,7 +723,7 @@ struct log_t{
/** @return whether the redo log is encrypted */
bool is_encrypted() const
{
- return(UT_LIST_GET_FIRST(log_groups)->is_encrypted());
+ return(log.is_encrypted());
}
};
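
[Editorial note] Because the list of log groups collapses into the single embedded member log_t::log, callers that used UT_LIST_GET_FIRST(log_sys->log_groups) can address log_sys->log directly, as the rewritten is_encrypted() above already does. A minimal standalone sketch of that shape (most members elided, UT_LIST machinery omitted):

#include <cstdio>

struct log_group_t {
	unsigned long	n_files;
	bool is_encrypted() const { return false; }
};

struct log_t {
	/* before: UT_LIST_BASE_NODE_T(log_group_t) log_groups; */
	log_group_t	log;	/* the one and only redo log */

	bool is_encrypted() const { return log.is_encrypted(); }
};

int main()
{
	log_t	log_sys;

	/* was: UT_LIST_GET_FIRST(log_sys.log_groups)->n_files = 3; */
	log_sys.log.n_files = 3;
	std::printf("n_files=%lu encrypted=%d\n",
		    log_sys.log.n_files, log_sys.is_encrypted());
	return 0;
}
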
diff --git a/storage/innobase/include/log0recv.h b/storage/innobase/include/log0recv.h
index 74ea6c95036..784699279d4 100644
--- a/storage/innobase/include/log0recv.h
+++ b/storage/innobase/include/log0recv.h
@@ -41,6 +41,13 @@ Created 9/20/1997 Heikki Tuuri
/** @return whether recovery is currently running. */
#define recv_recovery_is_on() recv_recovery_on
+/** Find the latest checkpoint in the log header.
+@param[out] max_field LOG_CHECKPOINT_1 or LOG_CHECKPOINT_2
+@return error code or DB_SUCCESS */
+dberr_t
+recv_find_max_checkpoint(ulint* max_field)
+ MY_ATTRIBUTE((nonnull, warn_unused_result));
+
/** Apply the hashed log records to the page, if the page lsn is less than the
lsn of a log record.
@param just_read_in whether the page recently arrived to the I/O handler
diff --git a/storage/innobase/include/mach0data.ic b/storage/innobase/include/mach0data.ic
index 34d375aa1e8..c89e4960480 100644
--- a/storage/innobase/include/mach0data.ic
+++ b/storage/innobase/include/mach0data.ic
@@ -827,13 +827,13 @@ mach_swap_byte_order(
dest += len;
switch (len & 0x7) {
- case 0: *--dest = *from++;
- case 7: *--dest = *from++;
- case 6: *--dest = *from++;
- case 5: *--dest = *from++;
- case 4: *--dest = *from++;
- case 3: *--dest = *from++;
- case 2: *--dest = *from++;
+ case 0: *--dest = *from++; /* fall through */
+ case 7: *--dest = *from++; /* fall through */
+ case 6: *--dest = *from++; /* fall through */
+ case 5: *--dest = *from++; /* fall through */
+ case 4: *--dest = *from++; /* fall through */
+ case 3: *--dest = *from++; /* fall through */
+ case 2: *--dest = *from++; /* fall through */
case 1: *--dest = *from;
}
}
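
[Editorial note] The mach0data.ic hunk only annotates the intentional fall-through of the unrolled byte-swap switch. A standalone sketch of the same pattern, for a caller-guaranteed length of 1..8 bytes:

#include <cstdio>

/* Copy len (1..8) bytes in reverse order; the switch enters at the
   rung matching len and every rung deliberately falls through. */
static void swap_byte_order(unsigned char* dest, const unsigned char* from,
			    unsigned len)
{
	dest += len;

	switch (len & 0x7) {
	case 0: *--dest = *from++; /* fall through */
	case 7: *--dest = *from++; /* fall through */
	case 6: *--dest = *from++; /* fall through */
	case 5: *--dest = *from++; /* fall through */
	case 4: *--dest = *from++; /* fall through */
	case 3: *--dest = *from++; /* fall through */
	case 2: *--dest = *from++; /* fall through */
	case 1: *--dest = *from;
	}
}

int main()
{
	const unsigned char	in[4] = {1, 2, 3, 4};
	unsigned char		out[4];

	swap_byte_order(out, in, 4);
	std::printf("%d %d %d %d\n", out[0], out[1], out[2], out[3]); /* 4 3 2 1 */
	return 0;
}
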
diff --git a/storage/innobase/include/os0file.h b/storage/innobase/include/os0file.h
index e3396a4b7f7..b3a70edf8be 100644
--- a/storage/innobase/include/os0file.h
+++ b/storage/innobase/include/os0file.h
@@ -1,6 +1,6 @@
/***********************************************************************
-Copyright (c) 1995, 2016, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 1995, 2017, Oracle and/or its affiliates. All Rights Reserved.
Copyright (c) 2009, Percona Inc.
Copyright (c) 2013, 2017, MariaDB Corporation.
@@ -90,6 +90,30 @@ typedef int os_file_t;
static const os_file_t OS_FILE_CLOSED = os_file_t(~0);
+/** File descriptor with optional PERFORMANCE_SCHEMA instrumentation */
+struct pfs_os_file_t
+{
+ /** Default constructor */
+ pfs_os_file_t(os_file_t file = OS_FILE_CLOSED) : m_file(file)
+#ifdef UNIV_PFS_IO
+ , m_psi(NULL)
+#endif
+ {}
+
+ /** The wrapped file handle */
+ os_file_t m_file;
+#ifdef UNIV_PFS_IO
+ /** PERFORMANCE_SCHEMA descriptor */
+ struct PSI_file *m_psi;
+#endif
+ /** Implicit type conversion.
+ @return the wrapped file handle */
+ operator os_file_t() const { return m_file; }
+ /** Assignment operator.
+ @param[in] file file handle to be assigned */
+ void operator=(os_file_t file) { m_file = file; }
+};
+
/** The next value should be smaller or equal to the smallest sector size used
on any disk. A log block is required to be a portion of disk which is written
so that if the start and the end of a block get written to disk, then the
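
[Editorial note] pfs_os_file_t bundles the native handle with an optional PERFORMANCE_SCHEMA descriptor, and its implicit conversion back to os_file_t keeps existing call sites that expect a raw handle compiling unchanged. A minimal standalone sketch with the UNIV_PFS_IO member omitted (is_closed() is a hypothetical consumer of the raw handle):

#include <cassert>

typedef int os_file_t;	/* POSIX branch, as in os0file.h */
static const os_file_t OS_FILE_CLOSED = os_file_t(~0);

struct pfs_os_file_t
{
	pfs_os_file_t(os_file_t file = OS_FILE_CLOSED) : m_file(file) {}

	/** The wrapped file handle (the PSI_file pointer is omitted here) */
	os_file_t m_file;

	/** Implicit conversion back to the raw handle */
	operator os_file_t() const { return m_file; }
	/** Assignment from a raw handle */
	void operator=(os_file_t file) { m_file = file; }
};

/* hypothetical legacy consumer that still takes the raw handle */
static bool is_closed(os_file_t fh) { return fh == OS_FILE_CLOSED; }

int main()
{
	pfs_os_file_t	file;		/* defaults to OS_FILE_CLOSED */

	assert(is_closed(file));	/* implicit unwrap to os_file_t */
	file = 42;			/* raw handle assignment */
	assert(!is_closed(file));
	return 0;
}
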
@@ -585,7 +609,7 @@ A simple function to open or create a file.
@param[out] success true if succeed, false if error
@return own: handle to the file, not defined if error, error number
can be retrieved with os_file_get_last_error */
-os_file_t
+pfs_os_file_t
os_file_create_simple_func(
const char* name,
ulint create_mode,
@@ -605,7 +629,7 @@ A simple function to open or create a file.
@param[out] success true if succeeded
@return own: handle to the file, not defined if error, error number
can be retrieved with os_file_get_last_error */
-os_file_t
+pfs_os_file_t
os_file_create_simple_no_error_handling_func(
const char* name,
ulint create_mode,
@@ -643,7 +667,7 @@ Opens an existing file or creates a new.
@param[in] success true if succeeded
@return own: handle to the file, not defined if error, error number
can be retrieved with os_file_get_last_error */
-os_file_t
+pfs_os_file_t
os_file_create_func(
const char* name,
ulint create_mode,
@@ -696,6 +720,8 @@ extern mysql_pfs_key_t innodb_temp_file_key;
various file I/O operations with performance schema.
1) register_pfs_file_open_begin() and register_pfs_file_open_end() are
used to register file creation, opening, closing and renaming.
+2) register_pfs_file_rename_begin() and register_pfs_file_rename_end()
+are used to register file renaming
2) register_pfs_file_io_begin() and register_pfs_file_io_end() are
used to register actual file read, write and flush
3) register_pfs_file_close_begin() and register_pfs_file_close_end()
@@ -711,11 +737,23 @@ do { \
} \
} while (0)
-# define register_pfs_file_open_end(locker, file) \
+# define register_pfs_file_open_end(locker, file, result) \
do { \
if (locker != NULL) { \
- PSI_FILE_CALL(end_file_open_wait_and_bind_to_descriptor)(\
- locker, file); \
+ file.m_psi = PSI_FILE_CALL(end_file_open_wait)( \
+ locker, result); \
+ } \
+} while (0)
+
+# define register_pfs_file_rename_begin(state, locker, key, op, name, \
+ src_file, src_line) \
+ register_pfs_file_open_begin(state, locker, key, op, name, \
+ src_file, src_line) \
+
+# define register_pfs_file_rename_end(locker, result) \
+do { \
+ if (locker != NULL) { \
+ PSI_FILE_CALL(end_file_open_wait)(locker, result); \
} \
} while (0)
@@ -741,8 +779,8 @@ do { \
# define register_pfs_file_io_begin(state, locker, file, count, op, \
src_file, src_line) \
do { \
- locker = PSI_FILE_CALL(get_thread_file_descriptor_locker)( \
- state, file, op); \
+ locker = PSI_FILE_CALL(get_thread_file_stream_locker)( \
+ state, file.m_psi, op); \
if (locker != NULL) { \
PSI_FILE_CALL(start_file_wait)( \
locker, count, src_file, src_line); \
@@ -768,7 +806,9 @@ os_file_rename
os_aio
os_file_read
os_file_read_no_error_handling
+os_file_read_no_error_handling_int_fd
os_file_write
+os_file_write_int_fd
The wrapper functions have the prefix of "innodb_". */
@@ -804,11 +844,19 @@ The wrapper functions have the prefix of "innodb_". */
pfs_os_file_read_no_error_handling_func( \
type, file, buf, offset, n, o, __FILE__, __LINE__)
+# define os_file_read_no_error_handling_int_fd(type, file, buf, offset, n) \
+ pfs_os_file_read_no_error_handling_int_fd_func( \
+ type, file, buf, offset, n, __FILE__, __LINE__)
+
# define os_file_write(type, name, file, buf, offset, n) \
pfs_os_file_write_func(type, name, file, buf, offset, \
- n,__FILE__, __LINE__)
+ n, __FILE__, __LINE__)
+
+# define os_file_write_int_fd(type, name, file, buf, offset, n) \
+ pfs_os_file_write_int_fd_func(type, name, file, buf, offset, \
+ n, __FILE__, __LINE__)
-# define os_file_flush(file) \
+# define os_file_flush(file) \
pfs_os_file_flush_func(file, __FILE__, __LINE__)
# define os_file_rename(key, oldpath, newpath) \
@@ -836,7 +884,7 @@ os_file_create_simple() which opens or creates a file.
@return own: handle to the file, not defined if error, error number
can be retrieved with os_file_get_last_error */
UNIV_INLINE
-os_file_t
+pfs_os_file_t
pfs_os_file_create_simple_func(
mysql_pfs_key_t key,
const char* name,
@@ -867,7 +915,7 @@ monitor file creation/open.
@return own: handle to the file, not defined if error, error number
can be retrieved with os_file_get_last_error */
UNIV_INLINE
-os_file_t
+pfs_os_file_t
pfs_os_file_create_simple_no_error_handling_func(
mysql_pfs_key_t key,
const char* name,
@@ -900,7 +948,7 @@ Add instrumentation to monitor file creation/open.
@return own: handle to the file, not defined if error, error number
can be retrieved with os_file_get_last_error */
UNIV_INLINE
-os_file_t
+pfs_os_file_t
pfs_os_file_create_func(
mysql_pfs_key_t key,
const char* name,
@@ -923,7 +971,7 @@ A performance schema instrumented wrapper function for os_file_close().
UNIV_INLINE
bool
pfs_os_file_close_func(
- os_file_t file,
+ pfs_os_file_t file,
const char* src_file,
uint src_line);
@@ -943,7 +991,7 @@ UNIV_INLINE
dberr_t
pfs_os_file_read_func(
IORequest& type,
- os_file_t file,
+ pfs_os_file_t file,
void* buf,
os_offset_t offset,
ulint n,
@@ -968,7 +1016,7 @@ UNIV_INLINE
dberr_t
pfs_os_file_read_no_error_handling_func(
IORequest& type,
- os_file_t file,
+ pfs_os_file_t file,
void* buf,
os_offset_t offset,
ulint n,
@@ -1004,7 +1052,7 @@ pfs_os_aio_func(
IORequest& type,
ulint mode,
const char* name,
- os_file_t file,
+ pfs_os_file_t file,
void* buf,
os_offset_t offset,
ulint n,
@@ -1033,7 +1081,7 @@ dberr_t
pfs_os_file_write_func(
IORequest& type,
const char* name,
- os_file_t file,
+ pfs_os_file_t file,
const void* buf,
os_offset_t offset,
ulint n,
@@ -1052,7 +1100,7 @@ Flushes the write buffers of a given file to the disk.
UNIV_INLINE
bool
pfs_os_file_flush_func(
- os_file_t file,
+ pfs_os_file_t file,
const char* src_file,
uint src_line);
@@ -1144,9 +1192,12 @@ to original un-instrumented file I/O APIs */
# define os_file_read_no_error_handling(type, file, buf, offset, n, o) \
os_file_read_no_error_handling_func(type, file, buf, offset, n, o)
+# define os_file_read_no_error_handling_int_fd(type, file, buf, offset, n) \
+ os_file_read_no_error_handling_func(type, file, buf, offset, n, NULL)
# define os_file_write(type, name, file, buf, offset, n) \
os_file_write_func(type, name, file, buf, offset, n)
+# define os_file_write_int_fd os_file_write_func
# define os_file_flush(file) os_file_flush_func(file)
@@ -1402,7 +1453,7 @@ os_aio_func(
IORequest& type,
ulint mode,
const char* name,
- os_file_t file,
+ pfs_os_file_t file,
void* buf,
os_offset_t offset,
ulint n,
@@ -1538,19 +1589,6 @@ os_is_sparse_file_supported(
@return DB_SUCCESS or error code */
dberr_t
os_file_punch_hole(
- IORequest& type,
- os_file_t fh,
- os_offset_t off,
- os_offset_t len)
- MY_ATTRIBUTE((warn_unused_result));
-
-/** Free storage space associated with a section of the file.
-@param[in] fh Open file handle
-@param[in] off Starting offset (SEEK_SET)
-@param[in] len Size of the hole
-@return DB_SUCCESS or error code */
-dberr_t
-os_file_punch_hole(
os_file_t fh,
os_offset_t off,
os_offset_t len)
diff --git a/storage/innobase/include/os0file.ic b/storage/innobase/include/os0file.ic
index 807d3254b9d..07c2b6212ed 100644
--- a/storage/innobase/include/os0file.ic
+++ b/storage/innobase/include/os0file.ic
@@ -1,6 +1,6 @@
/*****************************************************************************
-Copyright (c) 2010, 2015, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 2010, 2017, Oracle and/or its affiliates. All Rights Reserved.
Copyright (c) 2013, 2017, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify it under
@@ -43,7 +43,7 @@ os_file_create_simple() which opens or creates a file.
@return own: handle to the file, not defined if error, error number
can be retrieved with os_file_get_last_error */
UNIV_INLINE
-os_file_t
+pfs_os_file_t
pfs_os_file_create_simple_func(
mysql_pfs_key_t key,
const char* name,
@@ -64,11 +64,12 @@ pfs_os_file_create_simple_func(
? PSI_FILE_CREATE : PSI_FILE_OPEN,
name, src_file, src_line);
- os_file_t file = os_file_create_simple_func(
+ pfs_os_file_t file = os_file_create_simple_func(
name, create_mode, access_type, read_only, success);
- /* Regsiter the returning "file" value with the system */
- register_pfs_file_open_end(locker, file);
+ /* Register psi value for the file */
+ register_pfs_file_open_end(locker, file,
+ (*success == TRUE ? success : 0));
return(file);
}
@@ -92,7 +93,7 @@ monitor file creation/open.
@return own: handle to the file, not defined if error, error number
can be retrieved with os_file_get_last_error */
UNIV_INLINE
-os_file_t
+pfs_os_file_t
pfs_os_file_create_simple_no_error_handling_func(
mysql_pfs_key_t key,
const char* name,
@@ -113,10 +114,11 @@ pfs_os_file_create_simple_no_error_handling_func(
? PSI_FILE_CREATE : PSI_FILE_OPEN,
name, src_file, src_line);
- os_file_t file = os_file_create_simple_no_error_handling_func(
+ pfs_os_file_t file = os_file_create_simple_no_error_handling_func(
name, create_mode, access_type, read_only, success);
- register_pfs_file_open_end(locker, file);
+ register_pfs_file_open_end(locker, file,
+ (*success == TRUE ? success : 0));
return(file);
}
@@ -142,7 +144,7 @@ Add instrumentation to monitor file creation/open.
@return own: handle to the file, not defined if error, error number
can be retrieved with os_file_get_last_error */
UNIV_INLINE
-os_file_t
+pfs_os_file_t
pfs_os_file_create_func(
mysql_pfs_key_t key,
const char* name,
@@ -164,10 +166,11 @@ pfs_os_file_create_func(
? PSI_FILE_CREATE : PSI_FILE_OPEN,
name, src_file, src_line);
- os_file_t file = os_file_create_func(
+ pfs_os_file_t file = os_file_create_func(
name, create_mode, purpose, type, read_only, success);
- register_pfs_file_open_end(locker, file);
+ register_pfs_file_open_end(locker, file,
+ (*success == TRUE ? success : 0));
return(file);
}
@@ -182,7 +185,7 @@ A performance schema instrumented wrapper function for os_file_close().
UNIV_INLINE
bool
pfs_os_file_close_func(
- os_file_t file,
+ pfs_os_file_t file,
const char* src_file,
uint src_line)
{
@@ -228,7 +231,7 @@ pfs_os_aio_func(
IORequest& type,
ulint mode,
const char* name,
- os_file_t file,
+ pfs_os_file_t file,
void* buf,
os_offset_t offset,
ulint n,
@@ -273,7 +276,7 @@ UNIV_INLINE
dberr_t
pfs_os_file_read_func(
IORequest& type,
- os_file_t file,
+ pfs_os_file_t file,
void* buf,
os_offset_t offset,
ulint n,
@@ -315,7 +318,7 @@ UNIV_INLINE
dberr_t
pfs_os_file_read_no_error_handling_func(
IORequest& type,
- os_file_t file,
+ pfs_os_file_t file,
void* buf,
os_offset_t offset,
ulint n,
@@ -337,6 +340,49 @@ pfs_os_file_read_no_error_handling_func(
return(result);
}
+/** NOTE! Please use the corresponding macro
+os_file_read_no_error_handling_int_fd() to request
+a synchronous read operation.
+@param[in,out] type read request
+@param[in] file file handle
+@param[out] buf buffer where to read
+@param[in] offset file offset where to read
+@param[in] n number of bytes to read
+@param[in] src_file caller file name
+@param[in] src_line caller line number
+@return whether the request was successful */
+UNIV_INLINE
+bool
+pfs_os_file_read_no_error_handling_int_fd_func(
+ IORequest& type,
+ int file,
+ void* buf,
+ os_offset_t offset,
+ ulint n,
+ const char* src_file,
+ uint src_line)
+{
+ PSI_file_locker_state state;
+
+ PSI_file_locker* locker = PSI_FILE_CALL(
+ get_thread_file_descriptor_locker)(
+ &state, file, PSI_FILE_READ);
+ if (locker != NULL) {
+ PSI_FILE_CALL(start_file_wait)(
+ locker, n,
+ __FILE__, __LINE__);
+ }
+ ulint fulfilled;
+ bool success = DB_SUCCESS == os_file_read_no_error_handling_func(
+ type, OS_FILE_FROM_FD(file), buf, offset, n, &fulfilled);
+
+ if (locker != NULL) {
+ PSI_FILE_CALL(end_file_wait)(locker, n);
+ }
+
+ return(success);
+}
+
/** NOTE! Please use the corresponding macro os_file_write(), not directly
this function!
This is the performance schema instrumented wrapper function for
@@ -350,13 +396,14 @@ os_file_write() which requests a synchronous write operation.
@param[in] n number of bytes to read
@param[in] src_file file name where func invoked
@param[in] src_line line where the func invoked
-@return DB_SUCCESS if request was successful */
+@return error code
+@retval DB_SUCCESS if the request was successfully fulfilled */
UNIV_INLINE
dberr_t
pfs_os_file_write_func(
IORequest& type,
const char* name,
- os_file_t file,
+ pfs_os_file_t file,
const void* buf,
os_offset_t offset,
ulint n,
@@ -378,6 +425,52 @@ pfs_os_file_write_func(
return(result);
}
+/** NOTE! Please use the corresponding macro os_file_write_int_fd(),
+not directly this function!
+This is the performance schema instrumented wrapper function for
+os_file_write_int_fd() which requests a synchronous write operation.
+@param[in,out] type write request
+@param[in] name file name
+@param[in] file file handle
+@param[in] buf buffer to write
+@param[in] offset file offset
+@param[in] n number of bytes
+@param[in] src_file file name where func invoked
+@param[in] src_line line where the func invoked
+@return whether the request was successful */
+UNIV_INLINE
+bool
+pfs_os_file_write_int_fd_func(
+ IORequest& type,
+ const char* name,
+ int file,
+ const void* buf,
+ os_offset_t offset,
+ ulint n,
+ const char* src_file,
+ uint src_line)
+{
+ PSI_file_locker_state state;
+ struct PSI_file_locker* locker;
+
+ locker = PSI_FILE_CALL(get_thread_file_descriptor_locker)(
+ &state, file, PSI_FILE_WRITE);
+ if (locker != NULL) {
+ PSI_FILE_CALL(start_file_wait)(
+ locker, n,
+ __FILE__, __LINE__);
+ }
+
+ bool success = DB_SUCCESS == os_file_write_func(
+ type, name, OS_FILE_FROM_FD(file), buf, offset, n);
+
+ if (locker != NULL) {
+ PSI_FILE_CALL(end_file_wait)(locker, n);
+ }
+
+ return(success);
+}
+
/** NOTE! Please use the corresponding macro os_file_flush(), not directly
this function!
This is the performance schema instrumented wrapper function for
@@ -390,7 +483,7 @@ Flushes the write buffers of a given file to the disk.
UNIV_INLINE
bool
pfs_os_file_flush_func(
- os_file_t file,
+ pfs_os_file_t file,
const char* src_file,
uint src_line)
{
@@ -436,7 +529,7 @@ pfs_os_file_rename_func(
bool result = os_file_rename_func(oldpath, newpath);
- register_pfs_file_open_end(locker, 0);
+ register_pfs_file_rename_end(locker, 0);
return(result);
}
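
The two *_int_fd wrappers added above share one shape: take a plain int file descriptor, wrap the actual I/O call between Performance Schema start/end wait calls, and report success as a bool. A self-contained sketch of that shape follows; every identifier in it is a stand-in, not the real PSI or InnoDB API.

#include <cstdio>

struct locker_t { int fd; };                        /* stand-in for PSI_file_locker */

static locker_t* begin_io_wait(locker_t& state, int fd)
{
        state.fd = fd;   /* real code: get_thread_file_descriptor_locker + start_file_wait */
        return &state;
}

static void end_io_wait(locker_t*) {}               /* real code: end_file_wait */

static bool do_io(int fd, const void*, unsigned n)  /* stand-in for the os_file_*_func call */
{
        std::printf("fd %d: %u bytes\n", fd, n);
        return true;
}

static bool instrumented_io(int fd, const void* buf, unsigned n)
{
        locker_t state;
        /* The real locker may be NULL when instrumentation is off,
        hence the checks around the end call. */
        locker_t* locker = begin_io_wait(state, fd);
        bool success = do_io(fd, buf, n);           /* the actual work on the plain int descriptor */
        if (locker != NULL) {
                end_io_wait(locker);
        }
        return success;
}

int main()
{
        char buf[4] = { 0, 0, 0, 0 };
        return instrumented_io(1, buf, sizeof buf) ? 0 : 1;
}
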
diff --git a/storage/innobase/include/os0thread.h b/storage/innobase/include/os0thread.h
index 6f521b5a2ec..071e7422894 100644
--- a/storage/innobase/include/os0thread.h
+++ b/storage/innobase/include/os0thread.h
@@ -151,23 +151,10 @@ os_thread_sleep(
/*============*/
ulint tm); /*!< in: time in microseconds */
-/**
-Initializes OS thread management data structures. */
-void
-os_thread_init();
-/*============*/
-
-/**
-Frees OS thread management data structures. */
-void
-os_thread_free();
-/*============*/
-
/*****************************************************************//**
Check if there are threads active.
@return true if the thread count > 0. */
bool
os_thread_active();
-/*==============*/
#endif
diff --git a/storage/innobase/include/page0zip.ic b/storage/innobase/include/page0zip.ic
index 5f754e1f993..fa03279f9bc 100644
--- a/storage/innobase/include/page0zip.ic
+++ b/storage/innobase/include/page0zip.ic
@@ -164,7 +164,8 @@ page_zip_rec_needs_ext(
ulint n_fields,
const page_size_t& page_size)
{
- ut_ad(rec_size > comp ? REC_N_NEW_EXTRA_BYTES : REC_N_OLD_EXTRA_BYTES);
+ ut_ad(rec_size
+ > ulint(comp ? REC_N_NEW_EXTRA_BYTES : REC_N_OLD_EXTRA_BYTES));
ut_ad(comp || !page_size.is_compressed());
#if UNIV_PAGE_SIZE_MAX > REC_MAX_DATA_SIZE
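
The rewritten ut_ad() works around C++ operator precedence: the > operator binds tighter than ?:, so the old expression compared rec_size with comp first and then yielded one of the two non-zero constants, making the assertion vacuous. A self-contained illustration with assert() and stand-in values:

#include <cassert>

int main()
{
        const unsigned long rec_size = 10, comp = 1;
        const unsigned long A = 5, B = 6;   /* stand-ins for REC_N_NEW/OLD_EXTRA_BYTES */

        /* Parsed as (rec_size > comp) ? A : B -- always a non-zero constant,
        so an assertion written this way can never fire. */
        assert(rec_size > comp ? A : B);

        /* The intended check, as written in the fixed code. */
        assert(rec_size > (comp ? A : B));
        return 0;
}
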
diff --git a/storage/innobase/include/rem0cmp.h b/storage/innobase/include/rem0cmp.h
index 245fefae944..216e3a7655b 100644
--- a/storage/innobase/include/rem0cmp.h
+++ b/storage/innobase/include/rem0cmp.h
@@ -1,6 +1,7 @@
/*****************************************************************************
Copyright (c) 1994, 2016, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 2017, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -62,7 +63,7 @@ cmp_data_data(
ulint len1,
const byte* data2,
ulint len2)
- MY_ATTRIBUTE((nonnull, warn_unused_result));
+ MY_ATTRIBUTE((warn_unused_result));
/** Compare two data fields.
@param[in] dfield1 data field; must have type field set
diff --git a/storage/innobase/include/row0merge.h b/storage/innobase/include/row0merge.h
index 1b61c475c6f..50c3361a3f9 100644
--- a/storage/innobase/include/row0merge.h
+++ b/storage/innobase/include/row0merge.h
@@ -129,7 +129,6 @@ struct index_def_t {
index_field_t* fields; /*!< field definitions */
st_mysql_ftparser*
parser; /*!< fulltext parser plugin */
- bool is_ngram; /*!< true if it's ngram parser */
};
/** Structure for reporting duplicate records. */
@@ -195,7 +194,7 @@ row_merge_drop_temp_indexes(void);
/** Create temporary merge files in the given parameter path, and if
UNIV_PFS_IO defined, register the file descriptor with Performance Schema.
-@param[in] path location for creating temporary merge files.
+@param[in] path location for creating temporary merge files, or NULL
@return File descriptor */
int
row_merge_file_create_low(
@@ -398,13 +397,13 @@ row_merge_buf_empty(
/** Create a merge file in the given location.
@param[out] merge_file merge file structure
-@param[in] path location for creating temporary file
+@param[in] path location for creating temporary file, or NULL
@return file descriptor, or -1 on failure */
int
row_merge_file_create(
merge_file_t* merge_file,
const char* path)
- MY_ATTRIBUTE((warn_unused_result, nonnull));
+ MY_ATTRIBUTE((warn_unused_result, nonnull(1)));
/** Merge disk files.
@param[in] trx transaction
@@ -464,10 +463,9 @@ row_merge_file_destroy(
merge_file_t* merge_file) /*!< in/out: merge file structure */
MY_ATTRIBUTE((nonnull));
-/********************************************************************//**
-Read a merge block from the file system.
-@return TRUE if request was successful, FALSE if fail */
-ibool
+/** Read a merge block from the file system.
+@return whether the request was successful */
+bool
row_merge_read(
/*===========*/
int fd, /*!< in: file descriptor */
diff --git a/storage/innobase/include/row0mysql.h b/storage/innobase/include/row0mysql.h
index 7507c96ea5f..6164366628e 100644
--- a/storage/innobase/include/row0mysql.h
+++ b/storage/innobase/include/row0mysql.h
@@ -204,6 +204,7 @@ row_update_prebuilt_trx(
row_prebuilt_t* prebuilt, /*!< in/out: prebuilt struct
in MySQL handle */
trx_t* trx); /*!< in: transaction handle */
+
/*********************************************************************//**
Sets an AUTO_INC type lock on the table mentioned in prebuilt. The
AUTO_INC lock gives exclusive access to the auto-inc counter of the
diff --git a/storage/innobase/include/srv0srv.h b/storage/innobase/include/srv0srv.h
index 9d8e736beb1..8f0df34af74 100644
--- a/storage/innobase/include/srv0srv.h
+++ b/storage/innobase/include/srv0srv.h
@@ -1,6 +1,6 @@
/*****************************************************************************
-Copyright (c) 1995, 2016, Oracle and/or its affiliates. All rights reserved.
+Copyright (c) 1995, 2017, Oracle and/or its affiliates. All rights reserved.
Copyright (c) 2008, 2009, Google Inc.
Copyright (c) 2009, Percona Inc.
Copyright (c) 2013, 2017, MariaDB Corporation.
diff --git a/storage/innobase/include/srv0start.h b/storage/innobase/include/srv0start.h
index 4f2f4a312ff..47b42725541 100644
--- a/storage/innobase/include/srv0start.h
+++ b/storage/innobase/include/srv0start.h
@@ -1,7 +1,7 @@
/*****************************************************************************
-Copyright (c) 1995, 2016, Oracle and/or its affiliates. All Rights Reserved.
-Copyright (c) 2017, MariaDB Corporation
+Copyright (c) 1995, 2017, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 2017, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -38,43 +38,20 @@ struct dict_table_t;
only one buffer pool instance is used. */
#define BUF_POOL_SIZE_THRESHOLD (1024 * 1024 * 1024)
-/*********************************************************************//**
-Parse temporary tablespace configuration.
-@return true if ok, false on parse error */
-bool
-srv_parse_temp_data_file_paths_and_sizes(
-/*=====================================*/
- char* str); /*!< in/out: the data file path string */
-/*********************************************************************//**
-Frees the memory allocated by srv_parse_data_file_paths_and_sizes()
-and srv_parse_log_group_home_dirs(). */
-void
-srv_free_paths_and_sizes(void);
-/*==========================*/
-/*********************************************************************//**
-Adds a slash or a backslash to the end of a string if it is missing
-and the string is not empty.
-@return string which has the separator if the string is not empty */
-char*
-srv_add_path_separator_if_needed(
-/*=============================*/
- char* str); /*!< in: null-terminated character string */
-
/****************************************************************//**
Starts Innobase and creates a new database if database files
are not found and the user wants.
@return DB_SUCCESS or error code */
dberr_t
-innobase_start_or_create_for_mysql(void);
-/*====================================*/
+innobase_start_or_create_for_mysql();
+
/** Shut down InnoDB. */
void
innodb_shutdown();
-/****************************************************************//**
-Shuts down background threads that can generate undo pages. */
+/** Shut down background threads that can generate undo log. */
void
-srv_shutdown_bg_undo_sources(void);
+srv_shutdown_bg_undo_sources();
/*************************************************************//**
Copy the file path component of the physical file to parameter. It will
@@ -128,6 +105,22 @@ extern bool srv_startup_is_before_trx_rollback_phase;
/** TRUE if a raw partition is in use */
extern ibool srv_start_raw_disk_in_use;
+/** Undo tablespaces start with this space_id. */

+extern ulint srv_undo_space_id_start;
+
+/** Check whether the given space id is an undo tablespace id
+@param[in] space_id space id to check
+@return true if it is an undo tablespace, else false. */
+inline
+bool
+srv_is_undo_tablespace(ulint space_id)
+{
+ return srv_undo_space_id_start > 0
+ && space_id >= srv_undo_space_id_start
+ && space_id < (srv_undo_space_id_start
+ + srv_undo_tablespaces_open);
+}
+
/** Shutdown state */
enum srv_shutdown_t {
SRV_SHUTDOWN_NONE = 0, /*!< Database running normally */
@@ -144,6 +137,9 @@ enum srv_shutdown_t {
SRV_SHUTDOWN_EXIT_THREADS/*!< Exit all threads */
};
+/** Whether any undo log records can be generated */
+extern bool srv_undo_sources;
+
/** At a shutdown this value climbs from SRV_SHUTDOWN_NONE to
SRV_SHUTDOWN_CLEANUP and then to SRV_SHUTDOWN_LAST_PHASE, and so on */
extern enum srv_shutdown_t srv_shutdown_state;
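
The new srv_is_undo_tablespace() is a half-open range check. For example, with srv_undo_space_id_start = 1 and srv_undo_tablespaces_open = 4 (assumed values, not defaults), space ids 1 through 4 are undo tablespaces and everything else, including space 0 (the system tablespace), is not. A self-contained sketch of the same predicate:

#include <cassert>

typedef unsigned long ulint;

/* Assumed example values, not actual server defaults. */
static const ulint srv_undo_space_id_start   = 1;
static const ulint srv_undo_tablespaces_open = 4;

inline bool is_undo_tablespace(ulint space_id)
{
        return srv_undo_space_id_start > 0
                && space_id >= srv_undo_space_id_start
                && space_id < srv_undo_space_id_start + srv_undo_tablespaces_open;
}

int main()
{
        assert(!is_undo_tablespace(0));                      /* system tablespace */
        assert(is_undo_tablespace(1) && is_undo_tablespace(4));
        assert(!is_undo_tablespace(5));                      /* first id past the undo range */
        return 0;
}
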
diff --git a/storage/innobase/include/sync0types.h b/storage/innobase/include/sync0types.h
index c3d413e71df..ec4503daa64 100644
--- a/storage/innobase/include/sync0types.h
+++ b/storage/innobase/include/sync0types.h
@@ -354,7 +354,6 @@ enum latch_id_t {
LATCH_ID_EVENT_MANAGER,
LATCH_ID_EVENT_MUTEX,
LATCH_ID_SYNC_ARRAY_MUTEX,
- LATCH_ID_THREAD_MUTEX,
LATCH_ID_ZIP_PAD_MUTEX,
LATCH_ID_OS_AIO_READ_MUTEX,
LATCH_ID_OS_AIO_WRITE_MUTEX,
@@ -1284,7 +1283,10 @@ struct MY_ALIGNED(CPU_LEVEL1_DCACHE_LINESIZE) simple_counter
{
compile_time_assert(!atomic || sizeof(Type) == sizeof(lint));
if (atomic) {
- return Type(my_atomic_addlint(&m_counter, i));
+ /* Silence MSVS warnings when instantiating
+ this template with atomic=false. */
+ return Type(my_atomic_addlint(reinterpret_cast<lint*>
+ (&m_counter), i));
} else {
return m_counter += i;
}
diff --git a/storage/innobase/include/trx0rec.h b/storage/innobase/include/trx0rec.h
index 50304ce3631..3b4a195735b 100644
--- a/storage/innobase/include/trx0rec.h
+++ b/storage/innobase/include/trx0rec.h
@@ -188,28 +188,28 @@ transaction.
dberr_t
trx_undo_report_row_operation(
/*==========================*/
- ulint flags, /*!< in: if BTR_NO_UNDO_LOG_FLAG bit is
- set, does nothing */
- ulint op_type, /*!< in: TRX_UNDO_INSERT_OP or
- TRX_UNDO_MODIFY_OP */
que_thr_t* thr, /*!< in: query thread */
dict_index_t* index, /*!< in: clustered index */
const dtuple_t* clust_entry, /*!< in: in the case of an insert,
index entry to insert into the
- clustered index, otherwise NULL */
+ clustered index; in updates,
+ may contain a clustered index
+ record tuple that also contains
+ virtual columns of the table;
+ otherwise, NULL */
const upd_t* update, /*!< in: in the case of an update,
the update vector, otherwise NULL */
ulint cmpl_info, /*!< in: compiler info on secondary
index updates */
const rec_t* rec, /*!< in: case of an update or delete
marking, the record in the clustered
- index, otherwise NULL */
+ index; NULL if insert */
const ulint* offsets, /*!< in: rec_get_offsets(rec) */
roll_ptr_t* roll_ptr) /*!< out: rollback pointer to the
inserted undo log record,
0 if BTR_NO_UNDO_LOG
flag was specified */
- MY_ATTRIBUTE((nonnull(3,4,10), warn_unused_result));
+ MY_ATTRIBUTE((nonnull(1,2,8), warn_unused_result));
/** status bit used for trx_undo_prev_version_build() */
@@ -341,10 +341,6 @@ record */
storage fields: used by purge to
free the external storage */
-/* Operation type flags used in trx_undo_report_row_operation */
-#define TRX_UNDO_INSERT_OP 1U
-#define TRX_UNDO_MODIFY_OP 2U
-
#include "trx0rec.ic"
#endif /* trx0rec_h */
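
With the flags/op_type parameters and the TRX_UNDO_INSERT_OP/TRX_UNDO_MODIFY_OP constants removed, the operation is implied by the arguments, as the updated comments state: a non-NULL clust_entry means an insert, otherwise a non-NULL rec means an update or delete-mark. A minimal sketch of that dispatch with stand-in types:

#include <cstdio>

struct dtuple_t {};   /* stand-ins for the real InnoDB types */
struct rec_t {};

static void report_row_operation(const dtuple_t* clust_entry, const rec_t* rec)
{
        if (clust_entry != NULL) {      /* insert: index entry supplied, no record yet */
                std::puts("write insert undo record");
        } else if (rec != NULL) {       /* update or delete-mark: existing record supplied */
                std::puts("write update/delete-mark undo record");
        }
}

int main()
{
        dtuple_t entry;
        rec_t    rec;
        report_row_operation(&entry, NULL);   /* insert path */
        report_row_operation(NULL, &rec);     /* update/delete-mark path */
        return 0;
}
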
diff --git a/storage/innobase/include/trx0rseg.h b/storage/innobase/include/trx0rseg.h
index 4c162526384..3078aa8faf1 100644
--- a/storage/innobase/include/trx0rseg.h
+++ b/storage/innobase/include/trx0rseg.h
@@ -110,9 +110,12 @@ void
trx_rseg_mem_free(trx_rseg_t* rseg);
/** Create a persistent rollback segment.
-@param[in] space_id system or undo tablespace id */
+@param[in] space_id system or undo tablespace id
+@return pointer to new rollback segment
+@retval NULL on failure */
trx_rseg_t*
-trx_rseg_create(ulint space_id);
+trx_rseg_create(ulint space_id)
+ MY_ATTRIBUTE((warn_unused_result));
/** Create the temporary rollback segments. */
void
diff --git a/storage/innobase/include/trx0trx.h b/storage/innobase/include/trx0trx.h
index 75753b53467..6274dec9f9d 100644
--- a/storage/innobase/include/trx0trx.h
+++ b/storage/innobase/include/trx0trx.h
@@ -1,7 +1,7 @@
/*****************************************************************************
Copyright (c) 1996, 2016, Oracle and/or its affiliates. All Rights Reserved.
-Copyright (c) 2016, 2017, MariaDB Corporation.
+Copyright (c) 2015, 2017, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
diff --git a/storage/innobase/include/trx0xa.h b/storage/innobase/include/trx0xa.h
index b333f32cd73..4d5adc68dcd 100644
--- a/storage/innobase/include/trx0xa.h
+++ b/storage/innobase/include/trx0xa.h
@@ -1,6 +1,6 @@
/*****************************************************************************
-Copyright (c) 1995, 2014, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 1995, 2016, Oracle and/or its affiliates. All Rights Reserved.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -24,12 +24,7 @@ this program; if not, write to the Free Software Foundation, Inc.,
#ifndef XA_H
#define XA_H
-/* Missing MySQL 5.7 header */
-#ifdef HAVE_XA_H
-#include "xa.h"
-#else
#include "handler.h"
-#endif
/*
* Transaction branch identification: XID and NULLXID:
diff --git a/storage/innobase/include/univ.i b/storage/innobase/include/univ.i
index ee759534301..88c13be5a8f 100644
--- a/storage/innobase/include/univ.i
+++ b/storage/innobase/include/univ.i
@@ -41,7 +41,7 @@ Created 1/20/1994 Heikki Tuuri
#define INNODB_VERSION_MAJOR 5
#define INNODB_VERSION_MINOR 7
-#define INNODB_VERSION_BUGFIX 14
+#define INNODB_VERSION_BUGFIX 18
/* The following is the InnoDB version as shown in
SELECT plugin_version FROM information_schema.plugins;
@@ -121,14 +121,7 @@ HAVE_PSI_INTERFACE is defined. */
#ifdef HAVE_PSI_INTERFACE
# define UNIV_PFS_MUTEX
# define UNIV_PFS_RWLOCK
-/* For I/O instrumentation, performance schema rely
-on a native descriptor to identify the file, this
-descriptor could conflict with our OS level descriptor.
-Disable IO instrumentation on Windows until this is
-resolved */
-# ifndef _WIN32
-# define UNIV_PFS_IO
-# endif
+# define UNIV_PFS_IO
# define UNIV_PFS_THREAD
// JAN: TODO: MySQL 5.7 PSI
@@ -191,9 +184,9 @@ command. */
#define UNIV_ENABLE_UNIT_TEST_ROW_RAW_FORMAT_INT
*/
-#if defined HAVE_valgrind && defined HAVE_VALGRIND
+#if defined HAVE_valgrind && defined HAVE_VALGRIND_MEMCHECK_H
# define UNIV_DEBUG_VALGRIND
-#endif /* HAVE_VALGRIND */
+#endif
#ifdef DBUG_OFF
# undef UNIV_DEBUG
diff --git a/storage/innobase/include/ut0new.h b/storage/innobase/include/ut0new.h
index 5a9022e8a77..955e7b026c7 100644
--- a/storage/innobase/include/ut0new.h
+++ b/storage/innobase/include/ut0new.h
@@ -235,8 +235,10 @@ struct ut_new_pfx_t {
#endif
};
-/** Allocator class for allocating memory from inside std::* containers. */
-template <class T>
+/** Allocator class for allocating memory from inside std::* containers.
+@tparam T type of allocated object
+@tparam oom_fatal whether running out of memory is a fatal error */
+template <class T, bool oom_fatal = true>
class ut_allocator {
public:
typedef T* pointer;
@@ -249,13 +251,10 @@ public:
/** Default constructor. */
explicit
- ut_allocator(
- PSI_memory_key key = PSI_NOT_INSTRUMENTED)
- :
+ ut_allocator(PSI_memory_key key = PSI_NOT_INSTRUMENTED)
#ifdef UNIV_PFS_MEMORY
- m_key(key),
+ : m_key(key)
#endif /* UNIV_PFS_MEMORY */
- m_oom_fatal(true)
{
}
@@ -263,30 +262,10 @@ public:
template <class U>
ut_allocator(
const ut_allocator<U>& other)
- : m_oom_fatal(other.is_oom_fatal())
- {
#ifdef UNIV_PFS_MEMORY
- const PSI_memory_key other_key = other.get_mem_key(NULL);
-
- m_key = (other_key != mem_key_std)
- ? other_key
- : PSI_NOT_INSTRUMENTED;
+ : m_key(other.m_key)
#endif /* UNIV_PFS_MEMORY */
- }
-
- /** When out of memory (OOM) happens, report error and do not
- make it fatal.
- @return a reference to the allocator. */
- ut_allocator&
- set_oom_not_fatal() {
- m_oom_fatal = false;
- return(*this);
- }
-
- /** Check if allocation failure is a fatal error.
- @return true if allocation failure is fatal, false otherwise. */
- bool is_oom_fatal() const {
- return(m_oom_fatal);
+ {
}
/** Return the maximum number of objects that can be allocated by
@@ -364,7 +343,7 @@ public:
}
if (ptr == NULL) {
- ib::fatal_or_error(m_oom_fatal)
+ ib::fatal_or_error(oom_fatal)
<< "Cannot allocate " << total_bytes
<< " bytes of memory after "
<< alloc_max_retries << " retries over "
@@ -499,14 +478,13 @@ public:
}
if (pfx_new == NULL) {
- ib::fatal_or_error(m_oom_fatal)
+ ib::fatal_or_error(oom_fatal)
<< "Cannot reallocate " << total_bytes
<< " bytes of memory after "
<< alloc_max_retries << " retries over "
<< alloc_max_retries << " seconds. OS error: "
<< strerror(errno) << " (" << errno << "). "
<< OUT_OF_MEMORY_MSG;
- /* not reached */
return(NULL);
}
@@ -739,10 +717,6 @@ private:
void
operator=(
const ut_allocator<U>&);
-
- /** A flag to indicate whether out of memory (OOM) error is considered
- fatal. If true, it is fatal. */
- bool m_oom_fatal;
};
/** Compare two allocators of the same type.
@@ -882,9 +856,8 @@ ut_delete_array(
n_bytes, NULL, __FILE__, true, false))
#define ut_zalloc_nokey_nofatal(n_bytes) static_cast<void*>( \
- ut_allocator<byte>(PSI_NOT_INSTRUMENTED). \
- set_oom_not_fatal(). \
- allocate(n_bytes, NULL, __FILE__, true, false))
+ ut_allocator<byte, false>(PSI_NOT_INSTRUMENTED).allocate( \
+ n_bytes, NULL, __FILE__, true, false))
#define ut_realloc(ptr, n_bytes) static_cast<void*>( \
ut_allocator<byte>(PSI_NOT_INSTRUMENTED).reallocate( \
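
The ut_allocator change replaces the runtime m_oom_fatal flag and set_oom_not_fatal() with a second template parameter, so the fatal/non-fatal policy is fixed per instantiation, as the rewritten ut_zalloc_nokey_nofatal macro shows. A self-contained sketch of the same pattern (simplified, not the real allocator):

#include <cstdio>
#include <cstdlib>

/* Sketch only: a compile-time policy argument replaces a per-object runtime flag. */
template <class T, bool oom_fatal = true>
struct allocator_sketch {
        T* allocate(std::size_t n)
        {
                void* p = std::malloc(n * sizeof(T));
                if (p == NULL) {
                        std::fputs("out of memory\n", stderr);
                        if (oom_fatal) {
                                std::abort();   /* the fatal instantiation gives up */
                        }
                        return NULL;            /* the lenient one reports and returns NULL */
                }
                return static_cast<T*>(p);
        }
};

int main()
{
        allocator_sketch<unsigned char>        fatal_alloc;    /* like ut_allocator<byte> */
        allocator_sketch<unsigned char, false> lenient_alloc;  /* like ut_allocator<byte, false> */
        unsigned char* a = fatal_alloc.allocate(16);
        unsigned char* b = lenient_alloc.allocate(16);
        std::free(a);
        std::free(b);
        return 0;
}
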
diff --git a/storage/innobase/include/ut0rnd.ic b/storage/innobase/include/ut0rnd.ic
index 503c9482ea3..16dccb545d8 100644
--- a/storage/innobase/include/ut0rnd.ic
+++ b/storage/innobase/include/ut0rnd.ic
@@ -1,6 +1,7 @@
/*****************************************************************************
Copyright (c) 1994, 2016, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 2017, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -217,16 +218,22 @@ ut_fold_binary(
switch (len & 0x7) {
case 7:
fold = ut_fold_ulint_pair(fold, (ulint)(*str++));
+ /* fall through */
case 6:
fold = ut_fold_ulint_pair(fold, (ulint)(*str++));
+ /* fall through */
case 5:
fold = ut_fold_ulint_pair(fold, (ulint)(*str++));
+ /* fall through */
case 4:
fold = ut_fold_ulint_pair(fold, (ulint)(*str++));
+ /* fall through */
case 3:
fold = ut_fold_ulint_pair(fold, (ulint)(*str++));
+ /* fall through */
case 2:
fold = ut_fold_ulint_pair(fold, (ulint)(*str++));
+ /* fall through */
case 1:
fold = ut_fold_ulint_pair(fold, (ulint)(*str++));
}
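
The /* fall through */ comments document that each case in ut_fold_binary() deliberately continues into the next one to fold the remaining len & 7 bytes, and they keep -Wimplicit-fallthrough style warnings quiet. The same unrolling technique in a self-contained form:

#include <cassert>
#include <cstddef>

/* Fold the trailing (len % 4) bytes with a deliberately falling-through switch. */
static unsigned fold_tail(const unsigned char* p, std::size_t len)
{
        unsigned fold = 0;
        switch (len & 3) {
        case 3:
                fold += *p++;
                /* fall through */
        case 2:
                fold += *p++;
                /* fall through */
        case 1:
                fold += *p++;
        }
        return fold;
}

int main()
{
        const unsigned char buf[] = { 1, 2, 3, 4, 5, 6, 7 };
        assert(fold_tail(buf + 4, sizeof buf) == 5 + 6 + 7);   /* last three bytes folded */
        return 0;
}
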
diff --git a/storage/innobase/innodb.cmake b/storage/innobase/innodb.cmake
index 9d6ac0eb0e8..fe2d537c50e 100644
--- a/storage/innobase/innodb.cmake
+++ b/storage/innobase/innodb.cmake
@@ -24,12 +24,14 @@ INCLUDE(lzo.cmake)
INCLUDE(lzma.cmake)
INCLUDE(bzip2.cmake)
INCLUDE(snappy.cmake)
+INCLUDE(numa)
MYSQL_CHECK_LZ4()
MYSQL_CHECK_LZO()
MYSQL_CHECK_LZMA()
MYSQL_CHECK_BZIP2()
MYSQL_CHECK_SNAPPY()
+MYSQL_CHECK_NUMA()
IF(CMAKE_CROSSCOMPILING)
# Use CHECK_C_SOURCE_COMPILES instead of CHECK_C_SOURCE_RUNS when
@@ -123,8 +125,8 @@ ENDIF()
OPTION(WITH_INNODB_EXTRA_DEBUG "Enable extra InnoDB debug checks" OFF)
IF(WITH_INNODB_EXTRA_DEBUG)
- IF(NOT WITH_DEBUG)
- MESSAGE(FATAL_ERROR "WITH_INNODB_EXTRA_DEBUG can be enabled only when WITH_DEBUG is enabled")
+ IF(NOT CMAKE_BUILD_TYPE STREQUAL "Debug")
+ MESSAGE(FATAL_ERROR "WITH_INNODB_EXTRA_DEBUG can be enabled only in debug builds")
ENDIF()
SET(EXTRA_DEBUG_FLAGS "")
diff --git a/storage/innobase/lock/lock0lock.cc b/storage/innobase/lock/lock0lock.cc
index 01ffad772dc..a6caa009ef7 100644
--- a/storage/innobase/lock/lock0lock.cc
+++ b/storage/innobase/lock/lock0lock.cc
@@ -795,12 +795,19 @@ lock_reset_lock_and_trx_wait(
const char* stmt2=NULL;
size_t stmt_len;
trx_id_t trx_id = 0;
- stmt = innobase_get_stmt_unsafe(lock->trx->mysql_thd, &stmt_len);
+ stmt = lock->trx->mysql_thd
+ ? innobase_get_stmt_unsafe(
+ lock->trx->mysql_thd, &stmt_len)
+ : NULL;
if (lock->trx->lock.wait_lock &&
lock->trx->lock.wait_lock->trx) {
trx_id = lock->trx->lock.wait_lock->trx->id;
- stmt2 = innobase_get_stmt_unsafe(lock->trx->lock.wait_lock->trx->mysql_thd, &stmt_len);
+ stmt2 = lock->trx->lock.wait_lock->trx->mysql_thd
+ ? innobase_get_stmt_unsafe(
+ lock->trx->lock.wait_lock
+ ->trx->mysql_thd, &stmt_len)
+ : NULL;
}
ib::error() <<
@@ -5054,8 +5061,6 @@ lock_rec_unlock(
lock_t* first_lock;
lock_t* lock;
ulint heap_no;
- const char* stmt;
- size_t stmt_len;
ut_ad(trx);
ut_ad(rec);
@@ -5083,13 +5088,15 @@ lock_rec_unlock(
lock_mutex_exit();
trx_mutex_exit(trx);
- stmt = innobase_get_stmt_unsafe(trx->mysql_thd, &stmt_len);
-
{
ib::error err;
err << "Unlock row could not find a " << lock_mode
<< " mode lock on the record. Current statement: ";
- err.write(stmt, stmt_len);
+ size_t stmt_len;
+ if (const char* stmt = innobase_get_stmt_unsafe(
+ trx->mysql_thd, &stmt_len)) {
+ err.write(stmt, stmt_len);
+ }
}
return;
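
Both hunks add the same guard: call innobase_get_stmt_unsafe() only when the transaction has a mysql_thd, presumably because lock owners without a connection handle (for example, recovered or background transactions) would otherwise pass NULL into it. A trivial self-contained sketch of the guard with stand-in names:

#include <cstdio>
#include <cstring>

struct THD { const char* query; };   /* stand-in for the connection handle */

static const char* get_stmt(const THD* thd, std::size_t* len)   /* stand-in accessor */
{
        *len = std::strlen(thd->query);
        return thd->query;
}

static void report(const THD* thd)
{
        std::size_t len = 0;
        /* The added pattern: fetch the statement only when a handle exists. */
        const char* stmt = thd != NULL ? get_stmt(thd, &len) : NULL;
        if (stmt != NULL) {
                std::fwrite(stmt, 1, len, stdout);
                std::putchar('\n');
        }
}

int main()
{
        THD thd = { "SELECT 1" };
        report(&thd);   /* prints the statement */
        report(NULL);   /* transaction without a THD: prints nothing */
        return 0;
}
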
diff --git a/storage/innobase/log/log0crypt.cc b/storage/innobase/log/log0crypt.cc
index 79301254a0a..69cfec10fed 100644
--- a/storage/innobase/log/log0crypt.cc
+++ b/storage/innobase/log/log0crypt.cc
@@ -120,7 +120,8 @@ log_crypt(byte* buf, ulint size, bool decrypt)
for (const byte* const end = buf + size; buf != end;
buf += OS_FILE_LOG_BLOCK_SIZE) {
- byte dst[OS_FILE_LOG_BLOCK_SIZE - LOG_CRYPT_HDR_SIZE];
+ uint32_t dst[(OS_FILE_LOG_BLOCK_SIZE - LOG_CRYPT_HDR_SIZE)
+ / sizeof(uint32_t)];
const ulint log_block_no = log_block_get_hdr_no(buf);
/* The log block number is not encrypted. */
@@ -130,8 +131,7 @@ log_crypt(byte* buf, ulint size, bool decrypt)
#else
~(LOG_BLOCK_FLUSH_BIT_MASK >> 24)
#endif
- & (*reinterpret_cast<uint32_t*>(dst)
- = *reinterpret_cast<const uint32_t*>(
+ & (*dst = *reinterpret_cast<const uint32_t*>(
buf + LOG_BLOCK_HDR_NO));
#if LOG_BLOCK_HDR_NO + 4 != LOG_CRYPT_HDR_SIZE
# error "LOG_BLOCK_HDR_NO has been moved; redo log format affected!"
@@ -143,7 +143,8 @@ log_crypt(byte* buf, ulint size, bool decrypt)
log_block_no));
int rc = encryption_crypt(
- buf + LOG_CRYPT_HDR_SIZE, sizeof dst, dst, &dst_len,
+ buf + LOG_CRYPT_HDR_SIZE, sizeof dst,
+ reinterpret_cast<byte*>(dst), &dst_len,
const_cast<byte*>(info.crypt_key.bytes),
sizeof info.crypt_key,
reinterpret_cast<byte*>(aes_ctr_iv), sizeof aes_ctr_iv,
@@ -155,19 +156,6 @@ log_crypt(byte* buf, ulint size, bool decrypt)
ut_a(rc == MY_AES_OK);
ut_a(dst_len == sizeof dst);
- if (decrypt) {
- std::ostringstream s;
- ut_print_buf_hex(s, buf + LOG_CRYPT_HDR_SIZE,
- OS_FILE_LOG_BLOCK_SIZE
- - LOG_CRYPT_HDR_SIZE);
- ib::info() << "S: " << s.str();
- std::ostringstream d;
- ut_print_buf_hex(d, dst,
- OS_FILE_LOG_BLOCK_SIZE
- - LOG_CRYPT_HDR_SIZE);
- ib::info() << "c: " << d.str();
- }
-
memcpy(buf + LOG_CRYPT_HDR_SIZE, dst, sizeof dst);
}
}
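
Declaring dst as an array of uint32_t rather than byte makes the 32-bit store of the block number naturally aligned (and alias-clean), and the buffer is handed to encryption_crypt() as byte* via reinterpret_cast. A self-contained sketch of the idea, assuming that alignment/aliasing was the motivation; the sizes are stand-ins, not the real constants:

#include <cassert>
#include <cstdint>
#include <cstring>

enum { BLOCK_SIZE = 512, HDR_SIZE = 12 };   /* stand-ins for the log block/header sizes */

int main()
{
        /* A uint32_t array is suitably aligned for 32-bit stores... */
        std::uint32_t dst[(BLOCK_SIZE - HDR_SIZE) / sizeof(std::uint32_t)];
        assert(reinterpret_cast<std::uintptr_t>(&dst[0]) % sizeof(std::uint32_t) == 0);

        dst[0] = 0x12345678u;   /* the kind of store the log code performs on *dst */

        /* ...and can still be handed to byte-oriented interfaces. */
        unsigned char* bytes = reinterpret_cast<unsigned char*>(dst);
        unsigned char copy[sizeof dst];
        std::memcpy(copy, bytes, sizeof dst);
        return copy[0] == bytes[0] ? 0 : 1;
}
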
diff --git a/storage/innobase/log/log0log.cc b/storage/innobase/log/log0log.cc
index 7d4e0459610..d8e4ba6fe98 100644
--- a/storage/innobase/log/log0log.cc
+++ b/storage/innobase/log/log0log.cc
@@ -44,6 +44,7 @@ Created 12/9/1995 Heikki Tuuri
#include "fil0fil.h"
#include "dict0boot.h"
#include "dict0stats_bg.h"
+#include "btr0defragment.h"
#include "srv0srv.h"
#include "srv0start.h"
#include "trx0sys.h"
@@ -666,18 +667,14 @@ log_group_set_fields(
group->lsn = lsn;
}
-/*****************************************************************//**
-Calculates the recommended highest values for lsn - last_checkpoint_lsn
+/** Calculate the recommended highest values for lsn - last_checkpoint_lsn
and lsn - buf_get_oldest_modification().
@retval true on success
@retval false if the smallest log group is too small to
accommodate the number of OS threads in the database server */
-static MY_ATTRIBUTE((warn_unused_result))
bool
-log_calc_max_ages(void)
-/*===================*/
+log_set_capacity()
{
- log_group_t* group;
lsn_t margin;
ulint free;
bool success = true;
@@ -685,21 +682,7 @@ log_calc_max_ages(void)
log_mutex_enter();
- group = UT_LIST_GET_FIRST(log_sys->log_groups);
-
- ut_ad(group);
-
- smallest_capacity = LSN_MAX;
-
- while (group) {
- if (log_group_get_capacity(group) < smallest_capacity) {
-
- smallest_capacity = log_group_get_capacity(group);
- }
-
- group = UT_LIST_GET_NEXT(log_groups, group);
- }
-
+ smallest_capacity = log_group_get_capacity(&log_sys->log);
/* Add extra safety */
smallest_capacity = smallest_capacity - smallest_capacity / 10;
@@ -746,11 +729,9 @@ failure:
return(success);
}
-/******************************************************//**
-Initializes the log. */
+/** Initializes the redo logging subsystem. */
void
-log_init(void)
-/*==========*/
+log_sys_init()
{
log_sys = static_cast<log_t*>(ut_zalloc_nokey(sizeof(log_t)));
@@ -779,7 +760,6 @@ log_init(void)
log_sys->max_buf_free = log_sys->buf_size / LOG_BUF_FLUSH_RATIO
- LOG_BUF_FLUSH_MARGIN;
log_sys->check_flush_or_checkpoint = true;
- UT_LIST_INIT(log_sys->log_groups, &log_group_t::log_groups);
log_sys->n_log_ios_old = log_sys->n_log_ios;
log_sys->last_printout_time = time(NULL);
@@ -823,32 +803,20 @@ log_init(void)
}
}
-/******************************************************************//**
-Inits a log group to the log system.
-@return true if success, false if not */
-MY_ATTRIBUTE((warn_unused_result))
-bool
-log_group_init(
-/*===========*/
- ulint id, /*!< in: group id */
- ulint n_files, /*!< in: number of log files */
- lsn_t file_size, /*!< in: log file size in bytes */
- ulint space_id) /*!< in: space id of the file space
- which contains the log files of this
- group */
+/** Initialize the redo log.
+@param[in] n_files number of files
+@param[in] file_size file size in bytes */
+void
+log_init(ulint n_files, lsn_t file_size)
{
ulint i;
- log_group_t* group;
+ log_group_t* group = &log_sys->log;
- group = static_cast<log_group_t*>(ut_malloc_nokey(sizeof(log_group_t)));
-
- group->id = id;
group->n_files = n_files;
group->format = srv_encrypt_log
? LOG_HEADER_FORMAT_CURRENT | LOG_HEADER_FORMAT_ENCRYPTED
: LOG_HEADER_FORMAT_CURRENT;
group->file_size = file_size;
- group->space_id = space_id;
group->state = LOG_GROUP_OK;
group->lsn = LOG_START_LSN;
group->lsn_offset = LOG_FILE_HDR_SIZE;
@@ -874,9 +842,6 @@ log_group_init(
group->checkpoint_buf = static_cast<byte*>(
ut_align(group->checkpoint_buf_ptr,OS_FILE_LOG_BLOCK_SIZE));
-
- UT_LIST_ADD_LAST(log_sys->log_groups, group);
- return(log_calc_max_ages());
}
/******************************************************//**
@@ -899,12 +864,11 @@ log_io_complete(
case SRV_O_DIRECT:
case SRV_O_DIRECT_NO_FSYNC:
case SRV_ALL_O_DIRECT_FSYNC:
- fil_flush(group->space_id);
+ fil_flush(SRV_LOG_SPACE_FIRST_ID);
}
- DBUG_PRINT("ib_log", ("checkpoint info written to group %u",
- unsigned(group->id)));
+ DBUG_PRINT("ib_log", ("checkpoint info written"));
log_io_complete_checkpoint();
return;
@@ -931,7 +895,6 @@ log_group_file_header_flush(
ut_ad(log_write_mutex_own());
ut_ad(!recv_no_log_write);
- ut_ad(group->id == 0);
ut_a(nth_file < group->n_files);
ut_ad((group->format & ~LOG_HEADER_FORMAT_ENCRYPTED)
== LOG_HEADER_FORMAT_CURRENT);
@@ -950,9 +913,8 @@ log_group_file_header_flush(
dest_offset = nth_file * group->file_size;
DBUG_PRINT("ib_log", ("write " LSN_PF
- " group " ULINTPF
" file " ULINTPF " header",
- start_lsn, group->id, nth_file));
+ start_lsn, nth_file));
log_sys->n_log_ios++;
@@ -964,7 +926,7 @@ log_group_file_header_flush(
= (ulint) (dest_offset / univ_page_size.physical());
fil_io(IORequestLogWrite, true,
- page_id_t(group->space_id, page_no),
+ page_id_t(SRV_LOG_SPACE_FIRST_ID, page_no),
univ_page_size,
(ulint) (dest_offset % univ_page_size.physical()),
OS_FILE_LOG_BLOCK_SIZE, buf, group);
@@ -1050,10 +1012,10 @@ loop:
DBUG_PRINT("ib_log",
("write " LSN_PF " to " LSN_PF
- ": group " ULINTPF " len " ULINTPF
+ ": len " ULINTPF
" blocks " ULINTPF ".." ULINTPF,
start_lsn, next_offset,
- group->id, write_len,
+ write_len,
log_block_get_hdr_no(buf),
log_block_get_hdr_no(
buf + write_len
@@ -1091,7 +1053,7 @@ loop:
= (ulint) (next_offset / univ_page_size.physical());
fil_io(IORequestLogWrite, true,
- page_id_t(group->space_id, page_no),
+ page_id_t(SRV_LOG_SPACE_FIRST_ID, page_no),
univ_page_size,
(ulint) (next_offset % UNIV_PAGE_SIZE), write_len, buf,
group);
@@ -1259,7 +1221,6 @@ loop:
return;
}
- log_group_t* group;
ulint start_offset;
ulint end_offset;
ulint area_start;
@@ -1303,9 +1264,7 @@ loop:
log_buffer_switch();
- group = UT_LIST_GET_FIRST(log_sys->log_groups);
-
- log_group_set_fields(group, log_sys->write_lsn);
+ log_group_set_fields(&log_sys->log, log_sys->write_lsn);
log_mutex_exit();
/* Calculate pad_size if needed. */
@@ -1316,7 +1275,7 @@ loop:
end_offset = log_group_calc_lsn_offset(
ut_uint64_align_up(write_lsn,
OS_FILE_LOG_BLOCK_SIZE),
- group);
+ &log_sys->log);
end_offset_in_unit = (ulint) (end_offset % write_ahead_size);
if (end_offset_in_unit > 0
@@ -1335,7 +1294,7 @@ loop:
}
/* Do the write to the log files */
log_group_write_buf(
- group, write_buf + area_start,
+ &log_sys->log, write_buf + area_start,
area_end - area_start + pad_size,
#ifdef UNIV_DEBUG
pad_size,
@@ -1538,11 +1497,10 @@ log_io_complete_checkpoint(void)
}
/** Write checkpoint info to the log header.
-@param[in,out] group redo log
@param[in] end_lsn start LSN of the MLOG_CHECKPOINT mini-transaction */
static
void
-log_group_checkpoint(log_group_t* group, lsn_t end_lsn)
+log_group_checkpoint(lsn_t end_lsn)
{
lsn_t lsn_offset;
byte* buf;
@@ -1555,10 +1513,11 @@ log_group_checkpoint(log_group_t* group, lsn_t end_lsn)
|| srv_shutdown_state != SRV_SHUTDOWN_NONE);
DBUG_PRINT("ib_log", ("checkpoint " UINT64PF " at " LSN_PF
- " written to group " ULINTPF,
+ " written",
log_sys->next_checkpoint_no,
- log_sys->next_checkpoint_lsn,
- group->id));
+ log_sys->next_checkpoint_lsn));
+
+ log_group_t* group = &log_sys->log;
buf = group->checkpoint_buf;
memset(buf, 0, OS_FILE_LOG_BLOCK_SIZE);
@@ -1600,7 +1559,7 @@ log_group_checkpoint(log_group_t* group, lsn_t end_lsn)
file write and a checkpoint field write */
fil_io(IORequestLogWrite, false,
- page_id_t(group->space_id, 0),
+ page_id_t(SRV_LOG_SPACE_FIRST_ID, 0),
univ_page_size,
(log_sys->next_checkpoint_no & 1)
? LOG_CHECKPOINT_2 : LOG_CHECKPOINT_1,
@@ -1625,7 +1584,8 @@ log_group_header_read(
MONITOR_INC(MONITOR_LOG_IO);
fil_io(IORequestLogRead, true,
- page_id_t(group->space_id, header / univ_page_size.physical()),
+ page_id_t(SRV_LOG_SPACE_FIRST_ID,
+ header / univ_page_size.physical()),
univ_page_size, header % univ_page_size.physical(),
OS_FILE_LOG_BLOCK_SIZE, log_sys->checkpoint_buf, NULL);
}
@@ -1639,12 +1599,7 @@ log_write_checkpoint_info(bool sync, lsn_t end_lsn)
ut_ad(log_mutex_own());
ut_ad(!srv_read_only_mode);
- for (log_group_t* group = UT_LIST_GET_FIRST(log_sys->log_groups);
- group;
- group = UT_LIST_GET_NEXT(log_groups, group)) {
-
- log_group_checkpoint(group, end_lsn);
- }
+ log_group_checkpoint(end_lsn);
log_mutex_exit();
@@ -2010,6 +1965,8 @@ loop:
thread_name = "lock_wait_timeout_thread";
} else if (srv_buf_dump_thread_active) {
thread_name = "buf_dump_thread";
+ } else if (btr_defragment_thread_active) {
+ thread_name = "btr_defragment_thread";
} else if (srv_fast_shutdown != 2 && trx_rollback_or_clean_is_active) {
thread_name = "rollback of recovered transactions";
} else {
@@ -2031,8 +1988,8 @@ wait_suspend_loop:
switch (srv_get_active_thread_type()) {
case SRV_NONE:
- srv_shutdown_state = SRV_SHUTDOWN_FLUSH_PHASE;
if (!srv_n_fil_crypt_threads_started) {
+ srv_shutdown_state = SRV_SHUTDOWN_FLUSH_PHASE;
break;
}
os_event_set(fil_crypt_threads_event);
@@ -2280,13 +2237,11 @@ log_refresh_stats(void)
log_sys->last_printout_time = time(NULL);
}
-/********************************************************//**
-Closes a log group. */
+/** Close a log group.
+@param[in,out] group log group to close */
static
void
-log_group_close(
-/*===========*/
- log_group_t* group) /* in,own: log group to close */
+log_group_close(log_group_t* group)
{
ulint i;
@@ -2297,7 +2252,10 @@ log_group_close(
ut_free(group->file_header_bufs_ptr);
ut_free(group->file_header_bufs);
ut_free(group->checkpoint_buf_ptr);
- ut_free(group);
+ group->n_files = 0;
+ group->file_header_bufs_ptr = NULL;
+ group->file_header_bufs = NULL;
+ group->checkpoint_buf_ptr = NULL;
}
/********************************************************//**
@@ -2306,19 +2264,7 @@ void
log_group_close_all(void)
/*=====================*/
{
- log_group_t* group;
-
- group = UT_LIST_GET_FIRST(log_sys->log_groups);
-
- while (UT_LIST_GET_LEN(log_sys->log_groups) > 0) {
- log_group_t* prev_group = group;
-
- group = UT_LIST_GET_NEXT(log_groups, group);
-
- UT_LIST_REMOVE(log_sys->log_groups, prev_group);
-
- log_group_close(prev_group);
- }
+ log_group_close(&log_sys->log);
}
/********************************************************//**
diff --git a/storage/innobase/log/log0recv.cc b/storage/innobase/log/log0recv.cc
index f3a00e7b5e6..e48e185274a 100644
--- a/storage/innobase/log/log0recv.cc
+++ b/storage/innobase/log/log0recv.cc
@@ -717,7 +717,7 @@ loop:
= (ulint) (source_offset / univ_page_size.physical());
fil_io(IORequestLogRead, true,
- page_id_t(group->space_id, page_no),
+ page_id_t(SRV_LOG_SPACE_FIRST_ID, page_no),
univ_page_size,
(ulint) (source_offset % univ_page_size.physical()),
len, buf, NULL);
@@ -787,20 +787,13 @@ recv_synchronize_groups()
const lsn_t start_lsn = ut_uint64_align_down(recovered_lsn,
OS_FILE_LOG_BLOCK_SIZE);
- log_group_read_log_seg(log_sys->buf,
- UT_LIST_GET_FIRST(log_sys->log_groups),
+ log_group_read_log_seg(log_sys->buf, &log_sys->log,
start_lsn, start_lsn + OS_FILE_LOG_BLOCK_SIZE);
- ut_ad(UT_LIST_GET_LEN(log_sys->log_groups) == 1);
+ /* Update the fields in the group struct to correspond to
+ recovered_lsn */
- for (log_group_t* group = UT_LIST_GET_FIRST(log_sys->log_groups);
- group;
- group = UT_LIST_GET_NEXT(log_groups, group)) {
- /* Update the fields in the group struct to correspond to
- recovered_lsn */
-
- log_group_set_fields(group, recovered_lsn);
- }
+ log_group_set_fields(&log_sys->log, recovered_lsn);
/* Copy the checkpoint info to the log; remember that we have
incremented checkpoint_no by one, and the info will not be written
@@ -831,17 +824,14 @@ recv_check_log_header_checksum(
@return error code or DB_SUCCESS */
static MY_ATTRIBUTE((warn_unused_result))
dberr_t
-recv_find_max_checkpoint_0(
- log_group_t** max_group,
- ulint* max_field)
+recv_find_max_checkpoint_0(log_group_t** max_group, ulint* max_field)
{
- log_group_t* group = UT_LIST_GET_FIRST(log_sys->log_groups);
+ log_group_t* group = &log_sys->log;
ib_uint64_t max_no = 0;
ib_uint64_t checkpoint_no;
byte* buf = log_sys->checkpoint_buf;
ut_ad(group->format == 0);
- ut_ad(UT_LIST_GET_NEXT(log_groups, group) == NULL);
/** Offset of the first checkpoint checksum */
static const uint CHECKSUM_1 = 288;
@@ -852,6 +842,8 @@ recv_find_max_checkpoint_0(
/** Least significant bits of the checkpoint offset */
static const uint OFFSET_LOW32 = 16;
+ *max_group = NULL;
+
for (ulint field = LOG_CHECKPOINT_1; field <= LOG_CHECKPOINT_2;
field += LOG_CHECKPOINT_2 - LOG_CHECKPOINT_1) {
log_group_header_read(group, field);
@@ -883,9 +875,8 @@ recv_find_max_checkpoint_0(
}
DBUG_PRINT("ib_log",
- ("checkpoint " UINT64PF " at " LSN_PF
- " found in group " ULINTPF,
- checkpoint_no, group->lsn, group->id));
+ ("checkpoint " UINT64PF " at " LSN_PF " found",
+ checkpoint_no, group->lsn));
if (checkpoint_no >= max_no) {
*max_group = group;
@@ -916,7 +907,7 @@ dberr_t
recv_log_format_0_recover(lsn_t lsn)
{
log_mutex_enter();
- log_group_t* group = UT_LIST_GET_FIRST(log_sys->log_groups);
+ log_group_t* group = &log_sys->log;
const lsn_t source_offset
= log_group_calc_lsn_offset(lsn, group);
log_mutex_exit();
@@ -932,7 +923,7 @@ recv_log_format_0_recover(lsn_t lsn)
REFMAN "upgrading.html";
fil_io(IORequestLogRead, true,
- page_id_t(group->space_id, page_no),
+ page_id_t(SRV_LOG_SPACE_FIRST_ID, page_no),
univ_page_size,
(ulint) ((source_offset & ~(OS_FILE_LOG_BLOCK_SIZE - 1))
% univ_page_size.physical()),
@@ -968,14 +959,10 @@ recv_log_format_0_recover(lsn_t lsn)
}
/** Find the latest checkpoint in the log header.
-@param[out] max_group log group, or NULL
@param[out] max_field LOG_CHECKPOINT_1 or LOG_CHECKPOINT_2
@return error code or DB_SUCCESS */
-static MY_ATTRIBUTE((warn_unused_result))
dberr_t
-recv_find_max_checkpoint(
- log_group_t** max_group,
- ulint* max_field)
+recv_find_max_checkpoint(ulint* max_field)
{
log_group_t* group;
ib_uint64_t max_no;
@@ -983,101 +970,92 @@ recv_find_max_checkpoint(
ulint field;
byte* buf;
- group = UT_LIST_GET_FIRST(log_sys->log_groups);
+ group = &log_sys->log;
max_no = 0;
- *max_group = NULL;
*max_field = 0;
buf = log_sys->checkpoint_buf;
- while (group) {
- group->state = LOG_GROUP_CORRUPTED;
+ group->state = LOG_GROUP_CORRUPTED;
- log_group_header_read(group, 0);
- /* Check the header page checksum. There was no
- checksum in the first redo log format (version 0). */
- group->format = mach_read_from_4(buf + LOG_HEADER_FORMAT);
- if (group->format != 0
- && !recv_check_log_header_checksum(buf)) {
- ib::error() << "Invalid redo log header checksum.";
- return(DB_CORRUPTION);
- }
+ log_group_header_read(group, 0);
+ /* Check the header page checksum. There was no
+ checksum in the first redo log format (version 0). */
+ group->format = mach_read_from_4(buf + LOG_HEADER_FORMAT);
+ if (group->format != 0
+ && !recv_check_log_header_checksum(buf)) {
+ ib::error() << "Invalid redo log header checksum.";
+ return(DB_CORRUPTION);
+ }
- switch (group->format) {
- case 0:
- return(recv_find_max_checkpoint_0(
- max_group, max_field));
- case LOG_HEADER_FORMAT_CURRENT:
- case LOG_HEADER_FORMAT_CURRENT | LOG_HEADER_FORMAT_ENCRYPTED:
- break;
- default:
- /* Ensure that the string is NUL-terminated. */
- buf[LOG_HEADER_CREATOR_END] = 0;
- ib::error() << "Unsupported redo log format."
- " The redo log was created"
- " with " << buf + LOG_HEADER_CREATOR <<
- ". Please follow the instructions at "
- REFMAN "upgrading-downgrading.html";
- /* Do not issue a message about a possibility
- to cleanly shut down the newer server version
- and to remove the redo logs, because the
- format of the system data structures may
- radically change after MySQL 5.7. */
- return(DB_ERROR);
- }
+ switch (group->format) {
+ case 0:
+ return(recv_find_max_checkpoint_0(&group, max_field));
+ case LOG_HEADER_FORMAT_CURRENT:
+ case LOG_HEADER_FORMAT_CURRENT | LOG_HEADER_FORMAT_ENCRYPTED:
+ break;
+ default:
+ /* Ensure that the string is NUL-terminated. */
+ buf[LOG_HEADER_CREATOR_END] = 0;
+ ib::error() << "Unsupported redo log format."
+ " The redo log was created"
+ " with " << buf + LOG_HEADER_CREATOR <<
+ ". Please follow the instructions at "
+ REFMAN "upgrading-downgrading.html";
+ /* Do not issue a message about a possibility
+ to cleanly shut down the newer server version
+ and to remove the redo logs, because the
+ format of the system data structures may
+ radically change after MySQL 5.7. */
+ return(DB_ERROR);
+ }
- for (field = LOG_CHECKPOINT_1; field <= LOG_CHECKPOINT_2;
- field += LOG_CHECKPOINT_2 - LOG_CHECKPOINT_1) {
+ for (field = LOG_CHECKPOINT_1; field <= LOG_CHECKPOINT_2;
+ field += LOG_CHECKPOINT_2 - LOG_CHECKPOINT_1) {
- log_group_header_read(group, field);
+ log_group_header_read(group, field);
- const ulint crc32 = log_block_calc_checksum_crc32(buf);
- const ulint cksum = log_block_get_checksum(buf);
+ const ulint crc32 = log_block_calc_checksum_crc32(buf);
+ const ulint cksum = log_block_get_checksum(buf);
- if (crc32 != cksum) {
- DBUG_PRINT("ib_log",
- ("invalid checkpoint,"
- " group " ULINTPF " at " ULINTPF
- ", checksum %x expected %x",
- group->id, field,
- (unsigned) cksum,
- (unsigned) crc32));
- continue;
- }
+ if (crc32 != cksum) {
+ DBUG_PRINT("ib_log",
+ ("invalid checkpoint,"
+ " at " ULINTPF
+ ", checksum " ULINTPFx
+ " expected " ULINTPFx,
+ field, cksum, crc32));
+ continue;
+ }
- if (group->is_encrypted()
- && !log_crypt_read_checkpoint_buf(buf)) {
- ib::error() << "Reading checkpoint"
- " encryption info failed.";
- continue;
- }
+ if (group->is_encrypted()
+ && !log_crypt_read_checkpoint_buf(buf)) {
+ ib::error() << "Reading checkpoint"
+ " encryption info failed.";
+ continue;
+ }
+
+ group->state = LOG_GROUP_OK;
- group->state = LOG_GROUP_OK;
+ group->lsn = mach_read_from_8(
+ buf + LOG_CHECKPOINT_LSN);
+ group->lsn_offset = mach_read_from_8(
+ buf + LOG_CHECKPOINT_OFFSET);
+ checkpoint_no = mach_read_from_8(
+ buf + LOG_CHECKPOINT_NO);
- group->lsn = mach_read_from_8(
- buf + LOG_CHECKPOINT_LSN);
- group->lsn_offset = mach_read_from_8(
- buf + LOG_CHECKPOINT_OFFSET);
- checkpoint_no = mach_read_from_8(
- buf + LOG_CHECKPOINT_NO);
+ DBUG_PRINT("ib_log",
+ ("checkpoint " UINT64PF " at " LSN_PF " found ",
+ checkpoint_no, group->lsn));
- DBUG_PRINT("ib_log",
- ("checkpoint " UINT64PF " at " LSN_PF
- " found in group " ULINTPF,
- checkpoint_no, group->lsn, group->id));
-
- if (checkpoint_no >= max_no) {
- *max_group = group;
- *max_field = field;
- max_no = checkpoint_no;
- }
+ if (checkpoint_no >= max_no) {
+ *max_field = field;
+ max_no = checkpoint_no;
}
-
- group = UT_LIST_GET_NEXT(log_groups, group);
}
- if (*max_group == NULL) {
+ if (*max_field == 0) {
/* Before 5.7.9, we could get here during database
initialization if we created an ib_logfile0 file that
was filled with zeroes, and were killed. After
@@ -1798,8 +1776,7 @@ recv_recover_page(bool just_read_in, buf_block_t* block)
while (recv) {
end_lsn = recv->end_lsn;
- ut_ad(end_lsn
- <= UT_LIST_GET_FIRST(log_sys->log_groups)->scanned_lsn);
+ ut_ad(end_lsn <= log_sys->log.scanned_lsn);
if (recv->len > RECV_DATA_BLOCK_SIZE) {
/* We have to copy the record body to a separate
@@ -2988,10 +2965,9 @@ recv_group_scan_log_recs(
DBUG_RETURN(false);
}
- DBUG_PRINT("ib_log", ("%s " LSN_PF
- " completed for log group " ULINTPF,
+ DBUG_PRINT("ib_log", ("%s " LSN_PF " completed",
last_phase ? "rescan" : "scan",
- group->scanned_lsn, group->id));
+ group->scanned_lsn));
DBUG_RETURN(store_to_hash == STORE_NO);
}
@@ -3142,11 +3118,9 @@ recv_init_crash_recovery_spaces()
of first system tablespace page
@return error code or DB_SUCCESS */
dberr_t
-recv_recovery_from_checkpoint_start(
- lsn_t flush_lsn)
+recv_recovery_from_checkpoint_start(lsn_t flush_lsn)
{
log_group_t* group;
- log_group_t* max_cp_group;
ulint max_cp_field;
lsn_t checkpoint_lsn;
bool rescan;
@@ -3172,14 +3146,14 @@ recv_recovery_from_checkpoint_start(
/* Look for the latest checkpoint from any of the log groups */
- err = recv_find_max_checkpoint(&max_cp_group, &max_cp_field);
+ err = recv_find_max_checkpoint(&max_cp_field);
if (err != DB_SUCCESS) {
log_mutex_exit();
return(err);
}
- log_group_header_read(max_cp_group, max_cp_field);
+ log_group_header_read(&log_sys->log, max_cp_field);
buf = log_sys->checkpoint_buf;
@@ -3194,8 +3168,7 @@ recv_recovery_from_checkpoint_start(
ut_ad(RECV_SCAN_SIZE <= log_sys->buf_size);
- ut_ad(UT_LIST_GET_LEN(log_sys->log_groups) == 1);
- group = UT_LIST_GET_FIRST(log_sys->log_groups);
+ group = &log_sys->log;
const lsn_t end_lsn = mach_read_from_8(
buf + LOG_CHECKPOINT_END_LSN);
@@ -3483,11 +3456,8 @@ recv_reset_logs(
log_sys->lsn = ut_uint64_align_up(lsn, OS_FILE_LOG_BLOCK_SIZE);
- for (log_group_t* group = UT_LIST_GET_FIRST(log_sys->log_groups);
- group; group = UT_LIST_GET_NEXT(log_groups, group)) {
- group->lsn = log_sys->lsn;
- group->lsn_offset = LOG_FILE_HDR_SIZE;
- }
+ log_sys->log.lsn = log_sys->lsn;
+ log_sys->log.lsn_offset = LOG_FILE_HDR_SIZE;
log_sys->buf_next_to_write = 0;
log_sys->write_lsn = log_sys->lsn;
diff --git a/storage/innobase/mysql-test/storage_engine/disabled.def b/storage/innobase/mysql-test/storage_engine/disabled.def
index bad10099bbf..1d67f9311ca 100644
--- a/storage/innobase/mysql-test/storage_engine/disabled.def
+++ b/storage/innobase/mysql-test/storage_engine/disabled.def
@@ -4,4 +4,6 @@ insert_high_prio : InnoDB does not use table-level locking
insert_low_prio : InnoDB does not use table-level locking
select_high_prio : InnoDB does not use table-level locking
update_low_prio : InnoDB does not use table-level locking
-
+insert_delayed : MDEV-12880 - INSERT DELAYED is not detected as inapplicable to a table under lock
+lock_concurrent : MDEV-12882 - Assertion failure
+tbl_opt_index_dir : INDEX DIRECTORY option is not supported anymore
diff --git a/storage/innobase/mysql-test/storage_engine/repair_table.rdiff b/storage/innobase/mysql-test/storage_engine/repair_table.rdiff
index be3709c5833..717d437b2d1 100644
--- a/storage/innobase/mysql-test/storage_engine/repair_table.rdiff
+++ b/storage/innobase/mysql-test/storage_engine/repair_table.rdiff
@@ -111,7 +111,7 @@
-test.t1 check error Corrupt
+test.t1 check status OK
SELECT a,b FROM t1;
--ERROR HY000: Incorrect key file for table 't1'; try to repair it
+-ERROR HY000: Index for table 't1' is corrupt; try to repair it
-# Statement ended with one of expected results (0,ER_NOT_KEYFILE,144).
-# If you got a difference in error message, just add it to rdiff file
-INSERT INTO t1 (a,b) VALUES (14,'n'),(15,'o');
diff --git a/storage/innobase/mysql-test/storage_engine/suite.opt b/storage/innobase/mysql-test/storage_engine/suite.opt
index 034b58f2628..627becdbfb5 100644
--- a/storage/innobase/mysql-test/storage_engine/suite.opt
+++ b/storage/innobase/mysql-test/storage_engine/suite.opt
@@ -1 +1 @@
---innodb --ignore-builtin-innodb --plugin-load=ha_innodb
+--innodb
diff --git a/storage/innobase/mysql-test/storage_engine/tbl_opt_data_index_dir.rdiff b/storage/innobase/mysql-test/storage_engine/tbl_opt_index_dir.rdiff
index e09e50b17ec..e09e50b17ec 100644
--- a/storage/innobase/mysql-test/storage_engine/tbl_opt_data_index_dir.rdiff
+++ b/storage/innobase/mysql-test/storage_engine/tbl_opt_index_dir.rdiff
diff --git a/storage/innobase/mysql-test/storage_engine/tbl_opt_row_format.rdiff b/storage/innobase/mysql-test/storage_engine/tbl_opt_row_format.rdiff
index a6572ffa7f0..daa5fc67dec 100644
--- a/storage/innobase/mysql-test/storage_engine/tbl_opt_row_format.rdiff
+++ b/storage/innobase/mysql-test/storage_engine/tbl_opt_row_format.rdiff
@@ -1,10 +1,44 @@
---- suite/storage_engine/tbl_opt_row_format.result 2012-06-24 23:55:19.539380000 +0400
-+++ suite/storage_engine/tbl_opt_row_format.reject 2012-07-15 19:26:02.235049157 +0400
-@@ -1,5 +1,7 @@
- DROP TABLE IF EXISTS t1;
- CREATE TABLE t1 (a <INT_COLUMN>, b <CHAR_COLUMN>) ENGINE=<STORAGE_ENGINE> <CUSTOM_TABLE_OPTIONS> ROW_FORMAT=FIXED;
-+Warnings:
-+Warning 1478 <STORAGE_ENGINE>: assuming ROW_FORMAT=COMPACT.
+--- ../storage/innobase/mysql-test/storage_engine/tbl_opt_row_format.result~ 2017-05-24 00:40:12.854181048 +0300
++++ ../storage/innobase/mysql-test/storage_engine/tbl_opt_row_format.reject 2017-05-24 00:49:06.578191030 +0300
+@@ -7,19 +7,39 @@
+ `b` char(8) DEFAULT NULL
+ ) ENGINE=<STORAGE_ENGINE> DEFAULT CHARSET=latin1 ROW_FORMAT=DYNAMIC
+ ALTER TABLE t1 ROW_FORMAT=FIXED;
++ERROR HY000: Table storage engine '<STORAGE_ENGINE>' does not support the create option 'ROW_TYPE'
++# ERROR: Statement ended with errno 1478, errname ER_ILLEGAL_HA_CREATE_OPTION (expected to succeed)
++# ------------ UNEXPECTED RESULT ------------
++# [ ALTER TABLE t1 ROW_FORMAT=FIXED ]
++# The statement|command finished with ER_ILLEGAL_HA_CREATE_OPTION.
++# ALTER TABLE or the mix could be unsupported|malfunctioning, or the problem was caused by previous errors.
++# You can change the engine code, or create an rdiff, or disable the test by adding it to disabled.def.
++# Further in this test, the message might sometimes be suppressed; a part of the test might be skipped.
++# Also, this problem may cause a chain effect (more errors of different kinds in the test).
++# -------------------------------------------
SHOW CREATE TABLE t1;
Table Create Table
t1 CREATE TABLE `t1` (
+ `a` int(11) DEFAULT NULL,
+ `b` char(8) DEFAULT NULL
+-) ENGINE=<STORAGE_ENGINE> DEFAULT CHARSET=latin1 ROW_FORMAT=FIXED
++) ENGINE=<STORAGE_ENGINE> DEFAULT CHARSET=latin1 ROW_FORMAT=DYNAMIC
+ ALTER TABLE t1 ROW_FORMAT=PAGE;
++ERROR HY000: Table storage engine '<STORAGE_ENGINE>' does not support the create option 'ROW_TYPE'
++# ERROR: Statement ended with errno 1478, errname ER_ILLEGAL_HA_CREATE_OPTION (expected to succeed)
++# ------------ UNEXPECTED RESULT ------------
++# [ ALTER TABLE t1 ROW_FORMAT=PAGE ]
++# The statement|command finished with ER_ILLEGAL_HA_CREATE_OPTION.
++# ALTER TABLE or the mix could be unsupported|malfunctioning, or the problem was caused by previous errors.
++# You can change the engine code, or create an rdiff, or disable the test by adding it to disabled.def.
++# Further in this test, the message might sometimes be suppressed; a part of the test might be skipped.
++# Also, this problem may cause a chain effect (more errors of different kinds in the test).
++# -------------------------------------------
+ SHOW CREATE TABLE t1;
+ Table Create Table
+ t1 CREATE TABLE `t1` (
+ `a` int(11) DEFAULT NULL,
+ `b` char(8) DEFAULT NULL
+-) ENGINE=<STORAGE_ENGINE> DEFAULT CHARSET=latin1 ROW_FORMAT=PAGE
++) ENGINE=<STORAGE_ENGINE> DEFAULT CHARSET=latin1 ROW_FORMAT=DYNAMIC
+ ALTER TABLE t1 ROW_FORMAT=COMPACT;
+ SHOW CREATE TABLE t1;
+ Table Create Table
diff --git a/storage/innobase/mysql-test/storage_engine/type_spatial_indexes.rdiff b/storage/innobase/mysql-test/storage_engine/type_spatial_indexes.rdiff
deleted file mode 100644
index 154116b748c..00000000000
--- a/storage/innobase/mysql-test/storage_engine/type_spatial_indexes.rdiff
+++ /dev/null
@@ -1,712 +0,0 @@
---- suite/storage_engine/type_spatial_indexes.result 2013-08-05 18:08:49.000000000 +0400
-+++ suite/storage_engine/type_spatial_indexes.reject 2013-08-05 18:25:24.000000000 +0400
-@@ -702,699 +702,15 @@
- DROP DATABASE IF EXISTS gis_ogs;
- CREATE DATABASE gis_ogs;
- CREATE TABLE gis_point (fid <INT_COLUMN>, g POINT NOT NULL, SPATIAL INDEX(g)) ENGINE=<STORAGE_ENGINE> <CUSTOM_TABLE_OPTIONS>;
--CREATE TABLE gis_line (fid <INT_COLUMN>, g LINESTRING NOT NULL, SPATIAL INDEX(g)) ENGINE=<STORAGE_ENGINE> <CUSTOM_TABLE_OPTIONS>;
--CREATE TABLE gis_polygon (fid <INT_COLUMN>, g POLYGON NOT NULL, SPATIAL INDEX(g)) ENGINE=<STORAGE_ENGINE> <CUSTOM_TABLE_OPTIONS>;
--CREATE TABLE gis_multi_point (fid <INT_COLUMN>, g MULTIPOINT NOT NULL, SPATIAL INDEX(g)) ENGINE=<STORAGE_ENGINE> <CUSTOM_TABLE_OPTIONS>;
--CREATE TABLE gis_multi_line (fid <INT_COLUMN>, g MULTILINESTRING NOT NULL, SPATIAL INDEX(g)) ENGINE=<STORAGE_ENGINE> <CUSTOM_TABLE_OPTIONS>;
--CREATE TABLE gis_multi_polygon (fid <INT_COLUMN>, g MULTIPOLYGON NOT NULL, SPATIAL INDEX(g)) ENGINE=<STORAGE_ENGINE> <CUSTOM_TABLE_OPTIONS>;
--CREATE TABLE gis_geometrycollection (fid <INT_COLUMN>, g GEOMETRYCOLLECTION NOT NULL, SPATIAL INDEX(g)) ENGINE=<STORAGE_ENGINE> <CUSTOM_TABLE_OPTIONS>;
--CREATE TABLE gis_geometry (fid <INT_COLUMN>, g GEOMETRY NOT NULL) ENGINE=<STORAGE_ENGINE> <CUSTOM_TABLE_OPTIONS>;
--USE gis_ogs;
--CREATE TABLE lakes (fid INT <CUSTOM_COL_OPTIONS>,
--name CHAR(64) <CUSTOM_COL_OPTIONS>,
--shore POLYGON NOT NULL, SPATIAL INDEX s(shore)) ENGINE=<STORAGE_ENGINE> <CUSTOM_TABLE_OPTIONS>;
--CREATE TABLE road_segments (fid INT <CUSTOM_COL_OPTIONS>,
--name CHAR(64) <CUSTOM_COL_OPTIONS>,
--aliases CHAR(64) <CUSTOM_COL_OPTIONS>,
--num_lanes INT <CUSTOM_COL_OPTIONS>,
--centerline LINESTRING NOT NULL, SPATIAL INDEX c(centerline)) ENGINE=<STORAGE_ENGINE> <CUSTOM_TABLE_OPTIONS>;
--CREATE TABLE divided_routes (fid INT <CUSTOM_COL_OPTIONS>,
--name CHAR(64) <CUSTOM_COL_OPTIONS>,
--num_lanes INT <CUSTOM_COL_OPTIONS>,
--centerlines MULTILINESTRING NOT NULL, SPATIAL INDEX c(centerlines)) ENGINE=<STORAGE_ENGINE> <CUSTOM_TABLE_OPTIONS>;
--CREATE TABLE forests (fid INT <CUSTOM_COL_OPTIONS>,
--name CHAR(64) <CUSTOM_COL_OPTIONS>,
--boundary MULTIPOLYGON NOT NULL, SPATIAL INDEX b(boundary)) ENGINE=<STORAGE_ENGINE> <CUSTOM_TABLE_OPTIONS>;
--CREATE TABLE bridges (fid INT <CUSTOM_COL_OPTIONS>,
--name CHAR(64) <CUSTOM_COL_OPTIONS>,
--position POINT NOT NULL, SPATIAL INDEX p(position)) ENGINE=<STORAGE_ENGINE> <CUSTOM_TABLE_OPTIONS>;
--CREATE TABLE streams (fid INT <CUSTOM_COL_OPTIONS>,
--name CHAR(64) <CUSTOM_COL_OPTIONS>,
--centerline LINESTRING NOT NULL, SPATIAL INDEX c(centerline)) ENGINE=<STORAGE_ENGINE> <CUSTOM_TABLE_OPTIONS>;
--CREATE TABLE buildings (fid INT <CUSTOM_COL_OPTIONS>,
--name CHAR(64) <CUSTOM_COL_OPTIONS>,
--position POINT NOT NULL,
--footprint POLYGON NOT NULL, SPATIAL INDEX p(position), SPATIAL INDEX f(footprint)) ENGINE=<STORAGE_ENGINE> <CUSTOM_TABLE_OPTIONS>;
--CREATE TABLE ponds (fid INT <CUSTOM_COL_OPTIONS>,
--name CHAR(64) <CUSTOM_COL_OPTIONS>,
--type CHAR(64) <CUSTOM_COL_OPTIONS>,
--shores MULTIPOLYGON NOT NULL, SPATIAL INDEX s(shores)) ENGINE=<STORAGE_ENGINE> <CUSTOM_TABLE_OPTIONS>;
--CREATE TABLE named_places (fid INT <CUSTOM_COL_OPTIONS>,
--name CHAR(64) <CUSTOM_COL_OPTIONS>,
--boundary POLYGON NOT NULL, SPATIAL INDEX b(boundary)) ENGINE=<STORAGE_ENGINE> <CUSTOM_TABLE_OPTIONS>;
--CREATE TABLE map_neatlines (fid INT <CUSTOM_COL_OPTIONS>,
--neatline POLYGON NOT NULL, SPATIAL INDEX n(neatline)) ENGINE=<STORAGE_ENGINE> <CUSTOM_TABLE_OPTIONS>;
--USE test;
--SHOW FIELDS FROM gis_point;
--Field Type Null Key Default Extra
--fid int(11) YES NULL
--g point NO MUL NULL
--SHOW FIELDS FROM gis_line;
--Field Type Null Key Default Extra
--fid int(11) YES NULL
--g linestring NO MUL NULL
--SHOW FIELDS FROM gis_polygon;
--Field Type Null Key Default Extra
--fid int(11) YES NULL
--g polygon NO MUL NULL
--SHOW FIELDS FROM gis_multi_point;
--Field Type Null Key Default Extra
--fid int(11) YES NULL
--g multipoint NO MUL NULL
--SHOW FIELDS FROM gis_multi_line;
--Field Type Null Key Default Extra
--fid int(11) YES NULL
--g multilinestring NO MUL NULL
--SHOW FIELDS FROM gis_multi_polygon;
--Field Type Null Key Default Extra
--fid int(11) YES NULL
--g multipolygon NO MUL NULL
--SHOW FIELDS FROM gis_geometrycollection;
--Field Type Null Key Default Extra
--fid int(11) YES NULL
--g geometrycollection NO MUL NULL
--SHOW FIELDS FROM gis_geometry;
--Field Type Null Key Default Extra
--fid int(11) YES NULL
--g geometry NO NULL
--INSERT INTO gis_point (fid,g) VALUES
--(101, PointFromText('POINT(10 10)')),
--(102, PointFromText('POINT(20 10)')),
--(103, PointFromText('POINT(20 20)')),
--(104, PointFromWKB(AsWKB(PointFromText('POINT(10 20)'))));
--INSERT INTO gis_line (fid,g) VALUES
--(105, LineFromText('LINESTRING(0 0,0 10,10 0)')),
--(106, LineStringFromText('LINESTRING(10 10,20 10,20 20,10 20,10 10)')),
--(107, LineStringFromWKB(AsWKB(LineString(Point(10, 10), Point(40, 10)))));
--INSERT INTO gis_polygon (fid,g) VALUES
--(108, PolygonFromText('POLYGON((10 10,20 10,20 20,10 20,10 10))')),
--(109, PolyFromText('POLYGON((0 0,50 0,50 50,0 50,0 0), (10 10,20 10,20 20,10 20,10 10))')),
--(110, PolyFromWKB(AsWKB(Polygon(LineString(Point(0, 0), Point(30, 0), Point(30, 30), Point(0, 0))))));
--INSERT INTO gis_multi_point (fid,g) VALUES
--(111, MultiPointFromText('MULTIPOINT(0 0,10 10,10 20,20 20)')),
--(112, MPointFromText('MULTIPOINT(1 1,11 11,11 21,21 21)')),
--(113, MPointFromWKB(AsWKB(MultiPoint(Point(3, 6), Point(4, 10)))));
--INSERT INTO gis_multi_line (fid,g) VALUES
--(114, MultiLineStringFromText('MULTILINESTRING((10 48,10 21,10 0),(16 0,16 23,16 48))')),
--(115, MLineFromText('MULTILINESTRING((10 48,10 21,10 0))')),
--(116, MLineFromWKB(AsWKB(MultiLineString(LineString(Point(1, 2), Point(3, 5)), LineString(Point(2, 5), Point(5, 8), Point(21, 7))))));
--INSERT INTO gis_multi_polygon (fid,g) VALUES
--(117, MultiPolygonFromText('MULTIPOLYGON(((28 26,28 0,84 0,84 42,28 26),(52 18,66 23,73 9,48 6,52 18)),((59 18,67 18,67 13,59 13,59 18)))')),
--(118, MPolyFromText('MULTIPOLYGON(((28 26,28 0,84 0,84 42,28 26),(52 18,66 23,73 9,48 6,52 18)),((59 18,67 18,67 13,59 13,59 18)))')),
--(119, MPolyFromWKB(AsWKB(MultiPolygon(Polygon(LineString(Point(0, 3), Point(3, 3), Point(3, 0), Point(0, 3)))))));
--INSERT INTO gis_geometrycollection (fid,g) VALUES
--(120, GeomCollFromText('GEOMETRYCOLLECTION(POINT(0 0), LINESTRING(0 0,10 10))')),
--(121, GeometryFromWKB(AsWKB(GeometryCollection(Point(44, 6), LineString(Point(3, 6), Point(7, 9)))))),
--(122, GeomFromText('GeometryCollection()')),
--(123, GeomFromText('GeometryCollection EMPTY'));
--INSERT into gis_geometry (fid,g) SELECT fid,g FROM gis_point;
--INSERT into gis_geometry (fid,g) SELECT fid,g FROM gis_line;
--INSERT into gis_geometry (fid,g) SELECT fid,g FROM gis_polygon;
--INSERT into gis_geometry (fid,g) SELECT fid,g FROM gis_multi_point;
--INSERT into gis_geometry (fid,g) SELECT fid,g FROM gis_multi_line;
--INSERT into gis_geometry (fid,g) SELECT fid,g FROM gis_multi_polygon;
--INSERT into gis_geometry (fid,g) SELECT fid,g FROM gis_geometrycollection;
--SELECT fid, AsText(g) FROM gis_point;
--fid AsText(g)
--101 POINT(10 10)
--102 POINT(20 10)
--103 POINT(20 20)
--104 POINT(10 20)
--SELECT fid, AsText(g) FROM gis_line;
--fid AsText(g)
--105 LINESTRING(0 0,0 10,10 0)
--106 LINESTRING(10 10,20 10,20 20,10 20,10 10)
--107 LINESTRING(10 10,40 10)
--SELECT fid, AsText(g) FROM gis_polygon;
--fid AsText(g)
--108 POLYGON((10 10,20 10,20 20,10 20,10 10))
--109 POLYGON((0 0,50 0,50 50,0 50,0 0),(10 10,20 10,20 20,10 20,10 10))
--110 POLYGON((0 0,30 0,30 30,0 0))
--SELECT fid, AsText(g) FROM gis_multi_point;
--fid AsText(g)
--111 MULTIPOINT(0 0,10 10,10 20,20 20)
--112 MULTIPOINT(1 1,11 11,11 21,21 21)
--113 MULTIPOINT(3 6,4 10)
--SELECT fid, AsText(g) FROM gis_multi_line;
--fid AsText(g)
--114 MULTILINESTRING((10 48,10 21,10 0),(16 0,16 23,16 48))
--115 MULTILINESTRING((10 48,10 21,10 0))
--116 MULTILINESTRING((1 2,3 5),(2 5,5 8,21 7))
--SELECT fid, AsText(g) FROM gis_multi_polygon;
--fid AsText(g)
--117 MULTIPOLYGON(((28 26,28 0,84 0,84 42,28 26),(52 18,66 23,73 9,48 6,52 18)),((59 18,67 18,67 13,59 13,59 18)))
--118 MULTIPOLYGON(((28 26,28 0,84 0,84 42,28 26),(52 18,66 23,73 9,48 6,52 18)),((59 18,67 18,67 13,59 13,59 18)))
--119 MULTIPOLYGON(((0 3,3 3,3 0,0 3)))
--SELECT fid, AsText(g) FROM gis_geometrycollection;
--fid AsText(g)
--120 GEOMETRYCOLLECTION(POINT(0 0),LINESTRING(0 0,10 10))
--121 GEOMETRYCOLLECTION(POINT(44 6),LINESTRING(3 6,7 9))
--122 GEOMETRYCOLLECTION EMPTY
--123 GEOMETRYCOLLECTION EMPTY
--SELECT fid, AsText(g) FROM gis_geometry;
--fid AsText(g)
--101 POINT(10 10)
--102 POINT(20 10)
--103 POINT(20 20)
--104 POINT(10 20)
--105 LINESTRING(0 0,0 10,10 0)
--106 LINESTRING(10 10,20 10,20 20,10 20,10 10)
--107 LINESTRING(10 10,40 10)
--108 POLYGON((10 10,20 10,20 20,10 20,10 10))
--109 POLYGON((0 0,50 0,50 50,0 50,0 0),(10 10,20 10,20 20,10 20,10 10))
--110 POLYGON((0 0,30 0,30 30,0 0))
--111 MULTIPOINT(0 0,10 10,10 20,20 20)
--112 MULTIPOINT(1 1,11 11,11 21,21 21)
--113 MULTIPOINT(3 6,4 10)
--114 MULTILINESTRING((10 48,10 21,10 0),(16 0,16 23,16 48))
--115 MULTILINESTRING((10 48,10 21,10 0))
--116 MULTILINESTRING((1 2,3 5),(2 5,5 8,21 7))
--117 MULTIPOLYGON(((28 26,28 0,84 0,84 42,28 26),(52 18,66 23,73 9,48 6,52 18)),((59 18,67 18,67 13,59 13,59 18)))
--118 MULTIPOLYGON(((28 26,28 0,84 0,84 42,28 26),(52 18,66 23,73 9,48 6,52 18)),((59 18,67 18,67 13,59 13,59 18)))
--119 MULTIPOLYGON(((0 3,3 3,3 0,0 3)))
--120 GEOMETRYCOLLECTION(POINT(0 0),LINESTRING(0 0,10 10))
--121 GEOMETRYCOLLECTION(POINT(44 6),LINESTRING(3 6,7 9))
--122 GEOMETRYCOLLECTION EMPTY
--123 GEOMETRYCOLLECTION EMPTY
--SELECT fid, Dimension(g) FROM gis_geometry;
--fid Dimension(g)
--101 0
--102 0
--103 0
--104 0
--105 1
--106 1
--107 1
--108 2
--109 2
--110 2
--111 0
--112 0
--113 0
--114 1
--115 1
--116 1
--117 2
--118 2
--119 2
--120 1
--121 1
--122 0
--123 0
--SELECT fid, GeometryType(g) FROM gis_geometry;
--fid GeometryType(g)
--101 POINT
--102 POINT
--103 POINT
--104 POINT
--105 LINESTRING
--106 LINESTRING
--107 LINESTRING
--108 POLYGON
--109 POLYGON
--110 POLYGON
--111 MULTIPOINT
--112 MULTIPOINT
--113 MULTIPOINT
--114 MULTILINESTRING
--115 MULTILINESTRING
--116 MULTILINESTRING
--117 MULTIPOLYGON
--118 MULTIPOLYGON
--119 MULTIPOLYGON
--120 GEOMETRYCOLLECTION
--121 GEOMETRYCOLLECTION
--122 GEOMETRYCOLLECTION
--123 GEOMETRYCOLLECTION
--SELECT fid, IsEmpty(g) FROM gis_geometry;
--fid IsEmpty(g)
--101 0
--102 0
--103 0
--104 0
--105 0
--106 0
--107 0
--108 0
--109 0
--110 0
--111 0
--112 0
--113 0
--114 0
--115 0
--116 0
--117 0
--118 0
--119 0
--120 0
--121 0
--122 0
--123 0
--SELECT fid, AsText(Envelope(g)) FROM gis_geometry;
--fid AsText(Envelope(g))
--101 POLYGON((10 10,10 10,10 10,10 10,10 10))
--102 POLYGON((20 10,20 10,20 10,20 10,20 10))
--103 POLYGON((20 20,20 20,20 20,20 20,20 20))
--104 POLYGON((10 20,10 20,10 20,10 20,10 20))
--105 POLYGON((0 0,10 0,10 10,0 10,0 0))
--106 POLYGON((10 10,20 10,20 20,10 20,10 10))
--107 POLYGON((10 10,40 10,40 10,10 10,10 10))
--108 POLYGON((10 10,20 10,20 20,10 20,10 10))
--109 POLYGON((0 0,50 0,50 50,0 50,0 0))
--110 POLYGON((0 0,30 0,30 30,0 30,0 0))
--111 POLYGON((0 0,20 0,20 20,0 20,0 0))
--112 POLYGON((1 1,21 1,21 21,1 21,1 1))
--113 POLYGON((3 6,4 6,4 10,3 10,3 6))
--114 POLYGON((10 0,16 0,16 48,10 48,10 0))
--115 POLYGON((10 0,10 0,10 48,10 48,10 0))
--116 POLYGON((1 2,21 2,21 8,1 8,1 2))
--117 POLYGON((28 0,84 0,84 42,28 42,28 0))
--118 POLYGON((28 0,84 0,84 42,28 42,28 0))
--119 POLYGON((0 0,3 0,3 3,0 3,0 0))
--120 POLYGON((0 0,10 0,10 10,0 10,0 0))
--121 POLYGON((3 6,44 6,44 9,3 9,3 6))
--122 GEOMETRYCOLLECTION EMPTY
--123 GEOMETRYCOLLECTION EMPTY
--SELECT fid, X(g) FROM gis_point;
--fid X(g)
--101 10
--102 20
--103 20
--104 10
--SELECT fid, Y(g) FROM gis_point;
--fid Y(g)
--101 10
--102 10
--103 20
--104 20
--SELECT fid, AsText(StartPoint(g)) FROM gis_line;
--fid AsText(StartPoint(g))
--105 POINT(0 0)
--106 POINT(10 10)
--107 POINT(10 10)
--SELECT fid, AsText(EndPoint(g)) FROM gis_line;
--fid AsText(EndPoint(g))
--105 POINT(10 0)
--106 POINT(10 10)
--107 POINT(40 10)
--SELECT fid, GLength(g) FROM gis_line;
--fid GLength(g)
--105 24.14213562373095
--106 40
--107 30
--SELECT fid, NumPoints(g) FROM gis_line;
--fid NumPoints(g)
--105 3
--106 5
--107 2
--SELECT fid, AsText(PointN(g, 2)) FROM gis_line;
--fid AsText(PointN(g, 2))
--105 POINT(0 10)
--106 POINT(20 10)
--107 POINT(40 10)
--SELECT fid, IsClosed(g) FROM gis_line;
--fid IsClosed(g)
--105 0
--106 1
--107 0
--SELECT fid, AsText(Centroid(g)) FROM gis_polygon;
--fid AsText(Centroid(g))
--108 POINT(15 15)
--109 POINT(25.416666666666668 25.416666666666668)
--110 POINT(20 10)
--SELECT fid, Area(g) FROM gis_polygon;
--fid Area(g)
--108 100
--109 2400
--110 450
--SELECT fid, AsText(ExteriorRing(g)) FROM gis_polygon;
--fid AsText(ExteriorRing(g))
--108 LINESTRING(10 10,20 10,20 20,10 20,10 10)
--109 LINESTRING(0 0,50 0,50 50,0 50,0 0)
--110 LINESTRING(0 0,30 0,30 30,0 0)
--SELECT fid, NumInteriorRings(g) FROM gis_polygon;
--fid NumInteriorRings(g)
--108 0
--109 1
--110 0
--SELECT fid, AsText(InteriorRingN(g, 1)) FROM gis_polygon;
--fid AsText(InteriorRingN(g, 1))
--108 NULL
--109 LINESTRING(10 10,20 10,20 20,10 20,10 10)
--110 NULL
--SELECT fid, IsClosed(g) FROM gis_multi_line;
--fid IsClosed(g)
--114 0
--115 0
--116 0
--SELECT fid, AsText(Centroid(g)) FROM gis_multi_polygon;
--fid AsText(Centroid(g))
--117 POINT(55.58852775304245 17.426536064113982)
--118 POINT(55.58852775304245 17.426536064113982)
--119 POINT(2 2)
--SELECT fid, Area(g) FROM gis_multi_polygon;
--fid Area(g)
--117 1684.5
--118 1684.5
--119 4.5
--SELECT fid, NumGeometries(g) from gis_multi_point;
--fid NumGeometries(g)
--111 4
--112 4
--113 2
--SELECT fid, NumGeometries(g) from gis_multi_line;
--fid NumGeometries(g)
--114 2
--115 1
--116 2
--SELECT fid, NumGeometries(g) from gis_multi_polygon;
--fid NumGeometries(g)
--117 2
--118 2
--119 1
--SELECT fid, NumGeometries(g) from gis_geometrycollection;
--fid NumGeometries(g)
--120 2
--121 2
--122 0
--123 0
--SELECT fid, AsText(GeometryN(g, 2)) from gis_multi_point;
--fid AsText(GeometryN(g, 2))
--111 POINT(10 10)
--112 POINT(11 11)
--113 POINT(4 10)
--SELECT fid, AsText(GeometryN(g, 2)) from gis_multi_line;
--fid AsText(GeometryN(g, 2))
--114 LINESTRING(16 0,16 23,16 48)
--115 NULL
--116 LINESTRING(2 5,5 8,21 7)
--SELECT fid, AsText(GeometryN(g, 2)) from gis_multi_polygon;
--fid AsText(GeometryN(g, 2))
--117 POLYGON((59 18,67 18,67 13,59 13,59 18))
--118 POLYGON((59 18,67 18,67 13,59 13,59 18))
--119 NULL
--SELECT fid, AsText(GeometryN(g, 2)) from gis_geometrycollection;
--fid AsText(GeometryN(g, 2))
--120 LINESTRING(0 0,10 10)
--121 LINESTRING(3 6,7 9)
--122 NULL
--123 NULL
--SELECT fid, AsText(GeometryN(g, 1)) from gis_geometrycollection;
--fid AsText(GeometryN(g, 1))
--120 POINT(0 0)
--121 POINT(44 6)
--122 NULL
--123 NULL
--SELECT g1.fid as first, g2.fid as second,
--Within(g1.g, g2.g) as w, Contains(g1.g, g2.g) as c, Overlaps(g1.g, g2.g) as o,
--Equals(g1.g, g2.g) as e, Disjoint(g1.g, g2.g) as d, Touches(g1.g, g2.g) as t,
--Intersects(g1.g, g2.g) as i, Crosses(g1.g, g2.g) as r
--FROM gis_geometrycollection g1, gis_geometrycollection g2 ORDER BY first, second;
--first second w c o e d t i r
--120 120 1 1 0 1 0 0 1 0
--120 121 0 0 1 0 0 0 1 0
--120 122 NULL NULL NULL NULL NULL NULL NULL NULL
--120 123 NULL NULL NULL NULL NULL NULL NULL NULL
--121 120 0 0 1 0 0 0 1 0
--121 121 1 1 0 1 0 0 1 0
--121 122 NULL NULL NULL NULL NULL NULL NULL NULL
--121 123 NULL NULL NULL NULL NULL NULL NULL NULL
--122 120 NULL NULL NULL NULL NULL NULL NULL NULL
--122 121 NULL NULL NULL NULL NULL NULL NULL NULL
--122 122 NULL NULL NULL NULL NULL NULL NULL NULL
--122 123 NULL NULL NULL NULL NULL NULL NULL NULL
--123 120 NULL NULL NULL NULL NULL NULL NULL NULL
--123 121 NULL NULL NULL NULL NULL NULL NULL NULL
--123 122 NULL NULL NULL NULL NULL NULL NULL NULL
--123 123 NULL NULL NULL NULL NULL NULL NULL NULL
--DROP TABLE gis_point, gis_line, gis_polygon, gis_multi_point, gis_multi_line, gis_multi_polygon, gis_geometrycollection, gis_geometry;
--USE gis_ogs;
--# Lakes
--INSERT INTO lakes (fid,name,shore) VALUES (
--101, 'BLUE LAKE',
--PolyFromText(
--'POLYGON(
-- (52 18,66 23,73 9,48 6,52 18),
-- (59 18,67 18,67 13,59 13,59 18)
-- )',
--101));
--# Road Segments
--INSERT INTO road_segments (fid,name,aliases,num_lanes,centerline) VALUES(102, 'Route 5', NULL, 2,
--LineFromText(
--'LINESTRING( 0 18, 10 21, 16 23, 28 26, 44 31 )' ,101));
--INSERT INTO road_segments (fid,name,aliases,num_lanes,centerline) VALUES(103, 'Route 5', 'Main Street', 4,
--LineFromText(
--'LINESTRING( 44 31, 56 34, 70 38 )' ,101));
--INSERT INTO road_segments (fid,name,aliases,num_lanes,centerline) VALUES(104, 'Route 5', NULL, 2,
--LineFromText(
--'LINESTRING( 70 38, 72 48 )' ,101));
--INSERT INTO road_segments (fid,name,aliases,num_lanes,centerline) VALUES(105, 'Main Street', NULL, 4,
--LineFromText(
--'LINESTRING( 70 38, 84 42 )' ,101));
--INSERT INTO road_segments (fid,name,aliases,num_lanes,centerline) VALUES(106, 'Dirt Road by Green Forest', NULL,
--1,
--LineFromText(
--'LINESTRING( 28 26, 28 0 )',101));
--# DividedRoutes
--INSERT INTO divided_routes (fid,name,num_lanes,centerlines) VALUES(119, 'Route 75', 4,
--MLineFromText(
--'MULTILINESTRING((10 48,10 21,10 0),
-- (16 0,16 23,16 48))', 101));
--# Forests
--INSERT INTO forests (fid,name,boundary) VALUES(109, 'Green Forest',
--MPolyFromText(
--'MULTIPOLYGON(((28 26,28 0,84 0,84 42,28 26),
-- (52 18,66 23,73 9,48 6,52 18)),((59 18,67 18,67 13,59 13,59 18)))',
--101));
--# Bridges
--INSERT INTO bridges (fid,name,position) VALUES(110, 'Cam Bridge', PointFromText(
--'POINT( 44 31 )', 101));
--# Streams
--INSERT INTO streams (fid,name,centerline) VALUES(111, 'Cam Stream',
--LineFromText(
--'LINESTRING( 38 48, 44 41, 41 36, 44 31, 52 18 )', 101));
--INSERT INTO streams (fid,name,centerline) VALUES(112, NULL,
--LineFromText(
--'LINESTRING( 76 0, 78 4, 73 9 )', 101));
--# Buildings
--INSERT INTO buildings (fid,name,position,footprint) VALUES(113, '123 Main Street',
--PointFromText(
--'POINT( 52 30 )', 101),
--PolyFromText(
--'POLYGON( ( 50 31, 54 31, 54 29, 50 29, 50 31) )', 101));
--INSERT INTO buildings (fid,name,position,footprint) VALUES(114, '215 Main Street',
--PointFromText(
--'POINT( 64 33 )', 101),
--PolyFromText(
--'POLYGON( ( 66 34, 62 34, 62 32, 66 32, 66 34) )', 101));
--# Ponds
--INSERT INTO ponds (fid,name,type,shores) VALUES(120, NULL, 'Stock Pond',
--MPolyFromText(
--'MULTIPOLYGON( ( ( 24 44, 22 42, 24 40, 24 44) ),
-- ( ( 26 44, 26 40, 28 42, 26 44) ) )', 101));
--# Named Places
--INSERT INTO named_places (fid,name,boundary) VALUES(117, 'Ashton',
--PolyFromText(
--'POLYGON( ( 62 48, 84 48, 84 30, 56 30, 56 34, 62 48) )', 101));
--INSERT INTO named_places (fid,name,boundary) VALUES(118, 'Goose Island',
--PolyFromText(
--'POLYGON( ( 67 13, 67 18, 59 18, 59 13, 67 13) )', 101));
--# Map Neatlines
--INSERT INTO map_neatlines (fid,neatline) VALUES(115,
--PolyFromText(
--'POLYGON( ( 0 0, 0 48, 84 48, 84 0, 0 0 ) )', 101));
--SELECT Dimension(shore)
--FROM lakes
--WHERE name = 'Blue Lake';
--Dimension(shore)
--2
--SELECT GeometryType(centerlines)
--FROM divided_routes
--WHERE name = 'Route 75';
--GeometryType(centerlines)
--MULTILINESTRING
--SELECT AsText(boundary)
--FROM named_places
--WHERE name = 'Goose Island';
--AsText(boundary)
--POLYGON((67 13,67 18,59 18,59 13,67 13))
--SELECT AsText(PolyFromWKB(AsBinary(boundary),101))
--FROM named_places
--WHERE name = 'Goose Island';
--AsText(PolyFromWKB(AsBinary(boundary),101))
--POLYGON((67 13,67 18,59 18,59 13,67 13))
--SELECT SRID(boundary)
--FROM named_places
--WHERE name = 'Goose Island';
--SRID(boundary)
--101
--SELECT IsEmpty(centerline)
--FROM road_segments
--WHERE name = 'Route 5'
--AND aliases = 'Main Street';
--IsEmpty(centerline)
--0
--SELECT AsText(Envelope(boundary))
--FROM named_places
--WHERE name = 'Goose Island';
--AsText(Envelope(boundary))
--POLYGON((59 13,67 13,67 18,59 18,59 13))
--SELECT X(position)
--FROM bridges
--WHERE name = 'Cam Bridge';
--X(position)
--44
--SELECT Y(position)
--FROM bridges
--WHERE name = 'Cam Bridge';
--Y(position)
--31
--SELECT AsText(StartPoint(centerline))
--FROM road_segments
--WHERE fid = 102;
--AsText(StartPoint(centerline))
--POINT(0 18)
--SELECT AsText(EndPoint(centerline))
--FROM road_segments
--WHERE fid = 102;
--AsText(EndPoint(centerline))
--POINT(44 31)
--SELECT GLength(centerline)
--FROM road_segments
--WHERE fid = 106;
--GLength(centerline)
--26
--SELECT NumPoints(centerline)
--FROM road_segments
--WHERE fid = 102;
--NumPoints(centerline)
--5
--SELECT AsText(PointN(centerline, 1))
--FROM road_segments
--WHERE fid = 102;
--AsText(PointN(centerline, 1))
--POINT(0 18)
--SELECT AsText(Centroid(boundary))
--FROM named_places
--WHERE name = 'Goose Island';
--AsText(Centroid(boundary))
--POINT(63 15.5)
--SELECT Area(boundary)
--FROM named_places
--WHERE name = 'Goose Island';
--Area(boundary)
--40
--SELECT AsText(ExteriorRing(shore))
--FROM lakes
--WHERE name = 'Blue Lake';
--AsText(ExteriorRing(shore))
--LINESTRING(52 18,66 23,73 9,48 6,52 18)
--SELECT NumInteriorRings(shore)
--FROM lakes
--WHERE name = 'Blue Lake';
--NumInteriorRings(shore)
--1
--SELECT AsText(InteriorRingN(shore, 1))
--FROM lakes
--WHERE name = 'Blue Lake';
--AsText(InteriorRingN(shore, 1))
--LINESTRING(59 18,67 18,67 13,59 13,59 18)
--SELECT NumGeometries(centerlines)
--FROM divided_routes
--WHERE name = 'Route 75';
--NumGeometries(centerlines)
--2
--SELECT AsText(GeometryN(centerlines, 2))
--FROM divided_routes
--WHERE name = 'Route 75';
--AsText(GeometryN(centerlines, 2))
--LINESTRING(16 0,16 23,16 48)
--SELECT IsClosed(centerlines)
--FROM divided_routes
--WHERE name = 'Route 75';
--IsClosed(centerlines)
--0
--SELECT GLength(centerlines)
--FROM divided_routes
--WHERE name = 'Route 75';
--GLength(centerlines)
--96
--SELECT AsText(Centroid(shores))
--FROM ponds
--WHERE fid = 120;
--AsText(Centroid(shores))
--POINT(25 42)
--SELECT Area(shores)
--FROM ponds
--WHERE fid = 120;
--Area(shores)
--8
--SELECT ST_Equals(boundary,
--PolyFromText('POLYGON( ( 67 13, 67 18, 59 18, 59 13, 67 13) )',1))
--FROM named_places
--WHERE name = 'Goose Island';
--ST_Equals(boundary,
--PolyFromText('POLYGON( ( 67 13, 67 18, 59 18, 59 13, 67 13) )',1))
--1
--SELECT ST_Disjoint(centerlines, boundary)
--FROM divided_routes, named_places
--WHERE divided_routes.name = 'Route 75'
--AND named_places.name = 'Ashton';
--ST_Disjoint(centerlines, boundary)
--1
--SELECT ST_Touches(centerline, shore)
--FROM streams, lakes
--WHERE streams.name = 'Cam Stream'
--AND lakes.name = 'Blue Lake';
--ST_Touches(centerline, shore)
--1
--SELECT Crosses(road_segments.centerline, divided_routes.centerlines)
--FROM road_segments, divided_routes
--WHERE road_segments.fid = 102
--AND divided_routes.name = 'Route 75';
--Crosses(road_segments.centerline, divided_routes.centerlines)
--1
--SELECT ST_Intersects(road_segments.centerline, divided_routes.centerlines)
--FROM road_segments, divided_routes
--WHERE road_segments.fid = 102
--AND divided_routes.name = 'Route 75';
--ST_Intersects(road_segments.centerline, divided_routes.centerlines)
--1
--SELECT ST_Contains(forests.boundary, named_places.boundary)
--FROM forests, named_places
--WHERE forests.name = 'Green Forest'
--AND named_places.name = 'Ashton';
--ST_Contains(forests.boundary, named_places.boundary)
--0
--SELECT ST_Distance(position, boundary)
--FROM bridges, named_places
--WHERE bridges.name = 'Cam Bridge'
--AND named_places.name = 'Ashton';
--ST_Distance(position, boundary)
--12
--SELECT AsText(ST_Difference(named_places.boundary, forests.boundary))
--FROM named_places, forests
--WHERE named_places.name = 'Ashton'
--AND forests.name = 'Green Forest';
--AsText(ST_Difference(named_places.boundary, forests.boundary))
--POLYGON((56 34,62 48,84 48,84 42,56 34))
--SELECT AsText(ST_Union(shore, boundary))
--FROM lakes, named_places
--WHERE lakes.name = 'Blue Lake'
--AND named_places.name = 'Goose Island';
--AsText(ST_Union(shore, boundary))
--POLYGON((48 6,52 18,66 23,73 9,48 6))
--SELECT AsText(ST_SymDifference(shore, boundary))
--FROM lakes, named_places
--WHERE lakes.name = 'Blue Lake'
--AND named_places.name = 'Ashton';
--AsText(ST_SymDifference(shore, boundary))
--MULTIPOLYGON(((48 6,52 18,66 23,73 9,48 6),(59 13,59 18,67 18,67 13,59 13)),((56 30,56 34,62 48,84 48,84 30,56 30)))
--SELECT count(*)
--FROM buildings, bridges
--WHERE ST_Contains(ST_Buffer(bridges.position, 15.0), buildings.footprint) = 1;
--count(*)
--1
-+ERROR HY000: The storage engine <STORAGE_ENGINE> doesn't support SPATIAL indexes
-+# ERROR: Statement ended with errno 1464, errname ER_TABLE_CANT_HANDLE_SPKEYS (expected to succeed)
-+# ------------ UNEXPECTED RESULT ------------
-+# [ CREATE TABLE gis_point (fid INT(11) /*!*/ /*Custom column options*/, g POINT NOT NULL, SPATIAL INDEX(g)) ENGINE=InnoDB /*!*/ /*Custom table options*/ ]
-+# The statement|command finished with ER_TABLE_CANT_HANDLE_SPKEYS.
-+# Geometry types or spatial indexes or the mix could be unsupported|malfunctioning, or the problem was caused by previous errors.
-+# You can change the engine code, or create an rdiff, or disable the test by adding it to disabled.def.
-+# Further in this test, the message might sometimes be suppressed; a part of the test might be skipped.
-+# Also, this problem may cause a chain effect (more errors of different kinds in the test).
-+# -------------------------------------------
- DROP DATABASE gis_ogs;
- USE test;
diff --git a/storage/innobase/os/os0file.cc b/storage/innobase/os/os0file.cc
index 93e8caf9301..5afb4527916 100644
--- a/storage/innobase/os/os0file.cc
+++ b/storage/innobase/os/os0file.cc
@@ -1,6 +1,6 @@
/***********************************************************************
-Copyright (c) 1995, 2016, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 1995, 2017, Oracle and/or its affiliates. All Rights Reserved.
Copyright (c) 2009, Percona Inc.
Copyright (c) 2013, 2017, MariaDB Corporation.
@@ -226,7 +226,7 @@ struct Slot {
os_offset_t offset;
/** file where to read or write */
- os_file_t file;
+ pfs_os_file_t file;
/** file name or path */
const char* name;
@@ -319,7 +319,7 @@ public:
IORequest& type,
fil_node_t* m1,
void* m2,
- os_file_t file,
+ pfs_os_file_t file,
const char* name,
void* buf,
os_offset_t offset,
@@ -2248,7 +2248,7 @@ AIO::is_linux_native_aio_supported()
strcpy(name + dirnamelen, "ib_logfile0");
- fd = ::open(name, O_RDONLY);
+ fd = open(name, O_RDONLY);
if (fd == -1) {
@@ -2578,7 +2578,7 @@ A simple function to open or create a file.
@param[out] success true if succeed, false if error
@return handle to the file, not defined if error, error number
can be retrieved with os_file_get_last_error */
-os_file_t
+pfs_os_file_t
os_file_create_simple_func(
const char* name,
ulint create_mode,
@@ -2586,7 +2586,7 @@ os_file_create_simple_func(
bool read_only,
bool* success)
{
- os_file_t file;
+ pfs_os_file_t file;
*success = false;
@@ -2656,7 +2656,7 @@ os_file_create_simple_func(
bool retry;
do {
- file = ::open(name, create_flag, os_innodb_umask);
+ file = open(name, create_flag, os_innodb_umask);
if (file == -1) {
*success = false;
@@ -2871,7 +2871,7 @@ Opens an existing file or creates a new.
@param[in] success true if succeeded
@return handle to the file, not defined if error, error number
can be retrieved with os_file_get_last_error */
-os_file_t
+pfs_os_file_t
os_file_create_func(
const char* name,
ulint create_mode,
@@ -2958,7 +2958,7 @@ os_file_create_func(
bool retry;
do {
- file = ::open(name, create_flag, os_innodb_umask);
+ file = open(name, create_flag, os_innodb_umask);
if (file == -1) {
const char* operation;
@@ -3037,7 +3037,7 @@ A simple function to open or create a file.
@param[out] success true if succeeded
@return own: handle to the file, not defined if error, error number
can be retrieved with os_file_get_last_error */
-os_file_t
+pfs_os_file_t
os_file_create_simple_no_error_handling_func(
const char* name,
ulint create_mode,
@@ -3092,7 +3092,7 @@ os_file_create_simple_no_error_handling_func(
return(OS_FILE_CLOSED);
}
- file = ::open(name, create_flag, os_innodb_umask);
+ file = open(name, create_flag, os_innodb_umask);
*success = (file != -1);
@@ -3324,8 +3324,8 @@ os_file_get_status_posix(
&& (stat_info->type == OS_FILE_TYPE_FILE
|| stat_info->type == OS_FILE_TYPE_BLOCK)) {
- int access = !read_only ? O_RDWR : O_RDONLY;
- int fh = ::open(path, access, os_innodb_umask);
+ int fh = open(path, read_only ? O_RDONLY : O_RDWR,
+ os_innodb_umask);
if (fh == -1) {
stat_info->rw_perm = false;
@@ -3788,7 +3788,7 @@ A simple function to open or create a file.
@param[out] success true if succeed, false if error
@return handle to the file, not defined if error, error number
can be retrieved with os_file_get_last_error */
-os_file_t
+pfs_os_file_t
os_file_create_simple_func(
const char* name,
ulint create_mode,
@@ -4105,7 +4105,7 @@ Opens an existing file or creates a new.
@param[in] success true if succeeded
@return handle to the file, not defined if error, error number
can be retrieved with os_file_get_last_error */
-os_file_t
+pfs_os_file_t
os_file_create_func(
const char* name,
ulint create_mode,
@@ -4320,7 +4320,7 @@ A simple function to open or create a file.
@param[out] success true if succeeded
@return own: handle to the file, not defined if error, error number
can be retrieved with os_file_get_last_error */
-os_file_t
+pfs_os_file_t
os_file_create_simple_no_error_handling_func(
const char* name,
ulint create_mode,
@@ -6163,7 +6163,7 @@ AIO::reserve_slot(
IORequest& type,
fil_node_t* m1,
void* m2,
- os_file_t file,
+ pfs_os_file_t file,
const char* name,
void* buf,
os_offset_t offset,
@@ -6555,10 +6555,11 @@ os_aio_windows_handler(
/* This read/write does not go through os_file_read
and os_file_write APIs, need to register with
performance schema explicitly here. */
+ PSI_file_locker_state state;
struct PSI_file_locker* locker = NULL;
register_pfs_file_io_begin(
- locker, slot->file, slot->len,
+ &state, locker, slot->file, slot->len,
slot->type.is_write()
? PSI_FILE_WRITE : PSI_FILE_READ, __FILE__, __LINE__);
#endif /* UNIV_PFS_IO */
@@ -6616,7 +6617,7 @@ os_aio_func(
IORequest& type,
ulint mode,
const char* name,
- os_file_t file,
+ pfs_os_file_t file,
void* buf,
os_offset_t offset,
ulint n,
@@ -6672,7 +6673,6 @@ try_again:
ret = ReadFile(
file, slot->ptr, slot->len,
&slot->n_bytes, &slot->control);
-
#elif defined(LINUX_NATIVE_AIO)
if (!array->linux_dispatch(slot)) {
goto err_exit;
@@ -6691,7 +6691,6 @@ try_again:
ret = WriteFile(
file, slot->ptr, slot->len,
&slot->n_bytes, &slot->control);
-
#elif defined(LINUX_NATIVE_AIO)
if (!array->linux_dispatch(slot)) {
goto err_exit;
@@ -7551,7 +7550,7 @@ AIO::to_file(FILE* file) const
fprintf(file,
"%s IO for %s (offset=" UINT64PF
- ", size=%lu)\n",
+ ", size=" ULINTPF ")\n",
slot.type.is_read() ? "read" : "write",
slot.name, slot.offset, slot.len);
}
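
The os0file.cc hunks above replace bare os_file_t handles with pfs_os_file_t so that the Performance Schema state travels together with the native descriptor through the AIO slots and the create/open functions. A minimal illustrative sketch of that wrapper-handle idea (the struct and member names below are hypothetical, not the real InnoDB definition):

    // Sketch only: one object carries both the native descriptor and the
    // per-file instrumentation state, so every layer that forwards the
    // handle keeps the instrumentation attached instead of passing a bare fd.
    struct instrumented_file_t {
        int   m_file;  // native descriptor (an fd on POSIX)
        void* m_psi;   // opaque PSI state; NULL when instrumentation is off

        bool is_open() const { return m_file >= 0; }
    };
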
diff --git a/storage/innobase/os/os0thread.cc b/storage/innobase/os/os0thread.cc
index 72199b4cf0b..0462c62f4b2 100644
--- a/storage/innobase/os/os0thread.cc
+++ b/storage/innobase/os/os0thread.cc
@@ -1,6 +1,7 @@
/*****************************************************************************
Copyright (c) 1995, 2016, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 2017, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -31,14 +32,9 @@ Created 9/8/1995 Heikki Tuuri
#include "os0event.h"
#include <map>
-/** Mutex that tracks the thread count. Used by innorwlocktest.cc
-FIXME: the unit tests should use APIs */
-SysMutex thread_mutex;
-
/** Number of threads active. */
ulint os_thread_count;
-
/***************************************************************//**
Compares two thread ids for equality.
@return TRUE if equal */
@@ -127,11 +123,7 @@ os_thread_create_func(
CloseHandle(handle);
- mutex_enter(&thread_mutex);
-
- os_thread_count++;
-
- mutex_exit(&thread_mutex);
+ my_atomic_addlint(&os_thread_count, 1);
return((os_thread_t)new_thread_id);
#else /* _WIN32 else */
@@ -140,9 +132,7 @@ os_thread_create_func(
pthread_attr_init(&attr);
- mutex_enter(&thread_mutex);
- ++os_thread_count;
- mutex_exit(&thread_mutex);
+ my_atomic_addlint(&os_thread_count, 1);
int ret = pthread_create(&new_thread_id, &attr, func, arg);
@@ -197,16 +187,11 @@ os_thread_exit(
pfs_delete_thread();
#endif
- mutex_enter(&thread_mutex);
-
- os_thread_count--;
+ my_atomic_addlint(&os_thread_count, -1);
#ifdef _WIN32
- mutex_exit(&thread_mutex);
-
ExitThread(0);
#else
- mutex_exit(&thread_mutex);
if (detach) {
pthread_detach(pthread_self());
}
@@ -260,10 +245,6 @@ bool
os_thread_active()
/*==============*/
{
- mutex_enter(&thread_mutex);
-
- bool active = (os_thread_count > 0);
-
/* All the threads have exited or are just exiting;
NOTE that the threads may not have completed their
exit yet. Should we use pthread_join() to make sure
@@ -272,30 +253,5 @@ os_thread_active()
os_thread_exit(). Now we just sleep 0.1
seconds and hope that is enough! */
- mutex_exit(&thread_mutex);
-
- return(active);
-}
-
-/**
-Initializes OS thread management data structures. */
-void
-os_thread_init()
-/*============*/
-{
- mutex_create(LATCH_ID_THREAD_MUTEX, &thread_mutex);
-}
-
-/**
-Frees OS thread management data structures. */
-void
-os_thread_free()
-/*============*/
-{
- if (os_thread_count != 0) {
- ib::warn() << "Some (" << os_thread_count << ") threads are"
- " still active";
- }
-
- mutex_destroy(&thread_mutex);
+ return(my_atomic_loadlint(&os_thread_count) > 0);
}
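
The os0thread.cc change above drops the dedicated thread_mutex and maintains os_thread_count with atomic add/load operations instead, which also lets os_thread_init()/os_thread_free() disappear. A small stand-alone sketch of the same pattern using standard C++ atomics (illustrative; the server itself uses the my_atomic_* wrappers shown in the hunk):

    #include <atomic>

    // Counter of live worker threads; a plain counter needs no mutex.
    static std::atomic<unsigned long> thread_count{0};

    void on_thread_start() { thread_count.fetch_add(1, std::memory_order_relaxed); }
    void on_thread_exit()  { thread_count.fetch_sub(1, std::memory_order_relaxed); }

    // Equivalent of os_thread_active(): true while any thread is registered.
    bool threads_active()  { return thread_count.load(std::memory_order_relaxed) > 0; }
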
diff --git a/storage/innobase/que/que0que.cc b/storage/innobase/que/que0que.cc
index fa0a429c283..5a3af9dfaeb 100644
--- a/storage/innobase/que/que0que.cc
+++ b/storage/innobase/que/que0que.cc
@@ -417,7 +417,8 @@ que_graph_free_recursive(
}
DBUG_PRINT("que_graph_free_recursive",
- ("node: %p, type: %lu", node, que_node_get_type(node)));
+ ("node: %p, type: " ULINTPF, node,
+ que_node_get_type(node)));
switch (que_node_get_type(node)) {
diff --git a/storage/innobase/rem/rem0rec.cc b/storage/innobase/rem/rem0rec.cc
index 26bb12e8a03..c78df489179 100644
--- a/storage/innobase/rem/rem0rec.cc
+++ b/storage/innobase/rem/rem0rec.cc
@@ -1354,8 +1354,10 @@ rec_convert_dtuple_to_rec_comp(
}
}
- memcpy(end, dfield_get_data(field), len);
- end += len;
+ if (len) {
+ memcpy(end, dfield_get_data(field), len);
+ end += len;
+ }
}
if (!num_v) {
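
The rem0rec.cc hunk above wraps the memcpy in an `if (len)` check: for a zero-length field the data pointer may legitimately be NULL, and passing a NULL source to memcpy() is undefined behaviour even when the length is 0. A tiny self-contained illustration of the guard (helper name hypothetical):

    #include <string.h>

    // Append one field to a record buffer, skipping the call entirely for
    // empty fields so a NULL source pointer never reaches memcpy().
    static unsigned char* append_field(unsigned char* end,
                                       const void* data, size_t len)
    {
        if (len > 0) {
            memcpy(end, data, len);
            end += len;
        }
        return end;
    }
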
diff --git a/storage/innobase/row/row0ftsort.cc b/storage/innobase/row/row0ftsort.cc
index fca6ae5a1bf..321b55e9894 100644
--- a/storage/innobase/row/row0ftsort.cc
+++ b/storage/innobase/row/row0ftsort.cc
@@ -94,7 +94,6 @@ row_merge_create_fts_sort_index(
new_index->n_def = FTS_NUM_FIELDS_SORT;
new_index->cached = TRUE;
new_index->parser = index->parser;
- new_index->is_ngram = index->is_ngram;
idx_field = dict_index_get_nth_field(index, 0);
charset = fts_index_get_charset(index);
@@ -515,7 +514,6 @@ row_merge_fts_doc_tokenize(
ulint data_size[FTS_NUM_AUX_INDEX];
ulint n_tuple[FTS_NUM_AUX_INDEX];
st_mysql_ftparser* parser;
- bool is_ngram;
t_str.f_n_char = 0;
t_ctx->buf_used = 0;
@@ -524,7 +522,6 @@ row_merge_fts_doc_tokenize(
memset(data_size, 0, FTS_NUM_AUX_INDEX * sizeof(ulint));
parser = sort_buf[0]->index->parser;
- is_ngram = sort_buf[0]->index->is_ngram;
/* Tokenize the data and add each word string, its corresponding
doc id and position to sort buffer */
@@ -570,7 +567,7 @@ row_merge_fts_doc_tokenize(
/* Ignore string whose character number is less than
"fts_min_token_size" or more than "fts_max_token_size" */
- if (!fts_check_token(&str, NULL, is_ngram, NULL)) {
+ if (!fts_check_token(&str, NULL, NULL)) {
if (parser != NULL) {
UT_LIST_REMOVE(t_ctx->fts_token_list, fts_token);
ut_free(fts_token);
@@ -589,7 +586,7 @@ row_merge_fts_doc_tokenize(
/* if "cached_stopword" is defined, ignore words in the
stopword list */
- if (!fts_check_token(&str, t_ctx->cached_stopword, is_ngram,
+ if (!fts_check_token(&str, t_ctx->cached_stopword,
doc->charset)) {
if (parser != NULL) {
UT_LIST_REMOVE(t_ctx->fts_token_list, fts_token);
diff --git a/storage/innobase/row/row0import.cc b/storage/innobase/row/row0import.cc
index a2773baa34e..82e17c6fb0a 100644
--- a/storage/innobase/row/row0import.cc
+++ b/storage/innobase/row/row0import.cc
@@ -1926,6 +1926,7 @@ PageConverter::update_page(
case FIL_PAGE_TYPE_XDES:
err = set_current_xdes(
block->page.id.page_no(), get_frame(block));
+ /* fall through */
case FIL_PAGE_INODE:
case FIL_PAGE_TYPE_TRX_SYS:
case FIL_PAGE_IBUF_FREE_LIST:
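
Several hunks in this commit (row0import.cc above, and row0log.cc, row0mysql.cc, row0purge.cc further down) add explicit `/* fall through */` comments before case labels that intentionally lack a break; compilers such as GCC 7 with -Wimplicit-fallthrough recognize the comment and suppress the warning. A minimal example of the convention (names and counters are hypothetical, not the InnoDB code):

    enum page_type_t { PAGE_XDES, PAGE_INODE, PAGE_OTHER };

    static void count_page(page_type_t type, int* xdes_pages, int* total_pages)
    {
        switch (type) {
        case PAGE_XDES:
            ++*xdes_pages;      /* extra bookkeeping for XDES pages ...      */
            /* fall through */  /* ... and then count it like any other page */
        case PAGE_INODE:
        case PAGE_OTHER:
            ++*total_pages;
            break;
        }
    }
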
diff --git a/storage/innobase/row/row0ins.cc b/storage/innobase/row/row0ins.cc
index b7133a3e1ee..87c2b61a3cb 100644
--- a/storage/innobase/row/row0ins.cc
+++ b/storage/innobase/row/row0ins.cc
@@ -2277,18 +2277,14 @@ for a clustered index!
@retval DB_SUCCESS if no error
@retval DB_DUPLICATE_KEY if error,
@retval DB_LOCK_WAIT if we have to wait for a lock on a possible duplicate
-record
-@retval DB_SUCCESS_LOCKED_REC if an exact match of the record was found
-in online table rebuild (flags & (BTR_KEEP_SYS_FLAG | BTR_NO_LOCKING_FLAG)) */
+record */
static MY_ATTRIBUTE((nonnull, warn_unused_result))
dberr_t
row_ins_duplicate_error_in_clust(
-/*=============================*/
ulint flags, /*!< in: undo logging and locking flags */
btr_cur_t* cursor, /*!< in: B-tree cursor */
const dtuple_t* entry, /*!< in: entry to insert */
- que_thr_t* thr, /*!< in: query thread */
- mtr_t* mtr) /*!< in: mtr */
+ que_thr_t* thr) /*!< in: query thread */
{
dberr_t err;
rec_t* rec;
@@ -2299,8 +2295,6 @@ row_ins_duplicate_error_in_clust(
ulint* offsets = offsets_;
rec_offs_init(offsets_);
- UT_NOT_USED(mtr);
-
ut_ad(dict_index_is_clust(cursor->index));
/* NOTE: For unique non-clustered indexes there may be any number
@@ -2662,7 +2656,7 @@ row_ins_clust_index_entry_low(
DB_LOCK_WAIT */
err = row_ins_duplicate_error_in_clust(
- flags, cursor, entry, thr, &mtr);
+ flags, cursor, entry, thr);
}
if (err != DB_SUCCESS) {
diff --git a/storage/innobase/row/row0log.cc b/storage/innobase/row/row0log.cc
index 36ad6cdd3d9..6df0d745e46 100644
--- a/storage/innobase/row/row0log.cc
+++ b/storage/innobase/row/row0log.cc
@@ -354,11 +354,10 @@ row_log_online_op(
b += size;
if (mrec_size >= avail_size) {
- dberr_t err;
- IORequest request(IORequest::WRITE);
const os_offset_t byte_offset
= (os_offset_t) log->tail.blocks
* srv_sort_buf_size;
+ IORequest request(IORequest::WRITE);
if (byte_offset + srv_sort_buf_size >= srv_online_max_size) {
goto write_failed;
@@ -379,13 +378,12 @@ row_log_online_op(
goto err_exit;
}
- err = os_file_write(
- request,
- "(modification log)",
- OS_FILE_FROM_FD(log->fd),
- log->tail.block, byte_offset, srv_sort_buf_size);
log->tail.blocks++;
- if (err != DB_SUCCESS) {
+ if (!os_file_write_int_fd(
+ request,
+ "(modification log)",
+ log->fd,
+ log->tail.block, byte_offset, srv_sort_buf_size)) {
write_failed:
/* We set the flag directly instead of invoking
dict_set_corrupted_index_cache_only(index) here,
@@ -472,11 +470,10 @@ row_log_table_close_func(
ut_ad(mutex_own(&log->mutex));
if (size >= avail) {
- dberr_t err;
- IORequest request(IORequest::WRITE);
const os_offset_t byte_offset
= (os_offset_t) log->tail.blocks
* srv_sort_buf_size;
+ IORequest request(IORequest::WRITE);
if (byte_offset + srv_sort_buf_size >= srv_online_max_size) {
goto write_failed;
@@ -497,13 +494,12 @@ row_log_table_close_func(
goto err_exit;
}
- err = os_file_write(
- request,
- "(modification log)",
- OS_FILE_FROM_FD(log->fd),
- log->tail.block, byte_offset, srv_sort_buf_size);
log->tail.blocks++;
- if (err != DB_SUCCESS) {
+ if (!os_file_write_int_fd(
+ request,
+ "(modification log)",
+ log->fd,
+ log->tail.block, byte_offset, srv_sort_buf_size)) {
write_failed:
log->error = DB_ONLINE_LOG_TOO_BIG;
}
@@ -2038,6 +2034,7 @@ row_log_table_apply_update(
When applying the subsequent ROW_T_DELETE, no matching
record will be found. */
+ /* fall through */
case DB_SUCCESS:
ut_ad(row != NULL);
break;
@@ -2888,16 +2885,14 @@ all_done:
goto func_exit;
}
- IORequest request;
+ IORequest request(IORequest::READ);
- dberr_t err = os_file_read_no_error_handling(
- request,
- OS_FILE_FROM_FD(index->online_log->fd),
- index->online_log->head.block, ofs,
- srv_sort_buf_size,
- NULL);
- if (err != DB_SUCCESS) {
+ if (!os_file_read_no_error_handling_int_fd(
+ request,
+ index->online_log->fd,
+ index->online_log->head.block, ofs,
+ srv_sort_buf_size)) {
ib::error()
<< "Unable to read temporary file"
" for table " << index->table_name;
@@ -3707,10 +3702,10 @@ all_done:
goto func_exit;
}
} else {
- os_offset_t ofs;
-
- ofs = (os_offset_t) index->online_log->head.blocks
+ os_offset_t ofs = static_cast<os_offset_t>(
+ index->online_log->head.blocks)
* srv_sort_buf_size;
+ IORequest request(IORequest::READ);
ut_ad(has_index_lock);
has_index_lock = false;
@@ -3723,16 +3718,11 @@ all_done:
goto func_exit;
}
- IORequest request;
-
- dberr_t err = os_file_read_no_error_handling(
- request,
- OS_FILE_FROM_FD(index->online_log->fd),
- index->online_log->head.block, ofs,
- srv_sort_buf_size,
- NULL);
-
- if (err != DB_SUCCESS) {
+ if (!os_file_read_no_error_handling_int_fd(
+ request,
+ index->online_log->fd,
+ index->online_log->head.block, ofs,
+ srv_sort_buf_size)) {
ib::error()
<< "Unable to read temporary file"
" for index " << index->name;
diff --git a/storage/innobase/row/row0merge.cc b/storage/innobase/row/row0merge.cc
index 62cab870e9e..ea332adfdc3 100644
--- a/storage/innobase/row/row0merge.cc
+++ b/storage/innobase/row/row0merge.cc
@@ -1155,10 +1155,9 @@ row_merge_heap_create(
return(heap);
}
-/********************************************************************//**
-Read a merge block from the file system.
-@return TRUE if request was successful, FALSE if fail */
-ibool
+/** Read a merge block from the file system.
+@return whether the request was successful */
+bool
row_merge_read(
/*===========*/
int fd, /*!< in: file descriptor */
@@ -1176,11 +1175,9 @@ row_merge_read(
DBUG_LOG("ib_merge_sort", "fd=" << fd << " ofs=" << ofs);
DBUG_EXECUTE_IF("row_merge_read_failure", DBUG_RETURN(FALSE););
- IORequest request;
-
- dberr_t err = os_file_read_no_error_handling(
- request,
- OS_FILE_FROM_FD(fd), buf, ofs, srv_sort_buf_size, NULL);
+ IORequest request(IORequest::READ);
+ const bool success = os_file_read_no_error_handling_int_fd(
+ request, fd, buf, ofs, srv_sort_buf_size);
/* For encrypted tables, decrypt data after reading and copy data */
if (crypt_data && crypt_buf) {
@@ -1194,11 +1191,11 @@ row_merge_read(
posix_fadvise(fd, ofs, srv_sort_buf_size, POSIX_FADV_DONTNEED);
#endif /* POSIX_FADV_DONTNEED */
- if (err != DB_SUCCESS) {
+ if (!success) {
ib::error() << "Failed to read merge block at " << ofs;
}
- DBUG_RETURN(err == DB_SUCCESS);
+ DBUG_RETURN(success);
}
/********************************************************************//**
@@ -1223,7 +1220,6 @@ row_merge_write(
DBUG_LOG("ib_merge_sort", "fd=" << fd << " ofs=" << ofs);
DBUG_EXECUTE_IF("row_merge_write_failure", DBUG_RETURN(FALSE););
- IORequest request(IORequest::WRITE);
if (crypt_data && crypt_buf) {
row_merge_encrypt_buf(crypt_data, offset, space, (const byte *)buf, (byte *)crypt_buf);
out_buf = crypt_buf;
@@ -1232,9 +1228,9 @@ row_merge_write(
mach_write_to_4((byte *)out_buf, 0);
}
- dberr_t err = os_file_write(
- request,
- "(merge)", OS_FILE_FROM_FD(fd), out_buf, ofs, buf_len);
+ IORequest request(IORequest::WRITE);
+ const bool success = os_file_write_int_fd(
+ request, "(merge)", fd, out_buf, ofs, buf_len);
#ifdef POSIX_FADV_DONTNEED
/* The block will be needed on the next merge pass,
@@ -1242,7 +1238,7 @@ row_merge_write(
posix_fadvise(fd, ofs, buf_len, POSIX_FADV_DONTNEED);
#endif /* POSIX_FADV_DONTNEED */
- DBUG_RETURN(err == DB_SUCCESS);
+ DBUG_RETURN(success);
}
/********************************************************************//**
@@ -4030,7 +4026,7 @@ row_merge_drop_temp_indexes(void)
/** Create temporary merge files in the given paramater path, and if
UNIV_PFS_IO defined, register the file descriptor with Performance Schema.
-@param[in] path location for creating temporary merge files.
+@param[in] path location for creating temporary merge files, or NULL
@return File descriptor */
int
row_merge_file_create_low(
@@ -4041,16 +4037,23 @@ row_merge_file_create_low(
/* This temp file open does not go through normal
file APIs, add instrumentation to register with
performance schema */
- struct PSI_file_locker* locker = NULL;
+ struct PSI_file_locker* locker;
PSI_file_locker_state state;
- register_pfs_file_open_begin(&state, locker, innodb_temp_file_key,
- PSI_FILE_OPEN,
- "Innodb Merge Temp File",
- __FILE__, __LINE__);
+ locker = PSI_FILE_CALL(get_thread_file_name_locker)(
+ &state, innodb_temp_file_key, PSI_FILE_OPEN,
+ "Innodb Merge Temp File", &locker);
+ if (locker != NULL) {
+ PSI_FILE_CALL(start_file_open_wait)(locker,
+ __FILE__,
+ __LINE__);
+ }
#endif
fd = innobase_mysql_tmpfile(path);
#ifdef UNIV_PFS_IO
- register_pfs_file_open_end(locker, fd);
+ if (locker != NULL) {
+ PSI_FILE_CALL(end_file_open_wait_and_bind_to_descriptor)(
+ locker, fd);
+ }
#endif
if (fd < 0) {
@@ -4063,7 +4066,7 @@ row_merge_file_create_low(
/** Create a merge file in the given location.
@param[out] merge_file merge file structure
-@param[in] path location for creating temporary file
+@param[in] path location for creating temporary file, or NULL
@return file descriptor, or -1 on failure */
int
row_merge_file_create(
@@ -4094,15 +4097,20 @@ row_merge_file_destroy_low(
#ifdef UNIV_PFS_IO
struct PSI_file_locker* locker = NULL;
PSI_file_locker_state state;
- register_pfs_file_io_begin(&state, locker,
- fd, 0, PSI_FILE_CLOSE,
- __FILE__, __LINE__);
+ locker = PSI_FILE_CALL(get_thread_file_descriptor_locker)(
+ &state, fd, PSI_FILE_CLOSE);
+ if (locker != NULL) {
+ PSI_FILE_CALL(start_file_wait)(
+ locker, 0, __FILE__, __LINE__);
+ }
#endif
if (fd >= 0) {
close(fd);
}
#ifdef UNIV_PFS_IO
- register_pfs_file_io_end(locker, 0);
+ if (locker != NULL) {
+ PSI_FILE_CALL(end_file_wait)(locker, 0);
+ }
#endif
}
/*********************************************************************//**
@@ -4373,7 +4381,8 @@ row_merge_rename_tables_dict(
@param[in,out] index index
@param[in] add_v new virtual columns added along with add index call
@return DB_SUCCESS or error code */
-static MY_ATTRIBUTE((nonnull, warn_unused_result))
+MY_ATTRIBUTE((nonnull(1,2,3), warn_unused_result))
+static
dberr_t
row_merge_create_index_graph(
trx_t* trx,
@@ -4495,7 +4504,6 @@ row_merge_create_index(
ut_a(index);
index->parser = index_def->parser;
- index->is_ngram = index_def->is_ngram;
index->has_new_v_col = has_new_v_col;
/* Note the id of the transaction that created this
@@ -4729,6 +4737,7 @@ row_merge_build_indexes(
for (i = 0; i < n_indexes; i++) {
merge_files[i].fd = -1;
+ merge_files[i].offset = 0;
}
total_static_cost = COST_BUILD_INDEX_STATIC * n_indexes + COST_READ_CLUSTERED_INDEX;
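
The row0log.cc and row0merge.cc hunks above move the sort-log and merge-block I/O from os_file_write()/os_file_read_no_error_handling() on OS_FILE_FROM_FD(fd) to the *_int_fd() helpers, which take the plain int descriptor and report success as a bool rather than a dberr_t. A hedged sketch of that convenience-wrapper shape, built on POSIX pwrite() (the helper name is hypothetical, and a real implementation would also retry short writes and map errno for diagnostics):

    #include <unistd.h>

    // One positioned write on a plain int descriptor; success as a bool.
    static bool write_block_int_fd(int fd, const void* buf,
                                   size_t len, off_t offset)
    {
        return pwrite(fd, buf, len, offset) == static_cast<ssize_t>(len);
    }
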
diff --git a/storage/innobase/row/row0mysql.cc b/storage/innobase/row/row0mysql.cc
index 57ba35a57f6..2a4be043984 100644
--- a/storage/innobase/row/row0mysql.cc
+++ b/storage/innobase/row/row0mysql.cc
@@ -1197,58 +1197,6 @@ row_get_prebuilt_insert_row(
}
/*********************************************************************//**
-Updates the table modification counter and calculates new estimates
-for table and index statistics if necessary. */
-UNIV_INLINE
-void
-row_update_statistics_if_needed(
-/*============================*/
- dict_table_t* table) /*!< in: table */
-{
- ib_uint64_t counter;
- ib_uint64_t n_rows;
-
- if (!table->stat_initialized) {
- DBUG_EXECUTE_IF(
- "test_upd_stats_if_needed_not_inited",
- fprintf(stderr, "test_upd_stats_if_needed_not_inited"
- " was executed\n");
- );
- return;
- }
-
- counter = table->stat_modified_counter++;
- n_rows = dict_table_get_n_rows(table);
-
- if (dict_stats_is_persistent_enabled(table)) {
- if (counter > n_rows / 10 /* 10% */
- && dict_stats_auto_recalc_is_enabled(table)) {
-
- dict_stats_recalc_pool_add(table);
- table->stat_modified_counter = 0;
- }
- return;
- }
-
- /* Calculate new statistics if 1 / 16 of table has been modified
- since the last time a statistics batch was run.
- We calculate statistics at most every 16th round, since we may have
- a counter table which is very small and updated very often. */
- ib_uint64_t threshold= 16 + n_rows / 16; /* 6.25% */
-
- if (srv_stats_modified_counter) {
- threshold= ut_min((ib_uint64_t)srv_stats_modified_counter, threshold);
- }
-
- if (counter > threshold) {
-
- ut_ad(!mutex_own(&dict_sys->mutex));
- /* this will reset table->stat_modified_counter to 0 */
- dict_stats_update(table, DICT_STATS_RECALC_TRANSIENT);
- }
-}
-
-/*********************************************************************//**
Sets an AUTO_INC type lock on the table mentioned in prebuilt. The
AUTO_INC lock gives exclusive access to the auto-inc counter of the
table. The lock is reserved only for the duration of an SQL statement.
@@ -1649,7 +1597,7 @@ error_exit:
ut_memcpy(prebuilt->row_id, node->row_id_buf, DATA_ROW_ID_LEN);
}
- row_update_statistics_if_needed(table);
+ dict_stats_update_if_needed(table);
trx->op_info = "";
if (blob_heap != NULL) {
@@ -1895,6 +1843,7 @@ row_update_for_mysql_using_upd_graph(
ut_ad(trx);
ut_a(prebuilt->magic_n == ROW_PREBUILT_ALLOCATED);
ut_a(prebuilt->magic_n2 == ROW_PREBUILT_ALLOCATED);
+ ut_ad(table->stat_initialized);
UT_NOT_USED(mysql_rec);
if (!table->is_readable()) {
@@ -1931,6 +1880,8 @@ row_update_for_mysql_using_upd_graph(
}
node = prebuilt->upd_node;
+ const bool is_delete = node->is_delete;
+ ut_ad(node->table == table);
if (node->cascade_heap) {
mem_heap_empty(node->cascade_heap);
@@ -2101,8 +2052,11 @@ run_again:
thr->fk_cascade_depth = 0;
- /* Update the statistics only after completing all cascaded
- operations */
+ /* Update the statistics of each involved table
+ only after completing all operations, including
+ FOREIGN KEY...ON...CASCADE|SET NULL. */
+ bool update_statistics;
+
for (upd_cascade_t::iterator i = processed_cascades->begin();
i != processed_cascades->end();
++i) {
@@ -2116,16 +2070,25 @@ run_again:
than protecting the following code with a latch. */
dict_table_n_rows_dec(node->table);
+ update_statistics = !srv_stats_include_delete_marked;
srv_stats.n_rows_deleted.inc(size_t(trx->id));
} else {
+ update_statistics
+ = !(node->cmpl_info & UPD_NODE_NO_ORD_CHANGE);
srv_stats.n_rows_updated.inc(size_t(trx->id));
}
- row_update_statistics_if_needed(node->table);
+ if (update_statistics) {
+ dict_stats_update_if_needed(node->table);
+ } else {
+ /* Always update the table modification counter. */
+ node->table->stat_modified_counter++;
+ }
+
que_graph_free_recursive(node);
}
- if (node->is_delete) {
+ if (is_delete) {
/* Not protected by dict_table_stats_lock() for performance
reasons, we would rather get garbage in stat_n_rows (which is
just an estimate anyway) than protecting the following code
@@ -2137,25 +2100,24 @@ run_again:
} else {
srv_stats.n_rows_deleted.inc(size_t(trx->id));
}
+
+ update_statistics = !srv_stats_include_delete_marked;
} else {
if (table->is_system_db) {
srv_stats.n_system_rows_updated.inc(size_t(trx->id));
} else {
srv_stats.n_rows_updated.inc(size_t(trx->id));
}
+
+ update_statistics
+ = !(node->cmpl_info & UPD_NODE_NO_ORD_CHANGE);
}
- /* We update table statistics only if it is a DELETE or UPDATE
- that changes indexed columns, UPDATEs that change only non-indexed
- columns would not affect statistics. */
- if (node->is_delete || !(node->cmpl_info & UPD_NODE_NO_ORD_CHANGE)) {
- row_update_statistics_if_needed(prebuilt->table);
+ if (update_statistics) {
+ dict_stats_update_if_needed(prebuilt->table);
} else {
- /* Update the table modification counter even when
- non-indexed columns change if statistics is initialized. */
- if (prebuilt->table->stat_initialized) {
- prebuilt->table->stat_modified_counter++;
- }
+ /* Always update the table modification counter. */
+ prebuilt->table->stat_modified_counter++;
}
trx->op_info = "";
@@ -5122,12 +5084,10 @@ loop:
case DB_INTERRUPTED:
goto func_exit;
default:
- {
- const char* doing = "CHECK TABLE";
- ib::warn() << doing << " on index " << index->name << " of"
+ ib::warn() << "CHECK TABLE on index " << index->name << " of"
" table " << index->table->name << " returned " << ret;
- /* fall through (this error is ignored by CHECK TABLE) */
- }
+ /* (this error is ignored by CHECK TABLE) */
+ /* fall through */
case DB_END_OF_INDEX:
ret = DB_SUCCESS;
func_exit:
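
The row0mysql.cc hunk above removes the local row_update_statistics_if_needed() in favour of dict_stats_update_if_needed(); the deleted body documents the transient-statistics heuristic: recalculate once more than about 1/16 of the table (with a floor of 16 rows) has been modified, optionally capped by srv_stats_modified_counter, while persistent statistics queue a recalc after roughly 10% of the rows change. A small worked illustration of the transient threshold, assuming the formula shown in the removed code:

    // threshold = 16 + n_rows / 16  (about 6.25% of the table, minimum 16)
    static unsigned long long stats_recalc_threshold(unsigned long long n_rows)
    {
        return 16 + n_rows / 16;
    }
    // e.g. n_rows = 1,000,000 -> 62,516 modifications before a recalc;
    //      n_rows = 100       -> 22 modifications.
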
diff --git a/storage/innobase/row/row0purge.cc b/storage/innobase/row/row0purge.cc
index e49fd7f0f8c..86a9e1259ac 100644
--- a/storage/innobase/row/row0purge.cc
+++ b/storage/innobase/row/row0purge.cc
@@ -27,6 +27,7 @@ Created 3/14/1997 Heikki Tuuri
#include "row0purge.h"
#include "fsp0fsp.h"
#include "mach0data.h"
+#include "dict0stats.h"
#include "trx0rseg.h"
#include "trx0trx.h"
#include "trx0roll.h"
@@ -536,8 +537,9 @@ row_purge_remove_sec_if_poss_leaf(
success = false;
}
}
- /* fall through (the index entry is still needed,
+ /* (The index entry is still needed,
or the deletion succeeded) */
+ /* fall through */
case ROW_NOT_DELETED_REF:
/* The index entry is still needed. */
case ROW_BUFFERED:
@@ -952,10 +954,13 @@ row_purge_record_func(
switch (node->rec_type) {
case TRX_UNDO_DEL_MARK_REC:
purged = row_purge_del_mark(node);
- if (!purged) {
- break;
+ if (purged) {
+ if (node->table->stat_initialized
+ && srv_stats_include_delete_marked) {
+ dict_stats_update_if_needed(node->table);
+ }
+ MONITOR_INC(MONITOR_N_DEL_ROW_PURGE);
}
- MONITOR_INC(MONITOR_N_DEL_ROW_PURGE);
break;
default:
if (!updated_extern) {
diff --git a/storage/innobase/row/row0sel.cc b/storage/innobase/row/row0sel.cc
index 2bee83808a5..3bae9bd25ac 100644
--- a/storage/innobase/row/row0sel.cc
+++ b/storage/innobase/row/row0sel.cc
@@ -2995,27 +2995,32 @@ row_sel_field_store_in_mysql_format_func(
# define row_sel_store_mysql_field(m,p,r,i,o,f,t) \
row_sel_store_mysql_field_func(m,p,r,o,f,t)
#endif /* UNIV_DEBUG */
-/**************************************************************//**
-Convert a field in the Innobase format to a field in the MySQL format. */
+/** Convert a field in the Innobase format to a field in the MySQL format.
+@param[out] mysql_rec record in the MySQL format
+@param[in,out] prebuilt prebuilt struct
+@param[in] rec InnoDB record; must be protected
+ by a page latch
+@param[in] index index of rec
+@param[in] offsets array returned by rec_get_offsets()
+@param[in] field_no templ->rec_field_no or
+ templ->clust_rec_field_no
+ or templ->icp_rec_field_no
+ or sec field no if clust_templ_for_sec
+ is TRUE
+@param[in] templ row template
+*/
static MY_ATTRIBUTE((warn_unused_result))
ibool
row_sel_store_mysql_field_func(
-/*===========================*/
- byte* mysql_rec, /*!< out: record in the
- MySQL format */
- row_prebuilt_t* prebuilt, /*!< in/out: prebuilt struct */
- const rec_t* rec, /*!< in: InnoDB record;
- must be protected by
- a page latch */
+ byte* mysql_rec,
+ row_prebuilt_t* prebuilt,
+ const rec_t* rec,
#ifdef UNIV_DEBUG
- const dict_index_t* index, /*!< in: index of rec */
-#endif /* UNIV_DEBUG */
- const ulint* offsets, /*!< in: array returned by
- rec_get_offsets() */
- ulint field_no, /*!< in: templ->rec_field_no or
- templ->clust_rec_field_no or
- templ->icp_rec_field_no */
- const mysql_row_templ_t*templ) /*!< in: row template */
+ const dict_index_t* index,
+#endif
+ const ulint* offsets,
+ ulint field_no,
+ const mysql_row_templ_t*templ)
{
DBUG_ENTER("row_sel_store_mysql_field_func");
@@ -3151,31 +3156,36 @@ row_sel_store_mysql_field_func(
DBUG_RETURN(TRUE);
}
-/**************************************************************//**
-Convert a row in the Innobase format to a row in the MySQL format.
+/** Convert a row in the Innobase format to a row in the MySQL format.
Note that the template in prebuilt may advise us to copy only a few
columns to mysql_rec, other columns are left blank. All columns may not
be needed in the query.
+@param[out] mysql_rec row in the MySQL format
+@param[in] prebuilt prebuilt structure
+@param[in] rec Innobase record in the index
+ which was described in prebuilt's
+ template, or in the clustered index;
+ must be protected by a page latch
+@param[in] vrow virtual columns
+@param[in] rec_clust whether the rec in the clustered index
+@param[in] index index of rec
+@param[in] offsets array returned by rec_get_offsets(rec)
+@param[in] clust_templ_for_sec TRUE if rec belongs to secondary index
+ but the prebuilt->template is in
+ clustered index format and it is
+ used only for end range comparison
@return TRUE on success, FALSE if not all columns could be retrieved */
static MY_ATTRIBUTE((warn_unused_result))
ibool
row_sel_store_mysql_rec(
-/*====================*/
- byte* mysql_rec, /*!< out: row in the MySQL format */
- row_prebuilt_t* prebuilt, /*!< in: prebuilt struct */
- const rec_t* rec, /*!< in: Innobase record in the index
- which was described in prebuilt's
- template, or in the clustered index;
- must be protected by a page latch */
- const dtuple_t* vrow, /*!< in: virtual columns */
- ibool rec_clust, /*!< in: TRUE if rec is in the
- clustered index instead of
- prebuilt->index */
- const dict_index_t* index, /*!< in: index of rec */
- const ulint* offsets) /*!< in: array returned by
- rec_get_offsets(rec) */
+ byte* mysql_rec,
+ row_prebuilt_t* prebuilt,
+ const rec_t* rec,
+ const dtuple_t* vrow,
+ bool rec_clust,
+ const dict_index_t* index,
+ const ulint* offsets)
{
- ulint i;
DBUG_ENTER("row_sel_store_mysql_rec");
ut_ad(rec_clust || index == prebuilt->index);
@@ -3185,7 +3195,7 @@ row_sel_store_mysql_rec(
row_mysql_prebuilt_free_blob_heap(prebuilt);
}
- for (i = 0; i < prebuilt->n_template; i++) {
+ for (ulint i = 0; i < prebuilt->n_template; i++) {
const mysql_row_templ_t*templ = &prebuilt->mysql_template[i];
if (templ->is_virtual && dict_index_is_clust(index)) {
@@ -4027,7 +4037,7 @@ row_search_idx_cond_check(
if (!prebuilt->need_to_access_clustered
|| dict_index_is_clust(prebuilt->index)) {
if (!row_sel_store_mysql_rec(
- mysql_rec, prebuilt, rec, NULL, FALSE,
+ mysql_rec, prebuilt, rec, NULL, false,
prebuilt->index, offsets)) {
ut_ad(dict_index_is_clust(prebuilt->index));
return(ICP_NO_MATCH);
@@ -4399,7 +4409,8 @@ row_search_mvcc(
if (!row_sel_store_mysql_rec(
buf, prebuilt,
- rec, NULL, FALSE, index, offsets)) {
+ rec, NULL, false, index,
+ offsets)) {
/* Only fresh inserts may contain
incomplete externally stored
columns. Pretend that such
@@ -5371,7 +5382,7 @@ requires_clust_rec:
appropriate version of the clustered index record. */
if (!row_sel_store_mysql_rec(
buf, prebuilt, result_rec, vrow,
- TRUE, clust_index, offsets)) {
+ true, clust_index, offsets)) {
goto next_rec;
}
}
diff --git a/storage/innobase/row/row0uins.cc b/storage/innobase/row/row0uins.cc
index 9288adb21a4..934b5ad5a7a 100644
--- a/storage/innobase/row/row0uins.cc
+++ b/storage/innobase/row/row0uins.cc
@@ -25,6 +25,7 @@ Created 2/25/1997 Heikki Tuuri
#include "row0uins.h"
#include "dict0dict.h"
+#include "dict0stats.h"
#include "dict0boot.h"
#include "dict0crea.h"
#include "trx0undo.h"
@@ -508,6 +509,23 @@ row_undo_ins(
mutex_exit(&dict_sys->mutex);
}
+
+ if (err == DB_SUCCESS && node->table->stat_initialized) {
+ /* Not protected by dict_table_stats_lock() for
+ performance reasons, we would rather get garbage
+ in stat_n_rows (which is just an estimate anyway)
+ than protecting the following code with a latch. */
+ dict_table_n_rows_dec(node->table);
+
+ /* Do not attempt to update statistics when
+ executing ROLLBACK in the InnoDB SQL
+ interpreter, because in that case we would
+ already be holding dict_sys->mutex, which
+ would be acquired when updating statistics. */
+ if (!dict_locked) {
+ dict_stats_update_if_needed(node->table);
+ }
+ }
}
dict_table_close(node->table, dict_locked, FALSE);
diff --git a/storage/innobase/row/row0umod.cc b/storage/innobase/row/row0umod.cc
index eefe9fb2bd8..8fa5c2ccbff 100644
--- a/storage/innobase/row/row0umod.cc
+++ b/storage/innobase/row/row0umod.cc
@@ -1,6 +1,7 @@
/*****************************************************************************
Copyright (c) 1997, 2017, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 2017, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -27,6 +28,7 @@ Created 2/27/1997 Heikki Tuuri
#include "row0umod.h"
#include "dict0dict.h"
+#include "dict0stats.h"
#include "dict0boot.h"
#include "trx0undo.h"
#include "trx0roll.h"
@@ -1251,8 +1253,38 @@ row_undo_mod(
}
if (err == DB_SUCCESS) {
-
err = row_undo_mod_clust(node, thr);
+
+ bool update_statistics
+ = !(node->cmpl_info & UPD_NODE_NO_ORD_CHANGE);
+
+ if (err == DB_SUCCESS && node->table->stat_initialized) {
+ switch (node->rec_type) {
+ case TRX_UNDO_UPD_EXIST_REC:
+ break;
+ case TRX_UNDO_DEL_MARK_REC:
+ dict_table_n_rows_inc(node->table);
+ update_statistics = update_statistics
+ || !srv_stats_include_delete_marked;
+ break;
+ case TRX_UNDO_UPD_DEL_REC:
+ dict_table_n_rows_dec(node->table);
+ update_statistics = update_statistics
+ || !srv_stats_include_delete_marked;
+ break;
+ }
+
+ /* Do not attempt to update statistics when
+ executing ROLLBACK in the InnoDB SQL
+ interpreter, because in that case we would
+ already be holding dict_sys->mutex, which
+ would be acquired when updating statistics. */
+ if (update_statistics && !dict_locked) {
+ dict_stats_update_if_needed(node->table);
+ } else {
+ node->table->stat_modified_counter++;
+ }
+ }
}
dict_table_close(node->table, dict_locked, FALSE);
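The row0uins.cc and row0umod.cc hunks above share one pattern: when an operation is rolled back, the cached row-count estimate is adjusted in the opposite direction without taking dict_table_stats_lock(), and dict_stats_update_if_needed() is only invoked when dict_sys->mutex is not already held. A minimal standalone sketch of that bookkeeping, using simplified stand-in types rather than the real dict_table_t or undo record structures (every name below is illustrative, not InnoDB API):

#include <atomic>
#include <cstdint>

// Illustrative stand-ins for the dict_table_t fields touched by the patch.
struct TableStats {
    std::atomic<int64_t>  n_rows{0};    // estimate only; updated without a latch
    std::atomic<uint64_t> modified{0};  // counter that drives deferred stat updates
    bool stat_initialized = false;
};

enum class UndoRecType { UPD_EXIST, DEL_MARK, UPD_DEL };

// Hypothetical stub standing in for dict_stats_update_if_needed().
void stats_update_if_needed(TableStats& t)
{
    // The real function recomputes persistent statistics once the
    // modification counter crosses a threshold; here we just reset it.
    t.modified.store(0);
}

// Mirrors the row0umod.cc logic: fix up the row count for the undone
// operation, then either trigger a stats update or only bump the counter
// when the dictionary mutex is already held (dict_locked == true).
void undo_mod_bookkeeping(TableStats& t, UndoRecType type,
                          bool ord_fields_changed, bool dict_locked,
                          bool stats_include_delete_marked)
{
    if (!t.stat_initialized) {
        return;
    }

    bool update = ord_fields_changed;

    switch (type) {
    case UndoRecType::UPD_EXIST:
        break;                          // row count unchanged
    case UndoRecType::DEL_MARK:
        ++t.n_rows;                     // undoing a delete-mark restores a row
        update = update || !stats_include_delete_marked;
        break;
    case UndoRecType::UPD_DEL:
        --t.n_rows;                     // undoing an update-by-delete removes it again
        update = update || !stats_include_delete_marked;
        break;
    }

    if (update && !dict_locked) {
        stats_update_if_needed(t);      // safe: dict_sys->mutex not held here
    } else {
        ++t.modified;                   // defer; a later statement picks it up
    }
}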
diff --git a/storage/innobase/row/row0upd.cc b/storage/innobase/row/row0upd.cc
index 6ddc2c11cc9..69c828e54c8 100644
--- a/storage/innobase/row/row0upd.cc
+++ b/storage/innobase/row/row0upd.cc
@@ -2603,7 +2603,6 @@ static MY_ATTRIBUTE((nonnull, warn_unused_result))
dberr_t
row_upd_clust_rec_by_insert(
/*========================*/
- ulint flags, /*!< in: undo logging and locking flags */
upd_node_t* node, /*!< in/out: row update node */
dict_index_t* index, /*!< in: clustered index of the record */
que_thr_t* thr, /*!< in: query thread */
@@ -2672,7 +2671,7 @@ row_upd_clust_rec_by_insert(
}
err = btr_cur_del_mark_set_clust_rec(
- flags, btr_cur_get_block(btr_cur), rec, index, offsets,
+ btr_cur_get_block(btr_cur), rec, index, offsets,
thr, node->row, mtr);
if (err != DB_SUCCESS) {
err_exit:
@@ -2913,7 +2912,6 @@ static MY_ATTRIBUTE((nonnull, warn_unused_result))
dberr_t
row_upd_del_mark_clust_rec(
/*=======================*/
- ulint flags, /*!< in: undo logging and locking flags */
upd_node_t* node, /*!< in: row update node */
dict_index_t* index, /*!< in: clustered index */
ulint* offsets,/*!< in/out: rec_get_offsets() for the
@@ -2951,7 +2949,7 @@ row_upd_del_mark_clust_rec(
rec = btr_cur_get_rec(btr_cur);
err = btr_cur_del_mark_set_clust_rec(
- flags, btr_cur_get_block(btr_cur), rec,
+ btr_cur_get_block(btr_cur), rec,
index, offsets, thr, node->row, mtr);
if (err == DB_SUCCESS && referenced) {
@@ -3119,9 +3117,9 @@ row_upd_clust_step(
offsets = rec_get_offsets(rec, index, offsets_,
ULINT_UNDEFINED, &heap);
- if (!node->has_clust_rec_x_lock) {
+ if (!flags && !node->has_clust_rec_x_lock) {
err = lock_clust_rec_modify_check_and_lock(
- flags, btr_pcur_get_block(pcur),
+ 0, btr_pcur_get_block(pcur),
rec, index, offsets, thr);
if (err != DB_SUCCESS) {
mtr_commit(&mtr);
@@ -3138,7 +3136,7 @@ row_upd_clust_step(
if (node->is_delete) {
err = row_upd_del_mark_clust_rec(
- flags, node, index, offsets, thr, referenced, foreign, &mtr);
+ node, index, offsets, thr, referenced, foreign, &mtr);
if (err == DB_SUCCESS) {
node->state = UPD_NODE_UPDATE_ALL_SEC;
@@ -3184,7 +3182,7 @@ row_upd_clust_step(
externally! */
err = row_upd_clust_rec_by_insert(
- flags, node, index, thr, referenced, foreign, &mtr);
+ node, index, thr, referenced, foreign, &mtr);
if (err != DB_SUCCESS) {
goto exit_func;
@@ -3231,7 +3229,7 @@ row_upd(
ut_ad(!thr_get_trx(thr)->in_rollback);
DBUG_PRINT("row_upd", ("table: %s", node->table->name.m_name));
- DBUG_PRINT("row_upd", ("info bits in update vector: 0x%lx",
+ DBUG_PRINT("row_upd", ("info bits in update vector: 0x" ULINTPFx,
node->update ? node->update->info_bits: 0));
DBUG_PRINT("row_upd", ("foreign_id: %s",
node->foreign ? node->foreign->id: "NULL"));
diff --git a/storage/innobase/srv/srv0srv.cc b/storage/innobase/srv/srv0srv.cc
index 6e010fadc6d..d7b7711e402 100644
--- a/storage/innobase/srv/srv0srv.cc
+++ b/storage/innobase/srv/srv0srv.cc
@@ -1,6 +1,6 @@
/*****************************************************************************
-Copyright (c) 1995, 2016, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 1995, 2017, Oracle and/or its affiliates. All Rights Reserved.
Copyright (c) 2008, 2009 Google Inc.
Copyright (c) 2009, Percona Inc.
Copyright (c) 2013, 2017, MariaDB Corporation.
@@ -195,9 +195,6 @@ my_bool srv_use_mtflush;
my_bool srv_master_thread_disabled_debug;
/** Event used to inform that master thread is disabled. */
static os_event_t srv_master_thread_disabled_event;
-/** Debug variable to find if any background threads are adding
-to purge during slow shutdown. */
-extern bool trx_commit_disallowed;
#endif /* UNIV_DEBUG */
/*------------------------- LOG FILES ------------------------ */
@@ -539,16 +536,16 @@ UNIV_INTERN ulong srv_buf_dump_status_frequency;
/** Acquire the system_mutex. */
#define srv_sys_mutex_enter() do { \
- mutex_enter(&srv_sys->mutex); \
+ mutex_enter(&srv_sys.mutex); \
} while (0)
/** Test if the system mutex is owned. */
-#define srv_sys_mutex_own() (mutex_own(&srv_sys->mutex) \
+#define srv_sys_mutex_own() (mutex_own(&srv_sys.mutex) \
&& !srv_read_only_mode)
/** Release the system mutex. */
#define srv_sys_mutex_exit() do { \
- mutex_exit(&srv_sys->mutex); \
+ mutex_exit(&srv_sys.mutex); \
} while (0)
#define fetch_lock_wait_timeout(trx) \
@@ -640,7 +637,7 @@ struct srv_sys_t{
ulint n_sys_threads; /*!< size of the sys_threads
array */
- srv_slot_t* sys_threads; /*!< server thread table;
+ srv_slot_t sys_threads[32 + 1]; /*!< server thread table;
os_event_set() and
os_event_reset() on
sys_threads[]->event are
@@ -657,7 +654,7 @@ struct srv_sys_t{
activity */
};
-static srv_sys_t* srv_sys;
+static srv_sys_t srv_sys;
/** Event to signal srv_monitor_thread. Not protected by a mutex.
Set after setting srv_print_innodb_monitor. */
@@ -682,10 +679,10 @@ and/or load it during startup. */
char srv_buffer_pool_dump_at_shutdown = TRUE;
char srv_buffer_pool_load_at_startup = TRUE;
-/** Slot index in the srv_sys->sys_threads array for the purge thread. */
+/** Slot index in the srv_sys.sys_threads array for the purge thread. */
static const ulint SRV_PURGE_SLOT = 1;
-/** Slot index in the srv_sys->sys_threads array for the master thread. */
+/** Slot index in the srv_sys.sys_threads array for the master thread. */
static const ulint SRV_MASTER_SLOT = 0;
#ifdef HAVE_PSI_STAGE_INTERFACE
@@ -825,21 +822,21 @@ srv_reserve_slot(
switch (type) {
case SRV_MASTER:
- slot = &srv_sys->sys_threads[SRV_MASTER_SLOT];
+ slot = &srv_sys.sys_threads[SRV_MASTER_SLOT];
break;
case SRV_PURGE:
- slot = &srv_sys->sys_threads[SRV_PURGE_SLOT];
+ slot = &srv_sys.sys_threads[SRV_PURGE_SLOT];
break;
case SRV_WORKER:
/* Find an empty slot, skip the master and purge slots. */
- for (slot = &srv_sys->sys_threads[2];
+ for (slot = &srv_sys.sys_threads[2];
slot->in_use;
++slot) {
- ut_a(slot < &srv_sys->sys_threads[
- srv_sys->n_sys_threads]);
+ ut_a(slot < &srv_sys.sys_threads[
+ srv_sys.n_sys_threads]);
}
break;
@@ -855,7 +852,7 @@ srv_reserve_slot(
ut_ad(srv_slot_get_type(slot) == type);
- my_atomic_addlint(&srv_sys->n_threads_active[type], 1);
+ my_atomic_addlint(&srv_sys.n_threads_active[type], 1);
srv_sys_mutex_exit();
@@ -885,13 +882,13 @@ srv_suspend_thread_low(
case SRV_MASTER:
/* We have only one master thread and it
should be the first entry always. */
- ut_a(srv_sys->n_threads_active[type] == 1);
+ ut_a(srv_sys.n_threads_active[type] == 1);
break;
case SRV_PURGE:
/* We have only one purge coordinator thread
and it should be the second entry always. */
- ut_a(srv_sys->n_threads_active[type] == 1);
+ ut_a(srv_sys.n_threads_active[type] == 1);
break;
case SRV_WORKER:
@@ -902,7 +899,7 @@ srv_suspend_thread_low(
ut_a(!slot->suspended);
slot->suspended = TRUE;
- if (my_atomic_addlint(&srv_sys->n_threads_active[type], -1) < 0) {
+ if (my_atomic_addlint(&srv_sys.n_threads_active[type], -1) < 0) {
ut_error;
}
@@ -959,7 +956,7 @@ srv_resume_thread(srv_slot_t* slot, int64_t sig_count = 0, bool wait = true,
ut_ad(slot->suspended);
slot->suspended = FALSE;
- my_atomic_addlint(&srv_sys->n_threads_active[slot->type], 1);
+ my_atomic_addlint(&srv_sys.n_threads_active[slot->type], 1);
srv_sys_mutex_exit();
return(timeout);
}
@@ -981,8 +978,8 @@ srv_release_threads(enum srv_thread_type type, ulint n)
srv_sys_mutex_enter();
- for (ulint i = 0; i < srv_sys->n_sys_threads; i++) {
- srv_slot_t* slot = &srv_sys->sys_threads[i];
+ for (ulint i = 0; i < srv_sys.n_sys_threads; i++) {
+ srv_slot_t* slot = &srv_sys.sys_threads[i];
if (!slot->in_use || srv_slot_get_type(slot) != type) {
continue;
@@ -1002,7 +999,7 @@ srv_release_threads(enum srv_thread_type type, ulint n)
should be the first entry always. */
ut_a(n == 1);
ut_a(i == SRV_MASTER_SLOT);
- ut_a(srv_sys->n_threads_active[type] == 0);
+ ut_a(srv_sys.n_threads_active[type] == 0);
break;
case SRV_PURGE:
@@ -1011,12 +1008,12 @@ srv_release_threads(enum srv_thread_type type, ulint n)
ut_a(n == 1);
ut_a(i == SRV_PURGE_SLOT);
ut_a(srv_n_purge_threads > 0);
- ut_a(srv_sys->n_threads_active[type] == 0);
+ ut_a(srv_sys.n_threads_active[type] == 0);
break;
case SRV_WORKER:
ut_a(srv_n_purge_threads > 1);
- ut_a(srv_sys->n_threads_active[type]
+ ut_a(srv_sys.n_threads_active[type]
< srv_n_purge_threads - 1);
break;
}
@@ -1052,32 +1049,19 @@ static
void
srv_init()
{
- ulint n_sys_threads = 0;
- ulint srv_sys_sz = sizeof(*srv_sys);
-
mutex_create(LATCH_ID_SRV_INNODB_MONITOR, &srv_innodb_monitor_mutex);
- if (!srv_read_only_mode) {
-
- /* Number of purge threads + master thread */
- n_sys_threads = srv_n_purge_threads + 1;
-
- srv_sys_sz += n_sys_threads * sizeof(*srv_sys->sys_threads);
- }
-
- srv_sys = static_cast<srv_sys_t*>(ut_zalloc_nokey(srv_sys_sz));
-
- srv_sys->n_sys_threads = n_sys_threads;
+ srv_sys.n_sys_threads = srv_read_only_mode
+ ? 0
+ : srv_n_purge_threads + 1/* purge coordinator */;
if (!srv_read_only_mode) {
- mutex_create(LATCH_ID_SRV_SYS, &srv_sys->mutex);
+ mutex_create(LATCH_ID_SRV_SYS, &srv_sys.mutex);
- mutex_create(LATCH_ID_SRV_SYS_TASKS, &srv_sys->tasks_mutex);
+ mutex_create(LATCH_ID_SRV_SYS_TASKS, &srv_sys.tasks_mutex);
- srv_sys->sys_threads = (srv_slot_t*) &srv_sys[1];
-
- for (ulint i = 0; i < srv_sys->n_sys_threads; ++i) {
- srv_slot_t* slot = &srv_sys->sys_threads[i];
+ for (ulint i = 0; i < srv_sys.n_sys_threads; ++i) {
+ srv_slot_t* slot = &srv_sys.sys_threads[i];
slot->event = os_event_create(0);
@@ -1092,7 +1076,7 @@ srv_init()
buf_flush_event = os_event_create("buf_flush_event");
- UT_LIST_INIT(srv_sys->tasks, &que_thr_t::queue);
+ UT_LIST_INIT(srv_sys.tasks, &que_thr_t::queue);
}
srv_buf_resize_event = os_event_create(0);
@@ -1136,7 +1120,7 @@ void
srv_free(void)
/*==========*/
{
- if (!srv_sys) {
+ if (!srv_buf_resize_event) {
return;
}
@@ -1144,13 +1128,11 @@ srv_free(void)
mutex_free(&page_zip_stat_per_index_mutex);
if (!srv_read_only_mode) {
- mutex_free(&srv_sys->mutex);
- mutex_free(&srv_sys->tasks_mutex);
-
- for (ulint i = 0; i < srv_sys->n_sys_threads; ++i) {
- srv_slot_t* slot = &srv_sys->sys_threads[i];
+ mutex_free(&srv_sys.mutex);
+ mutex_free(&srv_sys.tasks_mutex);
- os_event_destroy(slot->event);
+ for (ulint i = 0; i < srv_sys.n_sys_threads; ++i) {
+ os_event_destroy(srv_sys.sys_threads[i].event);
}
os_event_destroy(srv_error_event);
@@ -1161,18 +1143,11 @@ srv_free(void)
os_event_destroy(srv_buf_resize_event);
-#ifdef UNIV_DEBUG
- os_event_destroy(srv_master_thread_disabled_event);
- srv_master_thread_disabled_event = NULL;
-#endif /* UNIV_DEBUG */
+ ut_d(os_event_destroy(srv_master_thread_disabled_event));
dict_ind_free();
trx_i_s_cache_free(trx_i_s_cache);
-
- ut_free(srv_sys);
-
- srv_sys = 0;
}
/*********************************************************************//**
@@ -1205,7 +1180,6 @@ srv_boot(void)
srv_normalize_init_values();
sync_check_init();
- os_thread_init();
/* Reset the system variables in the recovery module. */
recv_sys_var_init();
trx_pool_init();
@@ -1502,8 +1476,10 @@ srv_export_innodb_status(void)
buf_get_total_stat(&stat);
buf_get_total_list_len(&LRU_len, &free_len, &flush_list_len);
buf_get_total_list_size_in_bytes(&buf_pools_list_size);
- fil_crypt_total_stat(&crypt_stat);
- btr_scrub_total_stat(&scrub_stat);
+ if (!srv_read_only_mode) {
+ fil_crypt_total_stat(&crypt_stat);
+ btr_scrub_total_stat(&scrub_stat);
+ }
mutex_enter(&srv_innodb_monitor_mutex);
@@ -1701,6 +1677,7 @@ srv_export_innodb_status(void)
export_vars.innodb_sec_rec_cluster_reads_avoided =
srv_stats.n_sec_rec_cluster_reads_avoided;
+ if (!srv_read_only_mode) {
export_vars.innodb_encryption_rotation_pages_read_from_cache =
crypt_stat.pages_read_from_cache;
export_vars.innodb_encryption_rotation_pages_read_from_disk =
@@ -1729,6 +1706,7 @@ srv_export_innodb_status(void)
export_vars.innodb_scrub_page_split_failures_unknown =
scrub_stat.page_split_failures_unknown;
export_vars.innodb_scrub_log = srv_stats.n_log_scrubs;
+ }
mutex_exit(&srv_innodb_monitor_mutex);
}
@@ -1878,7 +1856,7 @@ loop:
}
}
- if (srv_shutdown_state >= SRV_SHUTDOWN_CLEANUP) {
+ if (srv_shutdown_state != SRV_SHUTDOWN_NONE) {
goto exit_func;
}
@@ -1999,7 +1977,7 @@ loop:
os_event_wait_time_low(srv_error_event, 1000000, sig_count);
- if (srv_shutdown_state < SRV_SHUTDOWN_CLEANUP) {
+ if (srv_shutdown_state == SRV_SHUTDOWN_NONE) {
goto loop;
}
@@ -2020,7 +1998,7 @@ void
srv_inc_activity_count(void)
/*========================*/
{
- srv_sys->activity_count.inc();
+ srv_sys.activity_count.inc();
}
/**********************************************************************//**
@@ -2041,7 +2019,7 @@ srv_get_active_thread_type(void)
srv_sys_mutex_enter();
for (ulint i = SRV_WORKER; i <= SRV_MASTER; ++i) {
- if (srv_sys->n_threads_active[i] != 0) {
+ if (srv_sys.n_threads_active[i] != 0) {
ret = static_cast<srv_thread_type>(i);
break;
}
@@ -2076,12 +2054,12 @@ srv_active_wake_master_thread_low()
srv_inc_activity_count();
- if (my_atomic_loadlint(&srv_sys->n_threads_active[SRV_MASTER]) == 0) {
+ if (my_atomic_loadlint(&srv_sys.n_threads_active[SRV_MASTER]) == 0) {
srv_slot_t* slot;
srv_sys_mutex_enter();
- slot = &srv_sys->sys_threads[SRV_MASTER_SLOT];
+ slot = &srv_sys.sys_threads[SRV_MASTER_SLOT];
/* Only if the master thread has been started. */
@@ -2101,7 +2079,7 @@ srv_wake_purge_thread_if_not_active()
ut_ad(!srv_sys_mutex_own());
if (purge_sys->state == PURGE_STATE_RUN
- && !my_atomic_loadlint(&srv_sys->n_threads_active[SRV_PURGE])
+ && !my_atomic_loadlint(&srv_sys.n_threads_active[SRV_PURGE])
&& my_atomic_loadlint(&trx_sys->rseg_history_len)) {
srv_release_threads(SRV_PURGE, 1);
@@ -2124,7 +2102,7 @@ ulint
srv_get_activity_count(void)
/*========================*/
{
- return(srv_sys->activity_count);
+ return(srv_sys.activity_count);
}
/*******************************************************************//**
@@ -2135,7 +2113,7 @@ srv_check_activity(
/*===============*/
ulint old_activity_count) /*!< in: old activity count */
{
- return(srv_sys->activity_count != old_activity_count);
+ return(srv_sys.activity_count != old_activity_count);
}
/********************************************************************//**
@@ -2205,7 +2183,7 @@ srv_shutdown_print_master_pending(
time_elapsed = ut_difftime(current_time, *last_print_time);
if (time_elapsed > 60) {
- *last_print_time = ut_time();
+ *last_print_time = current_time;
if (n_tables_to_drop) {
ib::info() << "Waiting for " << n_tables_to_drop
@@ -2308,7 +2286,7 @@ srv_master_do_active_tasks(void)
ut_d(srv_master_do_disabled_loop());
- if (srv_shutdown_state > 0) {
+ if (srv_shutdown_state != SRV_SHUTDOWN_NONE) {
return;
}
@@ -2333,11 +2311,7 @@ srv_master_do_active_tasks(void)
/* Now see if various tasks that are performed at defined
intervals need to be performed. */
- if (srv_shutdown_state > 0) {
- return;
- }
-
- if (srv_shutdown_state > 0) {
+ if (srv_shutdown_state != SRV_SHUTDOWN_NONE) {
return;
}
@@ -2352,7 +2326,7 @@ srv_master_do_active_tasks(void)
MONITOR_SRV_DICT_LRU_MICROSECOND, counter_time);
}
- if (srv_shutdown_state > 0) {
+ if (srv_shutdown_state != SRV_SHUTDOWN_NONE) {
return;
}
@@ -2397,7 +2371,7 @@ srv_master_do_idle_tasks(void)
ut_d(srv_master_do_disabled_loop());
- if (srv_shutdown_state > 0) {
+ if (srv_shutdown_state != SRV_SHUTDOWN_NONE) {
return;
}
@@ -2413,7 +2387,7 @@ srv_master_do_idle_tasks(void)
MONITOR_INC_TIME_IN_MICRO_SECS(
MONITOR_SRV_IBUF_MERGE_MICROSECOND, counter_time);
- if (srv_shutdown_state > 0) {
+ if (srv_shutdown_state != SRV_SHUTDOWN_NONE) {
return;
}
@@ -2431,7 +2405,7 @@ srv_master_do_idle_tasks(void)
MONITOR_INC_TIME_IN_MICRO_SECS(
MONITOR_SRV_LOG_FLUSH_MICROSECOND, counter_time);
- if (srv_shutdown_state > 0) {
+ if (srv_shutdown_state != SRV_SHUTDOWN_NONE) {
return;
}
@@ -2442,70 +2416,42 @@ srv_master_do_idle_tasks(void)
counter_time);
}
-/*********************************************************************//**
-Perform the tasks during shutdown. The tasks that we do at shutdown
-depend on srv_fast_shutdown:
-2 => very fast shutdown => do no book keeping
-1 => normal shutdown => clear drop table queue and make checkpoint
-0 => slow shutdown => in addition to above do complete purge and ibuf
-merge
-@return TRUE if some work was done. FALSE otherwise */
+/** Perform shutdown tasks.
+@param[in] ibuf_merge whether to complete the change buffer merge */
static
-ibool
-srv_master_do_shutdown_tasks(
-/*=========================*/
- ib_time_t* last_print_time)/*!< last time the function
- print the message */
+void
+srv_shutdown(bool ibuf_merge)
{
- ulint n_bytes_merged = 0;
- ulint n_tables_to_drop = 0;
-
- ut_ad(!srv_read_only_mode);
+ ulint n_bytes_merged = 0;
+ ulint n_tables_to_drop;
+ ib_time_t now = ut_time();
- ++srv_main_shutdown_loops;
-
- ut_a(srv_shutdown_state > 0);
-
- /* In very fast shutdown none of the following is necessary */
- if (srv_fast_shutdown == 2) {
- return(FALSE);
- }
-
- /* ALTER TABLE in MySQL requires on Unix that the table handler
- can drop tables lazily after there no longer are SELECT
- queries to them. */
- srv_main_thread_op_info = "doing background drop tables";
- n_tables_to_drop = row_drop_tables_for_mysql_in_background();
-
- /* make sure that there is enough reusable space in the redo
- log files */
- srv_main_thread_op_info = "checking free log space";
- log_free_check();
-
- /* In case of normal shutdown we don't do ibuf merge or purge */
- if (srv_fast_shutdown == 1) {
- goto func_exit;
- }
-
- /* Do an ibuf merge */
- srv_main_thread_op_info = "doing insert buffer merge";
- n_bytes_merged = ibuf_merge_in_background(true);
-
- /* Flush logs if needed */
- srv_sync_log_buffer_in_background();
-
-func_exit:
- /* Make a new checkpoint about once in 10 seconds */
- srv_main_thread_op_info = "making checkpoint";
- log_checkpoint(TRUE, FALSE);
-
- /* Print progress message every 60 seconds during shutdown */
- if (srv_shutdown_state > 0 && srv_print_verbose_log) {
- srv_shutdown_print_master_pending(
- last_print_time, n_tables_to_drop, n_bytes_merged);
- }
+ do {
+ ut_ad(!srv_read_only_mode);
+ ut_ad(srv_shutdown_state == SRV_SHUTDOWN_CLEANUP);
+ ++srv_main_shutdown_loops;
+
+ /* FIXME: Remove the background DROP TABLE queue; it is not
+ crash-safe and breaks ACID. */
+ srv_main_thread_op_info = "doing background drop tables";
+ n_tables_to_drop = row_drop_tables_for_mysql_in_background();
+
+ if (ibuf_merge) {
+ srv_main_thread_op_info = "checking free log space";
+ log_free_check();
+ srv_main_thread_op_info = "doing insert buffer merge";
+ n_bytes_merged = ibuf_merge_in_background(true);
+
+ /* Flush logs if needed */
+ srv_sync_log_buffer_in_background();
+ }
- return(n_bytes_merged || n_tables_to_drop);
+ /* Print progress message every 60 seconds during shutdown */
+ if (srv_print_verbose_log) {
+ srv_shutdown_print_master_pending(
+ &now, n_tables_to_drop, n_bytes_merged);
+ }
+ } while (n_bytes_merged || n_tables_to_drop);
}
/*********************************************************************//**
@@ -2538,7 +2484,6 @@ DECLARE_THREAD(srv_master_thread)(
srv_slot_t* slot;
ulint old_activity_count = srv_get_activity_count();
- ib_time_t last_print_time;
ut_ad(!srv_read_only_mode);
@@ -2555,9 +2500,8 @@ DECLARE_THREAD(srv_master_thread)(
srv_main_thread_id = os_thread_pf(os_thread_get_curr_id());
slot = srv_reserve_slot(SRV_MASTER);
- ut_a(slot == srv_sys->sys_threads);
+ ut_a(slot == srv_sys.sys_threads);
- last_print_time = ut_time();
loop:
if (srv_force_recovery >= SRV_FORCE_NO_BACKGROUND) {
goto suspend_thread;
@@ -2577,14 +2521,26 @@ loop:
}
}
- while (srv_shutdown_state != SRV_SHUTDOWN_EXIT_THREADS
- && srv_master_do_shutdown_tasks(&last_print_time)) {
-
- /* Shouldn't loop here in case of very fast shutdown */
- ut_ad(srv_fast_shutdown < 2);
+suspend_thread:
+ switch (srv_shutdown_state) {
+ case SRV_SHUTDOWN_NONE:
+ break;
+ case SRV_SHUTDOWN_FLUSH_PHASE:
+ case SRV_SHUTDOWN_LAST_PHASE:
+ ut_ad(0);
+ /* fall through */
+ case SRV_SHUTDOWN_EXIT_THREADS:
+ /* srv_init_abort() must have been invoked */
+ case SRV_SHUTDOWN_CLEANUP:
+ if (srv_shutdown_state == SRV_SHUTDOWN_CLEANUP
+ && srv_fast_shutdown < 2) {
+ srv_shutdown(srv_fast_shutdown == 0);
+ }
+ srv_suspend_thread(slot);
+ my_thread_end();
+ os_thread_exit();
}
-suspend_thread:
srv_main_thread_op_info = "suspending";
srv_suspend_thread(slot);
@@ -2596,44 +2552,32 @@ suspend_thread:
srv_main_thread_op_info = "waiting for server activity";
srv_resume_thread(slot);
-
- if (srv_shutdown_state != SRV_SHUTDOWN_EXIT_THREADS) {
- goto loop;
- }
-
- my_thread_end();
- os_thread_exit();
- DBUG_RETURN(0);
+ goto loop;
}
-/**
-Check if purge should stop.
-@return true if it should shutdown. */
+/** Check if purge should stop.
+@param[in] n_purged pages purged in the last batch
+@return whether purge should exit */
static
bool
-srv_purge_should_exit(
- MYSQL_THD thd,
- ulint n_purged) /*!< in: pages purged in last batch */
+srv_purge_should_exit(ulint n_purged)
{
- switch (srv_shutdown_state) {
- case SRV_SHUTDOWN_NONE:
- if ((!srv_was_started || srv_running)
- && !thd_kill_level(thd)) {
- /* Normal operation. */
- break;
- }
- /* close_connections() was called; fall through */
- case SRV_SHUTDOWN_CLEANUP:
- case SRV_SHUTDOWN_EXIT_THREADS:
- /* Exit unless slow shutdown requested or all done. */
- return(srv_fast_shutdown != 0 || n_purged == 0);
+ ut_ad(srv_shutdown_state == SRV_SHUTDOWN_NONE
+ || srv_shutdown_state == SRV_SHUTDOWN_CLEANUP);
- case SRV_SHUTDOWN_LAST_PHASE:
- case SRV_SHUTDOWN_FLUSH_PHASE:
- ut_error;
+ if (srv_undo_sources) {
+ return(false);
}
-
- return(false);
+ if (srv_fast_shutdown) {
+ return(true);
+ }
+ /* Slow shutdown was requested. */
+ if (n_purged) {
+ /* The previous round still did some work. */
+ return(false);
+ }
+ /* Exit if there are no active transactions to roll back. */
+ return(trx_sys_any_active_transactions() == 0);
}
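Taken together, the rewritten srv_purge_should_exit() above is a small decision table: keep purging while undo sources may still be generated; once they are gone, fast shutdown exits at once, and slow shutdown exits only after a batch purged nothing and no active transactions remain. A hedged restatement with the globals turned into explicit parameters (purge_should_exit and its arguments are made-up names, not the real srv_* variables):

#include <cassert>

// Pure restatement of the exit condition; all inputs are explicit
// parameters instead of srv_undo_sources / srv_fast_shutdown globals.
bool purge_should_exit(bool undo_sources, unsigned fast_shutdown,
                       unsigned long n_purged, bool active_transactions)
{
    if (undo_sources)  return false;   // normal operation: keep the purge running
    if (fast_shutdown) return true;    // fast shutdown: stop right away
    if (n_purged)      return false;   // slow shutdown: last batch still made progress
    return !active_transactions;       // slow shutdown: done once nothing is left
}

int main()
{
    assert(!purge_should_exit(true, 0, 0, false));   // server still running
    assert(purge_should_exit(false, 1, 100, true));  // fast shutdown ignores pending work
    assert(!purge_should_exit(false, 0, 50, false)); // slow shutdown keeps purging
    assert(purge_should_exit(false, 0, 0, false));   // nothing left to do
    return 0;
}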
/*********************************************************************//**
@@ -2649,18 +2593,18 @@ srv_task_execute(void)
ut_ad(!srv_read_only_mode);
ut_a(srv_force_recovery < SRV_FORCE_NO_BACKGROUND);
- mutex_enter(&srv_sys->tasks_mutex);
+ mutex_enter(&srv_sys.tasks_mutex);
- if (UT_LIST_GET_LEN(srv_sys->tasks) > 0) {
+ if (UT_LIST_GET_LEN(srv_sys.tasks) > 0) {
- thr = UT_LIST_GET_FIRST(srv_sys->tasks);
+ thr = UT_LIST_GET_FIRST(srv_sys.tasks);
ut_a(que_node_get_type(thr->child) == QUE_NODE_PURGE);
- UT_LIST_REMOVE(srv_sys->tasks, thr);
+ UT_LIST_REMOVE(srv_sys.tasks, thr);
}
- mutex_exit(&srv_sys->tasks_mutex);
+ mutex_exit(&srv_sys.tasks_mutex);
if (thr != NULL) {
@@ -2683,6 +2627,8 @@ DECLARE_THREAD(srv_worker_thread)(
void* arg MY_ATTRIBUTE((unused))) /*!< in: a dummy parameter
required by os_thread_create */
{
+ my_thread_init();
+
srv_slot_t* slot;
ut_ad(!srv_read_only_mode);
@@ -2698,7 +2644,7 @@ DECLARE_THREAD(srv_worker_thread)(
slot = srv_reserve_slot(SRV_WORKER);
ut_a(srv_n_purge_threads > 1);
- ut_a(my_atomic_loadlint(&srv_sys->n_threads_active[SRV_WORKER])
+ ut_a(my_atomic_loadlint(&srv_sys.n_threads_active[SRV_WORKER])
< static_cast<lint>(srv_n_purge_threads));
/* We need to ensure that the worker threads exit after the
@@ -2744,16 +2690,12 @@ DECLARE_THREAD(srv_worker_thread)(
OS_THREAD_DUMMY_RETURN; /* Not reached, avoid compiler warning */
}
-/*********************************************************************//**
-Do the actual purge operation.
+/** Do the actual purge operation.
+@param[in,out] n_total_purged total number of purged pages
@return length of history list before the last purge batch. */
static
ulint
-srv_do_purge(
-/*=========*/
- MYSQL_THD thd,
- ulint n_threads, /*!< in: number of threads to use */
- ulint* n_total_purged) /*!< in/out: total pages purged */
+srv_do_purge(ulint* n_total_purged)
{
ulint n_pages_purged;
@@ -2761,6 +2703,7 @@ srv_do_purge(
static ulint n_use_threads = 0;
static ulint rseg_history_len = 0;
ulint old_activity_count = srv_get_activity_count();
+ const ulint n_threads = srv_n_purge_threads;
ut_a(n_threads > 0);
ut_ad(!srv_read_only_mode);
@@ -2822,7 +2765,7 @@ srv_do_purge(
*n_total_purged += n_pages_purged;
- } while (!srv_purge_should_exit(thd, n_pages_purged)
+ } while (!srv_purge_should_exit(n_pages_purged)
&& n_pages_purged > 0
&& purge_sys->state == PURGE_STATE_RUN);
@@ -2835,7 +2778,6 @@ static
void
srv_purge_coordinator_suspend(
/*==========================*/
- MYSQL_THD thd,
srv_slot_t* slot, /*!< in/out: Purge coordinator
thread slot */
ulint rseg_history_len) /*!< in: history list length
@@ -2897,7 +2839,7 @@ srv_purge_coordinator_suspend(
}
rw_lock_x_unlock(&purge_sys->latch);
- } while (stop && !thd_kill_level(thd));
+ } while (stop && srv_undo_sources);
srv_resume_thread(slot, 0, false);
}
@@ -2947,51 +2889,23 @@ DECLARE_THREAD(srv_purge_coordinator_thread)(
purge didn't purge any records then wait for activity. */
if (srv_shutdown_state == SRV_SHUTDOWN_NONE
- && !thd_kill_level(thd)
+ && srv_undo_sources
&& (purge_sys->state == PURGE_STATE_STOP
|| n_total_purged == 0)) {
- srv_purge_coordinator_suspend(thd, slot, rseg_history_len);
+ srv_purge_coordinator_suspend(slot, rseg_history_len);
}
ut_ad(!slot->suspended);
- if (srv_purge_should_exit(thd, n_total_purged)) {
+ if (srv_purge_should_exit(n_total_purged)) {
break;
}
n_total_purged = 0;
- rseg_history_len = srv_do_purge(
- thd, srv_n_purge_threads, &n_total_purged);
-
- } while (!srv_purge_should_exit(thd, n_total_purged));
-
- /* Ensure that we don't jump out of the loop unless the
- exit condition is satisfied. */
-
- ut_a(srv_purge_should_exit(thd, n_total_purged));
-
- /* Ensure that all records are purged on slow shutdown. */
- while (srv_fast_shutdown == 0
- && trx_purge(1, srv_purge_batch_size, false));
-
-#ifdef UNIV_DEBUG
- if (srv_fast_shutdown == 0) {
- trx_commit_disallowed = true;
- }
-#endif /* UNIV_DEBUG */
-
- /* This trx_purge is called to remove any undo records (added by
- background threads) after completion of the above loop. When
- srv_fast_shutdown != 0, a large batch size can cause significant
- delay in shutdown ,so reducing the batch size to magic number 20
- (which was default in 5.5), which we hope will be sufficient to
- remove all the undo records */
-
- if (trx_purge(1, std::min(srv_purge_batch_size, 20UL), true)) {
- ut_a(srv_fast_shutdown);
- }
+ rseg_history_len = srv_do_purge(&n_total_purged);
+ } while (!srv_purge_should_exit(n_total_purged));
/* The task queue should always be empty, independent of fast
shutdown state. */
@@ -3040,11 +2954,11 @@ srv_que_task_enqueue_low(
que_thr_t* thr) /*!< in: query thread */
{
ut_ad(!srv_read_only_mode);
- mutex_enter(&srv_sys->tasks_mutex);
+ mutex_enter(&srv_sys.tasks_mutex);
- UT_LIST_ADD_LAST(srv_sys->tasks, thr);
+ UT_LIST_ADD_LAST(srv_sys.tasks, thr);
- mutex_exit(&srv_sys->tasks_mutex);
+ mutex_exit(&srv_sys.tasks_mutex);
srv_release_threads(SRV_WORKER, 1);
}
@@ -3060,11 +2974,11 @@ srv_get_task_queue_length(void)
ut_ad(!srv_read_only_mode);
- mutex_enter(&srv_sys->tasks_mutex);
+ mutex_enter(&srv_sys.tasks_mutex);
- n_tasks = UT_LIST_GET_LEN(srv_sys->tasks);
+ n_tasks = UT_LIST_GET_LEN(srv_sys.tasks);
- mutex_exit(&srv_sys->tasks_mutex);
+ mutex_exit(&srv_sys.tasks_mutex);
return(n_tasks);
}
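The srv0srv.cc changes above replace the heap-allocated srv_sys object, whose thread-slot array was carved out of the same allocation, with a single statically allocated instance holding a fixed sys_threads[32 + 1] array; srv_free() therefore keys off srv_buf_resize_event instead of a null pointer. A compressed before/after sketch of that allocation pattern, with made-up names and without the real latches:

#include <cstdlib>

struct Slot { bool in_use; };

// Before: one calloc sized for the header plus a variable-length slot
// array, with a pointer patched to point just past the header.
struct SysDyn {
    unsigned n_slots;
    Slot*    slots;
};

SysDyn* sys_dyn_create(unsigned n_slots)
{
    SysDyn* s = static_cast<SysDyn*>(
        std::calloc(1, sizeof(SysDyn) + n_slots * sizeof(Slot)));
    s->n_slots = n_slots;
    s->slots   = reinterpret_cast<Slot*>(s + 1);  // array lives behind the struct
    return s;                                     // caller must null-check and free
}

// After: one static instance with a fixed-size array.  Nothing to allocate,
// nothing to free, and shutdown code can no longer test "pointer == NULL"
// to decide whether initialization ever ran.
struct SysStatic {
    unsigned n_slots;
    Slot     slots[32 + 1];   // coordinator slots plus worker slots
};

static SysStatic sys_static;

void sys_static_init(unsigned n_purge_threads, bool read_only)
{
    sys_static.n_slots = read_only ? 0 : n_purge_threads + 1;
}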
diff --git a/storage/innobase/srv/srv0start.cc b/storage/innobase/srv/srv0start.cc
index e451fcb04b0..2aea7b8ccb3 100644
--- a/storage/innobase/srv/srv0start.cc
+++ b/storage/innobase/srv/srv0start.cc
@@ -1,6 +1,6 @@
/*****************************************************************************
-Copyright (c) 1996, 2016, Oracle and/or its affiliates. All rights reserved.
+Copyright (c) 1996, 2017, Oracle and/or its affiliates. All rights reserved.
Copyright (c) 2008, Google Inc.
Copyright (c) 2009, Percona Inc.
Copyright (c) 2013, 2017, MariaDB Corporation.
@@ -126,6 +126,9 @@ ibool srv_start_raw_disk_in_use;
/** Number of IO threads to use */
ulint srv_n_file_io_threads;
+/** UNDO tablespaces start with this space id. */
+ulint srv_undo_space_id_start;
+
/** TRUE if the server is being started, before rolling back any
incomplete transactions */
bool srv_startup_is_before_trx_rollback_phase;
@@ -136,7 +139,11 @@ bool srv_sys_tablespaces_open;
/** TRUE if the server was successfully started */
bool srv_was_started;
/** TRUE if innobase_start_or_create_for_mysql() has been called */
-static bool srv_start_has_been_called;
+static bool srv_start_has_been_called;
+
+/** Whether any undo log records can be generated */
+UNIV_INTERN bool srv_undo_sources;
+
#ifdef UNIV_DEBUG
/** InnoDB system tablespace to set during recovery */
UNIV_INTERN uint srv_sys_space_size_debug;
@@ -146,15 +153,23 @@ UNIV_INTERN uint srv_sys_space_size_debug;
determine which threads need to be stopped if we need to abort during
the initialisation step. */
enum srv_start_state_t {
+ /** No thread started */
SRV_START_STATE_NONE = 0, /*!< No thread started */
+ /** lock_wait_timeout_thread started */
SRV_START_STATE_LOCK_SYS = 1, /*!< Started lock-timeout
thread. */
- SRV_START_STATE_IO = 2, /*!< Started IO threads */
- SRV_START_STATE_MONITOR = 4, /*!< Started montior thread */
- SRV_START_STATE_MASTER = 8, /*!< Started master threadd. */
- SRV_START_STATE_PURGE = 16, /*!< Started purge thread(s) */
- SRV_START_STATE_STAT = 32 /*!< Started bufdump + dict stat
- and FTS optimize thread. */
+ /** buf_flush_page_cleaner_coordinator,
+ buf_flush_page_cleaner_worker started */
+ SRV_START_STATE_IO = 2,
+ /** srv_error_monitor_thread, srv_monitor_thread started */
+ SRV_START_STATE_MONITOR = 4,
+ /** srv_master_thread started */
+ SRV_START_STATE_MASTER = 8,
+ /** srv_purge_coordinator_thread, srv_worker_thread started */
+ SRV_START_STATE_PURGE = 16,
+ /** fil_crypt_thread, btr_defragment_thread started
+ (all background threads that can generate redo log but not undo log) */
+ SRV_START_STATE_REDO = 32
};
/** Track server thread starting phases */
@@ -165,7 +180,7 @@ SRV_SHUTDOWN_CLEANUP and then to SRV_SHUTDOWN_LAST_PHASE, and so on */
enum srv_shutdown_t srv_shutdown_state = SRV_SHUTDOWN_NONE;
/** Files comprising the system tablespace */
-static os_file_t files[1000];
+static pfs_os_file_t files[1000];
/** io_handler_thread parameters for thread identification */
static ulint n[SRV_MAX_N_IO_THREADS + 6];
@@ -182,9 +197,6 @@ static os_thread_t buf_dump_thread_handle;
static os_thread_t dict_stats_thread_handle;
/** Status variables, is thread started ?*/
static bool thread_started[SRV_MAX_N_IO_THREADS + 6 + 32] = {false};
-static bool buf_dump_thread_started = false;
-static bool dict_stats_thread_started = false;
-static bool buf_flush_page_cleaner_thread_started = false;
/** Name of srv_monitor_file */
static char* srv_monitor_file_name;
@@ -344,7 +356,7 @@ static MY_ATTRIBUTE((nonnull, warn_unused_result))
dberr_t
create_log_file(
/*============*/
- os_file_t* file, /*!< out: file handle */
+ pfs_os_file_t* file, /*!< out: file handle */
const char* name) /*!< in: log file name */
{
bool ret;
@@ -448,8 +460,7 @@ create_log_files(
fil_space_t* log_space = fil_space_create(
"innodb_redo_log", SRV_LOG_SPACE_FIRST_ID, 0, FIL_TYPE_LOG,
- NULL, /* innodb_encrypt_log works at a different level */
- true /* this is create */);
+ NULL/* innodb_encrypt_log works at a different level */);
ut_a(fil_validate());
ut_a(log_space != NULL);
@@ -475,9 +486,8 @@ create_log_files(
}
}
- if (!log_group_init(0, srv_n_log_files,
- srv_log_file_size * UNIV_PAGE_SIZE,
- SRV_LOG_SPACE_FIRST_ID)) {
+ log_init(srv_n_log_files, srv_log_file_size * UNIV_PAGE_SIZE);
+ if (!log_set_capacity()) {
return(DB_ERROR);
}
@@ -557,7 +567,7 @@ static MY_ATTRIBUTE((nonnull, warn_unused_result))
dberr_t
open_log_file(
/*==========*/
- os_file_t* file, /*!< out: file handle */
+ pfs_os_file_t* file, /*!< out: file handle */
const char* name, /*!< in: log file name */
os_offset_t* size) /*!< out: file size */
{
@@ -588,7 +598,7 @@ srv_undo_tablespace_create(
const char* name, /*!< in: tablespace name */
ulint size) /*!< in: tablespace size in pages */
{
- os_file_t fh;
+ pfs_os_file_t fh;
bool ret;
dberr_t err = DB_SUCCESS;
@@ -657,7 +667,7 @@ srv_undo_tablespace_open(
const char* name, /*!< in: tablespace file name */
ulint space_id) /*!< in: tablespace id */
{
- os_file_t fh;
+ pfs_os_file_t fh;
bool ret;
dberr_t err = DB_ERROR;
char undo_name[sizeof "innodb_undo000"];
@@ -705,7 +715,7 @@ srv_undo_tablespace_open(
space = fil_space_create(
undo_name, space_id, FSP_FLAGS_PAGE_SSIZE(),
- FIL_TYPE_TABLESPACE, NULL, true);
+ FIL_TYPE_TABLESPACE, NULL);
ut_a(fil_validate());
ut_a(space);
@@ -833,13 +843,23 @@ srv_undo_tablespaces_init(bool create_new_db)
for (i = 0; create_new_db && i < srv_undo_tablespaces; ++i) {
char name[OS_FILE_MAX_PATH];
+ ulint space_id = i + 1;
+
+ DBUG_EXECUTE_IF("innodb_undo_upgrade",
+ space_id = i + 3;);
ut_snprintf(
name, sizeof(name),
"%s%cundo%03zu",
- srv_undo_dir, OS_PATH_SEPARATOR, i + 1);
+ srv_undo_dir, OS_PATH_SEPARATOR, space_id);
+
+ if (i == 0) {
+ srv_undo_space_id_start = space_id;
+ prev_space_id = srv_undo_space_id_start - 1;
+ }
+
+ undo_tablespace_ids[i] = space_id;
- /* Undo space ids start from 1. */
err = srv_undo_tablespace_create(
name, SRV_UNDO_TABLESPACE_SIZE_IN_PAGES);
@@ -897,11 +917,10 @@ srv_undo_tablespaces_init(bool create_new_db)
srv_undo_tablespaces_active = srv_undo_tablespaces;
n_undo_tablespaces = srv_undo_tablespaces;
- for (i = 1; i <= n_undo_tablespaces; ++i) {
- undo_tablespace_ids[i - 1] = i;
+ if (n_undo_tablespaces != 0) {
+ srv_undo_space_id_start = undo_tablespace_ids[0];
+ prev_space_id = srv_undo_space_id_start - 1;
}
-
- undo_tablespace_ids[i] = ULINT_UNDEFINED;
}
/* Open all the undo tablespaces that are currently in use. If we
@@ -925,8 +944,6 @@ srv_undo_tablespaces_init(bool create_new_db)
ut_a(undo_tablespace_ids[i] != 0);
ut_a(undo_tablespace_ids[i] != ULINT_UNDEFINED);
- /* Undo space ids start from 1. */
-
err = srv_undo_tablespace_open(name, undo_tablespace_ids[i]);
if (err != DB_SUCCESS) {
@@ -937,6 +954,12 @@ srv_undo_tablespaces_init(bool create_new_db)
prev_space_id = undo_tablespace_ids[i];
+ /* Note the first undo tablespace id in case of
+ no active undo tablespace. */
+ if (0 == srv_undo_tablespaces_open++) {
+ srv_undo_space_id_start = undo_tablespace_ids[i];
+ }
+
++srv_undo_tablespaces_open;
}
@@ -964,6 +987,12 @@ srv_undo_tablespaces_init(bool create_new_db)
++srv_undo_tablespaces_open;
}
+ /* Initialize srv_undo_space_id_start=0 when there are no
+ dedicated undo tablespaces. */
+ if (n_undo_tablespaces == 0) {
+ srv_undo_space_id_start = 0;
+ }
+
/* If the user says that there are fewer than what we find we
tolerate that discrepancy but not the inverse. Because there could
be unused undo tablespaces for future use. */
@@ -993,10 +1022,11 @@ srv_undo_tablespaces_init(bool create_new_db)
mtr_start(&mtr);
/* The undo log tablespace */
- for (i = 1; i <= n_undo_tablespaces; ++i) {
+ for (i = 0; i < n_undo_tablespaces; ++i) {
fsp_header_init(
- i, SRV_UNDO_TABLESPACE_SIZE_IN_PAGES, &mtr);
+ undo_tablespace_ids[i],
+ SRV_UNDO_TABLESPACE_SIZE_IN_PAGES, &mtr);
}
mtr_commit(&mtr);
@@ -1203,10 +1233,6 @@ srv_shutdown_all_bg_threads()
{
srv_shutdown_state = SRV_SHUTDOWN_EXIT_THREADS;
- if (!srv_start_state) {
- return;
- }
-
/* All threads end up waiting for certain events. Put those events
to the signaled state. Then the threads will exit themselves after
os_event_wait(). */
@@ -1312,7 +1338,7 @@ srv_init_abort_low(
dberr_t err)
{
if (create_new_db) {
- ib::error() << "InnoDB Database creation was aborted"
+ ib::error() << "Database creation was aborted"
#ifdef UNIV_DEBUG
" at " << innobase_basename(file) << "[" << line << "]"
#endif /* UNIV_DEBUG */
@@ -1429,8 +1455,7 @@ Starts InnoDB and creates a new database if database files
are not found and the user wants.
@return DB_SUCCESS or error code */
dberr_t
-innobase_start_or_create_for_mysql(void)
-/*====================================*/
+innobase_start_or_create_for_mysql()
{
bool create_new_db = false;
lsn_t flushed_lsn;
@@ -1446,6 +1471,10 @@ innobase_start_or_create_for_mysql(void)
srv_read_only_mode = true;
}
+ if (srv_force_recovery == SRV_FORCE_NO_LOG_REDO) {
+ srv_read_only_mode = 1;
+ }
+
high_level_read_only = srv_read_only_mode
|| srv_force_recovery > SRV_FORCE_NO_TRX_UNDO;
@@ -1808,12 +1837,11 @@ innobase_start_or_create_for_mysql(void)
#endif /* UNIV_DEBUG */
fsp_init();
- log_init();
+ log_sys_init();
recv_sys_create();
recv_sys_init(buf_pool_get_curr_size());
lock_sys_create(srv_lock_table_size);
- srv_start_state_set(SRV_START_STATE_LOCK_SYS);
/* Create i/o-handler threads: */
@@ -1828,21 +1856,15 @@ innobase_start_or_create_for_mysql(void)
if (!srv_read_only_mode) {
buf_flush_page_cleaner_init();
+ buf_page_cleaner_is_active = true;
os_thread_create(buf_flush_page_cleaner_coordinator,
NULL, NULL);
- buf_flush_page_cleaner_thread_started = true;
-
for (i = 1; i < srv_n_page_cleaners; ++i) {
os_thread_create(buf_flush_page_cleaner_worker,
NULL, NULL);
}
- /* Make sure page cleaner is active. */
- while (!buf_page_cleaner_is_active) {
- os_thread_sleep(10000);
- }
-
srv_start_state_set(SRV_START_STATE_IO);
}
@@ -2044,8 +2066,7 @@ innobase_start_or_create_for_mysql(void)
"innodb_redo_log",
SRV_LOG_SPACE_FIRST_ID, 0,
FIL_TYPE_LOG,
- NULL /* no encryption yet */,
- true /* create */);
+ NULL /* no encryption yet */);
ut_a(fil_validate());
ut_a(log_space);
@@ -2064,8 +2085,9 @@ innobase_start_or_create_for_mysql(void)
}
}
- if (!log_group_init(0, i, srv_log_file_size * UNIV_PAGE_SIZE,
- SRV_LOG_SPACE_FIRST_ID)) {
+ log_init(i, srv_log_file_size * UNIV_PAGE_SIZE);
+
+ if (!log_set_capacity()) {
return(srv_init_abort(DB_ERROR));
}
}
@@ -2104,14 +2126,24 @@ files_checked:
mtr_start(&mtr);
- bool ret = fsp_header_init(0, sum_of_new_sizes, &mtr);
+ fsp_header_init(0, sum_of_new_sizes, &mtr);
+
+ compile_time_assert(TRX_SYS_SPACE == 0);
+ compile_time_assert(IBUF_SPACE_ID == 0);
+
+ ulint ibuf_root = btr_create(
+ DICT_CLUSTERED | DICT_UNIVERSAL | DICT_IBUF,
+ 0, univ_page_size, DICT_IBUF_ID_MIN,
+ dict_ind_redundant, NULL, &mtr);
mtr_commit(&mtr);
- if (!ret) {
+ if (ibuf_root == FIL_NULL) {
return(srv_init_abort(DB_ERROR));
}
+ ut_ad(ibuf_root == IBUF_TREE_ROOT_PAGE_NO);
+
/* To maintain backward compatibility we create only
the first rollback segment before the double write buffer.
All the remaining rollback segments will be created later,
@@ -2259,7 +2291,7 @@ files_checked:
const ulint sum_of_data_file_sizes
= srv_sys_space.get_sum_of_sizes();
/* Compare the system tablespace file size to what is
- stored in FSP_SIZE. In open_or_create_data_files()
+ stored in FSP_SIZE. In srv_sys_space.open_or_create()
we already checked that the file sizes match the
innodb_data_file_path specification. */
if (srv_read_only_mode
@@ -2366,7 +2398,6 @@ files_checked:
}
}
-
/* Validate a few system page types that were left
uninitialized by older versions of MySQL. */
if (!high_level_read_only) {
@@ -2463,6 +2494,7 @@ files_checked:
}
recv_recovery_rollback_active();
+ srv_startup_is_before_trx_rollback_phase = FALSE;
/* It is possible that file_format tag has never
been set. In this case we initialize it to minimum
@@ -2525,7 +2557,8 @@ files_checked:
srv_monitor_thread,
NULL, thread_ids + 4 + SRV_MAX_N_IO_THREADS);
thread_started[4 + SRV_MAX_N_IO_THREADS] = true;
- srv_start_state_set(SRV_START_STATE_MONITOR);
+ srv_start_state |= SRV_START_STATE_LOCK_SYS
+ | SRV_START_STATE_MONITOR;
}
/* Create the SYS_FOREIGN and SYS_FOREIGN_COLS system tables */
@@ -2575,6 +2608,15 @@ files_checked:
NULL, thread_ids + (1 + SRV_MAX_N_IO_THREADS));
thread_started[1 + SRV_MAX_N_IO_THREADS] = true;
srv_start_state_set(SRV_START_STATE_MASTER);
+
+ srv_undo_sources = true;
+ /* Create the dict stats gathering thread */
+ srv_dict_stats_thread_active = true;
+ dict_stats_thread_handle = os_thread_create(
+ dict_stats_thread, NULL, NULL);
+
+ /* Create the thread that will optimize the FTS sub-system. */
+ fts_optimize_init();
}
if (!srv_read_only_mode
@@ -2655,11 +2697,10 @@ files_checked:
if (!wsrep_recovery) {
#endif /* WITH_WSREP */
/* Create the buffer pool dump/load thread */
+ srv_buf_dump_thread_active = true;
buf_dump_thread_handle=
os_thread_create(buf_dump_thread, NULL, NULL);
- srv_buf_dump_thread_active = true;
- buf_dump_thread_started = true;
#ifdef WITH_WSREP
} else {
ib::warn() <<
@@ -2683,22 +2724,15 @@ files_checked:
*/
log_make_checkpoint_at(LSN_MAX, TRUE);
- /* Create the dict stats gathering thread */
- dict_stats_thread_handle = os_thread_create(
- dict_stats_thread, NULL, NULL);
- srv_dict_stats_thread_active = true;
- dict_stats_thread_started = true;
-
- /* Create the thread that will optimize the FTS sub-system. */
- fts_optimize_init();
-
/* Init data for datafile scrub threads */
btr_scrub_init();
/* Initialize online defragmentation. */
btr_defragment_init();
+ btr_defragment_thread_active = true;
+ os_thread_create(btr_defragment_thread, NULL, NULL);
- srv_start_state_set(SRV_START_STATE_STAT);
+ srv_start_state |= SRV_START_STATE_REDO;
}
/* Create the buffer pool resize thread */
@@ -2738,16 +2772,19 @@ srv_fts_close(void)
}
#endif
-/****************************************************************//**
-Shuts down background threads that can generate undo pages. */
+/** Shut down background threads that can generate undo log. */
void
-srv_shutdown_bg_undo_sources(void)
-/*===========================*/
+srv_shutdown_bg_undo_sources()
{
- if (srv_start_state_is_set(SRV_START_STATE_STAT)) {
+ if (srv_undo_sources) {
ut_ad(!srv_read_only_mode);
fts_optimize_shutdown();
dict_stats_shutdown();
+ while (row_get_background_drop_list_len_low()) {
+ srv_wake_master_thread();
+ os_thread_yield();
+ }
+ srv_undo_sources = false;
}
}
@@ -2756,10 +2793,7 @@ void
innodb_shutdown()
{
ut_ad(!srv_running);
-
- if (srv_fast_shutdown) {
- srv_shutdown_bg_undo_sources();
- }
+ ut_ad(!srv_undo_sources);
/* 1. Flush the buffer pool to disk, write the current lsn to
the tablespace header(s), and copy all log data to archive.
@@ -2812,14 +2846,14 @@ innodb_shutdown()
dict_stats_thread_deinit();
}
- if (srv_start_state_is_set(SRV_START_STATE_STAT)) {
+ if (srv_start_state_is_set(SRV_START_STATE_REDO)) {
ut_ad(!srv_read_only_mode);
/* srv_shutdown_bg_undo_sources() already invoked
fts_optimize_shutdown(); dict_stats_shutdown(); */
fil_crypt_threads_cleanup();
btr_scrub_cleanup();
- /* FIXME: call btr_defragment_shutdown(); */
+ btr_defragment_shutdown();
}
/* This must be disabled before closing the buffer pool
@@ -2885,10 +2919,6 @@ innodb_shutdown()
buf_pool_free(srv_buf_pool_instances);
}
- /* 6. Free the thread management resoruces. */
- os_thread_free();
-
- /* 7. Free the synchronisation infrastructure. */
sync_check_close();
if (dict_foreign_err_file) {
@@ -2905,85 +2935,6 @@ innodb_shutdown()
srv_start_has_been_called = false;
}
-#if 0 // TODO: Enable this in WL#6608
-/********************************************************************
-Signal all per-table background threads to shutdown, and wait for them to do
-so. */
-static
-void
-srv_shutdown_table_bg_threads(void)
-/*===============================*/
-{
- dict_table_t* table;
- dict_table_t* first;
- dict_table_t* last = NULL;
-
- mutex_enter(&dict_sys->mutex);
-
- /* Signal all threads that they should stop. */
- table = UT_LIST_GET_FIRST(dict_sys->table_LRU);
- first = table;
- while (table) {
- dict_table_t* next;
- fts_t* fts = table->fts;
-
- if (fts != NULL) {
- fts_start_shutdown(table, fts);
- }
-
- next = UT_LIST_GET_NEXT(table_LRU, table);
-
- if (!next) {
- last = table;
- }
-
- table = next;
- }
-
- /* We must release dict_sys->mutex here; if we hold on to it in the
- loop below, we will deadlock if any of the background threads try to
- acquire it (for example, the FTS thread by calling que_eval_sql).
-
- Releasing it here and going through dict_sys->table_LRU without
- holding it is safe because:
-
- a) MySQL only starts the shutdown procedure after all client
- threads have been disconnected and no new ones are accepted, so no
- new tables are added or old ones dropped.
-
- b) Despite its name, the list is not LRU, and the order stays
- fixed.
-
- To safeguard against the above assumptions ever changing, we store
- the first and last items in the list above, and then check that
- they've stayed the same below. */
-
- mutex_exit(&dict_sys->mutex);
-
- /* Wait for the threads of each table to stop. This is not inside
- the above loop, because by signaling all the threads first we can
- overlap their shutting down delays. */
- table = UT_LIST_GET_FIRST(dict_sys->table_LRU);
- ut_a(first == table);
- while (table) {
- dict_table_t* next;
- fts_t* fts = table->fts;
-
- if (fts != NULL) {
- fts_shutdown(table, fts);
- }
-
- next = UT_LIST_GET_NEXT(table_LRU, table);
-
- if (table == last) {
- ut_a(!next);
- }
-
- table = next;
- }
-}
-#endif
-
/** Get the meta-data filename from the table name for a
single-table tablespace.
@param[in] table table object
diff --git a/storage/innobase/sync/sync0debug.cc b/storage/innobase/sync/sync0debug.cc
index 4fff24a77f1..11743e14be2 100644
--- a/storage/innobase/sync/sync0debug.cc
+++ b/storage/innobase/sync/sync0debug.cc
@@ -1498,8 +1498,6 @@ sync_latch_meta_init()
LATCH_ADD_MUTEX(SYNC_ARRAY_MUTEX, SYNC_NO_ORDER_CHECK,
sync_array_mutex_key);
- LATCH_ADD_MUTEX(THREAD_MUTEX, SYNC_NO_ORDER_CHECK, thread_mutex_key);
-
LATCH_ADD_MUTEX(ZIP_PAD_MUTEX, SYNC_NO_ORDER_CHECK, zip_pad_mutex_key);
LATCH_ADD_MUTEX(OS_AIO_READ_MUTEX, SYNC_NO_ORDER_CHECK,
diff --git a/storage/innobase/trx/trx0purge.cc b/storage/innobase/trx/trx0purge.cc
index b21ec75c3a6..e317a38815b 100644
--- a/storage/innobase/trx/trx0purge.cc
+++ b/storage/innobase/trx/trx0purge.cc
@@ -1,6 +1,6 @@
/*****************************************************************************
-Copyright (c) 1996, 2016, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 1996, 2017, Oracle and/or its affiliates. All Rights Reserved.
Copyright (c) 2017, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify it under
@@ -61,7 +61,6 @@ trx_undo_rec_t trx_purge_dummy_rec;
#ifdef UNIV_DEBUG
my_bool srv_purge_view_update_only_debug;
-bool trx_commit_disallowed = false;
#endif /* UNIV_DEBUG */
/** Sentinel value */
@@ -148,12 +147,10 @@ TrxUndoRsegsIterator::set_next()
ut_a(purge_sys->rseg->last_page_no != FIL_NULL);
ut_ad(purge_sys->rseg->last_trx_no == m_trx_undo_rsegs.get_trx_no());
- /* We assume in purge of externally stored fields that
- space id is in the range of UNDO tablespace space ids
- unless space is system tablespace */
- ut_a(purge_sys->rseg->space <= srv_undo_tablespaces_open
- || is_system_tablespace(
- purge_sys->rseg->space));
+ /* We assume in purge of externally stored fields that space id is
+ in the range of UNDO tablespace space ids */
+ ut_a(purge_sys->rseg->space == TRX_SYS_SPACE
+ || srv_is_undo_tablespace(purge_sys->rseg->space));
ut_a(purge_sys->iter.trx_no <= purge_sys->rseg->last_trx_no);
@@ -284,7 +281,18 @@ trx_purge_add_update_undo_to_history(
hist_size + undo->size, MLOG_4BYTES, mtr);
}
- ut_ad(!trx_commit_disallowed);
+ /* Before any transaction-generating background threads or the
+ purge have been started, recv_recovery_rollback_active() can
+ start transactions in row_merge_drop_temp_indexes() and
+ fts_drop_orphaned_tables(), and roll back recovered transactions.
+ After the purge thread has been given permission to exit,
+ in fast shutdown, we may roll back transactions (trx->undo_no==0)
+ in THD::cleanup() invoked from unlink_thd(). */
+ ut_ad(srv_undo_sources
+ || ((srv_startup_is_before_trx_rollback_phase
+ || trx_rollback_or_clean_is_active)
+ && purge_sys->state == PURGE_STATE_INIT)
+ || (trx->undo_no == 0 && srv_fast_shutdown));
/* Add the log as the first in the history list */
flst_add_first(rseg_header + TRX_RSEG_HISTORY,
diff --git a/storage/innobase/trx/trx0rec.cc b/storage/innobase/trx/trx0rec.cc
index 47d24b63114..6d3ade289bb 100644
--- a/storage/innobase/trx/trx0rec.cc
+++ b/storage/innobase/trx/trx0rec.cc
@@ -1,7 +1,7 @@
/*****************************************************************************
Copyright (c) 1996, 2016, Oracle and/or its affiliates. All Rights Reserved.
-Copyright (c) 2017, MariaDB Corporation
+Copyright (c) 2017, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -1839,22 +1839,22 @@ transaction.
dberr_t
trx_undo_report_row_operation(
/*==========================*/
- ulint flags, /*!< in: if BTR_NO_UNDO_LOG_FLAG bit is
- set, does nothing */
- ulint op_type, /*!< in: TRX_UNDO_INSERT_OP or
- TRX_UNDO_MODIFY_OP */
que_thr_t* thr, /*!< in: query thread */
dict_index_t* index, /*!< in: clustered index */
const dtuple_t* clust_entry, /*!< in: in the case of an insert,
index entry to insert into the
- clustered index, otherwise NULL */
+ clustered index; in updates,
+ may contain a clustered index
+ record tuple that also contains
+ virtual columns of the table;
+ otherwise, NULL */
const upd_t* update, /*!< in: in the case of an update,
the update vector, otherwise NULL */
ulint cmpl_info, /*!< in: compiler info on secondary
index updates */
- const rec_t* rec, /*!< in: in case of an update or delete
+ const rec_t* rec, /*!< in: case of an update or delete
marking, the record in the clustered
- index, otherwise NULL */
+ index; NULL if insert */
const ulint* offsets, /*!< in: rec_get_offsets(rec) */
roll_ptr_t* roll_ptr) /*!< out: rollback pointer to the
inserted undo log record,
@@ -1870,18 +1870,9 @@ trx_undo_report_row_operation(
#endif /* UNIV_DEBUG */
ut_a(dict_index_is_clust(index));
+ ut_ad(!update || rec);
ut_ad(!rec || rec_offs_validate(rec, index, offsets));
ut_ad(!srv_read_only_mode);
- ut_ad(op_type == TRX_UNDO_INSERT_OP || op_type == TRX_UNDO_MODIFY_OP);
- ut_ad((op_type != TRX_UNDO_INSERT_OP)
- || (clust_entry && !update && !rec));
-
- if (flags & BTR_NO_UNDO_LOG_FLAG) {
-
- *roll_ptr = 0;
-
- return(DB_SUCCESS);
- }
trx = thr_get_trx(thr);
@@ -1902,7 +1893,7 @@ trx_undo_report_row_operation(
not listed there. */
trx->mod_tables.insert(index->table);
- pundo = op_type == TRX_UNDO_INSERT_OP
+ pundo = !rec
? &trx->rsegs.m_redo.insert_undo
: &trx->rsegs.m_redo.update_undo;
rseg = trx->rsegs.m_redo.rseg;
@@ -1913,7 +1904,7 @@ trx_undo_report_row_operation(
if (*pundo) {
err = DB_SUCCESS;
- } else if (op_type == TRX_UNDO_INSERT_OP || is_temp) {
+ } else if (!rec || is_temp) {
err = trx_undo_assign_undo(trx, rseg, pundo, TRX_UNDO_INSERT);
} else {
err = trx_undo_assign_undo(trx, rseg, pundo, TRX_UNDO_UPDATE);
@@ -1937,23 +1928,14 @@ trx_undo_report_row_operation(
buf_block_dbg_add_level(undo_block, SYNC_TRX_UNDO_PAGE);
do {
- page_t* undo_page;
- ulint offset;
-
- undo_page = buf_block_get_frame(undo_block);
ut_ad(page_no == undo_block->page.id.page_no());
-
- switch (op_type) {
- case TRX_UNDO_INSERT_OP:
- offset = trx_undo_page_report_insert(
- undo_page, trx, index, clust_entry, &mtr);
- break;
- default:
- ut_ad(op_type == TRX_UNDO_MODIFY_OP);
- offset = trx_undo_page_report_modify(
+ page_t* undo_page = buf_block_get_frame(undo_block);
+ ulint offset = !rec
+ ? trx_undo_page_report_insert(
+ undo_page, trx, index, clust_entry, &mtr)
+ : trx_undo_page_report_modify(
undo_page, trx, index, rec, offsets, update,
cmpl_info, clust_entry, &mtr);
- }
if (UNIV_UNLIKELY(offset == 0)) {
/* The record did not fit on the page. We erase the
@@ -2007,8 +1989,7 @@ trx_undo_report_row_operation(
mutex_exit(&trx->undo_mutex);
*roll_ptr = trx_undo_build_roll_ptr(
- op_type == TRX_UNDO_INSERT_OP,
- rseg->id, page_no, offset);
+ !rec, rseg->id, page_no, offset);
return(DB_SUCCESS);
}
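The trx0rec.cc hunk above removes the explicit flags/op_type arguments from trx_undo_report_row_operation(): an insert is now recognized by rec == NULL and a modify by rec != NULL, with temporary tables always taking the insert undo log. A tiny runnable sketch of that convention as it picks the undo log type (simplified types, not the real signatures):

#include <cassert>

struct Record {};   // stand-in for rec_t

enum class UndoLog { INSERT, UPDATE };

// Mirrors the "!rec || is_temp" branch above: inserts and all
// temporary-table changes use the insert undo log, everything else
// (updates, delete-marks) uses the update undo log.
UndoLog undo_log_for(const Record* rec, bool is_temp_table)
{
    return (rec == nullptr || is_temp_table) ? UndoLog::INSERT
                                             : UndoLog::UPDATE;
}

int main()
{
    Record r;
    assert(undo_log_for(nullptr, false) == UndoLog::INSERT);  // plain insert
    assert(undo_log_for(&r, true)       == UndoLog::INSERT);  // temporary table change
    assert(undo_log_for(&r, false)      == UndoLog::UPDATE);  // update / delete-mark
    return 0;
}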
diff --git a/storage/innobase/trx/trx0roll.cc b/storage/innobase/trx/trx0roll.cc
index 69f01e64b59..d6857b892da 100644
--- a/storage/innobase/trx/trx0roll.cc
+++ b/storage/innobase/trx/trx0roll.cc
@@ -869,7 +869,6 @@ DECLARE_THREAD(trx_rollback_or_clean_all_recovered)(
os_thread_create */
{
my_thread_init();
-
ut_ad(!srv_read_only_mode);
#ifdef UNIV_PFS_THREAD
diff --git a/storage/innobase/trx/trx0rseg.cc b/storage/innobase/trx/trx0rseg.cc
index 663566cf26f..b48f3e18f1b 100644
--- a/storage/innobase/trx/trx0rseg.cc
+++ b/storage/innobase/trx/trx0rseg.cc
@@ -263,7 +263,9 @@ trx_rseg_array_init()
}
/** Create a persistent rollback segment.
-@param[in] space_id system or undo tablespace id */
+@param[in] space_id system or undo tablespace id
+@return pointer to new rollback segment
+@retval NULL on failure */
trx_rseg_t*
trx_rseg_create(ulint space_id)
{
diff --git a/storage/innobase/trx/trx0sys.cc b/storage/innobase/trx/trx0sys.cc
index d511aae48ec..82bcfd5055a 100644
--- a/storage/innobase/trx/trx0sys.cc
+++ b/storage/innobase/trx/trx0sys.cc
@@ -1,6 +1,6 @@
/*****************************************************************************
-Copyright (c) 1996, 2016, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 1996, 2017, Oracle and/or its affiliates. All Rights Reserved.
Copyright (c) 2017, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify it under
@@ -910,7 +910,8 @@ trx_sys_create_rsegs()
/* Tablespace 0 is the system tablespace.
Dedicated undo log tablespaces start from 1. */
ulint space = srv_undo_tablespaces > 0
- ? (i % srv_undo_tablespaces) + 1
+ ? (i % srv_undo_tablespaces)
+ + srv_undo_space_id_start
: TRX_SYS_SPACE;
if (!trx_rseg_create(space)) {
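With srv_undo_space_id_start in play, trx_sys_create_rsegs() above spreads rollback segments round-robin over the undo tablespaces starting at that id instead of assuming ids 1..n. A small worked example of the mapping (the concrete numbers are illustrative, chosen to match the innodb_undo_upgrade debug scenario in the srv0start.cc hunk, where the first undo space id is 3):

#include <cstdio>

const unsigned TRX_SYS_SPACE = 0;   // system tablespace id

// Same arithmetic as the hunk above: rollback segment i lands in the
// system tablespace when no dedicated undo tablespaces exist, otherwise
// it is assigned round-robin starting at undo_space_id_start.
unsigned rseg_space(unsigned i, unsigned n_undo_tablespaces,
                    unsigned undo_space_id_start)
{
    return n_undo_tablespaces > 0
        ? (i % n_undo_tablespaces) + undo_space_id_start
        : TRX_SYS_SPACE;
}

int main()
{
    // With 4 undo tablespaces whose first space id is 3, rsegs 0..5
    // map to spaces 3, 4, 5, 6, 3, 4.
    for (unsigned i = 0; i < 6; i++) {
        std::printf("rseg %u -> space %u\n", i, rseg_space(i, 4, 3));
    }
    return 0;
}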
diff --git a/storage/innobase/usr/usr0sess.cc b/storage/innobase/usr/usr0sess.cc
index 85eca604d80..55ce9500e5c 100644
--- a/storage/innobase/usr/usr0sess.cc
+++ b/storage/innobase/usr/usr0sess.cc
@@ -1,6 +1,7 @@
/*****************************************************************************
Copyright (c) 1996, 2016, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 2017, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
diff --git a/storage/innobase/ut/ut0ut.cc b/storage/innobase/ut/ut0ut.cc
index e7489861473..4eb9d45b0dc 100644
--- a/storage/innobase/ut/ut0ut.cc
+++ b/storage/innobase/ut/ut0ut.cc
@@ -38,8 +38,6 @@ Created 5/11/1994 Heikki Tuuri
#include <string>
#include "log.h"
-/** A constant to prevent the compiler from optimizing ut_delay() away. */
-ibool ut_always_false = FALSE;
#ifdef _WIN32
/*****************************************************************//**
NOTE: The Windows epoch starts from 1601/01/01 whereas the Unix
@@ -839,7 +837,7 @@ error::~error()
fatal::~fatal()
{
sql_print_error("[FATAL] InnoDB: %s", m_oss.str().c_str());
- ut_error;
+ abort();
}
error_or_warn::~error_or_warn()
@@ -853,8 +851,11 @@ error_or_warn::~error_or_warn()
fatal_or_error::~fatal_or_error()
{
- sql_print_error("InnoDB: %s", m_oss.str().c_str());
- ut_a(!m_fatal);
+ sql_print_error(m_fatal ? "[FATAL] InnoDB: %s" : "InnoDB: %s",
+ m_oss.str().c_str());
+ if (m_fatal) {
+ abort();
+ }
}
} // namespace ib
diff --git a/storage/maria/ha_maria.cc b/storage/maria/ha_maria.cc
index b21d631be6a..3694fe591ce 100644
--- a/storage/maria/ha_maria.cc
+++ b/storage/maria/ha_maria.cc
@@ -834,7 +834,10 @@ extern "C" {
int _ma_killed_ptr(HA_CHECK *param)
{
- return thd_killed((THD*)param->thd);
+ if (likely(thd_killed((THD*)param->thd)) == 0)
+ return 0;
+ my_errno= HA_ERR_ABORTED_BY_USER;
+ return 1;
}
@@ -1669,8 +1672,11 @@ int ha_maria::repair(THD *thd, HA_CHECK *param, bool do_optimize)
}
if (error && file->create_unique_index_by_sort &&
share->state.dupp_key != MAX_KEY)
+ {
+ my_errno= HA_ERR_FOUND_DUPP_KEY;
print_keydup_error(table, &table->key_info[share->state.dupp_key],
MYF(0));
+ }
}
else
{
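These ha_maria.cc hunks make sure the abort paths leave a concrete error code behind (HA_ERR_ABORTED_BY_USER when the thread was killed, HA_ERR_FOUND_DUPP_KEY on a duplicate during repair) instead of returning with my_errno unset. A hedged sketch of the kill-check shape, with stand-ins for the server symbols:

    // Stand-ins for the handler-layer pieces used above; the names and the
    // error value are illustrative, not the real server definitions.
    static int g_my_errno = 0;                    // plays the role of my_errno
    static const int HA_ERR_ABORTED_BY_USER_ = 158;
    static bool fake_thd_killed(const void *) { return false; }  // stub for thd_killed()

    // Returns 1 and records why the operation stopped, so callers that only
    // see "check/repair failed" can still report a meaningful error.
    static int killed_ptr_sketch(const void *thd)
    {
      if (!fake_thd_killed(thd))
        return 0;
      g_my_errno = HA_ERR_ABORTED_BY_USER_;
      return 1;
    }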
diff --git a/storage/maria/ma_check.c b/storage/maria/ma_check.c
index f92774a0321..84fba63a2f3 100644
--- a/storage/maria/ma_check.c
+++ b/storage/maria/ma_check.c
@@ -1364,6 +1364,7 @@ static int check_dynamic_record(HA_CHECK *param, MARIA_HA *info, int extend,
pos=block_info.filepos+block_info.block_len;
if (block_info.rec_len > (uint) share->base.max_pack_length)
{
+ my_errno= HA_ERR_WRONG_IN_RECORD;
_ma_check_print_error(param,"Found too long record (%lu) at %s",
(ulong) block_info.rec_len,
llstr(start_recpos,llbuff));
@@ -4220,6 +4221,7 @@ int maria_repair_parallel(HA_CHECK *param, register MARIA_HA *info,
printf("Data records: %s\n", llstr(start_records, llbuff));
}
+ bzero(&new_data_cache, sizeof(new_data_cache));
if (initialize_variables_for_repair(param, &sort_info, &tmp_sort_param, info,
rep_quick, &backup_share))
goto err;
@@ -4995,6 +4997,7 @@ static int sort_get_next_record(MARIA_SORT_PARAM *sort_param)
param->error_printed=1;
param->retry_repair=1;
param->testflag|=T_RETRY_WITHOUT_QUICK;
+ my_errno= HA_ERR_WRONG_IN_RECORD;
DBUG_RETURN(1); /* Something wrong with data */
}
b_type= _ma_get_block_info(info, &block_info,-1,pos);
@@ -5268,6 +5271,7 @@ static int sort_get_next_record(MARIA_SORT_PARAM *sort_param)
param->error_printed=1;
param->retry_repair=1;
param->testflag|=T_RETRY_WITHOUT_QUICK;
+ my_errno= HA_ERR_WRONG_IN_RECORD;
DBUG_RETURN(1); /* Something wrong with data */
}
sort_param->start_recpos=sort_param->pos;
diff --git a/storage/maria/ma_extra.c b/storage/maria/ma_extra.c
index 0cf5b2822b1..1db81a0ee1b 100644
--- a/storage/maria/ma_extra.c
+++ b/storage/maria/ma_extra.c
@@ -157,6 +157,7 @@ int maria_extra(MARIA_HA *info, enum ha_extra_function function,
if (info->s->data_file_type != DYNAMIC_RECORD)
break;
/* Remove read/write cache if dynamic rows */
+ /* fall through */
case HA_EXTRA_NO_CACHE:
if (info->opt_flag & (READ_CACHE_USED | WRITE_CACHE_USED))
{
@@ -313,7 +314,7 @@ int maria_extra(MARIA_HA *info, enum ha_extra_function function,
share->state.open_count= 1;
share->changed= 1;
_ma_mark_file_changed_now(share);
- /* Fall trough */
+ /* Fall through */
case HA_EXTRA_PREPARE_FOR_RENAME:
{
my_bool do_flush= MY_TEST(function != HA_EXTRA_PREPARE_FOR_DROP);
diff --git a/storage/maria/ma_loghandler.c b/storage/maria/ma_loghandler.c
index 86e51cc3526..e143ed793e4 100644
--- a/storage/maria/ma_loghandler.c
+++ b/storage/maria/ma_loghandler.c
@@ -946,6 +946,7 @@ static File create_logfile_by_number_no_cache(uint32 file_no)
{
DBUG_PRINT("error", ("Error %d during syncing directory '%s'",
errno, log_descriptor.directory));
+ mysql_file_close(file, MYF(0));
translog_stop_writing();
DBUG_RETURN(-1);
}
@@ -1447,17 +1448,16 @@ LSN translog_get_file_max_lsn_stored(uint32 file)
if (translog_read_file_header(&info, fd))
{
DBUG_PRINT("error", ("Can't read file header"));
- DBUG_RETURN(LSN_ERROR);
+ info.max_lsn= LSN_ERROR;
}
if (mysql_file_close(fd, MYF(MY_WME)))
{
DBUG_PRINT("error", ("Can't close file"));
- DBUG_RETURN(LSN_ERROR);
+ info.max_lsn= LSN_ERROR;
}
- DBUG_PRINT("info", ("Max lsn: (%lu,0x%lx)",
- LSN_IN_PARTS(info.max_lsn)));
+ DBUG_PRINT("info", ("Max lsn: (%lu,0x%lx)", LSN_IN_PARTS(info.max_lsn)));
DBUG_RETURN(info.max_lsn);
}
}
@@ -1621,13 +1621,15 @@ static my_bool translog_create_new_file()
if (allocate_dynamic(&log_descriptor.open_files,
log_descriptor.max_file - log_descriptor.min_file + 2))
goto error_lock;
- if ((file->handler.file=
- create_logfile_by_number_no_cache(file_no)) == -1)
+
+  /* this call just expands the array */
+ if (insert_dynamic(&log_descriptor.open_files, (uchar*)&file))
+ goto error_lock;
+
+ if ((file->handler.file= create_logfile_by_number_no_cache(file_no)) == -1)
goto error_lock;
translog_file_init(file, file_no, 0);
- /* this call just expand the array */
- insert_dynamic(&log_descriptor.open_files, (uchar*)&file);
log_descriptor.max_file++;
{
char *start= (char*) dynamic_element(&log_descriptor.open_files, 0,
@@ -1661,6 +1663,7 @@ error_lock:
mysql_rwlock_unlock(&log_descriptor.open_files_lock);
error:
translog_stop_writing();
+ my_free(file);
DBUG_RETURN(1);
}
@@ -3962,11 +3965,14 @@ my_bool translog_init_with_table(const char *directory,
/* Start new log system from scratch */
log_descriptor.horizon= MAKE_LSN(start_file_num,
TRANSLOG_PAGE_SIZE); /* header page */
- if ((file->handler.file=
- create_logfile_by_number_no_cache(start_file_num)) == -1)
- goto err;
translog_file_init(file, start_file_num, 0);
if (insert_dynamic(&log_descriptor.open_files, (uchar*)&file))
+ {
+ my_free(file);
+ goto err;
+ }
+ if ((file->handler.file=
+ create_logfile_by_number_no_cache(start_file_num)) == -1)
goto err;
log_descriptor.min_file= log_descriptor.max_file= start_file_num;
if (translog_write_file_header())
@@ -7789,8 +7795,24 @@ void translog_flush_buffers(TRANSLOG_ADDRESS *lsn,
translog_force_current_buffer_to_finish();
translog_buffer_unlock(buffer);
}
- else if (log_descriptor.bc.buffer->prev_last_lsn != LSN_IMPOSSIBLE)
+ else
{
+ if (log_descriptor.bc.buffer->last_lsn == LSN_IMPOSSIBLE)
+ {
+ /*
+ In this case both last_lsn & prev_last_lsn are LSN_IMPOSSIBLE
+        otherwise it would go into the first IF because LSN_IMPOSSIBLE is less
+        than any real LSN and cmp_translog_addr(*lsn,
+ log_descriptor.bc.buffer->prev_last_lsn) will be TRUE
+ */
+ DBUG_ASSERT(log_descriptor.bc.buffer->prev_last_lsn ==
+ LSN_IMPOSSIBLE);
+ DBUG_PRINT("info", ("There is no LSNs yet generated => do nothing"));
+ translog_unlock();
+ DBUG_VOID_RETURN;
+ }
+
+ DBUG_ASSERT(log_descriptor.bc.buffer->prev_last_lsn != LSN_IMPOSSIBLE);
/* fix lsn if it was horizon */
*lsn= log_descriptor.bc.buffer->prev_last_lsn;
DBUG_PRINT("info", ("LSN to flush fixed to prev last lsn: (%lu,0x%lx)",
@@ -7799,13 +7821,6 @@ void translog_flush_buffers(TRANSLOG_ADDRESS *lsn,
TRANSLOG_BUFFERS_NO);
translog_unlock();
}
- else if (log_descriptor.bc.buffer->last_lsn == LSN_IMPOSSIBLE)
- {
- DBUG_PRINT("info", ("There is no LSNs yet generated => do nothing"));
- translog_unlock();
- DBUG_VOID_RETURN;
- }
-
/* flush buffers */
*sent_to_disk= translog_get_sent_to_disk();
if (cmp_translog_addr(*lsn, *sent_to_disk) > 0)
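The ma_loghandler.c hunks are all about error-path ownership: close the descriptor that was just created when the directory sync fails, register the open-files slot before the file exists, and free the file object on the shared error label. A compact sketch of that acquire-then-unwind ordering with generic names (not the translog API):

    #include <cstdio>
    #include <cstdlib>

    // Each step is only undone if it actually succeeded, and doing the pure
    // bookkeeping step before creating the file keeps the error label simple.
    static int create_and_register(void)
    {
      void *slot_owner = std::malloc(64);   // stands in for the log file struct
      std::FILE *fd = nullptr;
      if (slot_owner == nullptr)
        goto err;
      // Register/allocate bookkeeping first: if this fails, nothing external
      // (no descriptor, no on-disk file) has to be rolled back yet.
      fd = std::fopen("logfile.sketch", "wb");
      if (fd == nullptr)
        goto err;
      return 0;
    err:
      if (fd != nullptr)
        std::fclose(fd);                    // mirrors the added mysql_file_close()
      std::free(slot_owner);                // mirrors the added my_free(file)
      return 1;
    }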
diff --git a/storage/maria/ma_packrec.c b/storage/maria/ma_packrec.c
index 5243d55428c..861023a0064 100644
--- a/storage/maria/ma_packrec.c
+++ b/storage/maria/ma_packrec.c
@@ -1445,7 +1445,7 @@ uint _ma_pack_get_block_info(MARIA_HA *maria, MARIA_BIT_BUFF *bit_buff,
maria->blob_length=info->blob_len;
}
info->filepos=filepos+head_length;
- if (file > 0)
+ if (file >= 0)
{
info->offset=MY_MIN(info->rec_len, ref_length - head_length);
memcpy(*rec_buff_p, header + head_length, info->offset);
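The one-character ma_packrec.c change matters because 0 is a legal file descriptor; only negative values mean "no file". Sketch of the corrected guard:

    // fd 0 is valid (it is merely usually stdin), so "do we have a file"
    // must be tested as fd >= 0 rather than fd > 0.
    static bool have_file(int fd) { return fd >= 0; }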
diff --git a/storage/maria/ma_pagecache.c b/storage/maria/ma_pagecache.c
index 1eebfac03f8..caaaacfc9b1 100644
--- a/storage/maria/ma_pagecache.c
+++ b/storage/maria/ma_pagecache.c
@@ -1187,14 +1187,14 @@ void end_pagecache(PAGECACHE *pagecache, my_bool cleanup)
pagecache->blocks_changed= 0;
}
- DBUG_PRINT("status", ("used: %zu changed: %zu w_requests: %lu "
- "writes: %lu r_requests: %lu reads: %lu",
- (ulong) pagecache->blocks_used,
- (ulong) pagecache->global_blocks_changed,
- (ulong) pagecache->global_cache_w_requests,
- (ulong) pagecache->global_cache_write,
- (ulong) pagecache->global_cache_r_requests,
- (ulong) pagecache->global_cache_read));
+ DBUG_PRINT("status", ("used: %zu changed: %zu w_requests: %llu "
+ "writes: %llu r_requests: %llu reads: %llu",
+ pagecache->blocks_used,
+ pagecache->global_blocks_changed,
+ pagecache->global_cache_w_requests,
+ pagecache->global_cache_write,
+ pagecache->global_cache_r_requests,
+ pagecache->global_cache_read));
if (cleanup)
{
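The ma_pagecache.c hunk drops the casts to ulong and prints the 64-bit counters with %llu, so large values are no longer truncated in the debug trace. A tiny illustration of matching the conversion to the argument type (PRIu64 shown as the fixed-width alternative):

    #include <cinttypes>
    #include <cstdio>

    static void print_counter(unsigned long long reads)
    {
      // Match the conversion to the argument type instead of truncating
      // through a cast: %llu for unsigned long long, PRIu64 for uint64_t.
      std::printf("reads: %llu\n", reads);
      std::printf("reads: %" PRIu64 "\n", static_cast<std::uint64_t>(reads));
    }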
diff --git a/storage/maria/ma_recovery.c b/storage/maria/ma_recovery.c
index 08d306a89be..d0e0210db64 100644
--- a/storage/maria/ma_recovery.c
+++ b/storage/maria/ma_recovery.c
@@ -1988,7 +1988,7 @@ prototype_redo_exec_hook(UNDO_KEY_INSERT)
const HA_KEYSEG *keyseg= info->s->keyinfo[keynr].seg;
ulonglong value;
char llbuf[22];
- uchar *to;
+ uchar reversed[MARIA_MAX_KEY_BUFF], *to;
tprint(tracef, " state older than record\n");
/* we read the record to find the auto_increment value */
enlarge_buffer(rec);
@@ -2005,7 +2005,6 @@ prototype_redo_exec_hook(UNDO_KEY_INSERT)
if (keyseg->flag & HA_SWAP_KEY)
{
/* We put key from log record to "data record" packing format... */
- uchar reversed[MARIA_MAX_KEY_BUFF];
uchar *key_ptr= to;
uchar *key_end= key_ptr + keyseg->length;
to= reversed + keyseg->length;
@@ -3061,7 +3060,7 @@ static MARIA_HA *get_MARIA_HA_from_REDO_record(const
case LOGREC_REDO_INDEX:
case LOGREC_REDO_INDEX_FREE_PAGE:
index_page_redo_entry= 1;
- /* Fall trough*/
+ /* Fall through */
case LOGREC_REDO_INSERT_ROW_HEAD:
case LOGREC_REDO_INSERT_ROW_TAIL:
case LOGREC_REDO_PURGE_ROW_HEAD:
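Hoisting `reversed` out of the inner block in ma_recovery.c fixes a dangling pointer: `to` could be left pointing into a buffer whose scope had already ended. A minimal reproduction of the hazard and the fix, using generic code rather than the recovery routine (the caller is assumed to guarantee len <= KEY_BUFF and non-overlapping buffers):

    #include <cstring>

    enum { KEY_BUFF = 32 };

    static void copy_key(unsigned char *dst, const unsigned char *src,
                         unsigned len, bool swap)
    {
      unsigned char reversed[KEY_BUFF];   // fix: declared at function scope,
                                          // so it outlives every use of 'to'
      const unsigned char *to = src;
      if (swap) {
        // The bug pattern: if 'reversed' were declared inside this block,
        // 'to' would dangle as soon as the block closed.
        for (unsigned i = 0; i < len; i++)
          reversed[len - 1 - i] = src[i];
        to = reversed;
      }
      std::memcpy(dst, to, len);          // 'to' still points at live storage
    }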
diff --git a/storage/myisam/ha_myisam.cc b/storage/myisam/ha_myisam.cc
index 210111d89ff..86f3bbb9bc5 100644
--- a/storage/myisam/ha_myisam.cc
+++ b/storage/myisam/ha_myisam.cc
@@ -577,7 +577,10 @@ extern "C" {
int killed_ptr(HA_CHECK *param)
{
- return thd_killed((THD*)param->thd);
+ if (likely(thd_killed((THD*)param->thd)) == 0)
+ return 0;
+ my_errno= HA_ERR_ABORTED_BY_USER;
+ return 1;
}
void mi_check_print_error(HA_CHECK *param, const char *fmt,...)
@@ -850,6 +853,10 @@ int ha_myisam::open(const char *name, int mode, uint test_if_locked)
/* Count statistics of usage for newly open normal files */
if (file->s->reopen == 1 && ! (test_if_locked & HA_OPEN_TMP_TABLE))
{
+ /* use delay_key_write from .frm, not .MYI */
+ file->s->delay_key_write= delay_key_write_options == DELAY_KEY_WRITE_ALL ||
+ (delay_key_write_options == DELAY_KEY_WRITE_ON &&
+ table->s->db_create_options & HA_OPTION_DELAY_KEY_WRITE);
if (file->s->delay_key_write)
feature_files_opened_with_delayed_keys++;
}
@@ -1214,6 +1221,11 @@ int ha_myisam::repair(THD *thd, HA_CHECK &param, bool do_optimize)
if (remap)
mi_munmap_file(file);
#endif
+ /*
+    The following is to catch errors when my_errno is not set properly
+    during repair
+ */
+ my_errno= 0;
if (mi_test_if_sort_rep(file,file->state->records,tmp_key_map,0) &&
(local_testflag & T_REP_BY_SORT))
{
@@ -1236,8 +1248,11 @@ int ha_myisam::repair(THD *thd, HA_CHECK &param, bool do_optimize)
}
if (error && file->create_unique_index_by_sort &&
share->state.dupp_key != MAX_KEY)
+ {
+ my_errno= HA_ERR_FOUND_DUPP_KEY;
print_keydup_error(table, &table->key_info[share->state.dupp_key],
MYF(0));
+ }
}
else
{
@@ -1328,6 +1343,7 @@ int ha_myisam::assign_to_keycache(THD* thd, HA_CHECK_OPT *check_opt)
{
KEY_CACHE *new_key_cache= check_opt->key_cache;
const char *errmsg= 0;
+ char buf[STRING_BUFFER_USUAL_SIZE];
int error= HA_ADMIN_OK;
ulonglong map;
TABLE_LIST *table_list= table->pos_in_table_list;
@@ -1344,7 +1360,6 @@ int ha_myisam::assign_to_keycache(THD* thd, HA_CHECK_OPT *check_opt)
if ((error= mi_assign_to_key_cache(file, map, new_key_cache)))
{
- char buf[STRING_BUFFER_USUAL_SIZE];
my_snprintf(buf, sizeof(buf),
"Failed to flush to index file (errno: %d)", error);
errmsg= buf;
@@ -2357,10 +2372,8 @@ bool ha_myisam::check_if_incompatible_data(HA_CREATE_INFO *create_info,
table_changes & IS_EQUAL_PACK_LENGTH) // Not implemented yet
return COMPATIBLE_DATA_NO;
- if ((options & (HA_OPTION_PACK_RECORD | HA_OPTION_CHECKSUM |
- HA_OPTION_DELAY_KEY_WRITE)) !=
- (create_info->table_options & (HA_OPTION_PACK_RECORD | HA_OPTION_CHECKSUM |
- HA_OPTION_DELAY_KEY_WRITE)))
+ if ((options & (HA_OPTION_PACK_RECORD | HA_OPTION_CHECKSUM)) !=
+ (create_info->table_options & (HA_OPTION_PACK_RECORD | HA_OPTION_CHECKSUM)))
return COMPATIBLE_DATA_NO;
return COMPATIBLE_DATA_YES;
}
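The ha_myisam.cc open() hunk recomputes delay_key_write from the server option and the table's .frm create flags instead of trusting whatever happened to be stored in the .MYI header. A sketch of that resolution; the enum values and flag bit below are stand-ins for the real server symbols:

    // Stand-ins for the server-side settings referenced above.
    enum delay_kw_mode { DKW_NONE, DKW_ON, DKW_ALL };
    static const unsigned OPT_DELAY_KEY_WRITE = 0x1;   // illustrative .frm option bit

    // true if index blocks for this table may be flushed lazily: either the
    // server forces it for all tables, or the table itself asked for it.
    static bool resolve_delay_key_write(delay_kw_mode server_mode,
                                        unsigned table_create_options)
    {
      return server_mode == DKW_ALL ||
             (server_mode == DKW_ON &&
              (table_create_options & OPT_DELAY_KEY_WRITE) != 0);
    }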
diff --git a/storage/myisam/mi_check.c b/storage/myisam/mi_check.c
index e0016eca43f..b65bb8b78bd 100644
--- a/storage/myisam/mi_check.c
+++ b/storage/myisam/mi_check.c
@@ -3126,6 +3126,7 @@ static int sort_key_read(MI_SORT_PARAM *sort_param, void *key)
}
if (info->state->records == sort_info->max_records)
{
+ my_errno= HA_ERR_WRONG_IN_RECORD;
mi_check_print_error(sort_info->param,
"Key %d - Found too many records; Can't continue",
sort_param->key+1);
@@ -3332,6 +3333,7 @@ static int sort_get_next_record(MI_SORT_PARAM *sort_param)
param->error_printed=1;
param->retry_repair=1;
param->testflag|=T_RETRY_WITHOUT_QUICK;
+ my_errno= HA_ERR_WRONG_IN_RECORD;
DBUG_RETURN(1); /* Something wrong with data */
}
b_type=_mi_get_block_info(&block_info,-1,pos);
@@ -3590,6 +3592,7 @@ static int sort_get_next_record(MI_SORT_PARAM *sort_param)
param->error_printed=1;
param->retry_repair=1;
param->testflag|=T_RETRY_WITHOUT_QUICK;
+ my_errno= HA_ERR_WRONG_IN_RECORD;
DBUG_RETURN(1); /* Something wrong with data */
}
sort_param->start_recpos=sort_param->pos;
diff --git a/storage/myisam/mi_extra.c b/storage/myisam/mi_extra.c
index 3d6049c0172..1487592d2d6 100644
--- a/storage/myisam/mi_extra.c
+++ b/storage/myisam/mi_extra.c
@@ -150,6 +150,7 @@ int mi_extra(MI_INFO *info, enum ha_extra_function function, void *extra_arg)
if (info->s->data_file_type != DYNAMIC_RECORD)
break;
/* Remove read/write cache if dynamic rows */
+ /* fall through */
case HA_EXTRA_NO_CACHE:
if (info->opt_flag & (READ_CACHE_USED | WRITE_CACHE_USED))
{
@@ -262,7 +263,7 @@ int mi_extra(MI_INFO *info, enum ha_extra_function function, void *extra_arg)
//share->deleting= TRUE;
share->global_changed= FALSE; /* force writing changed flag */
_mi_mark_file_changed(info);
- /* Fall trough */
+ /* Fall through */
case HA_EXTRA_PREPARE_FOR_RENAME:
DBUG_ASSERT(!share->temporary);
mysql_mutex_lock(&THR_LOCK_myisam);
diff --git a/storage/myisam/mi_locking.c b/storage/myisam/mi_locking.c
index 531b800c63e..1921926463e 100644
--- a/storage/myisam/mi_locking.c
+++ b/storage/myisam/mi_locking.c
@@ -29,7 +29,7 @@ static void mi_update_status_with_lock(MI_INFO *info);
int mi_lock_database(MI_INFO *info, int lock_type)
{
- int error;
+ int error, mark_crashed= 0;
uint count;
MYISAM_SHARE *share=info->s;
DBUG_ENTER("mi_lock_database");
@@ -52,6 +52,7 @@ int mi_lock_database(MI_INFO *info, int lock_type)
}
error= 0;
+ DBUG_EXECUTE_IF ("mi_lock_database_failure", error= EINVAL;);
mysql_mutex_lock(&share->intern_lock);
if (share->kfile >= 0) /* May only be false on windows */
{
@@ -75,17 +76,15 @@ int mi_lock_database(MI_INFO *info, int lock_type)
&share->dirty_part_map,
FLUSH_KEEP))
{
- error=my_errno;
+ mark_crashed= error=my_errno;
mi_print_error(info->s, HA_ERR_CRASHED);
- mi_mark_crashed(info); /* Mark that table must be checked */
}
if (info->opt_flag & (READ_CACHE_USED | WRITE_CACHE_USED))
{
if (end_io_cache(&info->rec_cache))
{
- error=my_errno;
+ mark_crashed= error=my_errno;
mi_print_error(info->s, HA_ERR_CRASHED);
- mi_mark_crashed(info);
}
}
if (!count)
@@ -110,22 +109,19 @@ int mi_lock_database(MI_INFO *info, int lock_type)
share->state.unique= info->last_unique= info->this_unique;
share->state.update_count= info->last_loop= ++info->this_loop;
if (mi_state_info_write(share->kfile, &share->state, 1))
- error=my_errno;
+ mark_crashed= error=my_errno;
share->changed=0;
if (myisam_flush)
{
if (mysql_file_sync(share->kfile, MYF(0)))
- error= my_errno;
+ mark_crashed= error= my_errno;
if (mysql_file_sync(info->dfile, MYF(0)))
- error= my_errno;
+ mark_crashed= error= my_errno;
}
else
share->not_flushed=1;
if (error)
- {
mi_print_error(info->s, HA_ERR_CRASHED);
- mi_mark_crashed(info);
- }
}
if (info->lock_type != F_EXTRA_LCK)
{
@@ -260,6 +256,8 @@ int mi_lock_database(MI_INFO *info, int lock_type)
}
#endif
mysql_mutex_unlock(&share->intern_lock);
+ if (mark_crashed)
+ mi_mark_crashed(info);
DBUG_RETURN(error);
} /* mi_lock_database */
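mi_lock_database() now records that the table must be marked crashed and performs the marking once, after share->intern_lock has been released, instead of calling mi_mark_crashed() from inside the critical section. A generic sketch of that defer-past-the-lock shape, with std::mutex standing in for the share lock:

    #include <mutex>

    struct share_sketch {
      std::mutex intern_lock;
      bool crashed = false;
    };

    static void mark_crashed(share_sketch &s) { s.crashed = true; }  // stands in for mi_mark_crashed()

    static int unlock_table(share_sketch &s, bool flush_failed)
    {
      int error = 0;
      bool mark = false;
      {
        std::lock_guard<std::mutex> guard(s.intern_lock);
        if (flush_failed) {
          error = 1;
          mark = true;        // remember the failure, do not call out while locked
        }
        // ... the rest of the state writing stays under the lock ...
      }
      if (mark)
        mark_crashed(s);      // the heavier work happens after the lock is gone
      return error;
    }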
diff --git a/storage/myisam/mi_open.c b/storage/myisam/mi_open.c
index ef47265a18b..41b0e18da02 100644
--- a/storage/myisam/mi_open.c
+++ b/storage/myisam/mi_open.c
@@ -151,7 +151,7 @@ MI_INFO *mi_open(const char *name, int mode, uint open_flags)
}
share->mode=open_mode;
errpos=1;
- if (mysql_file_read(kfile, share->state.header.file_version, head_length,
+ if (mysql_file_read(kfile, (uchar*)&share->state.header, head_length,
MYF(MY_NABP)))
{
my_errno= HA_ERR_NOT_A_TABLE;
diff --git a/storage/myisam/mysql-test/storage_engine/alter_tablespace.rdiff b/storage/myisam/mysql-test/storage_engine/alter_tablespace.rdiff
index 4215af58011..a8c78b117a9 100644
--- a/storage/myisam/mysql-test/storage_engine/alter_tablespace.rdiff
+++ b/storage/myisam/mysql-test/storage_engine/alter_tablespace.rdiff
@@ -13,7 +13,7 @@
-2
-ALTER TABLE t1 DISCARD TABLESPACE;
-SELECT a FROM t1;
--ERROR HY000: Tablespace has been discarded for table 't1'
+-ERROR HY000: Tablespace has been discarded for table `t1`
-ALTER TABLE t1 IMPORT TABLESPACE;
-Warnings:
-Warning 1810 IO Read error: (2, No such file or directory) Error opening './test/t1.cfg', will attempt to import without schema verification
diff --git a/storage/myisammrg/mysql-test/storage_engine/alter_tablespace.rdiff b/storage/myisammrg/mysql-test/storage_engine/alter_tablespace.rdiff
index 19ca1a1b6e1..e5462f8cb1f 100644
--- a/storage/myisammrg/mysql-test/storage_engine/alter_tablespace.rdiff
+++ b/storage/myisammrg/mysql-test/storage_engine/alter_tablespace.rdiff
@@ -13,7 +13,7 @@
-2
-ALTER TABLE t1 DISCARD TABLESPACE;
-SELECT a FROM t1;
--ERROR HY000: Tablespace has been discarded for table 't1'
+-ERROR HY000: Tablespace has been discarded for table `t1`
-ALTER TABLE t1 IMPORT TABLESPACE;
-Warnings:
-Warning 1810 IO Read error: (2, No such file or directory) Error opening './test/t1.cfg', will attempt to import without schema verification
diff --git a/storage/myisammrg/mysql-test/storage_engine/create_table.rdiff b/storage/myisammrg/mysql-test/storage_engine/create_table.rdiff
index 007fc0906a7..4c98e62625a 100644
--- a/storage/myisammrg/mysql-test/storage_engine/create_table.rdiff
+++ b/storage/myisammrg/mysql-test/storage_engine/create_table.rdiff
@@ -29,7 +29,7 @@
-SHOW CREATE TABLE t1;
-Table Create Table
-t1 CREATE TABLE `t1` (
-- `1` bigint(20) NOT NULL DEFAULT '0'
+- `1` bigint(20) NOT NULL DEFAULT 0
-) ENGINE=<STORAGE_ENGINE> DEFAULT CHARSET=latin1
-SELECT * FROM t1;
-1
diff --git a/storage/myisammrg/mysql-test/storage_engine/disabled.def b/storage/myisammrg/mysql-test/storage_engine/disabled.def
new file mode 100644
index 00000000000..227e33029d8
--- /dev/null
+++ b/storage/myisammrg/mysql-test/storage_engine/disabled.def
@@ -0,0 +1,3 @@
+insert_delayed : MDEV-12880 - INSERT DELAYED is not detected as inapplicable to a table under lock
+lock_concurrent : MDEV-12882 - Assertion failure
+select_high_prio : MDEV-12885 - MDL_SHARED_READ_ONLY is taken instead of MDL_SHARED_READ
diff --git a/storage/myisammrg/mysql-test/storage_engine/parts/repair_table.rdiff b/storage/myisammrg/mysql-test/storage_engine/parts/repair_table.rdiff
index d7bf99fd674..4346545abcf 100644
--- a/storage/myisammrg/mysql-test/storage_engine/parts/repair_table.rdiff
+++ b/storage/myisammrg/mysql-test/storage_engine/parts/repair_table.rdiff
@@ -1,6 +1,6 @@
---- repair_table.result 2013-01-23 01:35:44.388267080 +0400
-+++ repair_table.reject 2013-01-23 03:16:26.468307847 +0400
-@@ -1,234 +1,114 @@
+--- suite/storage_engine/parts/repair_table.result 2017-05-20 03:58:19.451939791 +0300
++++ ../storage/myisammrg/mysql-test/storage_engine/parts/repair_table.reject 2017-05-24 02:42:31.130318292 +0300
+@@ -1,234 +1,115 @@
call mtr.add_suppression("Table '.*t1.*' is marked as crashed and should be repaired");
DROP TABLE IF EXISTS t1, t2;
CREATE TABLE t1 (a <INT_COLUMN>, b <CHAR_COLUMN>) ENGINE=<STORAGE_ENGINE> <CUSTOM_TABLE_OPTIONS> PARTITION BY HASH(a) PARTITIONS 2;
@@ -144,6 +144,7 @@
call mtr.add_suppression("MySQL thread id .*, query id .* localhost.*root Checking table");
call mtr.add_suppression(" '\..test.t1'");
call mtr.add_suppression("Couldn't repair table: test.t1");
++call mtr.add_suppression("Table 't1' is marked as crashed.*");
CREATE TABLE t1 (a <INT_COLUMN>, b <CHAR_COLUMN>, <CUSTOM_INDEX> (a)) ENGINE=<STORAGE_ENGINE> <CUSTOM_TABLE_OPTIONS> PARTITION BY HASH(a) PARTITIONS 2;
+ERROR HY000: Engine cannot be used in partitioned tables
+# ERROR: Statement ended with errno 1572, errname ER_PARTITION_MERGE_ERROR (expected to succeed)
diff --git a/storage/myisammrg/mysql-test/storage_engine/repair_table.rdiff b/storage/myisammrg/mysql-test/storage_engine/repair_table.rdiff
index 9ff8f906511..79f6c7040e0 100644
--- a/storage/myisammrg/mysql-test/storage_engine/repair_table.rdiff
+++ b/storage/myisammrg/mysql-test/storage_engine/repair_table.rdiff
@@ -1,5 +1,5 @@
---- repair_table.result 2013-01-23 01:26:05.995538460 +0400
-+++ repair_table.reject 2013-01-23 02:50:55.035560564 +0400
+--- suite/storage_engine/repair_table.result 2017-05-24 01:09:07.274213486 +0300
++++ suite/storage_engine/repair_table.reject 2017-05-24 01:10:25.466214949 +0300
@@ -4,56 +4,50 @@
CREATE TABLE t2 (a <INT_COLUMN>, b <CHAR_COLUMN>) ENGINE=<STORAGE_ENGINE> <CUSTOM_TABLE_OPTIONS>;
REPAIR TABLE t1;
@@ -71,7 +71,7 @@
DROP TABLE t1, t2;
call mtr.add_suppression("Got an error from thread_id=.*");
call mtr.add_suppression("MySQL thread id .*, query id .* localhost.*root Checking table");
-@@ -62,45 +56,32 @@
+@@ -63,45 +57,32 @@
CREATE TABLE t1 (a <INT_COLUMN>, b <CHAR_COLUMN>, <CUSTOM_INDEX> (a)) ENGINE=<STORAGE_ENGINE> <CUSTOM_TABLE_OPTIONS>;
REPAIR TABLE t1;
Table Op Msg_type Msg_text
@@ -104,7 +104,7 @@
-test.t1 check error Corrupt
+test.t1 check status OK
SELECT a,b FROM t1;
--ERROR HY000: Incorrect key file for table 't1'; try to repair it
+-ERROR HY000: Index for table 't1' is corrupt; try to repair it
-# Statement ended with one of expected results (0,ER_NOT_KEYFILE,144).
-# If you got a difference in error message, just add it to rdiff file
-INSERT INTO t1 (a,b) VALUES (14,'n'),(15,'o');
diff --git a/storage/myisammrg/mysql-test/storage_engine/tbl_opt_data_index_dir.rdiff b/storage/myisammrg/mysql-test/storage_engine/tbl_opt_data_dir.rdiff
index e6055278b3c..671e26ec617 100644
--- a/storage/myisammrg/mysql-test/storage_engine/tbl_opt_data_index_dir.rdiff
+++ b/storage/myisammrg/mysql-test/storage_engine/tbl_opt_data_dir.rdiff
@@ -1,18 +1,18 @@
---- tbl_opt_data_index_dir.result 2013-01-22 22:05:05.246633000 +0400
-+++ tbl_opt_data_index_dir.reject 2013-01-23 02:50:59.951498762 +0400
-@@ -4,7 +4,7 @@
+--- suite/storage_engine/tbl_opt_data_dir.result 2017-05-24 00:21:15.550159778 +0300
++++ ../storage/myisammrg/mysql-test/storage_engine/tbl_opt_data_dir.reject 2017-05-24 00:25:45.506164827 +0300
+@@ -5,7 +5,7 @@
t1 CREATE TABLE `t1` (
`a` int(11) DEFAULT NULL,
`b` char(8) DEFAULT NULL
--) ENGINE=<STORAGE_ENGINE> DEFAULT CHARSET=latin1 DATA DIRECTORY='<DATA_DIR>' INDEX DIRECTORY='<INDEX_DIR>'
+-) ENGINE=<STORAGE_ENGINE> DEFAULT CHARSET=latin1 DATA DIRECTORY='<DATA_DIR_1>'
+) ENGINE=<STORAGE_ENGINE> DEFAULT CHARSET=latin1 INSERT_METHOD=LAST UNION=(`mrg`.`t1`)
+ # For ALTER TABLE the option is ignored
+ # Running ALTER TABLE .. DATA DIRECTORY = <>
Warnings:
- Warning 1618 <INDEX DIRECTORY> option ignored
- SHOW CREATE TABLE t1;
-@@ -12,5 +12,5 @@
+@@ -15,5 +15,5 @@
t1 CREATE TABLE `t1` (
`a` int(11) DEFAULT NULL,
`b` char(8) DEFAULT NULL
--) ENGINE=<STORAGE_ENGINE> DEFAULT CHARSET=latin1 DATA DIRECTORY='<DATA_DIR>' INDEX DIRECTORY='<INDEX_DIR>'
+-) ENGINE=<STORAGE_ENGINE> DEFAULT CHARSET=latin1 DATA DIRECTORY='<DATA_DIR_1>'
+) ENGINE=<STORAGE_ENGINE> DEFAULT CHARSET=latin1 INSERT_METHOD=LAST UNION=(`mrg`.`t1`)
DROP TABLE t1;
diff --git a/storage/myisammrg/mysql-test/storage_engine/tbl_opt_index_dir.rdiff b/storage/myisammrg/mysql-test/storage_engine/tbl_opt_index_dir.rdiff
new file mode 100644
index 00000000000..ca025861f68
--- /dev/null
+++ b/storage/myisammrg/mysql-test/storage_engine/tbl_opt_index_dir.rdiff
@@ -0,0 +1,18 @@
+--- suite/storage_engine/tbl_opt_index_dir.result 2017-05-24 00:21:15.550159778 +0300
++++ ../storage/myisammrg/mysql-test/storage_engine/tbl_opt_index_dir.reject 2017-05-24 00:25:45.506164827 +0300
+@@ -5,7 +5,7 @@
+ t1 CREATE TABLE `t1` (
+ `a` int(11) DEFAULT NULL,
+ `b` char(8) DEFAULT NULL
+-) ENGINE=<STORAGE_ENGINE> DEFAULT CHARSET=latin1 INDEX DIRECTORY='<INDEX_DIR_1>'
++) ENGINE=<STORAGE_ENGINE> DEFAULT CHARSET=latin1 INSERT_METHOD=LAST UNION=(`mrg`.`t1`)
+ # For ALTER TABLE the option is ignored
+ # Running ALTER TABLE .. INDEX DIRECTORY = <>
+ Warnings:
+@@ -15,5 +15,5 @@
+ t1 CREATE TABLE `t1` (
+ `a` int(11) DEFAULT NULL,
+ `b` char(8) DEFAULT NULL
+-) ENGINE=<STORAGE_ENGINE> DEFAULT CHARSET=latin1 INDEX DIRECTORY='<INDEX_DIR_1>'
++) ENGINE=<STORAGE_ENGINE> DEFAULT CHARSET=latin1 INSERT_METHOD=LAST UNION=(`mrg`.`t1`)
+ DROP TABLE t1;
diff --git a/storage/myisammrg/mysql-test/storage_engine/tbl_opt_row_format.rdiff b/storage/myisammrg/mysql-test/storage_engine/tbl_opt_row_format.rdiff
index f7e0905d4e7..6c756e7b8e1 100644
--- a/storage/myisammrg/mysql-test/storage_engine/tbl_opt_row_format.rdiff
+++ b/storage/myisammrg/mysql-test/storage_engine/tbl_opt_row_format.rdiff
@@ -1,17 +1,33 @@
---- tbl_opt_row_format.result 2013-01-22 22:05:05.246633000 +0400
-+++ tbl_opt_row_format.reject 2013-01-23 02:51:04.743438518 +0400
-@@ -5,12 +5,12 @@
+--- ../storage/myisammrg/mysql-test/storage_engine/tbl_opt_row_format.result~ 2017-05-24 00:50:44.254192857 +0300
++++ ../storage/myisammrg/mysql-test/storage_engine/tbl_opt_row_format.reject 2017-05-24 00:50:44.334192859 +0300
+@@ -5,26 +5,26 @@
+ t1 CREATE TABLE `t1` (
+ `a` int(11) DEFAULT NULL,
+ `b` char(8) DEFAULT NULL
+-) ENGINE=<STORAGE_ENGINE> DEFAULT CHARSET=latin1 ROW_FORMAT=DYNAMIC
++) ENGINE=<STORAGE_ENGINE> DEFAULT CHARSET=latin1 ROW_FORMAT=DYNAMIC INSERT_METHOD=LAST UNION=(`mrg`.`t1`)
+ ALTER TABLE t1 ROW_FORMAT=FIXED;
+ SHOW CREATE TABLE t1;
+ Table Create Table
t1 CREATE TABLE `t1` (
`a` int(11) DEFAULT NULL,
`b` char(8) DEFAULT NULL
-) ENGINE=<STORAGE_ENGINE> DEFAULT CHARSET=latin1 ROW_FORMAT=FIXED
+) ENGINE=<STORAGE_ENGINE> DEFAULT CHARSET=latin1 ROW_FORMAT=FIXED INSERT_METHOD=LAST UNION=(`mrg`.`t1`)
- ALTER TABLE t1 ROW_FORMAT=DYNAMIC;
+ ALTER TABLE t1 ROW_FORMAT=PAGE;
SHOW CREATE TABLE t1;
Table Create Table
t1 CREATE TABLE `t1` (
`a` int(11) DEFAULT NULL,
`b` char(8) DEFAULT NULL
--) ENGINE=<STORAGE_ENGINE> DEFAULT CHARSET=latin1 ROW_FORMAT=DYNAMIC
-+) ENGINE=<STORAGE_ENGINE> DEFAULT CHARSET=latin1 ROW_FORMAT=DYNAMIC INSERT_METHOD=LAST UNION=(`mrg`.`t1`)
+-) ENGINE=<STORAGE_ENGINE> DEFAULT CHARSET=latin1 ROW_FORMAT=PAGE
++) ENGINE=<STORAGE_ENGINE> DEFAULT CHARSET=latin1 ROW_FORMAT=PAGE INSERT_METHOD=LAST UNION=(`mrg`.`t1`)
+ ALTER TABLE t1 ROW_FORMAT=COMPACT;
+ SHOW CREATE TABLE t1;
+ Table Create Table
+ t1 CREATE TABLE `t1` (
+ `a` int(11) DEFAULT NULL,
+ `b` char(8) DEFAULT NULL
+-) ENGINE=<STORAGE_ENGINE> DEFAULT CHARSET=latin1 ROW_FORMAT=COMPACT
++) ENGINE=<STORAGE_ENGINE> DEFAULT CHARSET=latin1 ROW_FORMAT=COMPACT INSERT_METHOD=LAST UNION=(`mrg`.`t1`)
DROP TABLE t1;
diff --git a/storage/myisammrg/mysql-test/storage_engine/vcol.rdiff b/storage/myisammrg/mysql-test/storage_engine/vcol.rdiff
index c7372326fad..d537967ef99 100644
--- a/storage/myisammrg/mysql-test/storage_engine/vcol.rdiff
+++ b/storage/myisammrg/mysql-test/storage_engine/vcol.rdiff
@@ -6,12 +6,12 @@
-SHOW COLUMNS IN t1;
-Field Type Null Key Default Extra
-a int(11) # #
--b int(11) # # VIRTUAL
+-b int(11) # # VIRTUAL GENERATED
-INSERT INTO t1 (a) VALUES (1),(2);
-INSERT INTO t1 (a,b) VALUES (3,3),(4,4);
-Warnings:
--Warning 1906 The value specified for computed column 'b' in table 't1' ignored
--Warning 1906 The value specified for computed column 'b' in table 't1' ignored
+-Warning 1906 The value specified for generated column 'b' in table 't1' ignored
+-Warning 1906 The value specified for generated column 'b' in table 't1' ignored
-SELECT a,b FROM t1;
-a b
-1 2
@@ -23,12 +23,12 @@
-SHOW COLUMNS IN t1;
-Field Type Null Key Default Extra
-a int(11) # #
--b int(11) # # PERSISTENT
+-b int(11) # # STORED GENERATED
-INSERT INTO t1 (a) VALUES (1),(2);
-INSERT INTO t1 (a,b) VALUES (3,3),(4,4);
-Warnings:
--Warning 1906 The value specified for computed column 'b' in table 't1' ignored
--Warning 1906 The value specified for computed column 'b' in table 't1' ignored
+-Warning 1906 The value specified for generated column 'b' in table 't1' ignored
+-Warning 1906 The value specified for generated column 'b' in table 't1' ignored
-SELECT a,b FROM t1;
-a b
-1 2
@@ -40,12 +40,12 @@
-SHOW COLUMNS IN t1;
-Field Type Null Key Default Extra
-a int(11) # #
--b int(11) # # VIRTUAL
+-b int(11) # # VIRTUAL GENERATED
-INSERT INTO t1 (a) VALUES (1),(2);
-INSERT INTO t1 (a,b) VALUES (3,3),(4,4);
-Warnings:
--Warning 1906 The value specified for computed column 'b' in table 't1' ignored
--Warning 1906 The value specified for computed column 'b' in table 't1' ignored
+-Warning 1906 The value specified for generated column 'b' in table 't1' ignored
+-Warning 1906 The value specified for generated column 'b' in table 't1' ignored
-SELECT a,b FROM t1;
-a b
-1 2
@@ -57,12 +57,12 @@
-SHOW COLUMNS IN t1;
-Field Type Null Key Default Extra
-a int(11) # #
--b int(11) # # PERSISTENT
+-b int(11) # # STORED GENERATED
-INSERT INTO t1 (a) VALUES (1),(2);
-INSERT INTO t1 (a,b) VALUES (3,3),(4,4);
-Warnings:
--Warning 1906 The value specified for computed column 'b' in table 't1' ignored
--Warning 1906 The value specified for computed column 'b' in table 't1' ignored
+-Warning 1906 The value specified for generated column 'b' in table 't1' ignored
+-Warning 1906 The value specified for generated column 'b' in table 't1' ignored
-SELECT a,b FROM t1;
-a b
-1 2
@@ -70,11 +70,11 @@
-3 4
-4 5
-DROP TABLE t1;
-+ERROR HY000: MRG_MyISAM storage engine does not support computed columns
-+# ERROR: Statement ended with errno 1910, errname ER_UNSUPPORTED_ENGINE_FOR_VIRTUAL_COLUMNS (expected to succeed)
++ERROR HY000: MRG_MyISAM storage engine does not support generated columns
++# ERROR: Statement ended with errno 1910, errname ER_UNSUPPORTED_ENGINE_FOR_GENERATED_COLUMNS (expected to succeed)
+# ------------ UNEXPECTED RESULT ------------
+# [ CREATE TABLE t1 (a INT(11) /*!*/ /*Custom column options*/, b INT(11) /*!*/ /*Custom column options*/ GENERATED ALWAYS AS (a+1)) ENGINE=MRG_MYISAM /*!*/ /*Custom table options*/ UNION(mrg.t1) INSERT_METHOD=LAST ]
-+# The statement|command finished with ER_UNSUPPORTED_ENGINE_FOR_VIRTUAL_COLUMNS.
++# The statement|command finished with ER_UNSUPPORTED_ENGINE_FOR_GENERATED_COLUMNS.
+# Virtual columns or the mix could be unsupported|malfunctioning, or the problem was caused by previous errors.
+# You can change the engine code, or create an rdiff, or disable the test by adding it to disabled.def.
+# Further in this test, the message might sometimes be suppressed; a part of the test might be skipped.
diff --git a/storage/perfschema/ha_perfschema.cc b/storage/perfschema/ha_perfschema.cc
index 0ee08238318..7ea68670304 100644
--- a/storage/perfschema/ha_perfschema.cc
+++ b/storage/perfschema/ha_perfschema.cc
@@ -225,7 +225,7 @@ maria_declare_plugin(perfschema)
0x0001,
pfs_status_vars,
NULL,
- "5.6.33",
+ "5.6.36",
MariaDB_PLUGIN_MATURITY_STABLE
}
maria_declare_plugin_end;
diff --git a/storage/perfschema/pfs.cc b/storage/perfschema/pfs.cc
index 58704c87b74..c411025d148 100644
--- a/storage/perfschema/pfs.cc
+++ b/storage/perfschema/pfs.cc
@@ -3953,9 +3953,11 @@ static PSI_file* end_file_open_wait_v1(PSI_file_locker *locker,
switch (state->m_operation)
{
case PSI_FILE_STAT:
+ case PSI_FILE_RENAME:
break;
case PSI_FILE_STREAM_OPEN:
case PSI_FILE_CREATE:
+ case PSI_FILE_OPEN:
if (result != NULL)
{
PFS_file_class *klass= reinterpret_cast<PFS_file_class*> (state->m_class);
@@ -3966,7 +3968,6 @@ static PSI_file* end_file_open_wait_v1(PSI_file_locker *locker,
state->m_file= reinterpret_cast<PSI_file*> (pfs_file);
}
break;
- case PSI_FILE_OPEN:
default:
DBUG_ASSERT(false);
break;
diff --git a/storage/perfschema/pfs_digest.cc b/storage/perfschema/pfs_digest.cc
index 3330c29795f..86b05f37fd2 100644
--- a/storage/perfschema/pfs_digest.cc
+++ b/storage/perfschema/pfs_digest.cc
@@ -1,4 +1,4 @@
-/* Copyright (c) 2008, 2016, Oracle and/or its affiliates. All rights reserved.
+/* Copyright (c) 2008, 2017, Oracle and/or its affiliates. All rights reserved.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@@ -257,10 +257,11 @@ search:
if (safe_index == 0)
{
/* Record [0] is reserved. */
- safe_index= 1;
+ continue;
}
/* Add a new record in digest stat array. */
+ DBUG_ASSERT(safe_index < digest_max);
pfs= &statements_digest_stat_array[safe_index];
if (pfs->m_lock.is_free())
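In pfs_digest.cc the wrap-around search now skips the reserved record [0] with continue instead of silently redirecting to index 1, and the new assertion documents that the probe never leaves the array. A small sketch of a skip-the-reserved-slot probe loop (placeholder array, not the PFS structures):

    // Find a free slot in a fixed array whose slot 0 is reserved, probing
    // from a hash-derived start position and wrapping around.
    static int find_free_slot(const bool *in_use, unsigned size, unsigned start)
    {
      for (unsigned attempts = 0; attempts < size; attempts++) {
        unsigned idx = (start + attempts) % size;
        if (idx == 0)
          continue;             // slot 0 is reserved; keep probing
        if (!in_use[idx])
          return (int) idx;     // caller still has to claim it atomically
      }
      return -1;                // array is full
    }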
diff --git a/storage/perfschema/unittest/pfs-t.cc b/storage/perfschema/unittest/pfs-t.cc
index f76b1aa2e75..b8814f2ad2d 100644
--- a/storage/perfschema/unittest/pfs-t.cc
+++ b/storage/perfschema/unittest/pfs-t.cc
@@ -1,4 +1,4 @@
-/* Copyright (c) 2008, 2015, Oracle and/or its affiliates. All rights reserved.
+/* Copyright (c) 2008, 2017, Oracle and/or its affiliates. All rights reserved.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@@ -27,6 +27,8 @@
#include "stub_print_error.h"
#include "stub_pfs_defaults.h"
+void unload_performance_schema();
+
/* test helpers, to simulate the setup */
void setup_thread(PSI_thread *t, bool enabled)
@@ -126,7 +128,7 @@ void test_bootstrap()
psi_2= boot->get_interface(PSI_VERSION_2);
ok(psi_2 == NULL, "version 2");
- shutdown_performance_schema();
+ unload_performance_schema();
}
/*
@@ -183,6 +185,27 @@ PSI * load_perfschema()
return (PSI*) psi;
}
+void unload_performance_schema()
+{
+ cleanup_table_share();
+ cleanup_instruments();
+ cleanup_sync_class();
+ cleanup_thread_class();
+ cleanup_table_share();
+ cleanup_file_class();
+ cleanup_stage_class();
+ cleanup_statement_class();
+ cleanup_socket_class();
+ cleanup_events_waits_history_long();
+ cleanup_events_stages_history_long();
+ cleanup_events_statements_history_long();
+ cleanup_table_share_hash();
+ cleanup_file_hash();
+ cleanup_digest();
+
+ shutdown_performance_schema();
+}
+
void test_bad_registration()
{
PSI *psi;
@@ -581,8 +604,7 @@ void test_bad_registration()
psi->register_socket("X", bad_socket_3, 1);
ok(dummy_socket_key == 2, "assigned key");
-
- shutdown_performance_schema();
+ unload_performance_schema();
}
void test_init_disabled()
@@ -1016,7 +1038,7 @@ void test_init_disabled()
socket_A1= psi->init_socket(99, NULL, NULL, 0);
ok(socket_A1 == NULL, "broken socket key not instrumented");
- shutdown_performance_schema();
+ unload_performance_schema();
}
void test_locker_disabled()
@@ -1316,14 +1338,14 @@ void test_locker_disabled()
/* Pretend the socket does not have a thread owner */
/* ---------------------------------------------- */
- psi->delete_current_thread();
socket_class_A->m_enabled= true;
socket_A1= psi->init_socket(socket_key_A, NULL, NULL, 0);
ok(socket_A1 != NULL, "instrumented");
/* Socket thread owner has not been set */
socket_locker= psi->start_socket_wait(&socket_state, socket_A1, PSI_SOCKET_SEND, 12, "foo.cc", 12);
- ok(socket_locker == NULL, "no locker (no thread owner)");
-
+ ok(socket_locker != NULL, "locker (owner not used)");
+ psi->end_socket_wait(socket_locker, 10);
+
/* Pretend the running thread is not instrumented */
/* ---------------------------------------------- */
@@ -1351,7 +1373,7 @@ void test_locker_disabled()
socket_locker= psi->start_socket_wait(&socket_state, socket_A1, PSI_SOCKET_SEND, 12, "foo.cc", 12);
ok(socket_locker == NULL, "no locker");
- shutdown_performance_schema();
+ unload_performance_schema();
}
void test_file_instrumentation_leak()
@@ -1438,7 +1460,7 @@ void test_file_instrumentation_leak()
file_locker= psi->get_thread_file_descriptor_locker(&file_state, (File) 12, PSI_FILE_WRITE);
ok(file_locker == NULL, "no locker, no leak");
- shutdown_performance_schema();
+ unload_performance_schema();
}
void test_enabled()
@@ -1474,7 +1496,7 @@ void test_enabled()
{ & cond_key_B, "C-B", 0}
};
- shutdown_performance_schema();
+ unload_performance_schema();
#endif
}
@@ -1644,5 +1666,5 @@ int main(int argc, char **argv)
MY_INIT(argv[0]);
do_all_tests();
my_end(0);
- return exit_status();
+ return (exit_status());
}
diff --git a/storage/perfschema/unittest/pfs_account-oom-t.cc b/storage/perfschema/unittest/pfs_account-oom-t.cc
index f1cd5069b54..a87588487cb 100644
--- a/storage/perfschema/unittest/pfs_account-oom-t.cc
+++ b/storage/perfschema/unittest/pfs_account-oom-t.cc
@@ -1,4 +1,4 @@
-/* Copyright (c) 2011, 2015, Oracle and/or its affiliates. All rights reserved.
+/* Copyright (c) 2011, 2017, Oracle and/or its affiliates. All rights reserved.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@@ -108,6 +108,6 @@ int main(int, char **)
MY_INIT("pfs_account-oom-t");
do_all_tests();
my_end(0);
- return exit_status();
+ return (exit_status());
}
diff --git a/storage/perfschema/unittest/pfs_connect_attr-t.cc b/storage/perfschema/unittest/pfs_connect_attr-t.cc
index 7bee1d063a1..ecf790eeede 100644
--- a/storage/perfschema/unittest/pfs_connect_attr-t.cc
+++ b/storage/perfschema/unittest/pfs_connect_attr-t.cc
@@ -1,4 +1,4 @@
-/* Copyright (c) 2008, 2011, Oracle and/or its affiliates. All rights reserved.
+/* Copyright (c) 2008, 2017, Oracle and/or its affiliates. All rights reserved.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@@ -341,5 +341,5 @@ int main(int, char **)
diag("skipping the cp1251 tests : missing character set");
plan(59 + (cs_cp1251 ? 10 : 0));
do_all_tests();
- return 0;
+ return (exit_status());
}
diff --git a/storage/perfschema/unittest/pfs_host-oom-t.cc b/storage/perfschema/unittest/pfs_host-oom-t.cc
index c72162038ca..c089083e4ae 100644
--- a/storage/perfschema/unittest/pfs_host-oom-t.cc
+++ b/storage/perfschema/unittest/pfs_host-oom-t.cc
@@ -1,4 +1,4 @@
-/* Copyright (c) 2011, 2015, Oracle and/or its affiliates. All rights reserved.
+/* Copyright (c) 2011, 2017, Oracle and/or its affiliates. All rights reserved.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@@ -108,6 +108,6 @@ int main(int, char **)
MY_INIT("pfs_host-oom-t");
do_all_tests();
my_end(0);
- return exit_status();
+ return (exit_status());
}
diff --git a/storage/perfschema/unittest/pfs_instr-oom-t.cc b/storage/perfschema/unittest/pfs_instr-oom-t.cc
index 93cddb0de6c..888acfab744 100644
--- a/storage/perfschema/unittest/pfs_instr-oom-t.cc
+++ b/storage/perfschema/unittest/pfs_instr-oom-t.cc
@@ -1,4 +1,4 @@
-/* Copyright (c) 2008, 2015, Oracle and/or its affiliates. All rights reserved.
+/* Copyright (c) 2008, 2017, Oracle and/or its affiliates. All rights reserved.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@@ -355,6 +355,11 @@ void test_oom()
rc= init_instruments(& param);
ok(rc == 1, "oom (per thread wait)");
+ cleanup_sync_class();
+ cleanup_thread_class();
+ cleanup_file_class();
+ cleanup_instruments();
+
param.m_enabled= true;
param.m_mutex_class_sizing= 0;
param.m_rwlock_class_sizing= 0;
@@ -432,6 +437,8 @@ void test_oom()
init_event_name_sizing(& param);
rc= init_instruments(& param);
ok(rc == 1, "oom (thread stages history sizing)");
+
+ cleanup_thread_class();
cleanup_instruments();
param.m_enabled= true;
@@ -467,6 +474,9 @@ void test_oom()
init_event_name_sizing(& param);
rc= init_instruments(& param);
ok(rc == 1, "oom (per thread stages)");
+
+ cleanup_stage_class();
+ cleanup_thread_class();
cleanup_instruments();
param.m_enabled= true;
@@ -502,6 +512,8 @@ void test_oom()
init_event_name_sizing(& param);
rc= init_instruments(& param);
ok(rc == 1, "oom (thread statements history sizing)");
+
+ cleanup_thread_class();
cleanup_instruments();
param.m_enabled= true;
@@ -537,6 +549,9 @@ void test_oom()
init_event_name_sizing(& param);
rc= init_instruments(& param);
ok(rc == 1, "oom (per thread statements)");
+
+ cleanup_statement_class();
+ cleanup_thread_class();
cleanup_instruments();
param.m_enabled= true;
@@ -572,6 +587,8 @@ void test_oom()
init_event_name_sizing(& param);
rc= init_instruments(& param);
ok(rc == 1, "oom (global waits)");
+
+ cleanup_sync_class();
cleanup_instruments();
param.m_enabled= true;
@@ -609,8 +626,10 @@ void test_oom()
ok(rc == 0, "init stage class");
rc= init_instruments(& param);
ok(rc == 1, "oom (global stages)");
- cleanup_instruments();
+
+ cleanup_sync_class();
cleanup_stage_class();
+ cleanup_instruments();
param.m_enabled= true;
param.m_mutex_class_sizing= 10;
@@ -647,8 +666,10 @@ void test_oom()
ok(rc == 0, "init statement class");
rc= init_instruments(& param);
ok(rc == 1, "oom (global statements)");
- cleanup_instruments();
+
+ cleanup_sync_class();
cleanup_statement_class();
+ cleanup_instruments();
}
void do_all_tests()
@@ -662,6 +683,6 @@ int main(int argc, char **argv)
MY_INIT(argv[0]);
do_all_tests();
my_end(0);
- return exit_status();
+ return (exit_status());
}
diff --git a/storage/perfschema/unittest/pfs_instr-t.cc b/storage/perfschema/unittest/pfs_instr-t.cc
index c191b3dc41a..659f410e283 100644
--- a/storage/perfschema/unittest/pfs_instr-t.cc
+++ b/storage/perfschema/unittest/pfs_instr-t.cc
@@ -1,4 +1,4 @@
-/* Copyright (c) 2008, 2015, Oracle and/or its affiliates. All rights reserved.
+/* Copyright (c) 2008, 2017, Oracle and/or its affiliates. All rights reserved.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@@ -23,10 +23,11 @@
#include <memory.h>
+PFS_global_param param;
+
void test_no_instruments()
{
int rc;
- PFS_global_param param;
memset(& param, 0xFF, sizeof(param));
param.m_enabled= true;
@@ -86,7 +87,6 @@ void test_no_instances()
PFS_file *file;
PFS_socket *socket;
PFS_table *table;
- PFS_global_param param;
memset(& param, 0xFF, sizeof(param));
param.m_enabled= true;
@@ -227,7 +227,6 @@ void test_with_instances()
PFS_socket *socket_2;
PFS_table *table_1;
PFS_table *table_2;
- PFS_global_param param;
memset(& param, 0xFF, sizeof(param));
param.m_enabled= true;
@@ -414,6 +413,6 @@ int main(int argc, char **argv)
MY_INIT(argv[0]);
do_all_tests();
my_end(0);
- return exit_status();
+ return (exit_status());
}
diff --git a/storage/perfschema/unittest/pfs_instr_class-oom-t.cc b/storage/perfschema/unittest/pfs_instr_class-oom-t.cc
index 708e08be6d4..c9b87b9cf1e 100644
--- a/storage/perfschema/unittest/pfs_instr_class-oom-t.cc
+++ b/storage/perfschema/unittest/pfs_instr_class-oom-t.cc
@@ -1,4 +1,4 @@
-/* Copyright (c) 2008, 2015, Oracle and/or its affiliates. All rights reserved.
+/* Copyright (c) 2008, 2017, Oracle and/or its affiliates. All rights reserved.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@@ -64,6 +64,6 @@ int main(int argc, char **argv)
MY_INIT(argv[0]);
do_all_tests();
my_end(0);
- return exit_status();
+ return (exit_status());
}
diff --git a/storage/perfschema/unittest/pfs_instr_class-t.cc b/storage/perfschema/unittest/pfs_instr_class-t.cc
index 19a8989b34e..dcbc3927eae 100644
--- a/storage/perfschema/unittest/pfs_instr_class-t.cc
+++ b/storage/perfschema/unittest/pfs_instr_class-t.cc
@@ -1,4 +1,4 @@
-/* Copyright (c) 2008, 2015, Oracle and/or its affiliates. All rights reserved.
+/* Copyright (c) 2008, 2017, Oracle and/or its affiliates. All rights reserved.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@@ -670,5 +670,5 @@ int main(int argc, char **argv)
MY_INIT(argv[0]);
do_all_tests();
my_end(0);
- return exit_status();
+ return (exit_status());
}
diff --git a/storage/perfschema/unittest/pfs_misc-t.cc b/storage/perfschema/unittest/pfs_misc-t.cc
index a4b11b9a727..eed9039dfb2 100644
--- a/storage/perfschema/unittest/pfs_misc-t.cc
+++ b/storage/perfschema/unittest/pfs_misc-t.cc
@@ -1,4 +1,4 @@
-/* Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
+/* Copyright (c) 2015, 2017, Oracle and/or its affiliates. All rights reserved.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@@ -67,6 +67,6 @@ int main(int, char **)
MY_INIT("pfs_misc-t");
do_all_tests();
my_end(0);
- return exit_status();
+ return (exit_status());
}
diff --git a/storage/perfschema/unittest/pfs_timer-t.cc b/storage/perfschema/unittest/pfs_timer-t.cc
index 8fb3a206ebf..55113860532 100644
--- a/storage/perfschema/unittest/pfs_timer-t.cc
+++ b/storage/perfschema/unittest/pfs_timer-t.cc
@@ -1,4 +1,4 @@
-/* Copyright (c) 2008, 2011, Oracle and/or its affiliates. All rights reserved.
+/* Copyright (c) 2008, 2017, Oracle and/or its affiliates. All rights reserved.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@@ -115,6 +115,6 @@ int main(int, char **)
MY_INIT("pfs_timer-t");
do_all_tests();
my_end(0);
- return exit_status();
+ return (exit_status());
}
diff --git a/storage/perfschema/unittest/pfs_user-oom-t.cc b/storage/perfschema/unittest/pfs_user-oom-t.cc
index 9fb64d130ae..e153b39cbd2 100644
--- a/storage/perfschema/unittest/pfs_user-oom-t.cc
+++ b/storage/perfschema/unittest/pfs_user-oom-t.cc
@@ -1,4 +1,4 @@
-/* Copyright (c) 2011, 2015, Oracle and/or its affiliates. All rights reserved.
+/* Copyright (c) 2011, 2017, Oracle and/or its affiliates. All rights reserved.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@@ -107,6 +107,6 @@ int main(int, char **)
MY_INIT("pfs_user-oom-t");
do_all_tests();
my_end(0);
- return exit_status();
+ return (exit_status());
}
diff --git a/storage/rocksdb/rdb_datadic.h b/storage/rocksdb/rdb_datadic.h
index 8742a0794a4..9974baaeed0 100644
--- a/storage/rocksdb/rdb_datadic.h
+++ b/storage/rocksdb/rdb_datadic.h
@@ -751,7 +751,7 @@ public:
interface Rdb_tables_scanner {
virtual int add_table(Rdb_tbl_def * tdef) = 0;
- virtual ~Rdb_tables_scanner() {}
+ virtual ~Rdb_tables_scanner() {} /* Keep the compiler happy */
};
/*
diff --git a/storage/sphinx/ha_sphinx.cc b/storage/sphinx/ha_sphinx.cc
index e9eeb802568..be57c04806b 100644
--- a/storage/sphinx/ha_sphinx.cc
+++ b/storage/sphinx/ha_sphinx.cc
@@ -216,7 +216,9 @@ enum ESphGroupBy
SPH_GROUPBY_WEEK = 1, ///< group by week
SPH_GROUPBY_MONTH = 2, ///< group by month
SPH_GROUPBY_YEAR = 3, ///< group by year
- SPH_GROUPBY_ATTR = 4 ///< group by attribute value
+ SPH_GROUPBY_ATTR = 4, ///< group by attribute value
+ SPH_GROUPBY_ATTRPAIR = 5, ///< group by sequential attrs pair (rendered redundant by 64bit attrs support; removed)
+ SPH_GROUPBY_MULTIPLE = 6 ///< group by on multiple attribute values
};
/// known attribute types
@@ -911,7 +913,7 @@ bool sphinx_show_status ( THD * thd )
}
// show last error or warning (either in addition to stats, or on their own)
- if ( pTls && pTls->m_pHeadTable && pTls->m_pHeadTable->m_tStats.m_sLastMessage && pTls->m_pHeadTable->m_tStats.m_sLastMessage[0] )
+ if ( pTls && pTls->m_pHeadTable && pTls->m_pHeadTable->m_tStats.m_sLastMessage[0] )
{
const char * sMessageType = pTls->m_pHeadTable->m_tStats.m_bLastError ? "error" : "warning";
@@ -1563,6 +1565,7 @@ bool CSphSEQuery::ParseField ( char * sField )
{ "month:", SPH_GROUPBY_MONTH },
{ "year:", SPH_GROUPBY_YEAR },
{ "attr:", SPH_GROUPBY_ATTR },
+ { "multi:", SPH_GROUPBY_MULTIPLE }
};
int i;
diff --git a/storage/spider/spd_db_conn.cc b/storage/spider/spd_db_conn.cc
index 96d323a3492..ce425e9a65e 100644
--- a/storage/spider/spd_db_conn.cc
+++ b/storage/spider/spd_db_conn.cc
@@ -1846,8 +1846,8 @@ int spider_db_append_key_where_internal(
#if defined(MARIADB_BASE_VERSION) && MYSQL_VERSION_ID >= 100000
case HA_READ_PREFIX_LAST:
result_list->desc_flg = TRUE;
- /* fall through */
#endif
+ /* fall through */
case HA_READ_KEY_EXACT:
if (sql_kind == SPIDER_SQL_KIND_SQL)
{
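The spd_db_conn.cc hunk moves the fall-through comment below the #endif: compilers that honour such comments (for example GCC with -Wimplicit-fallthrough) only accept one that immediately precedes the next case label, and it has to be present even in builds where the #if block is compiled out. Tiny illustration, with SOME_FEATURE as a placeholder macro:

    static int classify(int flag)
    {
      int r = 0;
      switch (flag) {
      case 1:
    #if defined(SOME_FEATURE)
        r += 10;
    #endif
        /* fall through: placed after the #endif so it is compiled in every
           build and sits directly before the next label */
      case 2:
        r += 1;
        break;
      default:
        break;
      }
      return r;
    }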
diff --git a/storage/spider/spd_table.cc b/storage/spider/spd_table.cc
index 7724eb067e9..56931f47f24 100644
--- a/storage/spider/spd_table.cc
+++ b/storage/spider/spd_table.cc
@@ -516,7 +516,6 @@ int spider_free_share_alloc(
) {
int roop_count;
DBUG_ENTER("spider_free_share_alloc");
- if (share->dbton_bitmap)
{
for (roop_count = SPIDER_DBTON_SIZE - 1; roop_count >= 0; roop_count--)
{
diff --git a/storage/tokudb/CMakeLists.txt b/storage/tokudb/CMakeLists.txt
index ac8e5a11e2a..c02375ceb2b 100644
--- a/storage/tokudb/CMakeLists.txt
+++ b/storage/tokudb/CMakeLists.txt
@@ -1,4 +1,4 @@
-SET(TOKUDB_VERSION 5.6.35-80.0)
+SET(TOKUDB_VERSION 5.6.36-82.0)
# PerconaFT only supports x86-64 and cmake-2.8.9+
IF(CMAKE_VERSION VERSION_LESS "2.8.9")
MESSAGE(STATUS "CMake 2.8.9 or higher is required by TokuDB")
@@ -21,7 +21,8 @@ SET(TOKUDB_SOURCES
tokudb_background.cc
tokudb_information_schema.cc
tokudb_sysvars.cc
- tokudb_thread.cc)
+ tokudb_thread.cc
+ tokudb_dir_cmd.cc)
MYSQL_ADD_PLUGIN(tokudb ${TOKUDB_SOURCES} STORAGE_ENGINE MODULE_ONLY
COMPONENT tokudb-engine)
diff --git a/storage/tokudb/PerconaFT/buildheader/make_tdb.cc b/storage/tokudb/PerconaFT/buildheader/make_tdb.cc
index 0145d631839..6f0b7c5f419 100644
--- a/storage/tokudb/PerconaFT/buildheader/make_tdb.cc
+++ b/storage/tokudb/PerconaFT/buildheader/make_tdb.cc
@@ -426,6 +426,9 @@ static void print_db_env_struct (void) {
"bool (*set_dir_per_db)(DB_ENV *, bool new_val)",
"bool (*get_dir_per_db)(DB_ENV *)",
"const char *(*get_data_dir)(DB_ENV *env)",
+ "int (*dirtool_attach)(DB_ENV *, DB_TXN *, const char *, const char *)",
+ "int (*dirtool_detach)(DB_ENV *, DB_TXN *, const char *)",
+ "int (*dirtool_move)(DB_ENV *, DB_TXN *, const char *, const char *)",
"void (*kill_waiter)(DB_ENV *, void *extra)",
NULL};
diff --git a/storage/tokudb/PerconaFT/ft/ft-ops.cc b/storage/tokudb/PerconaFT/ft/ft-ops.cc
index eba9aa33e9f..d3a5a6c9e91 100644
--- a/storage/tokudb/PerconaFT/ft/ft-ops.cc
+++ b/storage/tokudb/PerconaFT/ft/ft-ops.cc
@@ -3901,25 +3901,34 @@ struct keyrange_compare_s {
};
// TODO: Remove me, I'm boring
-static int keyrange_compare(DBT const &kdbt, const struct keyrange_compare_s &s) {
+static int keyrange_compare(DBT const &kdbt,
+ const struct keyrange_compare_s &s) {
return s.ft->cmp(&kdbt, s.key);
}
-static void
-keysrange_in_leaf_partition (FT_HANDLE ft_handle, FTNODE node,
- DBT* key_left, DBT* key_right,
- int left_child_number, int right_child_number, uint64_t estimated_num_rows,
- uint64_t *less, uint64_t* equal_left, uint64_t* middle,
- uint64_t* equal_right, uint64_t* greater, bool* single_basement_node)
+static void keysrange_in_leaf_partition(FT_HANDLE ft_handle,
+ FTNODE node,
+ DBT *key_left,
+ DBT *key_right,
+ int left_child_number,
+ int right_child_number,
+ uint64_t estimated_num_rows,
+ uint64_t *less,
+ uint64_t *equal_left,
+ uint64_t *middle,
+ uint64_t *equal_right,
+ uint64_t *greater,
+ bool *single_basement_node)
// If the partition is in main memory then estimate the number
// Treat key_left == NULL as negative infinity
// Treat key_right == NULL as positive infinity
{
- paranoid_invariant(node->height == 0); // we are in a leaf
+ paranoid_invariant(node->height == 0); // we are in a leaf
paranoid_invariant(!(key_left == NULL && key_right != NULL));
paranoid_invariant(left_child_number <= right_child_number);
bool single_basement = left_child_number == right_child_number;
- paranoid_invariant(!single_basement || (BP_STATE(node, left_child_number) == PT_AVAIL));
+ paranoid_invariant(!single_basement ||
+ (BP_STATE(node, left_child_number) == PT_AVAIL));
if (BP_STATE(node, left_child_number) == PT_AVAIL) {
int r;
// The partition is in main memory then get an exact count.
@@ -3927,29 +3936,35 @@ keysrange_in_leaf_partition (FT_HANDLE ft_handle, FTNODE node,
BASEMENTNODE bn = BLB(node, left_child_number);
uint32_t idx_left = 0;
// if key_left is NULL then set r==-1 and idx==0.
- r = key_left ? bn->data_buffer.find_zero<decltype(s_left), keyrange_compare>(s_left, nullptr, nullptr, nullptr, &idx_left) : -1;
+ r = key_left
+ ? bn->data_buffer.find_zero<decltype(s_left), keyrange_compare>(
+ s_left, nullptr, nullptr, nullptr, &idx_left)
+ : -1;
*less = idx_left;
- *equal_left = (r==0) ? 1 : 0;
+ *equal_left = (r == 0) ? 1 : 0;
uint32_t size = bn->data_buffer.num_klpairs();
uint32_t idx_right = size;
r = -1;
if (single_basement && key_right) {
struct keyrange_compare_s s_right = {ft_handle->ft, key_right};
- r = bn->data_buffer.find_zero<decltype(s_right), keyrange_compare>(s_right, nullptr, nullptr, nullptr, &idx_right);
+ r = bn->data_buffer.find_zero<decltype(s_right), keyrange_compare>(
+ s_right, nullptr, nullptr, nullptr, &idx_right);
}
*middle = idx_right - idx_left - *equal_left;
- *equal_right = (r==0) ? 1 : 0;
+ *equal_right = (r == 0) ? 1 : 0;
*greater = size - idx_right - *equal_right;
} else {
paranoid_invariant(!single_basement);
uint32_t idx_left = estimated_num_rows / 2;
if (!key_left) {
- //Both nullptr, assume key_left belongs before leftmost entry, key_right belongs after rightmost entry
+ // Both nullptr, assume key_left belongs before leftmost entry,
+ // key_right belongs after rightmost entry
idx_left = 0;
paranoid_invariant(!key_right);
}
- // Assume idx_left and idx_right point to where key_left and key_right belong, (but are not there).
+ // Assume idx_left and idx_right point to where key_left and key_right
+ // belong, (but are not there).
*less = idx_left;
*equal_left = 0;
*middle = estimated_num_rows - idx_left;
@@ -3959,44 +3974,76 @@ keysrange_in_leaf_partition (FT_HANDLE ft_handle, FTNODE node,
*single_basement_node = single_basement;
}
-static int
-toku_ft_keysrange_internal (FT_HANDLE ft_handle, FTNODE node,
- DBT* key_left, DBT* key_right, bool may_find_right,
- uint64_t* less, uint64_t* equal_left, uint64_t* middle,
- uint64_t* equal_right, uint64_t* greater, bool* single_basement_node,
- uint64_t estimated_num_rows,
- ftnode_fetch_extra *min_bfe, // set up to read a minimal read.
- ftnode_fetch_extra *match_bfe, // set up to read a basement node iff both keys in it
- struct unlockers *unlockers, ANCESTORS ancestors, const pivot_bounds &bounds)
-// Implementation note: Assign values to less, equal, and greater, and then on the way out (returning up the stack) we add more values in.
+static int toku_ft_keysrange_internal(
+ FT_HANDLE ft_handle,
+ FTNODE node,
+ DBT *key_left,
+ DBT *key_right,
+ bool may_find_right,
+ uint64_t *less,
+ uint64_t *equal_left,
+ uint64_t *middle,
+ uint64_t *equal_right,
+ uint64_t *greater,
+ bool *single_basement_node,
+ uint64_t estimated_num_rows,
+ ftnode_fetch_extra *min_bfe, // set up to read a minimal read.
+ ftnode_fetch_extra
+ *match_bfe, // set up to read a basement node iff both keys in it
+ struct unlockers *unlockers,
+ ANCESTORS ancestors,
+ const pivot_bounds &bounds)
+// Implementation note: Assign values to less, equal, and greater, and then on
+// the way out (returning up the stack) we add more values in.
{
int r = 0;
// if KEY is NULL then use the leftmost key.
- int left_child_number = key_left ? toku_ftnode_which_child (node, key_left, ft_handle->ft->cmp) : 0;
- int right_child_number = node->n_children; // Sentinel that does not equal left_child_number.
+ int left_child_number =
+ key_left ? toku_ftnode_which_child(node, key_left, ft_handle->ft->cmp)
+ : 0;
+ int right_child_number =
+ node->n_children; // Sentinel that does not equal left_child_number.
if (may_find_right) {
- right_child_number = key_right ? toku_ftnode_which_child (node, key_right, ft_handle->ft->cmp) : node->n_children - 1;
+ right_child_number =
+ key_right
+ ? toku_ftnode_which_child(node, key_right, ft_handle->ft->cmp)
+ : node->n_children - 1;
}
uint64_t rows_per_child = estimated_num_rows / node->n_children;
if (node->height == 0) {
- keysrange_in_leaf_partition(ft_handle, node, key_left, key_right, left_child_number, right_child_number,
- rows_per_child, less, equal_left, middle, equal_right, greater, single_basement_node);
-
- *less += rows_per_child * left_child_number;
+ keysrange_in_leaf_partition(ft_handle,
+ node,
+ key_left,
+ key_right,
+ left_child_number,
+ right_child_number,
+ rows_per_child,
+ less,
+ equal_left,
+ middle,
+ equal_right,
+ greater,
+ single_basement_node);
+
+ *less += rows_per_child * left_child_number;
if (*single_basement_node) {
- *greater += rows_per_child * (node->n_children - left_child_number - 1);
+ *greater +=
+ rows_per_child * (node->n_children - left_child_number - 1);
} else {
- *middle += rows_per_child * (node->n_children - left_child_number - 1);
+ *middle +=
+ rows_per_child * (node->n_children - left_child_number - 1);
}
} else {
// do the child.
struct ancestors next_ancestors = {node, left_child_number, ancestors};
BLOCKNUM childblocknum = BP_BLOCKNUM(node, left_child_number);
- uint32_t fullhash = compute_child_fullhash(ft_handle->ft->cf, node, left_child_number);
+ uint32_t fullhash =
+ compute_child_fullhash(ft_handle->ft->cf, node, left_child_number);
FTNODE childnode;
bool msgs_applied = false;
- bool child_may_find_right = may_find_right && left_child_number == right_child_number;
+ bool child_may_find_right =
+ may_find_right && left_child_number == right_child_number;
r = toku_pin_ftnode_for_query(
ft_handle,
childblocknum,
@@ -4007,27 +4054,45 @@ toku_ft_keysrange_internal (FT_HANDLE ft_handle, FTNODE node,
child_may_find_right ? match_bfe : min_bfe,
false,
&childnode,
- &msgs_applied
- );
+ &msgs_applied);
paranoid_invariant(!msgs_applied);
if (r != TOKUDB_TRY_AGAIN) {
assert_zero(r);
- struct unlock_ftnode_extra unlock_extra = {ft_handle,childnode,false};
- struct unlockers next_unlockers = {true, unlock_ftnode_fun, (void*)&unlock_extra, unlockers};
- const pivot_bounds next_bounds = bounds.next_bounds(node, left_child_number);
-
- r = toku_ft_keysrange_internal(ft_handle, childnode, key_left, key_right, child_may_find_right,
- less, equal_left, middle, equal_right, greater, single_basement_node,
- rows_per_child, min_bfe, match_bfe, &next_unlockers, &next_ancestors, next_bounds);
+ struct unlock_ftnode_extra unlock_extra = {
+ ft_handle, childnode, false};
+ struct unlockers next_unlockers = {
+ true, unlock_ftnode_fun, (void *)&unlock_extra, unlockers};
+ const pivot_bounds next_bounds =
+ bounds.next_bounds(node, left_child_number);
+
+ r = toku_ft_keysrange_internal(ft_handle,
+ childnode,
+ key_left,
+ key_right,
+ child_may_find_right,
+ less,
+ equal_left,
+ middle,
+ equal_right,
+ greater,
+ single_basement_node,
+ rows_per_child,
+ min_bfe,
+ match_bfe,
+ &next_unlockers,
+ &next_ancestors,
+ next_bounds);
if (r != TOKUDB_TRY_AGAIN) {
assert_zero(r);
- *less += rows_per_child * left_child_number;
+ *less += rows_per_child * left_child_number;
if (*single_basement_node) {
- *greater += rows_per_child * (node->n_children - left_child_number - 1);
+ *greater += rows_per_child *
+ (node->n_children - left_child_number - 1);
} else {
- *middle += rows_per_child * (node->n_children - left_child_number - 1);
+ *middle += rows_per_child *
+ (node->n_children - left_child_number - 1);
}
assert(unlockers->locked);
@@ -4038,10 +4103,21 @@ toku_ft_keysrange_internal (FT_HANDLE ft_handle, FTNODE node,
return r;
}
-void toku_ft_keysrange(FT_HANDLE ft_handle, DBT* key_left, DBT* key_right, uint64_t *less_p, uint64_t* equal_left_p, uint64_t* middle_p, uint64_t* equal_right_p, uint64_t* greater_p, bool* middle_3_exact_p)
-// Effect: Return an estimate of the number of keys to the left, the number equal (to left key), number between keys, number equal to right key, and the number to the right of both keys.
+void toku_ft_keysrange(FT_HANDLE ft_handle,
+ DBT *key_left,
+ DBT *key_right,
+ uint64_t *less_p,
+ uint64_t *equal_left_p,
+ uint64_t *middle_p,
+ uint64_t *equal_right_p,
+ uint64_t *greater_p,
+ bool *middle_3_exact_p)
+// Effect: Return an estimate of the number of keys to the left, the number
+// equal (to left key), number between keys, number equal to right key, and the
+// number to the right of both keys.
// The values are an estimate.
-// If you perform a keyrange on two keys that are in the same basement, equal_less, middle, and equal_right will be exact.
+// If you perform a keyrange on two keys that are in the same basement,
+// equal_less, middle, and equal_right will be exact.
// 4184: What to do with a NULL key?
// key_left==NULL is treated as -infinity
// key_right==NULL is treated as +infinity
@@ -4049,10 +4125,21 @@ void toku_ft_keysrange(FT_HANDLE ft_handle, DBT* key_left, DBT* key_right, uint6
// key_right can be non-null only if key_left is non-null;
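+// Taken together, the five counts roughly partition the estimated total
+// number of rows in the dictionary.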
{
if (!key_left && key_right) {
- // Simplify internals by only supporting key_right != null when key_left != null
- // If key_right != null and key_left == null, then swap them and fix up numbers.
- uint64_t less = 0, equal_left = 0, middle = 0, equal_right = 0, greater = 0;
- toku_ft_keysrange(ft_handle, key_right, nullptr, &less, &equal_left, &middle, &equal_right, &greater, middle_3_exact_p);
+        // Simplify internals by only supporting key_right != null when
+        // key_left != null.
+        // If key_right != null and key_left == null, then swap them and
+        // fix up the numbers.
+ uint64_t less = 0, equal_left = 0, middle = 0, equal_right = 0,
+ greater = 0;
+ toku_ft_keysrange(ft_handle,
+ key_right,
+ nullptr,
+ &less,
+ &equal_left,
+ &middle,
+ &equal_right,
+ &greater,
+ middle_3_exact_p);
*less_p = 0;
*equal_left_p = 0;
*middle_p = less;
@@ -4065,98 +4152,132 @@ void toku_ft_keysrange(FT_HANDLE ft_handle, DBT* key_left, DBT* key_right, uint6
paranoid_invariant(!(!key_left && key_right));
ftnode_fetch_extra min_bfe;
ftnode_fetch_extra match_bfe;
- min_bfe.create_for_min_read(ft_handle->ft); // read pivot keys but not message buffers
- match_bfe.create_for_keymatch(ft_handle->ft, key_left, key_right, false, false); // read basement node only if both keys in it.
-try_again:
+ min_bfe.create_for_min_read(
+ ft_handle->ft); // read pivot keys but not message buffers
+ match_bfe.create_for_keymatch(
+ ft_handle->ft,
+ key_left,
+ key_right,
+ false,
+ false); // read basement node only if both keys in it.
+try_again : {
+ uint64_t less = 0, equal_left = 0, middle = 0, equal_right = 0, greater = 0;
+ bool single_basement_node = false;
+ FTNODE node = NULL;
{
- uint64_t less = 0, equal_left = 0, middle = 0, equal_right = 0, greater = 0;
- bool single_basement_node = false;
- FTNODE node = NULL;
- {
- uint32_t fullhash;
- CACHEKEY root_key;
- toku_calculate_root_offset_pointer(ft_handle->ft, &root_key, &fullhash);
- toku_pin_ftnode(
- ft_handle->ft,
- root_key,
- fullhash,
- &match_bfe,
- PL_READ, // may_modify_node, cannot change root during keyrange
- &node,
- true
- );
- }
+ uint32_t fullhash;
+ CACHEKEY root_key;
+ toku_calculate_root_offset_pointer(ft_handle->ft, &root_key, &fullhash);
+ toku_pin_ftnode(
+ ft_handle->ft,
+ root_key,
+ fullhash,
+ &match_bfe,
+ PL_READ, // may_modify_node, cannot change root during keyrange
+ &node,
+ true);
+ }
- struct unlock_ftnode_extra unlock_extra = {ft_handle,node,false};
- struct unlockers unlockers = {true, unlock_ftnode_fun, (void*)&unlock_extra, (UNLOCKERS)NULL};
+ struct unlock_ftnode_extra unlock_extra = {ft_handle, node, false};
+ struct unlockers unlockers = {
+ true, unlock_ftnode_fun, (void *)&unlock_extra, (UNLOCKERS)NULL};
- {
- int r;
- int64_t numrows = ft_handle->ft->in_memory_stats.numrows;
- if (numrows < 0)
- numrows = 0; // prevent appearance of a negative number
- r = toku_ft_keysrange_internal (ft_handle, node, key_left, key_right, true,
- &less, &equal_left, &middle, &equal_right, &greater,
- &single_basement_node, numrows,
- &min_bfe, &match_bfe, &unlockers, (ANCESTORS)NULL, pivot_bounds::infinite_bounds());
+ {
+ int r;
+ int64_t numrows = ft_handle->ft->in_memory_logical_rows;
+ if (numrows < 0)
+ numrows = 0; // prevent appearance of a negative number
+ r = toku_ft_keysrange_internal(ft_handle,
+ node,
+ key_left,
+ key_right,
+ true,
+ &less,
+ &equal_left,
+ &middle,
+ &equal_right,
+ &greater,
+ &single_basement_node,
+ numrows,
+ &min_bfe,
+ &match_bfe,
+ &unlockers,
+ (ANCESTORS)NULL,
+ pivot_bounds::infinite_bounds());
+ assert(r == 0 || r == TOKUDB_TRY_AGAIN);
+ if (r == TOKUDB_TRY_AGAIN) {
+ assert(!unlockers.locked);
+ goto try_again;
+ }
+ // May need to do a second query.
+ if (!single_basement_node && key_right != nullptr) {
+ // "greater" is stored in "middle"
+ invariant_zero(equal_right);
+ invariant_zero(greater);
+ uint64_t less2 = 0, equal_left2 = 0, middle2 = 0, equal_right2 = 0,
+ greater2 = 0;
+ bool ignore;
+ r = toku_ft_keysrange_internal(ft_handle,
+ node,
+ key_right,
+ nullptr,
+ false,
+ &less2,
+ &equal_left2,
+ &middle2,
+ &equal_right2,
+ &greater2,
+ &ignore,
+ numrows,
+ &min_bfe,
+ &match_bfe,
+ &unlockers,
+ (ANCESTORS) nullptr,
+ pivot_bounds::infinite_bounds());
assert(r == 0 || r == TOKUDB_TRY_AGAIN);
if (r == TOKUDB_TRY_AGAIN) {
assert(!unlockers.locked);
goto try_again;
}
- // May need to do a second query.
- if (!single_basement_node && key_right != nullptr) {
- // "greater" is stored in "middle"
- invariant_zero(equal_right);
- invariant_zero(greater);
- uint64_t less2 = 0, equal_left2 = 0, middle2 = 0, equal_right2 = 0, greater2 = 0;
- bool ignore;
- r = toku_ft_keysrange_internal (ft_handle, node, key_right, nullptr, false,
- &less2, &equal_left2, &middle2, &equal_right2, &greater2,
- &ignore, numrows,
- &min_bfe, &match_bfe, &unlockers, (ANCESTORS)nullptr, pivot_bounds::infinite_bounds());
- assert(r == 0 || r == TOKUDB_TRY_AGAIN);
- if (r == TOKUDB_TRY_AGAIN) {
- assert(!unlockers.locked);
- goto try_again;
- }
- invariant_zero(equal_right2);
- invariant_zero(greater2);
- // Update numbers.
- // less is already correct.
- // equal_left is already correct.
-
- // "middle" currently holds everything greater than left_key in first query
- // 'middle2' currently holds everything greater than right_key in second query
- // 'equal_left2' is how many match right_key
-
- // Prevent underflow.
- if (middle >= equal_left2 + middle2) {
- middle -= equal_left2 + middle2;
- } else {
- middle = 0;
- }
- equal_right = equal_left2;
- greater = middle2;
+ invariant_zero(equal_right2);
+ invariant_zero(greater2);
+ // Update numbers.
+ // less is already correct.
+ // equal_left is already correct.
+
+            // "middle" currently holds everything greater than left_key in
+            // the first query.
+            // "middle2" currently holds everything greater than right_key in
+            // the second query.
+            // "equal_left2" is how many keys match right_key.
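+            // Below we therefore set middle -= (equal_left2 + middle2),
+            // equal_right = equal_left2, and greater = middle2.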
+
+ // Prevent underflow.
+ if (middle >= equal_left2 + middle2) {
+ middle -= equal_left2 + middle2;
+ } else {
+ middle = 0;
}
+ equal_right = equal_left2;
+ greater = middle2;
}
- assert(unlockers.locked);
- toku_unpin_ftnode_read_only(ft_handle->ft, node);
- if (!key_right) {
- paranoid_invariant_zero(equal_right);
- paranoid_invariant_zero(greater);
- }
- if (!key_left) {
- paranoid_invariant_zero(less);
- paranoid_invariant_zero(equal_left);
- }
- *less_p = less;
- *equal_left_p = equal_left;
- *middle_p = middle;
- *equal_right_p = equal_right;
- *greater_p = greater;
- *middle_3_exact_p = single_basement_node;
}
+ assert(unlockers.locked);
+ toku_unpin_ftnode_read_only(ft_handle->ft, node);
+ if (!key_right) {
+ paranoid_invariant_zero(equal_right);
+ paranoid_invariant_zero(greater);
+ }
+ if (!key_left) {
+ paranoid_invariant_zero(less);
+ paranoid_invariant_zero(equal_left);
+ }
+ *less_p = less;
+ *equal_left_p = equal_left;
+ *middle_p = middle;
+ *equal_right_p = equal_right;
+ *greater_p = greater;
+ *middle_3_exact_p = single_basement_node;
+}
}
struct get_key_after_bytes_iterate_extra {
diff --git a/storage/tokudb/PerconaFT/src/ydb.cc b/storage/tokudb/PerconaFT/src/ydb.cc
index 45385ef9120..6eb138f15d7 100644
--- a/storage/tokudb/PerconaFT/src/ydb.cc
+++ b/storage/tokudb/PerconaFT/src/ydb.cc
@@ -71,6 +71,8 @@ const char *toku_copyright_string = "Copyright (c) 2006, 2015, Percona and/or it
#include "util/status.h"
#include "util/context.h"
+#include <functional>
+
// Include ydb_lib.cc here so that its constructor/destructor gets put into
// ydb.o, to make sure they don't get erased at link time (when linking to
// a static libtokufractaltree.a that was compiled with gcc). See #5094.
@@ -1315,6 +1317,159 @@ static const char *env_get_data_dir(DB_ENV *env) {
return env->i->real_data_dir;
}
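+
+// dirtool helpers: these operate directly on the environment's directory
+// dictionary, which maps dictionary names (dnames) to internal file names
+// (inames).
+
+// attach: insert a new dname -> iname mapping into the directory.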
+static int env_dirtool_attach(DB_ENV *env,
+ DB_TXN *txn,
+ const char *dname,
+ const char *iname) {
+ int r;
+ DBT dname_dbt;
+ DBT iname_dbt;
+
+ HANDLE_PANICKED_ENV(env);
+ if (!env_opened(env)) {
+ return EINVAL;
+ }
+ HANDLE_READ_ONLY_TXN(txn);
+ toku_fill_dbt(&dname_dbt, dname, strlen(dname) + 1);
+ toku_fill_dbt(&iname_dbt, iname, strlen(iname) + 1);
+
+ r = toku_db_put(env->i->directory,
+ txn,
+ &dname_dbt,
+ &iname_dbt,
+ 0,
+ true);
+ return r;
+}
+
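+// detach: remove the dname -> iname mapping for dname from the directory;
+// the underlying iname file itself is not deleted here.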
+static int env_dirtool_detach(DB_ENV *env,
+ DB_TXN *txn,
+ const char *dname) {
+ int r;
+ DBT dname_dbt;
+ DBT old_iname_dbt;
+
+ HANDLE_PANICKED_ENV(env);
+ if (!env_opened(env)) {
+ return EINVAL;
+ }
+ HANDLE_READ_ONLY_TXN(txn);
+
+ toku_fill_dbt(&dname_dbt, dname, strlen(dname) + 1);
+ toku_init_dbt_flags(&old_iname_dbt, DB_DBT_REALLOC);
+
+ r = toku_db_get(env->i->directory,
+ txn,
+ &dname_dbt,
+ &old_iname_dbt,
+ DB_SERIALIZABLE); // allocates memory for iname
+ if (r == DB_NOTFOUND)
+ return EEXIST;
+ toku_free(old_iname_dbt.data);
+
+ r = toku_db_del(env->i->directory, txn, &dname_dbt, DB_DELETE_ANY, true);
+
+ return r;
+}
+
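+// move: re-point an existing dname to new_dname while keeping the same
+// iname; implemented as a delete of the old entry plus a put of the new one.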
+static int env_dirtool_move(DB_ENV *env,
+ DB_TXN *txn,
+ const char *old_dname,
+ const char *new_dname) {
+ int r;
+ DBT old_dname_dbt;
+ DBT new_dname_dbt;
+ DBT iname_dbt;
+
+ HANDLE_PANICKED_ENV(env);
+ if (!env_opened(env)) {
+ return EINVAL;
+ }
+ HANDLE_READ_ONLY_TXN(txn);
+
+ toku_fill_dbt(&old_dname_dbt, old_dname, strlen(old_dname) + 1);
+ toku_fill_dbt(&new_dname_dbt, new_dname, strlen(new_dname) + 1);
+ toku_init_dbt_flags(&iname_dbt, DB_DBT_REALLOC);
+
+ r = toku_db_get(env->i->directory,
+ txn,
+ &old_dname_dbt,
+ &iname_dbt,
+ DB_SERIALIZABLE); // allocates memory for iname
+ if (r == DB_NOTFOUND)
+ return EEXIST;
+
+ r = toku_db_del(
+ env->i->directory, txn, &old_dname_dbt, DB_DELETE_ANY, true);
+ if (r != 0)
+ goto exit;
+
+ r = toku_db_put(
+ env->i->directory, txn, &new_dname_dbt, &iname_dbt, 0, true);
+
+exit:
+ toku_free(iname_dbt.data);
+ return r;
+}
+
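+// Run f while holding the multi-operation client lock, so that no checkpoint
+// can begin mid-operation. If the environment uses transactions, f runs in a
+// child transaction that is committed on success and aborted on failure.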
+static int locked_env_op(DB_ENV *env,
+ DB_TXN *txn,
+ std::function<int(DB_TXN *)> f) {
+ int ret, r;
+ HANDLE_READ_ONLY_TXN(txn);
+ HANDLE_ILLEGAL_WORKING_PARENT_TXN(env, txn);
+
+ DB_TXN *child_txn = NULL;
+ int using_txns = env->i->open_flags & DB_INIT_TXN;
+ if (using_txns) {
+ ret = toku_txn_begin(env, txn, &child_txn, 0);
+ lazy_assert_zero(ret);
+ }
+
+ // cannot begin a checkpoint
+ toku_multi_operation_client_lock();
+ r = f(child_txn);
+ toku_multi_operation_client_unlock();
+
+ if (using_txns) {
+ if (r == 0) {
+ ret = locked_txn_commit(child_txn, 0);
+ lazy_assert_zero(ret);
+ } else {
+ ret = locked_txn_abort(child_txn);
+ lazy_assert_zero(ret);
+ }
+ }
+ return r;
+
+}
+
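+// The locked_* wrappers below are installed as the DB_ENV dirtool_* methods
+// (see the SENV() calls in toku_env_create); each one binds its dirtool
+// operation into locked_env_op.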
+static int locked_env_dirtool_attach(DB_ENV *env,
+ DB_TXN *txn,
+ const char *dname,
+ const char *iname) {
+ auto f = std::bind(
+ env_dirtool_attach, env, std::placeholders::_1, dname, iname);
+ return locked_env_op(env, txn, f);
+}
+
+static int locked_env_dirtool_detach(DB_ENV *env,
+ DB_TXN *txn,
+ const char *dname) {
+ auto f = std::bind(
+ env_dirtool_detach, env, std::placeholders::_1, dname);
+ return locked_env_op(env, txn, f);
+}
+
+static int locked_env_dirtool_move(DB_ENV *env,
+ DB_TXN *txn,
+ const char *old_dname,
+ const char *new_dname) {
+ auto f = std::bind(
+ env_dirtool_move, env, std::placeholders::_1, old_dname, new_dname);
+ return locked_env_op(env, txn, f);
+}
+
static int env_dbremove(DB_ENV * env, DB_TXN *txn, const char *fname, const char *dbname, uint32_t flags);
static int
@@ -2657,6 +2812,9 @@ toku_env_create(DB_ENV ** envp, uint32_t flags) {
#define SENV(name) result->name = locked_env_ ## name
SENV(dbremove);
SENV(dbrename);
+ SENV(dirtool_attach);
+ SENV(dirtool_detach);
+ SENV(dirtool_move);
//SENV(set_noticecall);
#undef SENV
#define USENV(name) result->name = env_ ## name
@@ -2988,8 +3146,10 @@ env_dbremove(DB_ENV * env, DB_TXN *txn, const char *fname, const char *dbname, u
if (txn && r) {
if (r == EMFILE || r == ENFILE)
r = toku_ydb_do_error(env, r, "toku dbremove failed because open file limit reached\n");
- else
+ else if (r != ENOENT)
r = toku_ydb_do_error(env, r, "toku dbremove failed\n");
+ else
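+            // The dictionary is already gone; treat ENOENT as success.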
+ r = 0;
goto exit;
}
if (txn) {
diff --git a/storage/tokudb/ha_tokudb.cc b/storage/tokudb/ha_tokudb.cc
index 9db14dbb443..1a7c42e970f 100644
--- a/storage/tokudb/ha_tokudb.cc
+++ b/storage/tokudb/ha_tokudb.cc
@@ -5248,17 +5248,17 @@ int ha_tokudb::fill_range_query_buf(
DEBUG_SYNC(ha_thd(), "tokudb_icp_asc_scan_out_of_range");
goto cleanup;
} else if (result == ICP_NO_MATCH) {
- // if we are performing a DESC ICP scan and have no end_range
- // to compare to stop using ICP filtering as there isn't much more
- // that we can do without going through contortions with remembering
- // and comparing key parts.
+            // An optimizer change made for MyRocks also benefits TokuDB here:
+            // opt_range.cc QUICK_SELECT::get_next now sets end_range during
+            // descending scans. We should never hit this condition, but the
+            // code is kept to stop a descending scan from running to the
+            // beginning of the index, and the assertion below catches any
+            // such case in debug builds.
+ assert_debug(!(!end_range && direction < 0));
if (!end_range &&
direction < 0) {
-
cancel_pushed_idx_cond();
- DEBUG_SYNC(ha_thd(), "tokudb_icp_desc_scan_invalidate");
}
-
error = TOKUDB_CURSOR_CONTINUE;
goto cleanup;
}
@@ -6108,7 +6108,6 @@ int ha_tokudb::info(uint flag) {
stats.records = share->row_count() + share->rows_from_locked_table;
stats.deleted = 0;
if (!(flag & HA_STATUS_NO_LOCK)) {
- uint64_t num_rows = 0;
error = txn_begin(db_env, NULL, &txn, DB_READ_UNCOMMITTED, ha_thd());
if (error) {
@@ -6118,20 +6117,13 @@ int ha_tokudb::info(uint flag) {
// we should always have a primary key
assert_always(share->file != NULL);
- error = estimate_num_rows(share->file, &num_rows, txn);
- if (error == 0) {
- share->set_row_count(num_rows, false);
- stats.records = num_rows;
- } else {
- goto cleanup;
- }
-
DB_BTREE_STAT64 dict_stats;
error = share->file->stat64(share->file, txn, &dict_stats);
if (error) {
goto cleanup;
}
-
+ share->set_row_count(dict_stats.bt_ndata, false);
+ stats.records = dict_stats.bt_ndata;
stats.create_time = dict_stats.bt_create_time_sec;
stats.update_time = dict_stats.bt_modify_time_sec;
stats.check_time = dict_stats.bt_verify_time_sec;
@@ -7835,7 +7827,7 @@ ha_rows ha_tokudb::records_in_range(uint keynr, key_range* start_key, key_range*
// As a result, equal may be 0 and greater may actually be equal+greater
// So, we call key_range64 on the key, and the key that is after it.
if (!start_key && !end_key) {
- error = estimate_num_rows(kfile, &rows, transaction);
+ error = estimate_num_rows(share->file, &rows, transaction);
if (error) {
ret_val = HA_TOKUDB_RANGE_COUNT;
goto cleanup;
diff --git a/storage/tokudb/mysql-test/tokudb/r/bug-1657908.result b/storage/tokudb/mysql-test/tokudb/r/bug-1657908.result
new file mode 100644
index 00000000000..1d86478d833
--- /dev/null
+++ b/storage/tokudb/mysql-test/tokudb/r/bug-1657908.result
@@ -0,0 +1,70 @@
+SET GLOBAL tokudb_dir_per_db=ON;
+CREATE PROCEDURE create_table()
+BEGIN
+CREATE TABLE test.t1 (
+a INT
+) ENGINE = TokuDB
+PARTITION BY RANGE (a)
+(PARTITION p100 VALUES LESS THAN (100) ENGINE = TokuDB,
+PARTITION p_to_del VALUES LESS THAN (200) ENGINE = TokuDB,
+PARTITION p300 VALUES LESS THAN (300) ENGINE = TokuDB,
+PARTITION p400 VALUES LESS THAN (400) ENGINE = TokuDB
+);
+END|
+### Create partitioned table
+CALL create_table();
+## Looking for *.tokudb files in data_dir
+## Looking for *.tokudb files in data_dir/test
+t1_P_p100_main_id.tokudb
+t1_P_p100_status_id.tokudb
+t1_P_p300_main_id.tokudb
+t1_P_p300_status_id.tokudb
+t1_P_p400_main_id.tokudb
+t1_P_p400_status_id.tokudb
+t1_P_p_to_del_main_id.tokudb
+t1_P_p_to_del_status_id.tokudb
+### Stop server
+### Remove 'main' file of one of the partitions
+### Start server
+### Make sure 'main' partition file is deleted
+## Looking for *.tokudb files in data_dir
+## Looking for *.tokudb files in data_dir/test
+t1_P_p100_main_id.tokudb
+t1_P_p100_status_id.tokudb
+t1_P_p300_main_id.tokudb
+t1_P_p300_status_id.tokudb
+t1_P_p400_main_id.tokudb
+t1_P_p400_status_id.tokudb
+t1_P_p_to_del_status_id.tokudb
+### Make sure the table still exists
+SHOW TABLES;
+Tables_in_test
+t1
+### Drop table
+DROP TABLE t1;
+### Make sure the table is dropped
+SHOW TABLES;
+Tables_in_test
+### Check what files still exist after DROP TABLE
+## Looking for *.tokudb files in data_dir
+## Looking for *.tokudb files in data_dir/test
+### Remove the rest of the files
+### Make sure there are no tokudb files
+## Looking for *.tokudb files in data_dir
+## Looking for *.tokudb files in data_dir/test
+### Create the same table once more
+CALL create_table();
+## Looking for *.tokudb files in data_dir
+## Looking for *.tokudb files in data_dir/test
+t1_P_p100_main_id.tokudb
+t1_P_p100_status_id.tokudb
+t1_P_p300_main_id.tokudb
+t1_P_p300_status_id.tokudb
+t1_P_p400_main_id.tokudb
+t1_P_p400_status_id.tokudb
+t1_P_p_to_del_main_id.tokudb
+t1_P_p_to_del_status_id.tokudb
+### Restore state
+DROP TABLE t1;
+DROP PROCEDURE create_table;
+SET GLOBAL tokudb_dir_per_db=default;
diff --git a/storage/tokudb/mysql-test/tokudb/r/dir_cmd.result b/storage/tokudb/mysql-test/tokudb/r/dir_cmd.result
new file mode 100644
index 00000000000..dd3b693db49
--- /dev/null
+++ b/storage/tokudb/mysql-test/tokudb/r/dir_cmd.result
@@ -0,0 +1,58 @@
+SET GLOBAL tokudb_dir_per_db = ON;
+SET tokudb_dir_cmd = "attach test_dname_1 test_iname_1";
+SET tokudb_dir_cmd = "attach test_dname_2 test_iname_2";
+SELECT dictionary_name, internal_file_name
+FROM information_schema.TokuDB_file_map;
+dictionary_name internal_file_name
+test_dname_1 test_iname_1
+test_dname_2 test_iname_2
+SET tokudb_dir_cmd = "detach test_dname_1";
+SELECT dictionary_name, internal_file_name
+FROM information_schema.TokuDB_file_map;
+dictionary_name internal_file_name
+test_dname_2 test_iname_2
+SET tokudb_dir_cmd = "move test_dname_2 test_dname_3";
+SELECT dictionary_name, internal_file_name
+FROM information_schema.TokuDB_file_map;
+dictionary_name internal_file_name
+test_dname_3 test_iname_2
+SET tokudb_dir_cmd = "detach test_dname_3";
+SELECT dictionary_name, internal_file_name
+FROM information_schema.TokuDB_file_map;
+dictionary_name internal_file_name
+CREATE TABLE t1(a int) ENGINE=tokudb;
+INSERT INTO t1 (a) VALUES (10);
+SELECT dictionary_name, internal_file_name
+FROM information_schema.TokuDB_file_map;
+dictionary_name internal_file_name
+./test/t1-main ./test/t1_main_id.tokudb
+./test/t1-status ./test/t1_status_id.tokudb
+SET tokudb_dir_cmd = "attach ./test/t1-main test/t1-main-renamed.tokudb";
+SELECT dictionary_name, internal_file_name
+FROM information_schema.TokuDB_file_map;
+dictionary_name internal_file_name
+./test/t1-main test/t1-main-renamed.tokudb
+./test/t1-status ./test/t1_status_id.tokudb
+### rename t1_main_id.tokudb to t1-main-renamed.tokudb
+SELECT * FROM t1;
+a
+10
+### Test for error notification
+SET tokudb_dir_cmd = "detach foo";
+ERROR 42000: Variable 'tokudb_dir_cmd' can't be set to the value of 'detach foo'
+SELECT @@tokudb_dir_cmd_last_error;
+@@tokudb_dir_cmd_last_error
+17
+SELECT @@tokudb_dir_cmd_last_error_string;
+@@tokudb_dir_cmd_last_error_string
+detach command error
+SET @@tokudb_dir_cmd_last_error_string = "blablabla";
+SELECT @@tokudb_dir_cmd_last_error_string;
+@@tokudb_dir_cmd_last_error_string
+blablabla
+SET STATEMENT tokudb_dir_cmd_last_error_string = "statement_blablabla" FOR
+SELECT @@tokudb_dir_cmd_last_error_string;
+@@tokudb_dir_cmd_last_error_string
+statement_blablabla
+DROP TABLE t1;
+SET GLOBAL tokudb_dir_per_db = default;
diff --git a/storage/tokudb/mysql-test/tokudb/t/background_job_manager.opt b/storage/tokudb/mysql-test/tokudb/t/background_job_manager.opt
deleted file mode 100644
index 3cc9ea3009e..00000000000
--- a/storage/tokudb/mysql-test/tokudb/t/background_job_manager.opt
+++ /dev/null
@@ -1 +0,0 @@
---tokudb-background-job-status
diff --git a/storage/tokudb/mysql-test/tokudb/t/background_job_manager.test b/storage/tokudb/mysql-test/tokudb/t/background_job_manager.test
index 933814442e0..709fc463696 100644
--- a/storage/tokudb/mysql-test/tokudb/t/background_job_manager.test
+++ b/storage/tokudb/mysql-test/tokudb/t/background_job_manager.test
@@ -1,3 +1,4 @@
+skip Background Job Manager not supported in MariaDB;
# This is a comprehensive test for the background job manager and
# the information_schema.tokudb_background_job_status table
#
diff --git a/storage/tokudb/mysql-test/tokudb/t/bug-1657908.test b/storage/tokudb/mysql-test/tokudb/t/bug-1657908.test
new file mode 100644
index 00000000000..adcf4ef55f6
--- /dev/null
+++ b/storage/tokudb/mysql-test/tokudb/t/bug-1657908.test
@@ -0,0 +1,73 @@
+--source include/have_partition.inc
+# See https://bugs.launchpad.net/percona-server/+bug/1657908
+
+source include/have_tokudb.inc;
+
+SET GLOBAL tokudb_dir_per_db=ON;
+
+--let $DB= test
+--let $DATADIR= `SELECT @@datadir`
+
+--delimiter |
+CREATE PROCEDURE create_table()
+BEGIN
+CREATE TABLE test.t1 (
+ a INT
+) ENGINE = TokuDB
+PARTITION BY RANGE (a)
+(PARTITION p100 VALUES LESS THAN (100) ENGINE = TokuDB,
+ PARTITION p_to_del VALUES LESS THAN (200) ENGINE = TokuDB,
+ PARTITION p300 VALUES LESS THAN (300) ENGINE = TokuDB,
+ PARTITION p400 VALUES LESS THAN (400) ENGINE = TokuDB
+);
+END|
+--delimiter ;
+
+--echo ### Create partitioned table
+CALL create_table();
+--source dir_per_db_show_table_files.inc
+
+--echo ### Stop server
+--exec echo "wait" > $MYSQLTEST_VARDIR/tmp/mysqld.1.expect
+--shutdown_server
+--source include/wait_until_disconnected.inc
+
+--echo ### Remove 'main' file of one of the partitions
+--remove_files_wildcard $DATADIR/$DB t1_P_p_to_del_main_*.tokudb
+
+--echo ### Start server
+--enable_reconnect
+--exec echo "restart: --loose-tokudb-dir-per-db=ON" > $MYSQLTEST_VARDIR/tmp/mysqld.1.expect
+--source include/wait_until_connected_again.inc
+
+--echo ### Make sure 'main' partition file is deleted
+--source dir_per_db_show_table_files.inc
+
+--echo ### Make sure the table still exists
+SHOW TABLES;
+
+--echo ### Drop table
+# error 1051 was here before the fix
+DROP TABLE t1;
+
+--echo ### Make sure the table is dropped
+SHOW TABLES;
+
+--echo ### Check what files still exist after DROP TABLE
+--source dir_per_db_show_table_files.inc
+
+--echo ### Remove the rest of the files
+--remove_files_wildcard $DATADIR/$DB *.tokudb
+
+--echo ### Make sure there are no tokudb files
+--source dir_per_db_show_table_files.inc
+
+--echo ### Create the same table once more
+# engine error 17 (EEXIST) was here before the fix
+CALL create_table();
+--source dir_per_db_show_table_files.inc
+
+--echo ### Restore state
+DROP TABLE t1;
+DROP PROCEDURE create_table;
+SET GLOBAL tokudb_dir_per_db=default;
diff --git a/storage/tokudb/mysql-test/tokudb/t/card_auto_analyze_lots.test b/storage/tokudb/mysql-test/tokudb/t/card_auto_analyze_lots.test
index ec74a4a28bc..9675449372b 100644
--- a/storage/tokudb/mysql-test/tokudb/t/card_auto_analyze_lots.test
+++ b/storage/tokudb/mysql-test/tokudb/t/card_auto_analyze_lots.test
@@ -1,3 +1,4 @@
+skip Background Job Manager not supported in MariaDB;
# Test the auto analyze on lots of tables
-- source include/have_tokudb.inc
diff --git a/storage/tokudb/mysql-test/tokudb/t/dir_cmd.test b/storage/tokudb/mysql-test/tokudb/t/dir_cmd.test
new file mode 100644
index 00000000000..b9d8c80de65
--- /dev/null
+++ b/storage/tokudb/mysql-test/tokudb/t/dir_cmd.test
@@ -0,0 +1,52 @@
+skip TokuDB dir CMD disabled in MariaDB;
+source include/have_tokudb.inc;
+
+--let $MYSQL_DATADIR=`select @@datadir`
+
+SET GLOBAL tokudb_dir_per_db = ON;
+
+SET tokudb_dir_cmd = "attach test_dname_1 test_iname_1";
+SET tokudb_dir_cmd = "attach test_dname_2 test_iname_2";
+SELECT dictionary_name, internal_file_name
+ FROM information_schema.TokuDB_file_map;
+
+SET tokudb_dir_cmd = "detach test_dname_1";
+SELECT dictionary_name, internal_file_name
+ FROM information_schema.TokuDB_file_map;
+
+SET tokudb_dir_cmd = "move test_dname_2 test_dname_3";
+SELECT dictionary_name, internal_file_name
+ FROM information_schema.TokuDB_file_map;
+
+SET tokudb_dir_cmd = "detach test_dname_3";
+SELECT dictionary_name, internal_file_name
+ FROM information_schema.TokuDB_file_map;
+
+CREATE TABLE t1(a int) ENGINE=tokudb;
+INSERT INTO t1 (a) VALUES (10);
+--source include/table_files_replace_pattern.inc
+SELECT dictionary_name, internal_file_name
+ FROM information_schema.TokuDB_file_map;
+
+SET tokudb_dir_cmd = "attach ./test/t1-main test/t1-main-renamed.tokudb";
+--source include/table_files_replace_pattern.inc
+SELECT dictionary_name, internal_file_name
+ FROM information_schema.TokuDB_file_map;
+
+--echo ### rename t1_main_id.tokudb to t1-main-renamed.tokudb
+--exec mv $MYSQL_DATADIR/test/t1_main_*.tokudb $MYSQL_DATADIR/test/t1-main-renamed.tokudb
+
+SELECT * FROM t1;
+
+--echo ### Test for error notification
+--error 1231
+SET tokudb_dir_cmd = "detach foo";
+SELECT @@tokudb_dir_cmd_last_error;
+SELECT @@tokudb_dir_cmd_last_error_string;
+SET @@tokudb_dir_cmd_last_error_string = "blablabla";
+SELECT @@tokudb_dir_cmd_last_error_string;
+SET STATEMENT tokudb_dir_cmd_last_error_string = "statement_blablabla" FOR
+ SELECT @@tokudb_dir_cmd_last_error_string;
+
+DROP TABLE t1;
+SET GLOBAL tokudb_dir_per_db = default;
diff --git a/storage/tokudb/mysql-test/tokudb/t/i_s_tokudb_lock_waits_timeout.test b/storage/tokudb/mysql-test/tokudb/t/i_s_tokudb_lock_waits_timeout.test
index 42fb548814f..8fe5e66a9b3 100644
--- a/storage/tokudb/mysql-test/tokudb/t/i_s_tokudb_lock_waits_timeout.test
+++ b/storage/tokudb/mysql-test/tokudb/t/i_s_tokudb_lock_waits_timeout.test
@@ -1,3 +1,4 @@
+skip Tokudb Lock Waits not in I_S in MariaDB;
# verify that tokudb_locks and tokudb_lock_waits contents for 2 conflicting transactions with a lock timeout
source include/have_tokudb.inc;
diff --git a/storage/tokudb/mysql-test/tokudb/t/i_s_tokudb_locks.test b/storage/tokudb/mysql-test/tokudb/t/i_s_tokudb_locks.test
index 8f205ad7f45..59d04ead386 100644
--- a/storage/tokudb/mysql-test/tokudb/t/i_s_tokudb_locks.test
+++ b/storage/tokudb/mysql-test/tokudb/t/i_s_tokudb_locks.test
@@ -1,3 +1,4 @@
+skip Tokudb Lock Waits not in I_S in MariaDB;
# verify that information_schema.tokudb_locks gets populated with locks for 2 clients
source include/have_tokudb.inc;
diff --git a/storage/tokudb/mysql-test/tokudb/t/i_s_tokudb_trx.test b/storage/tokudb/mysql-test/tokudb/t/i_s_tokudb_trx.test
index 517280391c4..b4ab64be962 100644
--- a/storage/tokudb/mysql-test/tokudb/t/i_s_tokudb_trx.test
+++ b/storage/tokudb/mysql-test/tokudb/t/i_s_tokudb_trx.test
@@ -1,3 +1,4 @@
+skip Tokudb trx not in I_S in MariaDB;
# verify that information_schema.tokudb_trx gets populated with transactions
source include/have_tokudb.inc;
diff --git a/storage/tokudb/mysql-test/tokudb_backup/r/backup_master_info.result b/storage/tokudb/mysql-test/tokudb_backup/r/backup_master_info.result
new file mode 100644
index 00000000000..992a828e287
--- /dev/null
+++ b/storage/tokudb/mysql-test/tokudb_backup/r/backup_master_info.result
@@ -0,0 +1,26 @@
+###
+# Test for binlog position
+#####
+include/master-slave.inc
+Warnings:
+Note #### Sending passwords in plain text without SSL/TLS is extremely insecure.
+Note #### Storing MySQL user name or password information in the master info repository is not secure and is therefore not recommended. Please consider using the USER and PASSWORD connection options for START SLAVE; see the 'START SLAVE Syntax' in the MySQL Manual for more information.
+[connection master]
+CREATE TABLE t1(a INT) ENGINE=TokuDB;
+DROP TABLE t1;
+Backup
+include/filter_file.inc
+### tokubackup_slave_info content:
+host: #.#.#.#, user: ####, port: ####, master log file: ####, relay log file: ####, exec master log pos: ####, executed gtid set: , channel name:
+###
+# Test for gtid set
+#####
+include/rpl_set_gtid_mode.inc
+CREATE TABLE t1(a INT) ENGINE=TokuDB;
+DROP TABLE t1;
+Backup
+include/filter_file.inc
+### tokubackup_slave_info content:
+host: #.#.#.#, user: ####, port: ####, master log file: ####, relay log file: ####, exec master log pos: ####, executed gtid set: ####, channel name:
+include/rpl_set_gtid_mode.inc
+include/rpl_end.inc
diff --git a/storage/tokudb/mysql-test/tokudb_backup/r/backup_master_state.result b/storage/tokudb/mysql-test/tokudb_backup/r/backup_master_state.result
new file mode 100644
index 00000000000..072dfff448b
--- /dev/null
+++ b/storage/tokudb/mysql-test/tokudb_backup/r/backup_master_state.result
@@ -0,0 +1,36 @@
+### Create backup dir
+include/master-slave.inc
+Warnings:
+Note #### Sending passwords in plain text without SSL/TLS is extremely insecure.
+Note #### Storing MySQL user name or password information in the master info repository is not secure and is therefore not recommended. Please consider using the USER and PASSWORD connection options for START SLAVE; see the 'START SLAVE Syntax' in the MySQL Manual for more information.
+[connection master]
+### Check for settings
+SELECT @@gtid_mode;
+@@gtid_mode
+OFF
+### Generate some binlog events
+CREATE TABLE t1(a INT) ENGINE=TokuDB;
+DROP TABLE t1;
+### Master backup
+include/filter_file.inc
+### tokubackup_binlog_info content:
+filename: ####, position: ####, gtid_mode: OFF, GTID of last change:
+### Delete backup dir
+### Create backup dir
+### GTID-mode on
+include/rpl_set_gtid_mode.inc
+### Check for settings
+SELECT @@gtid_mode;
+@@gtid_mode
+ON
+### Generate some binlog events
+CREATE TABLE t1(a INT) ENGINE=TokuDB;
+DROP TABLE t1;
+### Master backup
+include/filter_file.inc
+### tokubackup_binlog_info content:
+filename: ####, position: ####, gtid_mode: ON, GTID of last change: #####
+### Delete backup dir
+### GTID-mode off
+include/rpl_set_gtid_mode.inc
+include/rpl_end.inc
diff --git a/storage/tokudb/mysql-test/tokudb_backup/r/empty_slave_info_file.result b/storage/tokudb/mysql-test/tokudb_backup/r/empty_slave_info_file.result
new file mode 100644
index 00000000000..a0af40d80cc
--- /dev/null
+++ b/storage/tokudb/mysql-test/tokudb_backup/r/empty_slave_info_file.result
@@ -0,0 +1 @@
+Backup
diff --git a/storage/tokudb/mysql-test/tokudb_backup/r/innodb_use_native_aio_enabled.result b/storage/tokudb/mysql-test/tokudb_backup/r/innodb_use_native_aio_enabled.result
new file mode 100644
index 00000000000..94e113fc87d
--- /dev/null
+++ b/storage/tokudb/mysql-test/tokudb_backup/r/innodb_use_native_aio_enabled.result
@@ -0,0 +1,5 @@
+SELECT @@innodb_use_native_aio;
+@@innodb_use_native_aio
+1
+SET SESSION tokudb_backup_dir='MYSQL_TMP_DIR/tokudb_backup';
+ERROR 42000: Variable 'tokudb_backup_dir' can't be set to the value of 'MYSQL_TMP_DIR/tokudb_backup'
diff --git a/storage/tokudb/mysql-test/tokudb_backup/r/rpl_safe_slave.result b/storage/tokudb/mysql-test/tokudb_backup/r/rpl_safe_slave.result
new file mode 100644
index 00000000000..13b5915354f
--- /dev/null
+++ b/storage/tokudb/mysql-test/tokudb_backup/r/rpl_safe_slave.result
@@ -0,0 +1,77 @@
+###
+# Master-slave test
+####
+include/rpl_init.inc [topology=1->2]
+Warnings:
+Note #### Sending passwords in plain text without SSL/TLS is extremely insecure.
+Note #### Storing MySQL user name or password information in the master info repository is not secure and is therefore not recommended. Please consider using the USER and PASSWORD connection options for START SLAVE; see the 'START SLAVE Syntax' in the MySQL Manual for more information.
+### Create temp table on master
+CREATE TEMPORARY TABLE t1 (a INT);
+include/sync_slave_sql_with_master.inc
+### Set up debug_sync points and prepare for slave backup
+SET SESSION debug="+d,debug_sync_abort_on_timeout";
+SHOW STATUS LIKE 'Slave_open_temp_tables';
+Variable_name Value
+Slave_open_temp_tables 1
+SET DEBUG_SYNC= 'tokudb_backup_wait_for_safe_slave_entered SIGNAL sse WAIT_FOR sse_continue';
+SET DEBUG_SYNC= 'tokudb_backup_wait_for_temp_tables_loop_begin SIGNAL ttlb WAIT_FOR ttlb_continue';
+SET DEBUG_SYNC= 'tokudb_backup_wait_for_temp_tables_loop_slave_started SIGNAL ttlss WAIT_FOR ttlss_continue EXECUTE 2';
+SET DEBUG_SYNC= 'tokudb_backup_wait_for_temp_tables_loop_end SIGNAL ttle WAIT_FOR ttle_continue';
+### Turn on safe-slave option
+SET GLOBAL tokudb_backup_safe_slave=ON;
+SET GLOBAL tokudb_backup_safe_slave_timeout=30;
+### Start slave backup
+SET SESSION debug="+d,debug_sync_abort_on_timeout";
+### Wait for safe slave function to start
+SET DEBUG_SYNC = "now WAIT_FOR sse";
+SHOW STATUS LIKE 'Slave_open_temp_tables';
+Variable_name Value
+Slave_open_temp_tables 1
+### Wait for safe slave loop start
+SET DEBUG_SYNC = "now SIGNAL sse_continue WAIT_FOR ttlb";
+SHOW STATUS LIKE 'Slave_open_temp_tables';
+Variable_name Value
+Slave_open_temp_tables 1
+### Wait for safe thread loop point just after slave sql thread start 1
+SET DEBUG_SYNC = "now SIGNAL ttlb_continue WAIT_FOR ttlss";
+SHOW STATUS LIKE 'Slave_open_temp_tables';
+Variable_name Value
+Slave_open_temp_tables 1
+### Wait for safe thread loop end
+SET DEBUG_SYNC = "now SIGNAL ttlss_continue WAIT_FOR ttle";
+SHOW STATUS LIKE 'Slave_open_temp_tables';
+Variable_name Value
+Slave_open_temp_tables 1
+### Wait for safe thread loop point just after slave sql thread start 2
+SET DEBUG_SYNC = "now SIGNAL ttle_continue WAIT_FOR ttlss";
+### Drop temp table on master
+DROP TABLE t1;
+### and synchronize slave
+include/sync_slave_sql_with_master.inc
+SHOW STATUS LIKE 'Slave_open_temp_tables';
+Variable_name Value
+Slave_open_temp_tables 0
+### Continue backup
+SET DEBUG_SYNC = "now SIGNAL ttlss_continue";
+## Reset debug_sync points
+SET DEBUG_SYNC = "RESET";
+### Wait for backup finish
+include/filter_file.inc
+### Slave tokubackup_slave_info content:
+host: #.#.#.#, user: ####, port: ####, master log file: ####, relay log file: ####, exec master log pos: ####, executed gtid set: , channel name:
+### Delete slave backup dir
+### Turn off safe-slave option for slave
+SET GLOBAL tokudb_backup_safe_slave=default;
+SET GLOBAL tokudb_backup_safe_slave_timeout=default;
+### Turn on safe-slave option for master
+SET GLOBAL tokudb_backup_safe_slave=ON;
+SET GLOBAL tokudb_backup_safe_slave_timeout=30;
+### Backup master
+### Turn off safe-slave option for master
+SET GLOBAL tokudb_backup_safe_slave=default;
+SET GLOBAL tokudb_backup_safe_slave_timeout=default;
+include/filter_file.inc
+### Master tokubackup_binlog_info content:
+filename: ####, position: ####, gtid_mode: OFF, GTID of last change:
+### Delete master backup dir
+include/rpl_end.inc
diff --git a/storage/tokudb/mysql-test/tokudb_backup/r/rpl_tokudb_commit_sync.result b/storage/tokudb/mysql-test/tokudb_backup/r/rpl_tokudb_commit_sync.result
new file mode 100644
index 00000000000..50508f073ab
--- /dev/null
+++ b/storage/tokudb/mysql-test/tokudb_backup/r/rpl_tokudb_commit_sync.result
@@ -0,0 +1,59 @@
+include/master-slave.inc
+Warnings:
+Note #### Sending passwords in plain text without SSL/TLS is extremely insecure.
+Note #### Storing MySQL user name or password information in the master info repository is not secure and is therefore not recommended. Please consider using the USER and PASSWORD connection options for START SLAVE; see the 'START SLAVE Syntax' in the MySQL Manual for more information.
+[connection master]
+### Create some data on master
+CREATE TABLE t1(a INT, b INT, PRIMARY KEY (a)) ENGINE=TokuDB;
+INSERT INTO t1 SET a=100, b=100;
+INSERT INTO t1 SET a=200, b=100;
+INSERT INTO t1 SET a=300, b=100;
+INSERT INTO t1 SET a=400, b=100;
+INSERT INTO t1 SET a=500, b=100;
+UPDATE t1 SET b = 200 WHERE a = 200;
+DELETE FROM t1 WHERE a = 100;
+SELECT * FROM t1;
+a b
+200 200
+300 100
+400 100
+500 100
+### Check for slave options
+SELECT @@tokudb_commit_sync;
+@@tokudb_commit_sync
+0
+SELECT @@tokudb_fsync_log_period;
+@@tokudb_fsync_log_period
+1000000
+### Check data on slave after sync
+SELECT * FROM t1;
+a b
+200 200
+300 100
+400 100
+500 100
+### Do backup on slave
+### Check for errors
+SELECT @@session.tokudb_backup_last_error;
+@@session.tokudb_backup_last_error
+0
+SELECT @@session.tokudb_backup_last_error_string;
+@@session.tokudb_backup_last_error_string
+NULL
+### Stop slave server
+include/rpl_stop_server.inc [server_number=2]
+### Restore backup
+### Start slave server and slave threads
+include/rpl_start_server.inc [server_number=2]
+include/start_slave.inc
+### Sync slave with master
+### Check data on slave
+SELECT * FROM t1;
+a b
+200 200
+300 100
+400 100
+500 100
+### Cleanup
+DROP TABLE t1;
+include/rpl_end.inc
diff --git a/storage/tokudb/mysql-test/tokudb_backup/t/backup_master_info.test b/storage/tokudb/mysql-test/tokudb_backup/t/backup_master_info.test
new file mode 100644
index 00000000000..8e9f6df4b1d
--- /dev/null
+++ b/storage/tokudb/mysql-test/tokudb_backup/t/backup_master_info.test
@@ -0,0 +1,94 @@
+--source include/have_tokudb_backup.inc
+--source include/not_gtid_enabled.inc
+
+
+--let $SLAVE_INFO_FILE=tokubackup_slave_info
+--let $BACKUP_DIR_SLAVE=$MYSQL_TMP_DIR/tokudb_backup_slave
+--let $SLAVE_INFO_FILE_PATH=$BACKUP_DIR_SLAVE/$SLAVE_INFO_FILE
+--let DDIR=$BACKUP_DIR_SLAVE
+
+# Settings for include/filter_file.inc
+
+--delimiter |
+let $script=
+ s{host: [^,]+,}{host: #.#.#.#,};
+ s{user: [^,]+,}{user: ####,};
+ s{port: [^,]+,}{port: ####,};
+ s{master log file: [^,]+,}{master log file: ####,};
+ s{relay log file: [^,]+,}{relay log file: ####,};
+ s{exec master log pos: [^,]+,}{exec master log pos: ####,};
+ s{executed gtid set: [^,]+, }{executed gtid set: ####, };
+ s{executed gtid set: [^,]+,[^,]+, }{executed gtid set: ####,####, };
+|
+--delimiter ;
+--let $input_file = $SLAVE_INFO_FILE_PATH
+--let $skip_column_names= 1
+
+--echo ###
+--echo # Test for binlog position
+--echo #####
+
+--mkdir $BACKUP_DIR_SLAVE
+
+--source include/master-slave.inc
+
+--connection master
+CREATE TABLE t1(a INT) ENGINE=TokuDB;
+DROP TABLE t1;
+
+--sync_slave_with_master
+
+--connection slave
+--echo Backup
+--disable_query_log
+--eval SET SESSION tokudb_backup_dir='$BACKUP_DIR_SLAVE'
+--enable_query_log
+
+--source include/filter_file.inc
+--echo ### $SLAVE_INFO_FILE content:
+--cat_file $SLAVE_INFO_FILE_PATH
+
+--perl
+use File::Path 'rmtree';
+$DDIR=$ENV{"DDIR"};
+rmtree([ "$DDIR" ]);
+EOF
+
+--echo ###
+--echo # Test for gtid set
+--echo #####
+
+--mkdir $BACKUP_DIR_SLAVE
+
+--let $rpl_server_numbers= 1,2
+--let $rpl_set_enforce_gtid_consistency= 1
+--source include/rpl_set_gtid_mode.inc
+
+--connection master
+CREATE TABLE t1(a INT) ENGINE=TokuDB;
+DROP TABLE t1;
+
+--sync_slave_with_master
+
+--connection slave
+--echo Backup
+--disable_query_log
+--eval SET SESSION tokudb_backup_dir='$BACKUP_DIR_SLAVE'
+--enable_query_log
+
+--source include/filter_file.inc
+--echo ### $SLAVE_INFO_FILE content:
+--cat_file $SLAVE_INFO_FILE_PATH
+
+--perl
+use File::Path 'rmtree';
+$DDIR=$ENV{"DDIR"};
+rmtree([ "$DDIR" ]);
+EOF
+
+--let $rpl_gtid_mode= OFF
+--let $rpl_set_enforce_gtid_consistency= 0
+--let $rpl_server_numbers= 1,2
+--let $rpl_skip_sync= 1
+--source include/rpl_set_gtid_mode.inc
+--source include/rpl_end.inc
diff --git a/storage/tokudb/mysql-test/tokudb_backup/t/backup_master_state.test b/storage/tokudb/mysql-test/tokudb_backup/t/backup_master_state.test
new file mode 100644
index 00000000000..c301d55f8fa
--- /dev/null
+++ b/storage/tokudb/mysql-test/tokudb_backup/t/backup_master_state.test
@@ -0,0 +1,87 @@
+--source include/have_tokudb_backup.inc
+--source include/not_gtid_enabled.inc
+
+--let $MASTER_STATE_FILE=tokubackup_binlog_info
+--let $BACKUP_DIR_MASTER=$MYSQL_TMP_DIR/tokudb_backup_master
+--let $MASTER_STATE_FILE_PATH=$BACKUP_DIR_MASTER/$MASTER_STATE_FILE
+--let DDIR=$BACKUP_DIR_MASTER
+
+# Settings for include/filter_file.inc
+--delimiter |
+let $script=
+ s{filename: [^,]+,}{filename: ####,};
+ s{position: [^,]+,}{position: ####,};
+ s{GTID of last change: [^ ]+}{GTID of last change: #####};
+|
+--delimiter ;
+--let $input_file = $MASTER_STATE_FILE_PATH
+--let $skip_column_names= 1
+
+--echo ### Create backup dir
+--mkdir $BACKUP_DIR_MASTER
+
+--source include/master-slave.inc
+
+--connection master
+
+--echo ### Check for settings
+SELECT @@gtid_mode;
+
+--echo ### Generate some binlog events
+CREATE TABLE t1(a INT) ENGINE=TokuDB;
+DROP TABLE t1;
+
+--echo ### Master backup
+--disable_query_log
+--eval SET SESSION tokudb_backup_dir='$BACKUP_DIR_MASTER'
+--enable_query_log
+
+--source include/filter_file.inc
+--echo ### $MASTER_STATE_FILE content:
+--cat_file $MASTER_STATE_FILE_PATH
+
+--echo ### Delete backup dir
+--perl
+use File::Path 'rmtree';
+$DDIR=$ENV{"DDIR"};
+rmtree([ "$DDIR" ]);
+EOF
+
+--echo ### Create backup dir
+--mkdir $BACKUP_DIR_MASTER
+
+--echo ### GTID-mode on
+--let $rpl_server_numbers= 1,2
+--let $rpl_set_enforce_gtid_consistency= 1
+--source include/rpl_set_gtid_mode.inc
+
+--echo ### Check for settings
+SELECT @@gtid_mode;
+
+--echo ### Generate some binlog events
+CREATE TABLE t1(a INT) ENGINE=TokuDB;
+DROP TABLE t1;
+
+--echo ### Master backup
+--disable_query_log
+--eval SET SESSION tokudb_backup_dir='$BACKUP_DIR_MASTER'
+--enable_query_log
+
+--source include/filter_file.inc
+--echo ### $MASTER_STATE_FILE content:
+--cat_file $MASTER_STATE_FILE_PATH
+
+--echo ### Delete backup dir
+--perl
+use File::Path 'rmtree';
+$DDIR=$ENV{"DDIR"};
+rmtree([ "$DDIR" ]);
+EOF
+
+--echo ### GTID-mode off
+--let $rpl_gtid_mode= OFF
+--let $rpl_set_enforce_gtid_consistency= 0
+--let $rpl_server_numbers= 1,2
+--source include/rpl_set_gtid_mode.inc
+
+--source include/rpl_end.inc
diff --git a/storage/tokudb/mysql-test/tokudb_backup/t/empty_slave_info_file.test b/storage/tokudb/mysql-test/tokudb_backup/t/empty_slave_info_file.test
new file mode 100644
index 00000000000..53592903a27
--- /dev/null
+++ b/storage/tokudb/mysql-test/tokudb_backup/t/empty_slave_info_file.test
@@ -0,0 +1,23 @@
+--source include/have_tokudb_backup.inc
+--source include/not_gtid_enabled.inc
+
+
+--let $SLAVE_INFO_FILE=tokubackup_slave_info
+--let $BACKUP_DIR_SLAVE=$MYSQL_TMP_DIR/tokudb_backup_slave
+--let DDIR=$BACKUP_DIR_SLAVE
+
+--mkdir $BACKUP_DIR_SLAVE
+
+--echo Backup
+--disable_query_log
+--eval SET SESSION tokudb_backup_dir='$BACKUP_DIR_SLAVE'
+--enable_query_log
+
+--list_files $BACKUP_DIR_SLAVE $SLAVE_INFO_FILE
+
+--perl
+use File::Path 'rmtree';
+$DDIR=$ENV{"DDIR"};
+rmtree([ "$DDIR" ]);
+EOF
+
diff --git a/storage/tokudb/mysql-test/tokudb_backup/t/innodb_use_native_aio_enabled-master.opt b/storage/tokudb/mysql-test/tokudb_backup/t/innodb_use_native_aio_enabled-master.opt
new file mode 100644
index 00000000000..5f5dbb9c7c6
--- /dev/null
+++ b/storage/tokudb/mysql-test/tokudb_backup/t/innodb_use_native_aio_enabled-master.opt
@@ -0,0 +1 @@
+--innodb_use_native_aio=on
diff --git a/storage/tokudb/mysql-test/tokudb_backup/t/innodb_use_native_aio_enabled.test b/storage/tokudb/mysql-test/tokudb_backup/t/innodb_use_native_aio_enabled.test
new file mode 100644
index 00000000000..3e09b465c02
--- /dev/null
+++ b/storage/tokudb/mysql-test/tokudb_backup/t/innodb_use_native_aio_enabled.test
@@ -0,0 +1,19 @@
+# Check that tokudb hot backup is prevented when innodb_use_native_aio is enabled
+--source include/have_tokudb_backup.inc
+--source include/have_innodb.inc
+
+SELECT @@innodb_use_native_aio;
+
+--let BACKUP_DIR= $MYSQL_TMP_DIR/tokudb_backup
+
+--mkdir $BACKUP_DIR
+
+--replace_result $MYSQL_TMP_DIR MYSQL_TMP_DIR
+--error ER_WRONG_VALUE_FOR_VAR
+--eval SET SESSION tokudb_backup_dir='$BACKUP_DIR'
+
+--perl
+use File::Path 'rmtree';
+$DDIR=$ENV{"BACKUP_DIR"};
+rmtree([ "$DDIR" ]);
+EOF
diff --git a/storage/tokudb/mysql-test/tokudb_backup/t/rpl_safe_slave-master.opt b/storage/tokudb/mysql-test/tokudb_backup/t/rpl_safe_slave-master.opt
new file mode 100644
index 00000000000..af3a211967b
--- /dev/null
+++ b/storage/tokudb/mysql-test/tokudb_backup/t/rpl_safe_slave-master.opt
@@ -0,0 +1 @@
+--binlog-format=statement
diff --git a/storage/tokudb/mysql-test/tokudb_backup/t/rpl_safe_slave-slave.opt b/storage/tokudb/mysql-test/tokudb_backup/t/rpl_safe_slave-slave.opt
new file mode 100644
index 00000000000..49405b1aec3
--- /dev/null
+++ b/storage/tokudb/mysql-test/tokudb_backup/t/rpl_safe_slave-slave.opt
@@ -0,0 +1 @@
+--master-info-repository=TABLE --relay-log-info-repository=TABLE
diff --git a/storage/tokudb/mysql-test/tokudb_backup/t/rpl_safe_slave.cnf b/storage/tokudb/mysql-test/tokudb_backup/t/rpl_safe_slave.cnf
new file mode 100644
index 00000000000..321be4ab2fc
--- /dev/null
+++ b/storage/tokudb/mysql-test/tokudb_backup/t/rpl_safe_slave.cnf
@@ -0,0 +1,14 @@
+!include ../../rpl/my.cnf
+
+[mysqld.1]
+
+[mysqld.2]
+
+[mysqld.3]
+master-info-repository=TABLE
+relay-log-info-repository=TABLE
+
+[ENV]
+SERVER_MYPORT_3= @mysqld.3.port
+SERVER_MYSOCK_3= @mysqld.3.socket
+
diff --git a/storage/tokudb/mysql-test/tokudb_backup/t/rpl_safe_slave.inc b/storage/tokudb/mysql-test/tokudb_backup/t/rpl_safe_slave.inc
new file mode 100644
index 00000000000..e0732ee63fc
--- /dev/null
+++ b/storage/tokudb/mysql-test/tokudb_backup/t/rpl_safe_slave.inc
@@ -0,0 +1,112 @@
+--connection server_1
+--echo ### Create temp table on master
+CREATE TEMPORARY TABLE t1 (a INT);
+
+--let $sync_slave_connection= server_2
+--source include/sync_slave_sql_with_master.inc
+
+--echo ### Set up debug_sync points and prepare for slave backup
+--connection slave_2
+SET SESSION debug="+d,debug_sync_abort_on_timeout";
+
+SHOW STATUS LIKE 'Slave_open_temp_tables';
+
+SET DEBUG_SYNC= 'tokudb_backup_wait_for_safe_slave_entered SIGNAL sse WAIT_FOR sse_continue';
+SET DEBUG_SYNC= 'tokudb_backup_wait_for_temp_tables_loop_begin SIGNAL ttlb WAIT_FOR ttlb_continue';
+SET DEBUG_SYNC= 'tokudb_backup_wait_for_temp_tables_loop_slave_started SIGNAL ttlss WAIT_FOR ttlss_continue EXECUTE 2';
+SET DEBUG_SYNC= 'tokudb_backup_wait_for_temp_tables_loop_end SIGNAL ttle WAIT_FOR ttle_continue';
+
+--mkdir $BACKUP_DIR_SLAVE
+
+--echo ### Turn on safe-slave option
+SET GLOBAL tokudb_backup_safe_slave=ON;
+SET GLOBAL tokudb_backup_safe_slave_timeout=30;
+
+--echo ### Start slave backup
+--disable_query_log
+--send_eval SET SESSION tokudb_backup_dir='$BACKUP_DIR_SLAVE'
+--enable_query_log
+
+--connection server_2
+SET SESSION debug="+d,debug_sync_abort_on_timeout";
+
+--echo ### Wait for safe slave function to start
+SET DEBUG_SYNC = "now WAIT_FOR sse";
+SHOW STATUS LIKE 'Slave_open_temp_tables';
+--echo ### Wait for safe slave loop start
+SET DEBUG_SYNC = "now SIGNAL sse_continue WAIT_FOR ttlb";
+SHOW STATUS LIKE 'Slave_open_temp_tables';
+--echo ### Wait for safe thread loop point just after slave sql thread start 1
+SET DEBUG_SYNC = "now SIGNAL ttlb_continue WAIT_FOR ttlss";
+SHOW STATUS LIKE 'Slave_open_temp_tables';
+--echo ### Wait for safe thread loop end
+SET DEBUG_SYNC = "now SIGNAL ttlss_continue WAIT_FOR ttle";
+SHOW STATUS LIKE 'Slave_open_temp_tables';
+
+--echo ### Wait for safe thread loop point just after slave sql thread start 2
+SET DEBUG_SYNC = "now SIGNAL ttle_continue WAIT_FOR ttlss";
+
+--connection server_1
+--echo ### Drop temp table on master
+DROP TABLE t1;
+
+--echo ### and synchronize slave
+--let $sync_slave_connection= server_2
+--source include/sync_slave_sql_with_master.inc
+
+SHOW STATUS LIKE 'Slave_open_temp_tables';
+
+--echo ### Continue backup
+SET DEBUG_SYNC = "now SIGNAL ttlss_continue";
+
+--echo ## Reset debug_sync points
+SET DEBUG_SYNC = "RESET";
+
+--connection slave_2
+--echo ### Wait for backup finish
+--reap
+
+--let $input_file = $S_SLAVE_INFO_FILE_PATH
+--source include/filter_file.inc
+--echo ### Slave $SLAVE_INFO_FILE content:
+--cat_file $S_SLAVE_INFO_FILE_PATH
+
+--echo ### Delete slave backup dir
+--perl
+use File::Path 'rmtree';
+$DDIR=$ENV{"BACKUP_DIR_SLAVE"};
+rmtree([ "$DDIR" ]);
+EOF
+
+--echo ### Turn off safe-slave option for slave
+SET GLOBAL tokudb_backup_safe_slave=default;
+SET GLOBAL tokudb_backup_safe_slave_timeout=default;
+
+--connection server_1
+
+--echo ### Turn on safe-slave option for master
+SET GLOBAL tokudb_backup_safe_slave=ON;
+SET GLOBAL tokudb_backup_safe_slave_timeout=30;
+
+--echo ### Backup master
+--mkdir $BACKUP_DIR_MASTER
+--disable_query_log
+--eval SET SESSION tokudb_backup_dir='$BACKUP_DIR_MASTER'
+--enable_query_log
+
+--echo ### Turn off safe-slave option for master
+SET GLOBAL tokudb_backup_safe_slave=default;
+SET GLOBAL tokudb_backup_safe_slave_timeout=default;
+
+--let $input_file = $M_MASTER_INFO_FILE_PATH
+--source include/filter_file.inc
+--echo ### Master $MASTER_INFO_FILE content:
+--cat_file $M_MASTER_INFO_FILE_PATH
+
+--echo ### Delete master backup dir
+--perl
+use File::Path 'rmtree';
+$DDIR=$ENV{"BACKUP_DIR_MASTER"};
+rmtree([ "$DDIR" ]);
+EOF
+
diff --git a/storage/tokudb/mysql-test/tokudb_backup/t/rpl_safe_slave.test b/storage/tokudb/mysql-test/tokudb_backup/t/rpl_safe_slave.test
new file mode 100644
index 00000000000..15ba1d8bb66
--- /dev/null
+++ b/storage/tokudb/mysql-test/tokudb_backup/t/rpl_safe_slave.test
@@ -0,0 +1,49 @@
+--source include/have_tokudb_backup.inc
+--source include/have_binlog_format_statement.inc
+--source include/have_debug_sync.inc
+
+--let $SLAVE_INFO_FILE=tokubackup_slave_info
+--let $MASTER_INFO_FILE=tokubackup_binlog_info
+
+--let BACKUP_DIR_SLAVE=$MYSQL_TMP_DIR/tokudb_backup_slave
+--let $S_SLAVE_INFO_FILE_PATH=$BACKUP_DIR_SLAVE/$SLAVE_INFO_FILE
+
+--let BACKUP_DIR_MASTER=$MYSQL_TMP_DIR/tokudb_backup_master
+--let $M_MASTER_INFO_FILE_PATH=$BACKUP_DIR_MASTER/$MASTER_INFO_FILE
+
+# Settings for include/filter_file.inc
+--delimiter |
+let $script=
+ s{filename: [^,]+,}{filename: ####,};
+ s{position: [^,]+,}{position: ####,};
+ s{GTID of last change: [^ ]+}{GTID of last change: #####};
+ s{host: [^,]+,}{host: #.#.#.#,};
+ s{user: [^,]+,}{user: ####,};
+ s{port: [^,]+,}{port: ####,};
+ s{master log file: [^,]+,}{master log file: ####,};
+ s{relay log file: [^,]+,}{relay log file: ####,};
+ s{exec master log pos: [^,]+,}{exec master log pos: ####,};
+ s{executed gtid set: [^,]+, }{executed gtid set: ####, };
+ s{executed gtid set: [^,]+,[^,]+, }{executed gtid set: ####,####, };
+|
+--delimiter ;
+--let $skip_column_names= 1
+
+--disable_query_log
+CALL mtr.add_suppression("Unsafe statement written to the binary log using statement format since BINLOG_FORMAT = STATEMENT");
+CALL mtr.add_suppression("Sending passwords in plain text without SSL/TLS is extremely insecure");
+--enable_query_log
+
+--echo ###
+--echo # Master-slave test
+--echo ####
+
+--let $rpl_server_count=3
+--let $rpl_topology=1->2
+--source include/rpl_init.inc
+
+--connect (slave_2,localhost,root,,test,$SLAVE_MYPORT,$SLAVE_MYSOCK)
+
+--source rpl_safe_slave.inc
+
+--source include/rpl_end.inc
diff --git a/storage/tokudb/mysql-test/tokudb_backup/t/rpl_tokudb_commit_sync-slave.opt b/storage/tokudb/mysql-test/tokudb_backup/t/rpl_tokudb_commit_sync-slave.opt
new file mode 100644
index 00000000000..263e1aef0ab
--- /dev/null
+++ b/storage/tokudb/mysql-test/tokudb_backup/t/rpl_tokudb_commit_sync-slave.opt
@@ -0,0 +1 @@
+--loose-tokudb-commit-sync=OFF --loose-tokudb-fsync-log-period=1000000
diff --git a/storage/tokudb/mysql-test/tokudb_backup/t/rpl_tokudb_commit_sync.test b/storage/tokudb/mysql-test/tokudb_backup/t/rpl_tokudb_commit_sync.test
new file mode 100644
index 00000000000..5bd53cacdab
--- /dev/null
+++ b/storage/tokudb/mysql-test/tokudb_backup/t/rpl_tokudb_commit_sync.test
@@ -0,0 +1,72 @@
+# If --tokudb-commit-sync is off on the slave, the tokudb log must be flushed
+# during backup so that replication can be restored after restoring the backup
+
+--source include/have_tokudb_backup.inc
+
+--let $BACKUP_DIR_SLAVE= $MYSQL_TMP_DIR/tokudb_backup_slave
+--let $BACKUP_MYSQL_DATA_DIR= $BACKUP_DIR_SLAVE/mysql_data_dir
+
+--mkdir $BACKUP_DIR_SLAVE
+
+--source include/master-slave.inc
+
+--echo ### Create some data on master
+--connection master
+CREATE TABLE t1(a INT, b INT, PRIMARY KEY (a)) ENGINE=TokuDB;
+INSERT INTO t1 SET a=100, b=100;
+INSERT INTO t1 SET a=200, b=100;
+INSERT INTO t1 SET a=300, b=100;
+INSERT INTO t1 SET a=400, b=100;
+INSERT INTO t1 SET a=500, b=100;
+UPDATE t1 SET b = 200 WHERE a = 200;
+DELETE FROM t1 WHERE a = 100;
+
+SELECT * FROM t1;
+
+--sync_slave_with_master
+--let $SLAVE_DATA_DIR=`SELECT @@DATADIR`
+
+--echo ### Check for slave options
+SELECT @@tokudb_commit_sync;
+SELECT @@tokudb_fsync_log_period;
+
+--echo ### Check data on slave after sync
+SELECT * FROM t1;
+
+
+--echo ### Do backup on slave
+--disable_query_log
+--eval SET SESSION tokudb_backup_dir='$BACKUP_DIR_SLAVE'
+--enable_query_log
+
+--echo ### Check for errors
+SELECT @@session.tokudb_backup_last_error;
+SELECT @@session.tokudb_backup_last_error_string;
+
+--echo ### Stop slave server
+--connection slave
+--let $rpl_server_number= 2
+--let $rpl_force_stop= 1
+--source include/rpl_stop_server.inc
+
+--echo ### Restore backup
+--exec rm -rf $SLAVE_DATA_DIR;
+--exec mv $BACKUP_MYSQL_DATA_DIR $SLAVE_DATA_DIR;
+
+--echo ### Start slave server and slave threads
+--connection slave
+--source include/rpl_start_server.inc
+--source include/start_slave.inc
+
+--echo ### Sync slave with master
+--connection master
+--sync_slave_with_master
+
+--echo ### Check data on slave
+SELECT * FROM t1;
+
+--echo ### Cleanup
+--connection master
+DROP TABLE t1;
+
+--source include/rpl_end.inc
diff --git a/storage/tokudb/mysql-test/tokudb_backup/t/suite.opt b/storage/tokudb/mysql-test/tokudb_backup/t/suite.opt
index 5d4cb245e27..a624d6895cc 100644
--- a/storage/tokudb/mysql-test/tokudb_backup/t/suite.opt
+++ b/storage/tokudb/mysql-test/tokudb_backup/t/suite.opt
@@ -1 +1 @@
-$TOKUDB_OPT $TOKUDB_LOAD_ADD_PATH $TOKUDB_BACKUP_OPT $TOKUDB_BACKUP_LOAD_ADD_PATH --loose-tokudb-check-jemalloc=0 --loose-tokudb-cache-size=512M --loose-tokudb-block-size=1M
+$TOKUDB_OPT $TOKUDB_LOAD_ADD_PATH $TOKUDB_BACKUP_OPT $TOKUDB_BACKUP_LOAD_ADD_PATH --loose-innodb_use_native_aio=off --loose-tokudb-check-jemalloc=0 --loose-tokudb-cache-size=512M --loose-tokudb-block-size=1M
diff --git a/storage/tokudb/mysql-test/tokudb_bugs/r/db233.result b/storage/tokudb/mysql-test/tokudb_bugs/r/db233.result
index e5808f52e69..a7cdbcae1e2 100644
--- a/storage/tokudb/mysql-test/tokudb_bugs/r/db233.result
+++ b/storage/tokudb/mysql-test/tokudb_bugs/r/db233.result
@@ -14,16 +14,6 @@ INSERT INTO t1 VALUES(1, 1, '1', '1'), (2, 2, '2', '2'), (3, 3, '3', '3'), (4, 4
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
test.t1 analyze status OK
-set DEBUG_SYNC = 'tokudb_icp_desc_scan_invalidate SIGNAL hit1 WAIT_FOR done1';
-SELECT c FROM t1 WHERE id BETWEEN 5 AND 8 ORDER BY id DESC;
-set DEBUG_SYNC = 'now WAIT_FOR hit1';
-set DEBUG_SYNC = 'now SIGNAL done1';
-c
-8
-7
-6
-6
-5
set DEBUG_SYNC = 'tokudb_icp_asc_scan_out_of_range SIGNAL hit2 WAIT_FOR done2';
SELECT c FROM t1 WHERE id BETWEEN 5 AND 8 ORDER BY id ASC;
set DEBUG_SYNC = 'now WAIT_FOR hit2';
diff --git a/storage/tokudb/mysql-test/tokudb_bugs/t/db233.test b/storage/tokudb/mysql-test/tokudb_bugs/t/db233.test
index 8e4c3b73c09..fec11bf0553 100644
--- a/storage/tokudb/mysql-test/tokudb_bugs/t/db233.test
+++ b/storage/tokudb/mysql-test/tokudb_bugs/t/db233.test
@@ -29,24 +29,6 @@ ANALYZE TABLE t1;
# lets flip to another connection
connect(conn1, localhost, root);
-# set up the DEBUG_SYNC point
-set DEBUG_SYNC = 'tokudb_icp_desc_scan_invalidate SIGNAL hit1 WAIT_FOR done1';
-
-# send the query
-send SELECT c FROM t1 WHERE id BETWEEN 5 AND 8 ORDER BY id DESC;
-
-# back to default connection
-connection default;
-
-# wait for the ICP reverse scan to invalidate
-set DEBUG_SYNC = 'now WAIT_FOR hit1';
-
-# lets release and clean up
-set DEBUG_SYNC = 'now SIGNAL done1';
-
-connection conn1;
-reap;
-
# set up the DEBUG_SYNC point again, but for the out of range
set DEBUG_SYNC = 'tokudb_icp_asc_scan_out_of_range SIGNAL hit2 WAIT_FOR done2';
diff --git a/storage/tokudb/mysql-test/tokudb_bugs/t/db397_delete_trigger.test b/storage/tokudb/mysql-test/tokudb_bugs/t/db397_delete_trigger.test
index dfd935b17ff..0502b35bc2c 100644
--- a/storage/tokudb/mysql-test/tokudb_bugs/t/db397_delete_trigger.test
+++ b/storage/tokudb/mysql-test/tokudb_bugs/t/db397_delete_trigger.test
@@ -1,3 +1,4 @@
+skip Tokudb Lock Waits not in I_S in MariaDB;
# check for any locking weirdness on DELETE triggers
source include/have_tokudb.inc;
set default_storage_engine='tokudb';
diff --git a/storage/tokudb/mysql-test/tokudb_bugs/t/db397_insert_trigger.test b/storage/tokudb/mysql-test/tokudb_bugs/t/db397_insert_trigger.test
index ffe2face9f2..313b1f96b52 100644
--- a/storage/tokudb/mysql-test/tokudb_bugs/t/db397_insert_trigger.test
+++ b/storage/tokudb/mysql-test/tokudb_bugs/t/db397_insert_trigger.test
@@ -1,3 +1,4 @@
+skip Tokudb Lock Waits not in I_S in MariaDB;
# check for any locking weirdness on INSERT triggers
source include/have_tokudb.inc;
set default_storage_engine='tokudb';
diff --git a/storage/tokudb/mysql-test/tokudb_bugs/t/db397_update_trigger.test b/storage/tokudb/mysql-test/tokudb_bugs/t/db397_update_trigger.test
index 063a88cb4ab..ce1eb6bddd1 100644
--- a/storage/tokudb/mysql-test/tokudb_bugs/t/db397_update_trigger.test
+++ b/storage/tokudb/mysql-test/tokudb_bugs/t/db397_update_trigger.test
@@ -1,3 +1,4 @@
+skip Tokudb Lock Waits not in I_S in MariaDB;
# check for any locking weirdness on UPDATE triggers
source include/have_tokudb.inc;
set default_storage_engine='tokudb';
diff --git a/storage/tokudb/mysql-test/tokudb_bugs/t/db938.test b/storage/tokudb/mysql-test/tokudb_bugs/t/db938.test
index 50434a79a00..d65bf3d95de 100644
--- a/storage/tokudb/mysql-test/tokudb_bugs/t/db938.test
+++ b/storage/tokudb/mysql-test/tokudb_bugs/t/db938.test
@@ -1,3 +1,4 @@
+skip Background Job Manager not supported in MariaDB;
# This test for DB-938 tests a race condition where a scheduled background job
# (analyze) ends up operating on a set of DB* key_file[] in TOKUDB_SHARE that
# were set to NULL during a TRUNCATE TABLE operation.
diff --git a/storage/tokudb/mysql-test/tokudb_bugs/t/db945.test b/storage/tokudb/mysql-test/tokudb_bugs/t/db945.test
index c7774877291..50dc91829d8 100644
--- a/storage/tokudb/mysql-test/tokudb_bugs/t/db945.test
+++ b/storage/tokudb/mysql-test/tokudb_bugs/t/db945.test
@@ -1,3 +1,4 @@
+skip Background Job Manager not supported in MariaDB;
source include/have_tokudb.inc;
set default_storage_engine='tokudb';
disable_warnings;
diff --git a/storage/tokudb/mysql-test/tokudb_bugs/t/leak172.test b/storage/tokudb/mysql-test/tokudb_bugs/t/leak172.test
index 9d82604e4b1..28cfff14770 100644
--- a/storage/tokudb/mysql-test/tokudb_bugs/t/leak172.test
+++ b/storage/tokudb/mysql-test/tokudb_bugs/t/leak172.test
@@ -132,6 +132,7 @@ CREATE TABLE `t2` (
);
LOAD DATA INFILE 'leak172_t1.data' INTO TABLE `t1` fields terminated by ',';
+remove_file $MYSQLD_DATADIR/test/leak172_t1.data;
connect(conn1,localhost,root,,);
set session debug_dbug="+d,tokudb_end_bulk_insert_sleep";
@@ -145,6 +146,7 @@ UPDATE t1, t2 SET t1.`c5` = 4 WHERE t1.`c6` <= 'o';
connection conn1;
reap;
+remove_file $MYSQLD_DATADIR/test/leak172_t2.data;
connection default;
disconnect conn1;
diff --git a/storage/tokudb/mysql-test/tokudb_bugs/t/tokudb718.test b/storage/tokudb/mysql-test/tokudb_bugs/t/tokudb718.test
index 735a88afed8..e7ef3211401 100644
--- a/storage/tokudb/mysql-test/tokudb_bugs/t/tokudb718.test
+++ b/storage/tokudb/mysql-test/tokudb_bugs/t/tokudb718.test
@@ -1,3 +1,4 @@
+skip Tokudb Fractal Tree info not in I_S in MariaDB;
# test DB-718, a crash caused by broken error handling in tokudb's fractal_tree_info information schema
source include/have_tokudb.inc;
set default_storage_engine='tokudb';
diff --git a/storage/tokudb/tokudb_dir_cmd.cc b/storage/tokudb/tokudb_dir_cmd.cc
new file mode 100644
index 00000000000..5431cbab7aa
--- /dev/null
+++ b/storage/tokudb/tokudb_dir_cmd.cc
@@ -0,0 +1,331 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+/* -*- mode: C; c-basic-offset: 4 -*- */
+#ident "$Id$"
+/*======
+This file is part of TokuDB
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ TokuDB is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ TokuDB is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with TokuDB. If not, see <http://www.gnu.org/licenses/>.
+
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "hatoku_hton.h"
+#include "tokudb_dir_cmd.h"
+#include "my_dbug.h"
+#include "sql_base.h"
+
+#include <vector>
+#include <string>
+
+namespace tokudb {
+
+const char tokens_delimiter = ' ';
+const char tokens_escape_delimiter_char = '\\';
+
+static int MDL_and_TDC(THD *thd,
+ const char *db,
+ const char *table,
+ const dir_cmd_callbacks &cb) {
+ int error;
+ LEX_CSTRING db_arg;
+ LEX_CSTRING table_arg;
+
+ db_arg.str = const_cast<char *>(db);
+ db_arg.length = strlen(db);
+ table_arg.str = const_cast<char *>(table);
+ table_arg.length = strlen(table);
+ Table_ident table_ident(thd, &db_arg, &table_arg, true);
+ thd->lex->select_lex.add_table_to_list(
+ thd, &table_ident, NULL, 1, TL_UNLOCK, MDL_EXCLUSIVE, 0, 0, 0);
+ /* The lock will be released at the end of mysql_execute_command() */
+ error = lock_table_names(thd,
+ thd->lex->select_lex.table_list.first,
+ NULL,
+ thd->variables.lock_wait_timeout,
+ 0);
+ if (error) {
+ if (cb.set_error)
+ cb.set_error(thd,
+ error,
+ "Can't lock table '%s.%s'",
+ db,
+ table);
+ return error;
+ }
+ tdc_remove_table(thd, TDC_RT_REMOVE_ALL, db, table, false);
+ return error;
+}
+
+static bool parse_db_and_table(const char *dname,
+ std::string /*out*/ &db_name,
+ std::string /*out*/ &table_name) {
+ const char *begin;
+ const char *end;
+ const char *db_name_begin;
+ const char *db_name_end;
+
+ begin = strchr(dname, '/');
+ if (!begin)
+ return false;
+ ++begin;
+ end = strchr(begin, '/');
+ if (!end)
+ return false;
+
+ db_name_begin = begin;
+ db_name_end = end;
+
+ begin = end + 1;
+
+ end = strchr(begin, '-');
+ if (!end)
+ return false;
+
+ if (strncmp(end, "-main", strlen("-main")) &&
+ strncmp(end, "-status", strlen("-status")) &&
+ strncmp(end, "-key", strlen("-key")))
+ return false;
+
+ db_name.assign(db_name_begin, db_name_end);
+ table_name.assign(begin, end);
+
+ return true;
+}
+
+static int attach(THD *thd,
+ const std::string &dname,
+ const std::string &iname,
+ const dir_cmd_callbacks &cb) {
+ int error;
+ DB_TXN* txn = NULL;
+ DB_TXN *parent_txn = NULL;
+ tokudb_trx_data *trx = NULL;
+
+ std::string db_name;
+ std::string table_name;
+
+ if (parse_db_and_table(dname.c_str(), db_name, table_name)) {
+ error = MDL_and_TDC(thd, db_name.c_str(), table_name.c_str(), cb);
+ if (error)
+ goto cleanup;
+ }
+
+ trx = (tokudb_trx_data *) thd_get_ha_data(thd, tokudb_hton);
+ if (trx && trx->sub_sp_level)
+ parent_txn = trx->sub_sp_level;
+ error = txn_begin(db_env, parent_txn, &txn, 0, thd);
+ if (error)
+ goto cleanup;
+
+ error = db_env->dirtool_attach(db_env,
+ txn,
+ dname.c_str(),
+ iname.c_str());
+cleanup:
+ if (txn) {
+ if (error) {
+ abort_txn(txn);
+ }
+ else {
+ commit_txn(txn, 0);
+ }
+ }
+ return error;
+}
+
+static int detach(THD *thd,
+ const std::string &dname,
+ const dir_cmd_callbacks &cb) {
+ int error;
+ DB_TXN* txn = NULL;
+ DB_TXN *parent_txn = NULL;
+ tokudb_trx_data *trx = NULL;
+
+ std::string db_name;
+ std::string table_name;
+
+ if (parse_db_and_table(dname.c_str(), db_name, table_name)) {
+ error = MDL_and_TDC(thd, db_name.c_str(), table_name.c_str(), cb);
+ if (error)
+ goto cleanup;
+ }
+
+ trx = (tokudb_trx_data *) thd_get_ha_data(thd, tokudb_hton);
+ if (trx && trx->sub_sp_level)
+ parent_txn = trx->sub_sp_level;
+ error = txn_begin(db_env, parent_txn, &txn, 0, thd);
+ if (error)
+ goto cleanup;
+
+ error = db_env->dirtool_detach(db_env,
+ txn,
+ dname.c_str());
+cleanup:
+ if (txn) {
+ if (error) {
+ abort_txn(txn);
+ }
+ else {
+ commit_txn(txn, 0);
+ }
+ }
+ return error;
+}
+
+static int move(THD *thd,
+ const std::string &old_dname,
+ const std::string &new_dname,
+ const dir_cmd_callbacks &cb) {
+ int error;
+ DB_TXN* txn = NULL;
+ DB_TXN *parent_txn = NULL;
+ tokudb_trx_data *trx = NULL;
+
+ std::string db_name;
+ std::string table_name;
+
+ if (parse_db_and_table(old_dname.c_str(), db_name, table_name)) {
+ error = MDL_and_TDC(thd, db_name.c_str(), table_name.c_str(), cb);
+ if (error)
+ goto cleanup;
+ }
+
+ trx = (tokudb_trx_data *) thd_get_ha_data(thd, tokudb_hton);
+ if (trx && trx->sub_sp_level)
+ parent_txn = trx->sub_sp_level;
+ error = txn_begin(db_env, parent_txn, &txn, 0, thd);
+ if (error)
+ goto cleanup;
+
+ error = db_env->dirtool_move(db_env,
+ txn,
+ old_dname.c_str(),
+ new_dname.c_str());
+cleanup:
+ if (txn) {
+ if (error) {
+ abort_txn(txn);
+ }
+ else {
+ commit_txn(txn, 0);
+ }
+ }
+ return error;
+}
+
+static void tokenize(const char *cmd_str,
+ std::vector<std::string> /*out*/ &tokens) {
+ DBUG_ASSERT(cmd_str);
+
+ bool was_escape = false;
+ const char *token_begin = cmd_str;
+ const char *token_end = token_begin;
+
+ while (*token_end) {
+ if (*token_end == tokens_escape_delimiter_char) {
+ was_escape = true;
+ }
+ else if (*token_end == tokens_delimiter) {
+ if (was_escape)
+ was_escape = false;
+ else {
+ if (token_begin == token_end)
+ ++token_begin;
+ else {
+ tokens.push_back(std::string(token_begin, token_end));
+ token_begin = token_end + 1;
+ }
+ }
+ }
+ else {
+ was_escape = false;
+ }
+ ++token_end;
+ }
+
+ if (token_begin != token_end)
+ tokens.push_back(std::string(token_begin, token_end));
+}
+
+void process_dir_cmd(THD *thd,
+ const char *cmd_str,
+ const dir_cmd_callbacks &cb) {
+
+ DBUG_ASSERT(thd);
+ DBUG_ASSERT(cmd_str);
+
+ std::vector<std::string> tokens;
+ tokenize(cmd_str, tokens);
+
+ if (tokens.empty())
+ return;
+
+ const std::string &cmd = tokens[0];
+
+ if (!cmd.compare("attach")) {
+ if (tokens.size() != 3) {
+ if (cb.set_error)
+ cb.set_error(thd,
+ EINVAL,
+ "attach command requires two arguments");
+ }
+ else {
+ int r = attach(thd, tokens[1], tokens[2], cb);
+ if (r && cb.set_error)
+ cb.set_error(thd, r, "Attach command error");
+ }
+ }
+ else if (!cmd.compare("detach")) {
+ if (tokens.size() != 2) {
+ if (cb.set_error)
+ cb.set_error(thd,
+ EINVAL,
+ "detach command requires one argument");
+ }
+ else {
+ int r = detach(thd, tokens[1], cb);
+ if (r && cb.set_error)
+ cb.set_error(thd, r, "detach command error");
+ }
+ }
+ else if (!cmd.compare("move")) {
+ if (tokens.size() != 3) {
+ if (cb.set_error)
+ cb.set_error(thd,
+ EINVAL,
+ "move command requires two arguments");
+ }
+ else {
+ int r = move(thd, tokens[1], tokens[2], cb);
+ if (r && cb.set_error)
+ cb.set_error(thd, r, "move command error");
+ }
+ }
+ else {
+ if (cb.set_error)
+ cb.set_error(thd,
+ ENOENT,
+ "Unknown command '%s'",
+ cmd.c_str());
+ }
+
+ return;
+}
+
+
+} // namespace tokudb
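For reference, the escape-aware splitting used by tokenize() and process_dir_cmd() above can be exercised outside the server. The following is a minimal standalone sketch of the same approach; the command string and file names in it are purely illustrative and are not taken from the patch.

    // Split on spaces; a backslash escapes the following space so that it stays
    // inside the token (the backslash itself is kept, exactly as in tokenize()).
    #include <iostream>
    #include <string>
    #include <vector>

    static void tokenize(const char *cmd_str, std::vector<std::string> &tokens) {
        bool was_escape = false;
        const char *token_begin = cmd_str;
        const char *token_end = token_begin;

        while (*token_end) {
            if (*token_end == '\\') {
                was_escape = true;
            } else if (*token_end == ' ') {
                if (was_escape)
                    was_escape = false;
                else if (token_begin == token_end)
                    ++token_begin;                  // collapse runs of delimiters
                else {
                    tokens.push_back(std::string(token_begin, token_end));
                    token_begin = token_end + 1;
                }
            } else {
                was_escape = false;
            }
            ++token_end;
        }
        if (token_begin != token_end)
            tokens.push_back(std::string(token_begin, token_end));
    }

    int main() {
        std::vector<std::string> tokens;
        tokenize("attach ./test/t1-main ./t1_main_id.tokudb", tokens);
        for (size_t i = 0; i < tokens.size(); i++)
            std::cout << i << ": " << tokens[i] << '\n';
        // prints: 0: attach / 1: ./test/t1-main / 2: ./t1_main_id.tokudb
    }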
diff --git a/storage/tokudb/tokudb_dir_cmd.h b/storage/tokudb/tokudb_dir_cmd.h
new file mode 100644
index 00000000000..b39caadc7c3
--- /dev/null
+++ b/storage/tokudb/tokudb_dir_cmd.h
@@ -0,0 +1,46 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of TokuDB
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ TokuDB is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ TokuDB is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with TokuDB. If not, see <http://www.gnu.org/licenses/>.
+
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#ifndef _TOKUDB_DIR_CMD_H
+#define _TOKUDB_DIR_CMD_H
+
+#include <sql_class.h>
+
+namespace tokudb {
+
+struct dir_cmd_callbacks {
+ void (*set_error)(THD *thd,
+ int error,
+ const char *error_fmt,
+ ...);
+};
+
+void process_dir_cmd(THD *thd,
+ const char *cmd_str,
+ const dir_cmd_callbacks &cb);
+
+} // namespace tokudb
+
+#endif // _TOKUDB_DIR_CMD_H
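The only hook a caller has to provide is the printf-style set_error callback declared above. A minimal sketch of a handler with that shape follows; THD is a local stand-in type so the sketch compiles on its own, and the real wiring (presumably through a system variable update function in tokudb_sysvars.cc) would report the error through the server's own facilities rather than stderr.

    #include <cerrno>
    #include <cstdarg>
    #include <cstdio>

    struct THD {};      // local stand-in for the server connection object

    static void set_error_stub(THD *, int error, const char *error_fmt, ...) {
        char msg[256];
        va_list args;
        va_start(args, error_fmt);
        std::vsnprintf(msg, sizeof(msg), error_fmt, args);
        va_end(args);
        std::fprintf(stderr, "tokudb dir cmd error %d: %s\n", error, msg);
    }

    int main() {
        THD thd;
        set_error_stub(&thd, EINVAL, "%s command requires %d arguments", "attach", 2);
    }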
diff --git a/storage/tokudb/tokudb_sysvars.cc b/storage/tokudb/tokudb_sysvars.cc
index b758929c10e..bbc39dc550a 100644
--- a/storage/tokudb/tokudb_sysvars.cc
+++ b/storage/tokudb/tokudb_sysvars.cc
@@ -25,6 +25,9 @@ Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
#include "hatoku_hton.h"
+#include "sql_acl.h"
+#include "tokudb_dir_cmd.h"
+#include "sql_parse.h"
namespace tokudb {
namespace sysvars {
@@ -40,6 +43,7 @@ namespace sysvars {
#define TOKUDB_VERSION_STR NULL
#endif
+
ulonglong cache_size = 0;
uint cachetable_pool_threads = 0;
int cardinality_scale_percent = 0;
@@ -918,8 +922,6 @@ static MYSQL_THDVAR_BOOL(
true);
#endif
-
-
//******************************************************************************
// all system variables
//******************************************************************************
@@ -949,7 +951,6 @@ st_mysql_sys_var* system_variables[] = {
MYSQL_SYSVAR(version),
MYSQL_SYSVAR(write_status_frequency),
MYSQL_SYSVAR(dir_per_db),
-
#if TOKU_INCLUDE_HANDLERTON_HANDLE_FATAL_SIGNAL
MYSQL_SYSVAR(gdb_path),
MYSQL_SYSVAR(gdb_on_fatal),
diff --git a/storage/xtradb/btr/btr0btr.cc b/storage/xtradb/btr/btr0btr.cc
index d84c93f8b3e..e66599e206d 100644
--- a/storage/xtradb/btr/btr0btr.cc
+++ b/storage/xtradb/btr/btr0btr.cc
@@ -2,7 +2,7 @@
Copyright (c) 1994, 2016, Oracle and/or its affiliates. All Rights Reserved.
Copyright (c) 2012, Facebook Inc.
-Copyright (c) 2014, 2017, MariaDB Corporation
+Copyright (c) 2014, 2017, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -1703,9 +1703,7 @@ btr_create(
dict_index_t* index, /*!< in: index */
mtr_t* mtr) /*!< in: mini-transaction handle */
{
- ulint page_no;
buf_block_t* block;
- buf_frame_t* frame;
page_t* page;
page_zip_des_t* page_zip;
@@ -1720,6 +1718,10 @@ btr_create(
space, 0,
IBUF_HEADER + IBUF_TREE_SEG_HEADER, mtr);
+ if (ibuf_hdr_block == NULL) {
+ return(FIL_NULL);
+ }
+
buf_block_dbg_add_level(
ibuf_hdr_block, SYNC_IBUF_TREE_NODE_NEW);
@@ -1733,7 +1735,17 @@ btr_create(
+ IBUF_HEADER + IBUF_TREE_SEG_HEADER,
IBUF_TREE_ROOT_PAGE_NO,
FSP_UP, mtr);
+
+ if (block == NULL) {
+ return(FIL_NULL);
+ }
+
ut_ad(buf_block_get_page_no(block) == IBUF_TREE_ROOT_PAGE_NO);
+
+ buf_block_dbg_add_level(block, SYNC_IBUF_TREE_NODE_NEW);
+
+ flst_init(block->frame + PAGE_HEADER + PAGE_BTR_IBUF_FREE_LIST,
+ mtr);
} else {
#ifdef UNIV_BLOB_DEBUG
if ((type & DICT_CLUSTERED) && !index->blobs) {
@@ -1745,34 +1757,19 @@ btr_create(
#endif /* UNIV_BLOB_DEBUG */
block = fseg_create(space, 0,
PAGE_HEADER + PAGE_BTR_SEG_TOP, mtr);
- }
- if (block == NULL) {
-
- return(FIL_NULL);
- }
-
- page_no = buf_block_get_page_no(block);
- frame = buf_block_get_frame(block);
-
- if (type & DICT_IBUF) {
- /* It is an insert buffer tree: initialize the free list */
- buf_block_dbg_add_level(block, SYNC_IBUF_TREE_NODE_NEW);
-
- ut_ad(page_no == IBUF_TREE_ROOT_PAGE_NO);
+ if (block == NULL) {
+ return(FIL_NULL);
+ }
- flst_init(frame + PAGE_HEADER + PAGE_BTR_IBUF_FREE_LIST, mtr);
- } else {
- /* It is a non-ibuf tree: create a file segment for leaf
- pages */
buf_block_dbg_add_level(block, SYNC_TREE_NODE_NEW);
- if (!fseg_create(space, page_no,
+ if (!fseg_create(space, buf_block_get_page_no(block),
PAGE_HEADER + PAGE_BTR_SEG_LEAF, mtr)) {
/* Not enough space for new segment, free root
segment before return. */
- btr_free_root(space, zip_size, page_no, mtr);
-
+ btr_free_root(space, zip_size,
+ buf_block_get_page_no(block), mtr);
return(FIL_NULL);
}
@@ -1816,7 +1813,7 @@ btr_create(
ut_ad(page_get_max_insert_size(page, 2) > 2 * BTR_PAGE_MAX_REC_SIZE);
- return(page_no);
+ return(buf_block_get_page_no(block));
}
/************************************************************//**
diff --git a/storage/xtradb/btr/btr0cur.cc b/storage/xtradb/btr/btr0cur.cc
index 1ea1ec0696b..ffd7ebc7504 100644
--- a/storage/xtradb/btr/btr0cur.cc
+++ b/storage/xtradb/btr/btr0cur.cc
@@ -1329,18 +1329,21 @@ btr_cur_ins_lock_and_undo(
index, thr, mtr, inherit);
if (err != DB_SUCCESS
+ || !(~flags | (BTR_NO_UNDO_LOG_FLAG | BTR_KEEP_SYS_FLAG))
|| !dict_index_is_clust(index) || dict_index_is_ibuf(index)) {
return(err);
}
- err = trx_undo_report_row_operation(flags, TRX_UNDO_INSERT_OP,
- thr, index, entry,
- NULL, 0, NULL, NULL,
- &roll_ptr);
- if (err != DB_SUCCESS) {
-
- return(err);
+ if (flags & BTR_NO_UNDO_LOG_FLAG) {
+ roll_ptr = 0;
+ } else {
+ err = trx_undo_report_row_operation(thr, index, entry,
+ NULL, 0, NULL, NULL,
+ &roll_ptr);
+ if (err != DB_SUCCESS) {
+ return(err);
+ }
}
/* Now we can fill in the roll ptr field in entry */
@@ -1389,15 +1392,17 @@ btr_cur_optimistic_insert(
btr_cur_t* cursor, /*!< in: cursor on page after which to insert;
cursor stays valid */
ulint** offsets,/*!< out: offsets on *rec */
- mem_heap_t** heap, /*!< in/out: pointer to memory heap, or NULL */
+ mem_heap_t** heap, /*!< in/out: pointer to memory heap */
dtuple_t* entry, /*!< in/out: entry to insert */
rec_t** rec, /*!< out: pointer to inserted record if
succeed */
big_rec_t** big_rec,/*!< out: big rec vector whose fields have to
- be stored externally by the caller, or
- NULL */
+ be stored externally by the caller */
ulint n_ext, /*!< in: number of externally stored columns */
- que_thr_t* thr, /*!< in: query thread or NULL */
+ que_thr_t* thr, /*!< in/out: query thread; can be NULL if
+ !(~flags
+ & (BTR_NO_LOCKING_FLAG
+ | BTR_NO_UNDO_LOG_FLAG)) */
mtr_t* mtr) /*!< in/out: mini-transaction;
if this function returns DB_SUCCESS on
a leaf page of a secondary index in a
@@ -1418,6 +1423,7 @@ btr_cur_optimistic_insert(
ulint rec_size;
dberr_t err;
+ ut_ad(thr || !(~flags & (BTR_NO_LOCKING_FLAG | BTR_NO_UNDO_LOG_FLAG)));
*big_rec = NULL;
block = btr_cur_get_block(cursor);
@@ -1427,7 +1433,10 @@ btr_cur_optimistic_insert(
page = buf_block_get_frame(block);
index = cursor->index;
- ut_ad((thr && thr_get_trx(thr)->fake_changes)
+ const bool fake_changes = (~flags & (BTR_NO_LOCKING_FLAG
+ | BTR_NO_UNDO_LOG_FLAG))
+ && thr_get_trx(thr)->fake_changes;
+ ut_ad(fake_changes
|| mtr_memo_contains(mtr, block, MTR_MEMO_PAGE_X_FIX));
ut_ad(!dict_index_is_online_ddl(index)
|| dict_index_is_clust(index)
@@ -1568,7 +1577,7 @@ fail_err:
goto fail_err;
}
- if (UNIV_UNLIKELY(thr && thr_get_trx(thr)->fake_changes)) {
+ if (UNIV_UNLIKELY(fake_changes)) {
/* skip CHANGE, LOG */
*big_rec = big_rec_vec;
return(err); /* == DB_SUCCESS */
@@ -1686,15 +1695,17 @@ btr_cur_pessimistic_insert(
cursor stays valid */
ulint** offsets,/*!< out: offsets on *rec */
mem_heap_t** heap, /*!< in/out: pointer to memory heap
- that can be emptied, or NULL */
+ that can be emptied */
dtuple_t* entry, /*!< in/out: entry to insert */
rec_t** rec, /*!< out: pointer to inserted record if
succeed */
big_rec_t** big_rec,/*!< out: big rec vector whose fields have to
- be stored externally by the caller, or
- NULL */
+ be stored externally by the caller */
ulint n_ext, /*!< in: number of externally stored columns */
- que_thr_t* thr, /*!< in: query thread or NULL */
+ que_thr_t* thr, /*!< in/out: query thread; can be NULL if
+ !(~flags
+ & (BTR_NO_LOCKING_FLAG
+ | BTR_NO_UNDO_LOG_FLAG)) */
mtr_t* mtr) /*!< in/out: mini-transaction */
{
dict_index_t* index = cursor->index;
@@ -1706,13 +1717,17 @@ btr_cur_pessimistic_insert(
ulint n_reserved = 0;
ut_ad(dtuple_check_typed(entry));
+ ut_ad(thr || !(~flags & (BTR_NO_LOCKING_FLAG | BTR_NO_UNDO_LOG_FLAG)));
*big_rec = NULL;
- ut_ad((thr && thr_get_trx(thr)->fake_changes) || mtr_memo_contains(mtr,
+ const bool fake_changes = (~flags & (BTR_NO_LOCKING_FLAG
+ | BTR_NO_UNDO_LOG_FLAG))
+ && thr_get_trx(thr)->fake_changes;
+ ut_ad(fake_changes || mtr_memo_contains(mtr,
dict_index_get_lock(btr_cur_get_index(cursor)),
MTR_MEMO_X_LOCK));
- ut_ad((thr && thr_get_trx(thr)->fake_changes) || mtr_memo_contains(mtr, btr_cur_get_block(cursor),
+ ut_ad(fake_changes || mtr_memo_contains(mtr, btr_cur_get_block(cursor),
MTR_MEMO_PAGE_X_FIX));
ut_ad(!dict_index_is_online_ddl(index)
|| dict_index_is_clust(index)
@@ -1773,7 +1788,7 @@ btr_cur_pessimistic_insert(
}
}
- if (UNIV_UNLIKELY(thr && thr_get_trx(thr)->fake_changes)) {
+ if (UNIV_UNLIKELY(fake_changes)) {
/* skip CHANGE, LOG */
if (n_reserved > 0) {
fil_space_release_free_extents(index->space,
@@ -1871,7 +1886,9 @@ btr_cur_upd_lock_and_undo(
const rec_t* rec;
dberr_t err;
- if (UNIV_UNLIKELY(thr_get_trx(thr)->fake_changes)) {
+ ut_ad((thr != NULL) || (flags & BTR_NO_LOCKING_FLAG));
+
+ if (!(flags & BTR_NO_LOCKING_FLAG) && thr_get_trx(thr)->fake_changes) {
/* skip LOCK, UNDO */
return(DB_SUCCESS);
}
@@ -1906,9 +1923,10 @@ btr_cur_upd_lock_and_undo(
/* Append the info about the update in the undo log */
- return(trx_undo_report_row_operation(
- flags, TRX_UNDO_MODIFY_OP, thr,
- index, NULL, update,
+ return((flags & BTR_NO_UNDO_LOG_FLAG)
+ ? DB_SUCCESS
+ : trx_undo_report_row_operation(
+ thr, index, NULL, update,
cmpl_info, rec, offsets, roll_ptr));
}
@@ -2659,12 +2677,12 @@ btr_cur_pessimistic_update(
ulint** offsets,/*!< out: offsets on cursor->page_cur.rec */
mem_heap_t** offsets_heap,
/*!< in/out: pointer to memory heap
- that can be emptied, or NULL */
+ that can be emptied */
mem_heap_t* entry_heap,
/*!< in/out: memory heap for allocating
big_rec and the index tuple */
big_rec_t** big_rec,/*!< out: big rec vector whose fields have to
- be stored externally by the caller, or NULL */
+ be stored externally by the caller */
const upd_t* update, /*!< in: update vector; this is allowed also
contain trx id and roll ptr fields, but
the values in update vector have no effect */
@@ -3239,7 +3257,7 @@ btr_cur_del_mark_set_clust_rec(
return(err);
}
- err = trx_undo_report_row_operation(0, TRX_UNDO_MODIFY_OP, thr,
+ err = trx_undo_report_row_operation(thr,
index, NULL, NULL, 0, rec, offsets,
&roll_ptr);
if (err != DB_SUCCESS) {
diff --git a/storage/xtradb/btr/btr0defragment.cc b/storage/xtradb/btr/btr0defragment.cc
index 64dc077d582..c2f58a8e1cf 100644
--- a/storage/xtradb/btr/btr0defragment.cc
+++ b/storage/xtradb/btr/btr0defragment.cc
@@ -154,7 +154,6 @@ btr_defragment_init()
(ulonglong) (1000000.0 / srv_defragment_frequency));
mutex_create(btr_defragment_mutex_key, &btr_defragment_mutex,
SYNC_ANY_LATCH);
- os_thread_create(btr_defragment_thread, NULL, NULL);
}
/******************************************************************//**
@@ -735,14 +734,13 @@ btr_defragment_n_pages(
return current_block;
}
-/******************************************************************//**
-Thread that merges consecutive b-tree pages into fewer pages to defragment
-the index. */
+/** Whether btr_defragment_thread is active */
+bool btr_defragment_thread_active;
+
+/** Merge consecutive b-tree pages into fewer pages to defragment indexes */
extern "C" UNIV_INTERN
os_thread_ret_t
-DECLARE_THREAD(btr_defragment_thread)(
-/*==========================================*/
- void* arg) /*!< in: work queue */
+DECLARE_THREAD(btr_defragment_thread)(void*)
{
btr_pcur_t* pcur;
btr_cur_t* cursor;
@@ -752,6 +750,8 @@ DECLARE_THREAD(btr_defragment_thread)(
buf_block_t* last_block;
while (srv_shutdown_state == SRV_SHUTDOWN_NONE) {
+ ut_ad(btr_defragment_thread_active);
+
/* If defragmentation is disabled, sleep before
checking whether it's enabled. */
if (!srv_defragment) {
@@ -825,9 +825,9 @@ DECLARE_THREAD(btr_defragment_thread)(
btr_defragment_remove_item(item);
}
}
- btr_defragment_shutdown();
+
+ btr_defragment_thread_active = false;
os_thread_exit(NULL);
OS_THREAD_DUMMY_RETURN;
}
-
#endif /* !UNIV_HOTBACKUP */
diff --git a/storage/xtradb/btr/btr0sea.cc b/storage/xtradb/btr/btr0sea.cc
index 68dbcdf1fa7..2f0428747d5 100644
--- a/storage/xtradb/btr/btr0sea.cc
+++ b/storage/xtradb/btr/btr0sea.cc
@@ -192,7 +192,7 @@ btr_search_sys_create(
&btr_search_latch_arr[i], SYNC_SEARCH_SYS);
btr_search_sys->hash_tables[i]
- = ha_create(hash_size, 0, MEM_HEAP_FOR_BTR_SEARCH, 0);
+ = ib_create(hash_size, 0, MEM_HEAP_FOR_BTR_SEARCH, 0);
#if defined UNIV_AHI_DEBUG || defined UNIV_DEBUG
btr_search_sys->hash_tables[i]->adaptive = TRUE;
diff --git a/storage/xtradb/buf/buf0buf.cc b/storage/xtradb/buf/buf0buf.cc
index c57dab79ef7..01bec11d2ed 100644
--- a/storage/xtradb/buf/buf0buf.cc
+++ b/storage/xtradb/buf/buf0buf.cc
@@ -2,7 +2,7 @@
Copyright (c) 1995, 2016, Oracle and/or its affiliates. All Rights Reserved.
Copyright (c) 2008, Google Inc.
-Copyright (c) 2013, 2017, MariaDB Corporation. All Rights Reserved.
+Copyright (c) 2013, 2017, MariaDB Corporation.
Portions of this file contain modifications contributed and copyrighted by
Google, Inc. Those modifications are gratefully acknowledged and are described
@@ -65,6 +65,18 @@ Created 11/5/1995 Heikki Tuuri
#include "fil0pagecompress.h"
#include "ha_prototypes.h"
+#ifdef UNIV_LINUX
+#include <stdlib.h>
+#endif
+
+#ifdef HAVE_LZO
+#include "lzo/lzo1x.h"
+#endif
+
+#ifdef HAVE_SNAPPY
+#include "snappy-c.h"
+#endif
+
/** Decrypt a page.
@param[in,out] bpage Page control block
@param[in,out] space tablespace
@@ -74,9 +86,38 @@ bool
buf_page_decrypt_after_read(buf_page_t* bpage, fil_space_t* space)
MY_ATTRIBUTE((nonnull));
+/********************************************************************//**
+Mark a table with the specified space pointed by bpage->space corrupted.
+Also remove the bpage from LRU list.
+@param[in,out] bpage Block */
+static
+void
+buf_mark_space_corrupt(
+ buf_page_t* bpage);
+
/* prototypes for new functions added to ha_innodb.cc */
trx_t* innobase_get_trx();
+inline void* aligned_malloc(size_t size, size_t align) {
+ void *result;
+#ifdef _MSC_VER
+ result = _aligned_malloc(size, align);
+#else
+ if(posix_memalign(&result, align, size)) {
+ result = 0;
+ }
+#endif
+ return result;
+}
+
+inline void aligned_free(void *ptr) {
+#ifdef _MSC_VER
+ _aligned_free(ptr);
+#else
+ free(ptr);
+#endif
+}
+
static inline
void
_increment_page_get_statistics(buf_block_t* block, trx_t* trx)
@@ -108,10 +149,6 @@ _increment_page_get_statistics(buf_block_t* block, trx_t* trx)
return;
}
-#ifdef HAVE_LZO
-#include "lzo/lzo1x.h"
-#endif
-
/*
IMPLEMENTATION OF THE BUFFER POOL
=================================
@@ -1510,8 +1547,6 @@ buf_pool_init_instance(
buf_pool->chunks = chunk =
(buf_chunk_t*) mem_zalloc(sizeof *chunk);
- UT_LIST_INIT(buf_pool->free);
-
if (!buf_chunk_init(buf_pool, chunk, buf_pool_size)) {
mem_free(chunk);
mem_free(buf_pool);
@@ -1533,7 +1568,7 @@ buf_pool_init_instance(
ut_a(srv_n_page_hash_locks != 0);
ut_a(srv_n_page_hash_locks <= MAX_PAGE_HASH_LOCKS);
- buf_pool->page_hash = ha_create(2 * buf_pool->curr_size,
+ buf_pool->page_hash = ib_create(2 * buf_pool->curr_size,
srv_n_page_hash_locks,
MEM_HEAP_FOR_PAGE_HASH,
SYNC_BUF_PAGE_HASH);
@@ -1642,20 +1677,14 @@ buf_pool_free_instance(
if (buf_pool->tmp_arr) {
for(ulint i = 0; i < buf_pool->tmp_arr->n_slots; i++) {
buf_tmp_buffer_t* slot = &(buf_pool->tmp_arr->slots[i]);
-#ifdef HAVE_LZO
- if (slot && slot->lzo_mem) {
- ut_free(slot->lzo_mem);
- slot->lzo_mem = NULL;
- }
-#endif
- if (slot && slot->crypt_buf_free) {
- ut_free(slot->crypt_buf_free);
- slot->crypt_buf_free = NULL;
+ if (slot && slot->crypt_buf) {
+ aligned_free(slot->crypt_buf);
+ slot->crypt_buf = NULL;
}
- if (slot && slot->comp_buf_free) {
- ut_free(slot->comp_buf_free);
- slot->comp_buf_free = NULL;
+ if (slot && slot->comp_buf) {
+ aligned_free(slot->comp_buf);
+ slot->comp_buf = NULL;
}
}
}
@@ -2521,17 +2550,26 @@ buf_zip_decompress(
{
const byte* frame = block->page.zip.data;
ulint size = page_zip_get_size(&block->page.zip);
+ /* Space is not found if this function is called during IMPORT */
+ fil_space_t* space = fil_space_acquire_for_io(block->page.space);
+ const unsigned key_version = mach_read_from_4(frame +
+ FIL_PAGE_FILE_FLUSH_LSN_OR_KEY_VERSION);
+ fil_space_crypt_t* crypt_data = space ? space->crypt_data : NULL;
+ const bool encrypted = crypt_data
+ && crypt_data->type != CRYPT_SCHEME_UNENCRYPTED
+ && (!crypt_data->is_default_encryption()
+ || srv_encrypt_tables);
ut_ad(buf_block_get_zip_size(block));
ut_a(buf_block_get_space(block) != 0);
if (UNIV_UNLIKELY(check && !page_zip_verify_checksum(frame, size))) {
- ut_print_timestamp(stderr);
- fprintf(stderr,
- " InnoDB: compressed page checksum mismatch"
- " (space %u page %u): stored: %lu, crc32: %lu "
- "innodb: %lu, none: %lu\n",
+ ib_logf(IB_LOG_LEVEL_ERROR,
+ "Compressed page checksum mismatch"
+ " for %s [%u:%u]: stored: " ULINTPF ", crc32: " ULINTPF
+ " innodb: " ULINTPF ", none: " ULINTPF ".",
+ space ? space->chain.start->name : "N/A",
block->page.space, block->page.offset,
mach_read_from_4(frame + FIL_PAGE_SPACE_OR_CHKSUM),
page_zip_calc_checksum(frame, size,
@@ -2540,22 +2578,28 @@ buf_zip_decompress(
SRV_CHECKSUM_ALGORITHM_INNODB),
page_zip_calc_checksum(frame, size,
SRV_CHECKSUM_ALGORITHM_NONE));
- return(FALSE);
+ goto err_exit;
}
switch (fil_page_get_type(frame)) {
- case FIL_PAGE_INDEX:
+ case FIL_PAGE_INDEX: {
+
if (page_zip_decompress(&block->page.zip,
block->frame, TRUE)) {
+ if (space) {
+ fil_space_release_for_io(space);
+ }
return(TRUE);
}
- fprintf(stderr,
- "InnoDB: unable to decompress space %u page %u\n",
+ ib_logf(IB_LOG_LEVEL_ERROR,
+ "Unable to decompress space %s [%u:%u]",
+ space ? space->chain.start->name : "N/A",
block->page.space,
block->page.offset);
- return(FALSE);
+ goto err_exit;
+ }
case FIL_PAGE_TYPE_ALLOCATED:
case FIL_PAGE_INODE:
case FIL_PAGE_IBUF_BITMAP:
@@ -2566,14 +2610,36 @@ buf_zip_decompress(
/* Copy to uncompressed storage. */
memcpy(block->frame, frame,
buf_block_get_zip_size(block));
+
+ if (space) {
+ fil_space_release_for_io(space);
+ }
+
return(TRUE);
}
- ut_print_timestamp(stderr);
- fprintf(stderr,
- " InnoDB: unknown compressed page"
- " type %lu\n",
- fil_page_get_type(frame));
+ ib_logf(IB_LOG_LEVEL_ERROR,
+ "Unknown compressed page in %s [%u:%u]"
+ " type %s [" ULINTPF "].",
+ space ? space->chain.start->name : "N/A",
+ block->page.space, block->page.offset,
+ fil_get_page_type_name(fil_page_get_type(frame)), fil_page_get_type(frame));
+
+err_exit:
+ if (encrypted) {
+ ib_logf(IB_LOG_LEVEL_INFO,
+ "Row compressed page could be encrypted with key_version %u.",
+ key_version);
+ block->page.encrypted = true;
+ dict_set_encrypted_by_space(block->page.space);
+ } else {
+ dict_set_corrupted_by_space(block->page.space);
+ }
+
+ if (space) {
+ fil_space_release_for_io(space);
+ }
+
return(FALSE);
}
@@ -3056,9 +3122,9 @@ loop:
}
ib_logf(IB_LOG_LEVEL_FATAL, "Unable"
- " to read tablespace %lu page no"
- " %lu into the buffer pool after"
- " %lu attempts"
+ " to read tablespace " ULINTPF " page no "
+ ULINTPF " into the buffer pool after "
+ ULINTPF " attempts."
" The most probable cause"
" of this error may be that the"
" table has been corrupted."
@@ -3271,12 +3337,21 @@ got_block:
/* Decompress the page while not holding
any buf_pool or block->mutex. */
- /* Page checksum verification is already done when
- the page is read from disk. Hence page checksum
- verification is not necessary when decompressing the page. */
{
- bool success = buf_zip_decompress(block, FALSE);
- ut_a(success);
+ bool success = buf_zip_decompress(block, TRUE);
+
+ if (!success) {
+ buf_block_mutex_enter(fix_block);
+ buf_block_set_io_fix(fix_block, BUF_IO_NONE);
+ buf_block_mutex_exit(fix_block);
+
+ os_atomic_decrement_ulint(&buf_pool->n_pend_unzip, 1);
+ rw_lock_x_unlock(&fix_block->lock);
+ mutex_enter(&buf_pool->LRU_list_mutex);
+ buf_block_unfix(fix_block);
+ mutex_exit(&buf_pool->LRU_list_mutex);
+ return NULL;
+ }
}
if (!recv_no_ibuf_operations) {
@@ -3374,16 +3449,10 @@ got_block:
goto loop;
}
- fprintf(stderr,
- "innodb_change_buffering_debug evict %u %u\n",
- (unsigned) space, (unsigned) offset);
return(NULL);
}
if (buf_flush_page_try(buf_pool, fix_block)) {
- fprintf(stderr,
- "innodb_change_buffering_debug flush %u %u\n",
- (unsigned) space, (unsigned) offset);
guess = fix_block;
goto loop;
}
@@ -4354,11 +4423,11 @@ buf_page_create(
memset(frame + FIL_PAGE_NEXT, 0xff, 4);
mach_write_to_2(frame + FIL_PAGE_TYPE, FIL_PAGE_TYPE_ALLOCATED);
- /* Reset to zero the file flush lsn field in the page; if the first
- page of an ibdata file is 'created' in this function into the buffer
- pool then we lose the original contents of the file flush lsn stamp.
- Then InnoDB could in a crash recovery print a big, false, corruption
- warning if the stamp contains an lsn bigger than the ib_logfile lsn. */
+ /* FIL_PAGE_FILE_FLUSH_LSN_OR_KEY_VERSION is only used on the
+ following pages:
+ (1) The first page of the InnoDB system tablespace (page 0:0)
+ (2) FIL_RTREE_SPLIT_SEQ_NUM on R-tree pages
+ (3) key_version on encrypted pages (not page 0:0) */
memset(frame + FIL_PAGE_FILE_FLUSH_LSN_OR_KEY_VERSION, 0, 8);
@@ -4570,6 +4639,7 @@ buf_page_check_corrupt(buf_page_t* bpage, fil_space_t* space)
!bpage->encrypted &&
fil_space_verify_crypt_checksum(dst_frame, zip_size,
space, bpage->offset));
+
if (!still_encrypted) {
/* If traditional checksums match, we assume that page is
not anymore encrypted. */
@@ -6176,22 +6246,27 @@ buf_pool_reserve_tmp_slot(
buf_pool_mutex_exit(buf_pool);
/* Allocate temporary memory for encryption/decryption */
- if (free_slot->crypt_buf_free == NULL) {
- free_slot->crypt_buf_free = static_cast<byte *>(ut_malloc(UNIV_PAGE_SIZE*2));
- free_slot->crypt_buf = static_cast<byte *>(ut_align(free_slot->crypt_buf_free, UNIV_PAGE_SIZE));
- memset(free_slot->crypt_buf_free, 0, UNIV_PAGE_SIZE *2);
+ if (free_slot->crypt_buf == NULL) {
+ free_slot->crypt_buf = static_cast<byte*>(aligned_malloc(UNIV_PAGE_SIZE, UNIV_PAGE_SIZE));
+ memset(free_slot->crypt_buf, 0, UNIV_PAGE_SIZE);
}
/* For page compressed tables allocate temporary memory for
compression/decompression */
- if (compressed && free_slot->comp_buf_free == NULL) {
- free_slot->comp_buf_free = static_cast<byte *>(ut_malloc(UNIV_PAGE_SIZE*2));
- free_slot->comp_buf = static_cast<byte *>(ut_align(free_slot->comp_buf_free, UNIV_PAGE_SIZE));
- memset(free_slot->comp_buf_free, 0, UNIV_PAGE_SIZE *2);
-#ifdef HAVE_LZO
- free_slot->lzo_mem = static_cast<byte *>(ut_malloc(LZO1X_1_15_MEM_COMPRESS));
- memset(free_slot->lzo_mem, 0, LZO1X_1_15_MEM_COMPRESS);
+ if (compressed && free_slot->comp_buf == NULL) {
+ ulint size = UNIV_PAGE_SIZE;
+
+ /* Both snappy and lzo compression methods require that
+ output buffer used for compression is bigger than input
+ buffer. Increase the allocated buffer size accordingly. */
+#if HAVE_SNAPPY
+ size = snappy_max_compressed_length(size);
+#endif
+#if HAVE_LZO
+ size += LZO1X_1_15_MEM_COMPRESS;
#endif
+ free_slot->comp_buf = static_cast<byte*>(aligned_malloc(size, UNIV_PAGE_SIZE));
+ memset(free_slot->comp_buf, 0, size);
}
return (free_slot);
@@ -6279,8 +6354,7 @@ buf_page_encrypt_before_write(
fsp_flags_get_page_compression_level(space->flags),
fil_space_get_block_size(space, bpage->offset),
encrypted,
- &out_len,
- IF_LZO(slot->lzo_mem, NULL));
+ &out_len);
bpage->real_size = out_len;
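The aligned_malloc()/aligned_free() helpers added near the top of this file replace the old pattern of over-allocating 2 x UNIV_PAGE_SIZE and aligning the pointer with ut_align(). A standalone sketch of the same portability wrapper is shown below; the 16 KiB page size is only an illustrative constant, since UNIV_PAGE_SIZE is a build-time value that is not defined here.

    #include <cstdint>
    #include <cstdio>
    #include <cstdlib>
    #ifdef _MSC_VER
    #include <malloc.h>
    #endif

    static void *aligned_malloc(size_t size, size_t align) {
        void *result;
    #ifdef _MSC_VER
        result = _aligned_malloc(size, align);
    #else
        if (posix_memalign(&result, align, size))
            result = NULL;
    #endif
        return result;
    }

    static void aligned_free(void *ptr) {
    #ifdef _MSC_VER
        _aligned_free(ptr);
    #else
        free(ptr);
    #endif
    }

    int main() {
        const size_t page_size = 16384;   // illustrative stand-in for UNIV_PAGE_SIZE
        void *buf = aligned_malloc(page_size, page_size);
        if (buf != NULL) {
            std::printf("page-aligned: %d\n",
                        (int) ((uintptr_t) buf % page_size == 0));  // prints 1
            aligned_free(buf);
        }
    }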
diff --git a/storage/xtradb/buf/buf0dblwr.cc b/storage/xtradb/buf/buf0dblwr.cc
index 1c9646c0bd6..49371f9a6f1 100644
--- a/storage/xtradb/buf/buf0dblwr.cc
+++ b/storage/xtradb/buf/buf0dblwr.cc
@@ -1,7 +1,7 @@
/*****************************************************************************
-Copyright (c) 1995, 2014, Oracle and/or its affiliates. All Rights Reserved.
-Copyright (c) 2013, 2017, MariaDB Corporation. All Rights Reserved.
+Copyright (c) 1995, 2017, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 2013, 2017, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -175,13 +175,14 @@ buf_dblwr_init(
mem_zalloc(buf_size * sizeof(void*)));
}
-/****************************************************************//**
-Creates the doublewrite buffer to a new InnoDB installation. The header of the
-doublewrite buffer is placed on the trx system header page. */
+/** Create the doublewrite buffer if the doublewrite buffer header
+is not present in the TRX_SYS page.
+@return whether the operation succeeded
+@retval true if the doublewrite buffer exists or was created
+@retval false if the creation failed (too small first data file) */
UNIV_INTERN
-void
-buf_dblwr_create(void)
-/*==================*/
+bool
+buf_dblwr_create()
{
buf_block_t* block2;
buf_block_t* new_block;
@@ -194,8 +195,7 @@ buf_dblwr_create(void)
if (buf_dblwr) {
/* Already inited */
-
- return;
+ return(true);
}
start_again:
@@ -213,39 +213,59 @@ start_again:
mtr_commit(&mtr);
buf_dblwr_being_created = FALSE;
- return;
+ return(true);
}
- ib_logf(IB_LOG_LEVEL_INFO,
- "Doublewrite buffer not found: creating new");
-
if (buf_pool_get_curr_size()
< ((TRX_SYS_DOUBLEWRITE_BLOCKS * TRX_SYS_DOUBLEWRITE_BLOCK_SIZE
+ FSP_EXTENT_SIZE / 2 + 100)
* UNIV_PAGE_SIZE)) {
- ib_logf(IB_LOG_LEVEL_FATAL,
- "Cannot create doublewrite buffer: you must "
- "increase your buffer pool size. Cannot continue "
- "operation.");
+ ib_logf(IB_LOG_LEVEL_ERROR,
+ "Cannot create doublewrite buffer: "
+ "innodb_buffer_pool_size is too small.");
+ mtr_commit(&mtr);
+ return(false);
+ } else {
+ fil_space_t* space = fil_space_acquire(TRX_SYS_SPACE);
+ const bool fail = UT_LIST_GET_FIRST(space->chain)->size
+ < 3 * FSP_EXTENT_SIZE;
+ fil_space_release(space);
+
+ if (fail) {
+ goto too_small;
+ }
}
block2 = fseg_create(TRX_SYS_SPACE, TRX_SYS_PAGE_NO,
TRX_SYS_DOUBLEWRITE
+ TRX_SYS_DOUBLEWRITE_FSEG, &mtr);
+ if (block2 == NULL) {
+too_small:
+ ib_logf(IB_LOG_LEVEL_ERROR,
+ "Cannot create doublewrite buffer: "
+ "the first file in innodb_data_file_path"
+ " must be at least %luM.",
+ 3 * (FSP_EXTENT_SIZE * UNIV_PAGE_SIZE) >> 20);
+ mtr_commit(&mtr);
+ return(false);
+ }
+
+ ib_logf(IB_LOG_LEVEL_INFO,
+ "Doublewrite buffer not found: creating new");
+
+ /* FIXME: After this point, the doublewrite buffer creation
+ is not atomic. The doublewrite buffer should not exist in
+ the InnoDB system tablespace file in the first place.
+ It could be located in separate optional file(s) in a
+ user-specified location. */
+
/* fseg_create acquires a second latch on the page,
therefore we must declare it: */
buf_block_dbg_add_level(block2, SYNC_NO_ORDER_CHECK);
- if (block2 == NULL) {
- ib_logf(IB_LOG_LEVEL_FATAL,
- "Cannot create doublewrite buffer: you must "
- "increase your tablespace size. "
- "Cannot continue operation.");
- }
-
fseg_header = doublewrite + TRX_SYS_DOUBLEWRITE_FSEG;
prev_page_no = 0;
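The new error message above is worth a quick sanity check: with the default 16 KiB page size an extent is 64 pages, so three extents work out to 3 MiB and the message reports a 3M minimum for the first file in innodb_data_file_path. The arithmetic, with the 16 KiB figure assumed only for illustration (other page sizes use different extent sizes):

    #include <cstdio>

    int main() {
        const unsigned long page_size   = 16384;  // default UNIV_PAGE_SIZE
        const unsigned long extent_size = 64;     // FSP_EXTENT_SIZE at 16 KiB pages
        const unsigned long min_bytes   = 3 * extent_size * page_size;
        std::printf("minimum first data file size: %luM\n", min_bytes >> 20);  // 3M
    }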
@@ -351,7 +371,7 @@ recovery, this function loads the pages from double write buffer into memory. */
void
buf_dblwr_init_or_load_pages(
/*=========================*/
- os_file_t file,
+ pfs_os_file_t file,
char* path,
bool load_corrupt_pages)
{
@@ -482,6 +502,14 @@ buf_dblwr_process()
byte* unaligned_read_buf;
recv_dblwr_t& recv_dblwr = recv_sys->dblwr;
+ if (!buf_dblwr) {
+ return;
+ }
+
+ ib_logf(IB_LOG_LEVEL_INFO,
+ "Restoring possible half-written data pages "
+ "from the doublewrite buffer...");
+
unaligned_read_buf = static_cast<byte*>(ut_malloc(2 * UNIV_PAGE_SIZE));
read_buf = static_cast<byte*>(
diff --git a/storage/xtradb/buf/buf0dump.cc b/storage/xtradb/buf/buf0dump.cc
index e728636042b..71b97b770e1 100644
--- a/storage/xtradb/buf/buf0dump.cc
+++ b/storage/xtradb/buf/buf0dump.cc
@@ -1,6 +1,6 @@
/*****************************************************************************
-Copyright (c) 2011, 2016, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 2011, 2017, Oracle and/or its affiliates. All Rights Reserved.
Copyright (c) 2017, MariaDB Corporation. All Rights Reserved.
This program is free software; you can redistribute it and/or modify it under
@@ -612,6 +612,7 @@ buf_load()
if (dump_n == 0) {
ut_free(dump);
+ ut_free(dump_tmp);
ut_sprintf_timestamp(now);
buf_load_status(STATUS_NOTICE,
"Buffer pool(s) load completed at %s "
@@ -685,6 +686,7 @@ extern "C" UNIV_INTERN
os_thread_ret_t
DECLARE_THREAD(buf_dump_thread)(void*)
{
+ my_thread_init();
ut_ad(!srv_read_only_mode);
buf_dump_status(STATUS_INFO, "Dumping buffer pool(s) not yet started");
@@ -721,6 +723,7 @@ DECLARE_THREAD(buf_dump_thread)(void*)
srv_buf_dump_thread_active = false;
+ my_thread_end();
/* We count the number of threads in os_thread_exit(). A created
thread should always use that to exit and not use return() to exit. */
os_thread_exit(NULL);
diff --git a/storage/xtradb/buf/buf0flu.cc b/storage/xtradb/buf/buf0flu.cc
index 1f5c3993be7..84eea3bc692 100644
--- a/storage/xtradb/buf/buf0flu.cc
+++ b/storage/xtradb/buf/buf0flu.cc
@@ -1,7 +1,7 @@
/*****************************************************************************
-Copyright (c) 1995, 2016, Oracle and/or its affiliates. All Rights Reserved.
-Copyright (c) 2013, 2017, MariaDB Corporation. All Rights Reserved.
+Copyright (c) 1995, 2017, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 2013, 2017, MariaDB Corporation.
Copyright (c) 2013, 2014, Fusion-io
This program is free software; you can redistribute it and/or modify it under
@@ -62,10 +62,10 @@ is set to TRUE by the page_cleaner thread when it is spawned and is set
back to FALSE at shutdown by the page_cleaner as well. Therefore no
need to protect it by a mutex. It is only ever read by the thread
doing the shutdown */
-UNIV_INTERN ibool buf_page_cleaner_is_active = FALSE;
+UNIV_INTERN bool buf_page_cleaner_is_active;
/** Flag indicating if the lru_manager is in active state. */
-UNIV_INTERN bool buf_lru_manager_is_active = false;
+UNIV_INTERN bool buf_lru_manager_is_active;
#ifdef UNIV_PFS_THREAD
UNIV_INTERN mysql_pfs_key_t buf_page_cleaner_thread_key;
@@ -352,6 +352,7 @@ buf_flush_insert_into_flush_list(
buf_block_t* block, /*!< in/out: block which is modified */
lsn_t lsn) /*!< in: oldest modification */
{
+ ut_ad(srv_shutdown_state != SRV_SHUTDOWN_FLUSH_PHASE);
ut_ad(log_flush_order_mutex_own());
ut_ad(mutex_own(&block->mutex));
@@ -410,6 +411,7 @@ buf_flush_insert_sorted_into_flush_list(
buf_page_t* prev_b;
buf_page_t* b;
+ ut_ad(srv_shutdown_state != SRV_SHUTDOWN_FLUSH_PHASE);
ut_ad(log_flush_order_mutex_own());
ut_ad(mutex_own(&block->mutex));
ut_ad(buf_block_get_state(block) == BUF_BLOCK_FILE_PAGE);
@@ -715,6 +717,7 @@ buf_flush_write_complete(
buf_page_set_io_fix(bpage, BUF_IO_NONE);
buf_pool->n_flush[flush_type]--;
+ ut_ad(buf_pool->n_flush[flush_type] != ULINT_MAX);
#ifdef UNIV_MTFLUSH_DEBUG
fprintf(stderr, "n pending flush %lu\n",
@@ -1099,6 +1102,7 @@ buf_flush_page(
}
++buf_pool->n_flush[flush_type];
+ ut_ad(buf_pool->n_flush[flush_type] != 0);
mutex_exit(&buf_pool->flush_state_mutex);
@@ -2264,13 +2268,14 @@ Clears up tail of the LRU lists:
* Flush dirty pages at the tail of LRU to the disk
The depth to which we scan each buffer pool is controlled by dynamic
config parameter innodb_LRU_scan_depth.
-@return number of pages flushed */
+@return number of flushed and evicted pages */
UNIV_INTERN
ulint
buf_flush_LRU_tail(void)
/*====================*/
{
ulint total_flushed = 0;
+ ulint total_evicted = 0;
ulint start_time = ut_time_ms();
ulint scan_depth[MAX_BUFFER_POOLS];
ulint requested_pages[MAX_BUFFER_POOLS];
@@ -2341,6 +2346,7 @@ buf_flush_LRU_tail(void)
limited_scan[i]
= (previous_evicted[i] > n.evicted);
previous_evicted[i] = n.evicted;
+ total_evicted += n.evicted;
requested_pages[i] += lru_chunk_size;
@@ -2381,7 +2387,7 @@ buf_flush_LRU_tail(void)
}
}
- return(total_flushed);
+ return(total_flushed + total_evicted);
}
/*********************************************************************//**
@@ -2682,6 +2688,23 @@ buf_get_total_free_list_length(void)
return result;
}
+/** Returns the aggregate LRU list length over all buffer pool instances.
+@return total LRU list length. */
+MY_ATTRIBUTE((warn_unused_result))
+static
+ulint
+buf_get_total_LRU_list_length(void)
+{
+ ulint result = 0;
+
+ for (ulint i = 0; i < srv_buf_pool_instances; i++) {
+
+ result += UT_LIST_GET_LEN(buf_pool_from_array(i)->LRU);
+ }
+
+ return result;
+}
+
/*********************************************************************//**
Adjust the desired page cleaner thread sleep time for LRU flushes. */
MY_ATTRIBUTE((nonnull))
@@ -2694,8 +2717,9 @@ page_cleaner_adapt_lru_sleep_time(
ulint lru_n_flushed) /*!< in: number of flushed in previous batch */
{
- ulint free_len = buf_get_total_free_list_length();
- ulint max_free_len = srv_LRU_scan_depth * srv_buf_pool_instances;
+ ulint free_len = buf_get_total_free_list_length();
+ ulint max_free_len = ut_min(buf_get_total_LRU_list_length(),
+ srv_LRU_scan_depth * srv_buf_pool_instances);
if (free_len < max_free_len / 100 && lru_n_flushed) {
@@ -2707,7 +2731,7 @@ page_cleaner_adapt_lru_sleep_time(
/* Free lists filled more than 20%
or no pages flushed in previous batch, sleep a bit more */
- *lru_sleep_time += 50;
+ *lru_sleep_time += 1;
if (*lru_sleep_time > srv_cleaner_max_lru_time)
*lru_sleep_time = srv_cleaner_max_lru_time;
} else if (free_len < max_free_len / 20 && *lru_sleep_time >= 50) {
@@ -2754,6 +2778,7 @@ DECLARE_THREAD(buf_flush_page_cleaner_thread)(
/*!< in: a dummy parameter required by
os_thread_create */
{
+ my_thread_init();
ulint next_loop_time = ut_time_ms() + 1000;
ulint n_flushed = 0;
ulint last_activity = srv_get_activity_count();
@@ -2774,8 +2799,6 @@ DECLARE_THREAD(buf_flush_page_cleaner_thread)(
os_thread_pf(os_thread_get_curr_id()));
#endif /* UNIV_DEBUG_THREAD_CREATION */
- buf_page_cleaner_is_active = TRUE;
-
while (srv_shutdown_state == SRV_SHUTDOWN_NONE) {
ulint page_cleaner_sleep_time;
@@ -2884,8 +2907,9 @@ DECLARE_THREAD(buf_flush_page_cleaner_thread)(
/* We have lived our life. Time to die. */
thread_exit:
- buf_page_cleaner_is_active = FALSE;
+ buf_page_cleaner_is_active = false;
+ my_thread_end();
/* We count the number of threads in os_thread_exit(). A created
thread should always use that to exit and not use return() to exit. */
os_thread_exit(NULL);
@@ -2924,8 +2948,6 @@ DECLARE_THREAD(buf_flush_lru_manager_thread)(
os_thread_pf(os_thread_get_curr_id()));
#endif /* UNIV_DEBUG_THREAD_CREATION */
- buf_lru_manager_is_active = true;
-
/* On server shutdown, the LRU manager thread runs through cleanup
phase to provide free pages for the master and purge threads. */
while (srv_shutdown_state == SRV_SHUTDOWN_NONE
diff --git a/storage/xtradb/dict/dict0dict.cc b/storage/xtradb/dict/dict0dict.cc
index bc46bcab63b..a1cfeb3860f 100644
--- a/storage/xtradb/dict/dict0dict.cc
+++ b/storage/xtradb/dict/dict0dict.cc
@@ -888,6 +888,12 @@ dict_index_get_nth_col_or_prefix_pos(
ut_ad(index);
ut_ad(index->magic_n == DICT_INDEX_MAGIC_N);
+ ut_ad((inc_prefix && !prefix_col_pos) || (!inc_prefix));
+
+ if (!prefix_col_pos) {
+ prefix_col_pos = &prefixed_pos_dummy;
+ }
+ *prefix_col_pos = ULINT_UNDEFINED;
if (!prefix_col_pos) {
prefix_col_pos = &prefixed_pos_dummy;
diff --git a/storage/xtradb/dict/dict0mem.cc b/storage/xtradb/dict/dict0mem.cc
index fa7177c5137..125d7d78a1f 100644
--- a/storage/xtradb/dict/dict0mem.cc
+++ b/storage/xtradb/dict/dict0mem.cc
@@ -320,8 +320,8 @@ dict_mem_table_col_rename_low(
ut_ad(from_len <= NAME_LEN);
ut_ad(to_len <= NAME_LEN);
- char from[NAME_LEN];
- strncpy(from, s, NAME_LEN);
+ char from[NAME_LEN + 1];
+ strncpy(from, s, NAME_LEN + 1);
if (from_len == to_len) {
/* The easy case: simply replace the column name in
diff --git a/storage/xtradb/dict/dict0stats.cc b/storage/xtradb/dict/dict0stats.cc
index d2e9a2618c0..c1463e98ce0 100644
--- a/storage/xtradb/dict/dict0stats.cc
+++ b/storage/xtradb/dict/dict0stats.cc
@@ -1168,8 +1168,9 @@ dict_stats_analyze_index_level(
leaf-level delete marks because delete marks on
non-leaf level do not make sense. */
- if (level == 0 && srv_stats_include_delete_marked? 0:
- rec_get_deleted_flag(
+ if (level == 0
+ && !srv_stats_include_delete_marked
+ && rec_get_deleted_flag(
rec,
page_is_comp(btr_pcur_get_page(&pcur)))) {
diff --git a/storage/xtradb/dict/dict0stats_bg.cc b/storage/xtradb/dict/dict0stats_bg.cc
index 55d34ff6ae1..ba6fd115551 100644
--- a/storage/xtradb/dict/dict0stats_bg.cc
+++ b/storage/xtradb/dict/dict0stats_bg.cc
@@ -1,7 +1,7 @@
/*****************************************************************************
-Copyright (c) 2012, 2016, Oracle and/or its affiliates. All Rights Reserved.
-Copyright (c) 2017, MariaDB Corporation. All Rights Reserved.
+Copyright (c) 2012, 2017, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 2017, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -39,12 +39,18 @@ Created Apr 25, 2012 Vasil Dimov
/** Minimum time interval between stats recalc for a given table */
#define MIN_RECALC_INTERVAL 10 /* seconds */
-#define SHUTTING_DOWN() (srv_shutdown_state != SRV_SHUTDOWN_NONE)
-
/** Event to wake up dict_stats_thread on dict_stats_recalc_pool_add()
or shutdown. Not protected by any mutex. */
UNIV_INTERN os_event_t dict_stats_event;
+/** Variable to initiate shutdown the dict stats thread. Note we don't
+use 'srv_shutdown_state' because we want to shutdown dict stats thread
+before purge thread. */
+static bool dict_stats_start_shutdown;
+
+/** Event to wait for shutdown of the dict stats thread */
+static os_event_t dict_stats_shutdown_event;
+
/** This mutex protects the "recalc_pool" variable. */
static ib_mutex_t recalc_pool_mutex;
static ib_mutex_t defrag_pool_mutex;
@@ -341,11 +347,11 @@ Must be called before dict_stats_thread() is started. */
UNIV_INTERN
void
dict_stats_thread_init()
-/*====================*/
{
ut_a(!srv_read_only_mode);
dict_stats_event = os_event_create();
+ dict_stats_shutdown_event = os_event_create();
/* The recalc_pool_mutex is acquired from:
1) the background stats gathering thread before any other latch
@@ -390,6 +396,9 @@ dict_stats_thread_deinit()
os_event_free(dict_stats_event);
dict_stats_event = NULL;
+ os_event_free(dict_stats_shutdown_event);
+ dict_stats_shutdown_event = NULL;
+ dict_stats_start_shutdown = false;
}
/*****************************************************************//**
@@ -530,9 +539,10 @@ extern "C" UNIV_INTERN
os_thread_ret_t
DECLARE_THREAD(dict_stats_thread)(void*)
{
+ my_thread_init();
ut_a(!srv_read_only_mode);
- while (!SHUTTING_DOWN()) {
+ while (!dict_stats_start_shutdown) {
/* Wake up periodically even if not signaled. This is
because we may lose an event - if the below call to
@@ -542,7 +552,7 @@ DECLARE_THREAD(dict_stats_thread)(void*)
os_event_wait_time(
dict_stats_event, MIN_RECALC_INTERVAL * 1000000);
- if (SHUTTING_DOWN()) {
+ if (dict_stats_start_shutdown) {
break;
}
@@ -556,9 +566,20 @@ DECLARE_THREAD(dict_stats_thread)(void*)
srv_dict_stats_thread_active = false;
+ os_event_set(dict_stats_shutdown_event);
+ my_thread_end();
/* We count the number of threads in os_thread_exit(). A created
thread should always use that to exit instead of return(). */
os_thread_exit(NULL);
OS_THREAD_DUMMY_RETURN;
}
+
+/** Shut down the dict_stats_thread. */
+void
+dict_stats_shutdown()
+{
+ dict_stats_start_shutdown = true;
+ os_event_set(dict_stats_event);
+ os_event_wait(dict_stats_shutdown_event);
+}
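The shutdown sequence introduced here is a small handshake: dict_stats_shutdown() raises dict_stats_start_shutdown, signals dict_stats_event so the worker wakes up early, and then waits on dict_stats_shutdown_event, which the worker sets just before exiting. A compact sketch of the same pattern with standard C++ primitives follows; condition variables stand in for InnoDB's os_event API, so this illustrates the handshake rather than the actual server code.

    #include <atomic>
    #include <chrono>
    #include <condition_variable>
    #include <cstdio>
    #include <mutex>
    #include <thread>

    static std::atomic<bool>       start_shutdown(false);
    static std::mutex              mtx;
    static std::condition_variable wake_worker;   // plays the role of dict_stats_event
    static std::condition_variable worker_done;   // plays the role of dict_stats_shutdown_event
    static bool                    done = false;

    static void stats_worker() {
        std::unique_lock<std::mutex> lk(mtx);
        while (!start_shutdown) {
            // Wake up periodically even if not signalled, like the MIN_RECALC_INTERVAL wait.
            wake_worker.wait_for(lk, std::chrono::milliseconds(100));
            if (start_shutdown)
                break;
            // ... recalculate persistent statistics for queued tables here ...
        }
        done = true;
        worker_done.notify_all();                  // os_event_set(dict_stats_shutdown_event)
    }

    static void stats_shutdown() {
        {
            std::lock_guard<std::mutex> lk(mtx);
            start_shutdown = true;
        }
        wake_worker.notify_all();                  // os_event_set(dict_stats_event)
        std::unique_lock<std::mutex> lk(mtx);
        worker_done.wait(lk, [] { return done; }); // os_event_wait(dict_stats_shutdown_event)
    }

    int main() {
        std::thread worker(stats_worker);
        stats_shutdown();
        worker.join();
        std::printf("dict stats worker stopped cleanly\n");
    }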
diff --git a/storage/xtradb/fil/fil0crypt.cc b/storage/xtradb/fil/fil0crypt.cc
index e27e93244ae..e73d600d2ca 100644
--- a/storage/xtradb/fil/fil0crypt.cc
+++ b/storage/xtradb/fil/fil0crypt.cc
@@ -887,7 +887,7 @@ fil_space_decrypt(
Calculate post encryption checksum
@param[in] zip_size zip_size or 0
@param[in] dst_frame Block where checksum is calculated
-@return page checksum or BUF_NO_CHECKSUM_MAGIC
+@return page checksum
not needed. */
UNIV_INTERN
ulint
@@ -896,30 +896,13 @@ fil_crypt_calculate_checksum(
const byte* dst_frame)
{
ib_uint32_t checksum = 0;
- srv_checksum_algorithm_t algorithm =
- static_cast<srv_checksum_algorithm_t>(srv_checksum_algorithm);
+ /* For encrypted tables we use only crc32 and strict_crc32 */
if (zip_size == 0) {
- switch (algorithm) {
- case SRV_CHECKSUM_ALGORITHM_CRC32:
- case SRV_CHECKSUM_ALGORITHM_STRICT_CRC32:
- checksum = buf_calc_page_crc32(dst_frame);
- break;
- case SRV_CHECKSUM_ALGORITHM_INNODB:
- case SRV_CHECKSUM_ALGORITHM_STRICT_INNODB:
- checksum = (ib_uint32_t) buf_calc_page_new_checksum(
- dst_frame);
- break;
- case SRV_CHECKSUM_ALGORITHM_NONE:
- case SRV_CHECKSUM_ALGORITHM_STRICT_NONE:
- checksum = BUF_NO_CHECKSUM_MAGIC;
- break;
- /* no default so the compiler will emit a warning
- * if new enum is added and not handled here */
- }
+ checksum = buf_calc_page_crc32(dst_frame);
} else {
checksum = page_zip_calc_checksum(dst_frame, zip_size,
- algorithm);
+ SRV_CHECKSUM_ALGORITHM_CRC32);
}
return checksum;
@@ -953,14 +936,6 @@ fil_space_verify_crypt_checksum(
return(false);
}
- srv_checksum_algorithm_t algorithm =
- static_cast<srv_checksum_algorithm_t>(srv_checksum_algorithm);
-
- /* If no checksum is used, can't continue checking. */
- if (algorithm == SRV_CHECKSUM_ALGORITHM_NONE) {
- return(true);
- }
-
/* Read stored post encryption checksum. */
ib_uint32_t checksum = mach_read_from_4(
page + FIL_PAGE_FILE_FLUSH_LSN_OR_KEY_VERSION + 4);
@@ -1044,7 +1019,6 @@ fil_space_verify_crypt_checksum(
checksum1 = mach_read_from_4(
page + UNIV_PAGE_SIZE - FIL_PAGE_END_LSN_OLD_CHKSUM);
valid = (buf_page_is_checksum_valid_crc32(page,checksum1,checksum2)
- || buf_page_is_checksum_valid_none(page,checksum1,checksum2)
|| buf_page_is_checksum_valid_innodb(page,checksum1, checksum2));
}
@@ -1141,6 +1115,36 @@ fil_crypt_needs_rotation(
return false;
}
+/** Read page 0 and possible crypt data from there.
+@param[in,out] space Tablespace */
+static inline
+void
+fil_crypt_read_crypt_data(fil_space_t* space)
+{
+ if (space->crypt_data || space->size) {
+ /* The encryption metadata has already been read, or
+ the tablespace is not encrypted and the file has been
+ opened already. */
+ return;
+ }
+
+ mtr_t mtr;
+ mtr_start(&mtr);
+ ulint zip_size = fsp_flags_get_zip_size(space->flags);
+ ulint offset = fsp_header_get_crypt_offset(zip_size);
+ if (buf_block_t* block = buf_page_get(space->id, zip_size, 0,
+ RW_S_LATCH, &mtr)) {
+ mutex_enter(&fil_system->mutex);
+ if (!space->crypt_data) {
+ space->crypt_data = fil_space_read_crypt_data(
+ space->id, block->frame, offset);
+ }
+ mutex_exit(&fil_system->mutex);
+ }
+
+ mtr_commit(&mtr);
+}
+
/***********************************************************************
Start encrypting a space
@param[in,out] space Tablespace
@@ -1151,6 +1155,7 @@ fil_crypt_start_encrypting_space(
fil_space_t* space)
{
bool recheck = false;
+
mutex_enter(&fil_crypt_threads_mutex);
fil_space_crypt_t *crypt_data = space->crypt_data;
@@ -1217,8 +1222,6 @@ fil_crypt_start_encrypting_space(
byte* frame = buf_block_get_frame(block);
crypt_data->type = CRYPT_SCHEME_1;
crypt_data->write_page0(frame, &mtr);
-
-
mtr_commit(&mtr);
/* record lsn of update */
@@ -1294,10 +1297,10 @@ struct rotate_thread_t {
bool should_shutdown() const {
switch (srv_shutdown_state) {
case SRV_SHUTDOWN_NONE:
- case SRV_SHUTDOWN_CLEANUP:
return thread_no >= srv_n_fil_crypt_threads;
- case SRV_SHUTDOWN_FLUSH_PHASE:
+ case SRV_SHUTDOWN_CLEANUP:
return true;
+ case SRV_SHUTDOWN_FLUSH_PHASE:
case SRV_SHUTDOWN_LAST_PHASE:
case SRV_SHUTDOWN_EXIT_THREADS:
break;
@@ -1646,6 +1649,8 @@ fil_crypt_find_space_to_rotate(
}
while (!state->should_shutdown() && state->space) {
+ fil_crypt_read_crypt_data(state->space);
+
if (fil_crypt_space_needs_rotation(state, key_state, recheck)) {
ut_ad(key_state->key_id);
/* init state->min_key_version_found before
@@ -2340,8 +2345,10 @@ DECLARE_THREAD(fil_crypt_thread)(
while (!thr.should_shutdown() &&
fil_crypt_find_page_to_rotate(&new_state, &thr)) {
- /* rotate a (set) of pages */
- fil_crypt_rotate_pages(&new_state, &thr);
+ if (!thr.space->is_stopping()) {
+ /* rotate a (set) of pages */
+ fil_crypt_rotate_pages(&new_state, &thr);
+ }
/* If space is marked as stopping, release
space and stop rotation. */
@@ -2571,10 +2578,10 @@ fil_space_crypt_get_status(
memset(status, 0, sizeof(*status));
ut_ad(space->n_pending_ops > 0);
- fil_space_crypt_t* crypt_data = space->crypt_data;
+ fil_crypt_read_crypt_data(const_cast<fil_space_t*>(space));
status->space = space->id;
- if (crypt_data != NULL) {
+ if (fil_space_crypt_t* crypt_data = space->crypt_data) {
mutex_enter(&crypt_data->mutex);
status->scheme = crypt_data->type;
status->keyserver_requests = crypt_data->keyserver_requests;
diff --git a/storage/xtradb/fil/fil0fil.cc b/storage/xtradb/fil/fil0fil.cc
index e39be46840c..12048bc479f 100644
--- a/storage/xtradb/fil/fil0fil.cc
+++ b/storage/xtradb/fil/fil0fil.cc
@@ -1,6 +1,6 @@
/*****************************************************************************
-Copyright (c) 1995, 2016, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 1995, 2017, Oracle and/or its affiliates. All Rights Reserved.
Copyright (c) 2014, 2017, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify it under
@@ -166,7 +166,8 @@ UNIV_INTERN extern uint srv_fil_crypt_rotate_key_age;
UNIV_INTERN extern ib_mutex_t fil_crypt_threads_mutex;
/** Determine if (i) is a user tablespace id or not. */
-# define fil_is_user_tablespace_id(i) ((i) > srv_undo_tablespaces_open)
+# define fil_is_user_tablespace_id(i) (i != 0 \
+ && !srv_is_undo_tablespace(i))
/** Determine if user has explicitly disabled fsync(). */
#ifndef __WIN__
@@ -621,7 +622,8 @@ fil_node_open_file(
size_bytes = os_file_get_size(node->handle);
ut_a(size_bytes != (os_offset_t) -1);
- node->file_block_size = os_file_get_block_size(node->handle, node->name);
+ node->file_block_size = os_file_get_block_size(
+ node->handle, node->name);
space->file_block_size = node->file_block_size;
#ifdef UNIV_HOTBACKUP
@@ -659,12 +661,10 @@ fil_node_open_file(
/* Try to read crypt_data from page 0 if it is not yet
read. */
- if (!node->space->page_0_crypt_read) {
- ulint offset = fsp_header_get_crypt_offset(
- fsp_flags_get_zip_size(flags));
- ut_ad(node->space->crypt_data == NULL);
+ if (!node->space->crypt_data) {
+ const ulint offset = fsp_header_get_crypt_offset(
+ fsp_flags_get_zip_size(flags));
node->space->crypt_data = fil_space_read_crypt_data(space_id, page, offset);
- node->space->page_0_crypt_read = true;
}
ut_free(buf2);
@@ -731,7 +731,8 @@ add_size:
}
if (node->file_block_size == 0) {
- node->file_block_size = os_file_get_block_size(node->handle, node->name);
+ node->file_block_size = os_file_get_block_size(
+ node->handle, node->name);
space->file_block_size = node->file_block_size;
}
@@ -1597,22 +1598,6 @@ fil_space_create(
space->magic_n = FIL_SPACE_MAGIC_N;
space->crypt_data = crypt_data;
- /* In create table we write page 0 so we have already
- "read" it and for system tablespaces we have read
- crypt data at startup. */
- if (create_table || crypt_data != NULL) {
- space->page_0_crypt_read = true;
- }
-
-#ifdef UNIV_DEBUG
- ib_logf(IB_LOG_LEVEL_INFO,
- "Created tablespace for space %lu name %s key_id %u encryption %d.",
- space->id,
- space->name,
- space->crypt_data ? space->crypt_data->key_id : 0,
- space->crypt_data ? space->crypt_data->encryption : 0);
-#endif
-
rw_lock_create(fil_space_latch_key, &space->latch, SYNC_FSP);
HASH_INSERT(fil_space_t, hash, fil_system->spaces, id, space);
@@ -2063,8 +2048,6 @@ fil_init(
fil_system->spaces = hash_create(hash_size);
fil_system->name_hash = hash_create(hash_size);
- UT_LIST_INIT(fil_system->LRU);
-
fil_system->max_n_open = max_n_open;
fil_space_crypt_init();
@@ -2264,99 +2247,70 @@ fil_set_max_space_id_if_bigger(
mutex_exit(&fil_system->mutex);
}
-/****************************************************************//**
-Writes the flushed lsn and the latest archived log number to the page header
-of the first page of a data file of the system tablespace (space 0),
-which is uncompressed. */
-static MY_ATTRIBUTE((warn_unused_result))
+/** Write the flushed LSN to the page header of the first page in the
+system tablespace.
+@param[in] lsn flushed LSN
+@return DB_SUCCESS or error number */
dberr_t
-fil_write_lsn_and_arch_no_to_file(
-/*==============================*/
- ulint space, /*!< in: space to write to */
- ulint sum_of_sizes, /*!< in: combined size of previous files
- in space, in database pages */
- lsn_t lsn, /*!< in: lsn to write */
- ulint arch_log_no MY_ATTRIBUTE((unused)))
- /*!< in: archived log number to write */
+fil_write_flushed_lsn(
+ lsn_t lsn)
{
byte* buf1;
byte* buf;
dberr_t err;
- buf1 = static_cast<byte*>(mem_alloc(2 * UNIV_PAGE_SIZE));
+ buf1 = static_cast<byte*>(ut_malloc(2 * UNIV_PAGE_SIZE));
buf = static_cast<byte*>(ut_align(buf1, UNIV_PAGE_SIZE));
- err = fil_read(TRUE, space, 0, sum_of_sizes, 0,
- UNIV_PAGE_SIZE, buf, NULL, 0);
- if (err == DB_SUCCESS) {
- mach_write_to_8(buf + FIL_PAGE_FILE_FLUSH_LSN_OR_KEY_VERSION,
- lsn);
-
- err = fil_write(TRUE, space, 0, sum_of_sizes, 0,
- UNIV_PAGE_SIZE, buf, NULL, 0);
- }
-
- mem_free(buf1);
-
- return(err);
-}
-
-/****************************************************************//**
-Writes the flushed lsn and the latest archived log number to the page
-header of the first page of each data file in the system tablespace.
-@return DB_SUCCESS or error number */
-UNIV_INTERN
-dberr_t
-fil_write_flushed_lsn_to_data_files(
-/*================================*/
- lsn_t lsn, /*!< in: lsn to write */
- ulint arch_log_no) /*!< in: latest archived log file number */
-{
- fil_space_t* space;
- fil_node_t* node;
- dberr_t err;
-
- mutex_enter(&fil_system->mutex);
-
- for (space = UT_LIST_GET_FIRST(fil_system->space_list);
- space != NULL;
- space = UT_LIST_GET_NEXT(space_list, space)) {
-
- /* We only write the lsn to all existing data files which have
- been open during the lifetime of the mysqld process; they are
- represented by the space objects in the tablespace memory
- cache. Note that all data files in the system tablespace 0
- and the UNDO log tablespaces (if separate) are always open. */
-
- if (space->purpose == FIL_TABLESPACE
- && !fil_is_user_tablespace_id(space->id)) {
- ulint sum_of_sizes = 0;
+ /* Acquire system tablespace */
+ fil_space_t* space = fil_space_acquire(0);
- for (node = UT_LIST_GET_FIRST(space->chain);
- node != NULL;
- node = UT_LIST_GET_NEXT(chain, node)) {
+ /* If tablespace is not encrypted, stamp flush_lsn to
+ first page of all system tablespace datafiles to avoid
+ unnecessary error messages on possible downgrade. */
+ if (space->crypt_data->min_key_version == 0) {
+ fil_node_t* node;
+ ulint sum_of_sizes = 0;
- mutex_exit(&fil_system->mutex);
-
- err = fil_write_lsn_and_arch_no_to_file(
- space->id, sum_of_sizes, lsn,
- arch_log_no);
+ for (node = UT_LIST_GET_FIRST(space->chain);
+ node != NULL;
+ node = UT_LIST_GET_NEXT(chain, node)) {
- if (err != DB_SUCCESS) {
+ err = fil_read(TRUE, 0, 0, sum_of_sizes, 0,
+ UNIV_PAGE_SIZE, buf, NULL, 0);
- return(err);
- }
+ if (err == DB_SUCCESS) {
+ mach_write_to_8(buf + FIL_PAGE_FILE_FLUSH_LSN_OR_KEY_VERSION,
+ lsn);
- mutex_enter(&fil_system->mutex);
+ err = fil_write(TRUE, 0, 0, sum_of_sizes, 0,
+ UNIV_PAGE_SIZE, buf, NULL, 0);
sum_of_sizes += node->size;
}
}
+ } else {
+ /* When system tablespace is encrypted stamp flush_lsn to
+ only the first page of the first datafile (rest of pages
+ are encrypted). */
+ err = fil_read(TRUE, 0, 0, 0, 0,
+ UNIV_PAGE_SIZE, buf, NULL, 0);
+
+ if (err == DB_SUCCESS) {
+ mach_write_to_8(buf + FIL_PAGE_FILE_FLUSH_LSN_OR_KEY_VERSION,
+ lsn);
+
+ err = fil_write(TRUE, 0, 0, 0, 0,
+ UNIV_PAGE_SIZE, buf, NULL, 0);
+ }
}
- mutex_exit(&fil_system->mutex);
+ fil_flush_file_spaces(FIL_TABLESPACE);
+ fil_space_release(space);
- return(DB_SUCCESS);
+ ut_free(buf1);
+
+ return(err);
}
/** Check the consistency of the first data page of a tablespace
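
fil_write_flushed_lsn() above stamps the LSN with mach_write_to_8() at FIL_PAGE_FILE_FLUSH_LSN_OR_KEY_VERSION on page 0 of every system-tablespace datafile when the tablespace is unencrypted, and only on the first datafile when it is encrypted. Here is a small runnable sketch of the 8-byte big-endian store/load pair that the mach_* helpers perform; the offset value 26 mentioned in the comment is an assumption taken from stock InnoDB headers, not from this patch.

#include <cstdint>
#include <cstdio>

/* Big-endian store, equivalent to mach_write_to_8(). */
static void write_lsn(unsigned char* p, uint64_t lsn)
{
	for (int i = 7; i >= 0; i--) {
		p[i] = static_cast<unsigned char>(lsn);
		lsn >>= 8;
	}
}

/* Big-endian load, equivalent to mach_read_from_8(). */
static uint64_t read_lsn(const unsigned char* p)
{
	uint64_t lsn = 0;
	for (int i = 0; i < 8; i++) {
		lsn = (lsn << 8) | p[i];
	}
	return lsn;
}

int main()
{
	/* In the real code the destination is page + 26, i.e.
	page + FIL_PAGE_FILE_FLUSH_LSN_OR_KEY_VERSION (assumed value). */
	unsigned char field[8];
	write_lsn(field, 123456789ULL);
	std::printf("round trip ok: %d\n", read_lsn(field) == 123456789ULL);
}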
@@ -2409,30 +2363,29 @@ fil_check_first_page(const page_t* page, ulint space_id, ulint flags)
return("inconsistent data in space header");
}
-/*******************************************************************//**
-Reads the flushed lsn, arch no, space_id and tablespace flag fields from
-the first page of a data file at database startup.
+/** Reads the flushed lsn, arch no, space_id and tablespace flag fields from
+the first page of a first data file at database startup.
+@param[in] data_file open data file
+@param[in]	one_read_already	true if the first datafile has
+					already been read
+@param[out] flags FSP_SPACE_FLAGS
+@param[out]	space_id	tablespace ID
+@param[out] flushed_lsn flushed lsn value
+@param[out] crypt_data encryption crypt data
@retval NULL on success, or if innodb_force_recovery is set
@return pointer to an error message string */
UNIV_INTERN
const char*
fil_read_first_page(
-/*================*/
- os_file_t data_file, /*!< in: open data file */
- ibool one_read_already, /*!< in: TRUE if min and max
- parameters below already
- contain sensible data */
- ulint* flags, /*!< out: FSP_SPACE_FLAGS */
- ulint* space_id, /*!< out: tablespace ID */
- lsn_t* min_flushed_lsn, /*!< out: min of flushed
- lsn values in data files */
- lsn_t* max_flushed_lsn, /*!< out: max of flushed
- lsn values in data files */
- fil_space_crypt_t** crypt_data) /*< out: crypt data */
+ pfs_os_file_t data_file,
+ ibool one_read_already,
+ ulint* flags,
+ ulint* space_id,
+ lsn_t* flushed_lsn,
+ fil_space_crypt_t** crypt_data)
{
byte* buf;
byte* page;
- lsn_t flushed_lsn;
const char* check_msg = NULL;
fil_space_crypt_t* cdata;
@@ -2449,6 +2402,7 @@ fil_read_first_page(
return "File size is less than minimum";
}
}
+
buf = static_cast<byte*>(ut_malloc(2 * UNIV_PAGE_SIZE));
/* Align the memory for a possible read from a raw device */
@@ -2467,6 +2421,11 @@ fil_read_first_page(
*space_id = fsp_header_get_space_id(page);
*flags = fsp_header_get_flags(page);
+ if (flushed_lsn) {
+ *flushed_lsn = mach_read_from_8(page +
+ FIL_PAGE_FILE_FLUSH_LSN_OR_KEY_VERSION);
+ }
+
if (!fsp_flags_is_valid(*flags)) {
ulint cflags = fsp_flags_convert_from_101(*flags);
if (cflags == ULINT_UNDEFINED) {
@@ -2479,37 +2438,36 @@ fil_read_first_page(
}
}
- if (!(IS_XTRABACKUP() && srv_backup_mode)) {
- check_msg = fil_check_first_page(page, *space_id, *flags);
+ if (!(IS_XTRABACKUP() && srv_backup_mode)) {
+ check_msg = fil_check_first_page(page, *space_id, *flags);
}
- }
- flushed_lsn = mach_read_from_8(page +
- FIL_PAGE_FILE_FLUSH_LSN_OR_KEY_VERSION);
+ /* Possible encryption crypt data is also stored only to first page
+ of the first datafile. */
+ const ulint offset = fsp_header_get_crypt_offset(
+ fsp_flags_get_zip_size(*flags));
- ulint space = fsp_header_get_space_id(page);
- ulint offset = fsp_header_get_crypt_offset(
- fsp_flags_get_zip_size(*flags));
+ cdata = fil_space_read_crypt_data(*space_id, page, offset);
- cdata = fil_space_read_crypt_data(space, page, offset);
-
- if (crypt_data) {
- *crypt_data = cdata;
- }
-
- /* If file space is encrypted we need to have at least some
- encryption service available where to get keys */
- if (cdata && cdata->should_encrypt()) {
+ if (crypt_data) {
+ *crypt_data = cdata;
+ }
- if (!encryption_key_id_exists(cdata->key_id)) {
- ib_logf(IB_LOG_LEVEL_ERROR,
- "Tablespace id %ld is encrypted but encryption service"
- " or used key_id %u is not available. Can't continue opening tablespace.",
- space, cdata->key_id);
+ /* If file space is encrypted we need to have at least some
+ encryption service available where to get keys */
+ if (cdata && cdata->should_encrypt()) {
- return ("table encrypted but encryption service not available.");
+ if (!encryption_key_id_exists(cdata->key_id)) {
+ ib_logf(IB_LOG_LEVEL_ERROR,
+ "Tablespace id " ULINTPF
+ " is encrypted but encryption service"
+ " or used key_id %u is not available. "
+ "Can't continue opening tablespace.",
+ *space_id, cdata->key_id);
+ return ("table encrypted but encryption service not available.");
+ }
}
}
@@ -2519,20 +2477,6 @@ fil_read_first_page(
return(check_msg);
}
- if (!one_read_already) {
- *min_flushed_lsn = flushed_lsn;
- *max_flushed_lsn = flushed_lsn;
-
- return(NULL);
- }
-
- if (*min_flushed_lsn > flushed_lsn) {
- *min_flushed_lsn = flushed_lsn;
- }
- if (*max_flushed_lsn < flushed_lsn) {
- *max_flushed_lsn = flushed_lsn;
- }
-
return(NULL);
}
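
The rewritten fil_read_first_page() drops the min/max flushed-LSN bookkeeping: callers pass a single lsn_t*, or NULL when they do not care, as the fil_open_single_table_tablespace() hunks further down do. A hypothetical caller against a stub with the new shape of the signature (everything here is a stand-in, not the real function):

#include <cstdint>
#include <cstdio>

typedef uint64_t lsn_t;        /* stand-ins; the real types live in univ.i / fil0fil.h */
typedef unsigned long ulint;
struct fil_space_crypt_t;

/* Stub with the new shape of fil_read_first_page(): one optional
flushed_lsn out-parameter instead of the old min/max pair. */
static const char* fil_read_first_page_stub(int /*data_file*/, bool /*one_read_already*/,
                                            ulint* flags, ulint* space_id,
                                            lsn_t* flushed_lsn,
                                            fil_space_crypt_t** crypt_data)
{
	*flags = 0;
	*space_id = 0;
	if (flushed_lsn) {         /* NULL is allowed: .ibd opens skip the LSN */
		*flushed_lsn = 0;
	}
	*crypt_data = nullptr;
	return nullptr;            /* NULL means "no error message" */
}

int main()
{
	ulint flags, space_id;
	lsn_t flushed_lsn;
	fil_space_crypt_t* crypt_data;

	/* System tablespace startup path: ask for the flushed LSN. */
	const char* msg = fil_read_first_page_stub(0, false, &flags, &space_id,
	                                           &flushed_lsn, &crypt_data);
	if (msg) std::printf("error: %s\n", msg);

	/* .ibd open path, as in fil_open_single_table_tablespace(): pass NULL. */
	msg = fil_read_first_page_stub(0, false, &flags, &space_id, nullptr, &crypt_data);
	(void)msg;
}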
@@ -2791,14 +2735,12 @@ fil_op_log_parse_or_replay(
} else if (log_flags & MLOG_FILE_FLAG_TEMP) {
/* Temporary table, do nothing */
} else {
- const char* path = NULL;
-
/* Create the database directory for name, if it does
not exist yet */
fil_create_directory_for_tablename(name);
if (fil_create_new_single_table_tablespace(
- space_id, name, path, flags,
+ space_id, name, NULL, flags,
DICT_TF2_USE_TABLESPACE,
FIL_IBD_FILE_INITIAL_SIZE,
FIL_ENCRYPTION_DEFAULT,
@@ -3744,7 +3686,7 @@ fil_open_linked_file(
/*===============*/
const char* tablename, /*!< in: database/tablename */
char** remote_filepath,/*!< out: remote filepath */
- os_file_t* remote_file, /*!< out: remote file handle */
+ pfs_os_file_t* remote_file, /*!< out: remote file handle */
ulint atomic_writes) /*!< in: atomic writes table option
value */
{
@@ -3807,7 +3749,8 @@ fil_create_new_single_table_tablespace(
fil_encryption_t mode, /*!< in: encryption mode */
ulint key_id) /*!< in: encryption key_id */
{
- os_file_t file;
+ pfs_os_file_t file;
+
ibool ret;
dberr_t err;
byte* buf2;
@@ -4250,6 +4193,7 @@ fsp_flags_try_adjust(ulint space_id, ulint flags)
flags, MLOG_4BYTES, &mtr);
}
}
+
mtr_commit(&mtr);
}
@@ -4377,6 +4321,7 @@ fil_open_single_table_tablespace(
def.file = os_file_create_simple_no_error_handling(
innodb_file_data_key, def.filepath, OS_FILE_OPEN,
OS_FILE_READ_ONLY, &def.success, atomic_writes);
+
if (def.success) {
tablespaces_found++;
}
@@ -4391,8 +4336,8 @@ fil_open_single_table_tablespace(
/* Read the first page of the datadir tablespace, if found. */
if (def.success) {
def.check_msg = fil_read_first_page(
- def.file, FALSE, &def.flags, &def.id,
- &def.lsn, &def.lsn, &def.crypt_data);
+ def.file, false, &def.flags, &def.id,
+ NULL, &def.crypt_data);
if (table) {
table->crypt_data = def.crypt_data;
@@ -4401,6 +4346,7 @@ fil_open_single_table_tablespace(
def.valid = !def.check_msg && def.id == id
&& fsp_flags_match(flags, def.flags);
+
if (def.valid) {
valid_tablespaces_found++;
} else {
@@ -4414,8 +4360,8 @@ fil_open_single_table_tablespace(
/* Read the first page of the remote tablespace */
if (remote.success) {
remote.check_msg = fil_read_first_page(
- remote.file, FALSE, &remote.flags, &remote.id,
- &remote.lsn, &remote.lsn, &remote.crypt_data);
+ remote.file, false, &remote.flags, &remote.id,
+ NULL, &remote.crypt_data);
if (table) {
table->crypt_data = remote.crypt_data;
@@ -4425,6 +4371,7 @@ fil_open_single_table_tablespace(
/* Validate this single-table-tablespace with SYS_TABLES. */
remote.valid = !remote.check_msg && remote.id == id
&& fsp_flags_match(flags, remote.flags);
+
if (remote.valid) {
valid_tablespaces_found++;
} else {
@@ -4439,8 +4386,8 @@ fil_open_single_table_tablespace(
/* Read the first page of the datadir tablespace, if found. */
if (dict.success) {
dict.check_msg = fil_read_first_page(
- dict.file, FALSE, &dict.flags, &dict.id,
- &dict.lsn, &dict.lsn, &dict.crypt_data);
+ dict.file, false, &dict.flags, &dict.id,
+ NULL, &dict.crypt_data);
if (table) {
table->crypt_data = dict.crypt_data;
@@ -4472,14 +4419,16 @@ fil_open_single_table_tablespace(
"See " REFMAN "innodb-troubleshooting-datadict.html "
"for how to resolve the issue.",
tablename);
+
if (IS_XTRABACKUP() && fix_dict) {
ib_logf(IB_LOG_LEVEL_WARN,
- "It will be removed from the data dictionary.");
+ "It will be removed from the data dictionary.");
if (purge_sys) {
fil_remove_invalid_table_from_data_dict(tablename);
}
}
+
err = DB_CORRUPTION;
goto cleanup_and_exit;
@@ -4491,26 +4440,32 @@ fil_open_single_table_tablespace(
ib_logf(IB_LOG_LEVEL_ERROR,
"A tablespace for %s has been found in "
"multiple places;", tablename);
+
if (def.success) {
ib_logf(IB_LOG_LEVEL_ERROR,
- "Default location; %s, LSN=" LSN_PF
- ", Space ID=%lu, Flags=%lu",
- def.filepath, def.lsn,
- (ulong) def.id, (ulong) def.flags);
+ "Default location; %s"
+				", Space ID=" ULINTPF ", Flags=" ULINTPF ".",
+ def.filepath,
+ def.id,
+ def.flags);
}
+
if (remote.success) {
ib_logf(IB_LOG_LEVEL_ERROR,
- "Remote location; %s, LSN=" LSN_PF
- ", Space ID=%lu, Flags=%lu",
- remote.filepath, remote.lsn,
- (ulong) remote.id, (ulong) remote.flags);
+ "Remote location; %s"
+				", Space ID=" ULINTPF ", Flags=" ULINTPF ".",
+ remote.filepath,
+ remote.id,
+ remote.flags);
}
+
if (dict.success) {
ib_logf(IB_LOG_LEVEL_ERROR,
- "Dictionary location; %s, LSN=" LSN_PF
- ", Space ID=%lu, Flags=%lu",
- dict.filepath, dict.lsn,
- (ulong) dict.id, (ulong) dict.flags);
+ "Dictionary location; %s"
+				", Space ID=" ULINTPF ", Flags=" ULINTPF ".",
+ dict.filepath,
+ dict.id,
+ dict.flags);
}
/* Force-recovery will allow some tablespaces to be
@@ -4543,6 +4498,7 @@ fil_open_single_table_tablespace(
os_file_close(def.file);
tablespaces_found--;
}
+
if (dict.success && !dict.valid) {
dict.success = false;
os_file_close(dict.file);
@@ -4658,7 +4614,17 @@ cleanup_and_exit:
mem_free(def.filepath);
- if (err == DB_SUCCESS && !srv_read_only_mode) {
+	/* We need to check the fsp flags when no error has happened,
+	the server was not started in read-only mode, and either
+	tablespace validation was requested or the flags contain
+	table options beyond the low-order bits up to the
+	FSP_FLAGS_POS_PAGE_SSIZE position.
+	Note that the flag comparison is pessimistic: the adjustment
+	is required only when the flags contain the buggy
+	MariaDB 10.1.0 - MariaDB 10.1.20 flags. */
+ if (err == DB_SUCCESS
+ && !srv_read_only_mode
+ && (validate
+ || flags >= (1U << FSP_FLAGS_POS_PAGE_SSIZE))) {
fsp_flags_try_adjust(id, flags & ~FSP_FLAGS_MEM_MASK);
}
@@ -4895,8 +4861,8 @@ fil_validate_single_table_tablespace(
check_first_page:
fsp->success = TRUE;
if (const char* check_msg = fil_read_first_page(
- fsp->file, FALSE, &fsp->flags, &fsp->id,
- &fsp->lsn, &fsp->lsn, &fsp->crypt_data)) {
+ fsp->file, false, &fsp->flags, &fsp->id,
+ NULL, &fsp->crypt_data)) {
ib_logf(IB_LOG_LEVEL_ERROR,
"%s in tablespace %s (table %s)",
check_msg, fsp->filepath, tablename);
@@ -4909,6 +4875,7 @@ check_first_page:
in Xtrabackup, this does not work.*/
return;
}
+
if (!restore_attempted) {
if (!fil_user_tablespace_find_space_id(fsp)) {
return;
@@ -5152,11 +5119,11 @@ will_not_choose:
if (def.success && remote.success) {
ib_logf(IB_LOG_LEVEL_ERROR,
"Tablespaces for %s have been found in two places;\n"
- "Location 1: SpaceID: %lu LSN: %lu File: %s\n"
- "Location 2: SpaceID: %lu LSN: %lu File: %s\n"
+ "Location 1: SpaceID: " ULINTPF " File: %s\n"
+ "Location 2: SpaceID: " ULINTPF " File: %s\n"
"You must delete one of them.",
- tablename, (ulong) def.id, (ulong) def.lsn,
- def.filepath, (ulong) remote.id, (ulong) remote.lsn,
+ tablename, def.id,
+ def.filepath, remote.id,
remote.filepath);
def.success = FALSE;
@@ -6141,19 +6108,19 @@ fil_report_invalid_page_access(
ulint len, /*!< in: I/O length */
ulint type) /*!< in: I/O type */
{
- fprintf(stderr,
- "InnoDB: Error: trying to access page number %lu"
- " in space %lu,\n"
- "InnoDB: space name %s,\n"
- "InnoDB: which is outside the tablespace bounds.\n"
- "InnoDB: Byte offset %lu, len %lu, i/o type %lu.\n"
- "InnoDB: If you get this error at mysqld startup,"
- " please check that\n"
- "InnoDB: your my.cnf matches the ibdata files"
- " that you have in the\n"
- "InnoDB: MySQL server.\n",
- (ulong) block_offset, (ulong) space_id, space_name,
- (ulong) byte_offset, (ulong) len, (ulong) type);
+ ib_logf(IB_LOG_LEVEL_FATAL,
+ "Trying to access page number " ULINTPF
+		" in space " ULINTPF ","
+ " space name %s,"
+ " which is outside the tablespace bounds."
+		" Byte offset " ULINTPF ", len " ULINTPF ","
+ " i/o type " ULINTPF ".%s",
+ block_offset, space_id, space_name,
+ byte_offset, len, type,
+ space_id == 0 && !srv_was_started
+ ? "Please check that the configuration matches"
+ " the InnoDB system tablespace location (ibdata files)"
+ : "");
}
/********************************************************************//**
@@ -6373,11 +6340,10 @@ _fil_io(
mutex_exit(&fil_system->mutex);
return(DB_ERROR);
}
+
fil_report_invalid_page_access(
block_offset, space_id, space->name,
byte_offset, len, type);
-
- ut_error;
}
/* Open file if closed */
@@ -6389,10 +6355,11 @@ _fil_io(
ib_logf(IB_LOG_LEVEL_ERROR,
"Trying to do i/o to a tablespace which "
"exists without .ibd data file. "
- "i/o type %lu, space id %lu, page no %lu, "
- "i/o length %lu bytes",
- (ulong) type, (ulong) space_id,
- (ulong) block_offset, (ulong) len);
+ "i/o type " ULINTPF ", space id "
+ ULINTPF ", page no " ULINTPF ", "
+ "i/o length " ULINTPF " bytes",
+ type, space_id,
+ block_offset, len);
return(DB_TABLESPACE_DELETED);
}
@@ -6412,8 +6379,6 @@ _fil_io(
fil_report_invalid_page_access(
block_offset, space_id, space->name, byte_offset,
len, type);
-
- ut_error;
}
/* Now we have made the changes in the data structures of fil_system */
@@ -6886,7 +6851,7 @@ fil_buf_block_init(
}
struct fil_iterator_t {
- os_file_t file; /*!< File handle */
+ pfs_os_file_t file; /*!< File handle */
const char* filepath; /*!< File path name */
os_offset_t start; /*!< From where to start */
os_offset_t end; /*!< Where to stop */
@@ -6929,15 +6894,15 @@ fil_iterate(
/* TODO: For compressed tables we do a lot of useless
copying for non-index pages. Unfortunately, it is
required by buf_zip_decompress() */
+ const bool row_compressed = callback.get_zip_size() > 0;
for (offset = iter.start; offset < iter.end; offset += n_bytes) {
byte* io_buffer = iter.io_buffer;
- bool row_compressed = false;
block->frame = io_buffer;
- if (callback.get_zip_size() > 0) {
+ if (row_compressed) {
page_zip_des_init(&block->page.zip);
page_zip_set_size(&block->page.zip, iter.page_size);
block->page.zip.data = block->frame + UNIV_PAGE_SIZE;
@@ -6946,9 +6911,6 @@ fil_iterate(
/* Zip IO is done in the compressed page buffer. */
io_buffer = block->page.zip.data;
- row_compressed = true;
- } else {
- io_buffer = iter.io_buffer;
}
/* We have to read the exact number of bytes. Otherwise the
@@ -6961,16 +6923,12 @@ fil_iterate(
ut_ad(n_bytes > 0);
ut_ad(!(n_bytes % iter.page_size));
- byte* readptr = io_buffer;
- byte* writeptr = io_buffer;
- bool encrypted = false;
-
+ const bool encrypted = iter.crypt_data != NULL
+ && iter.crypt_data->should_encrypt();
/* Use additional crypt io buffer if tablespace is encrypted */
- if (iter.crypt_data != NULL && iter.crypt_data->should_encrypt()) {
- encrypted = true;
- readptr = iter.crypt_io_buffer;
- writeptr = iter.crypt_io_buffer;
- }
+ byte* const readptr = encrypted
+ ? iter.crypt_io_buffer : io_buffer;
+ byte* const writeptr = readptr;
if (!os_file_read(iter.file, readptr, offset, (ulint) n_bytes)) {
@@ -6993,8 +6951,9 @@ fil_iterate(
ulint page_type = mach_read_from_2(src+FIL_PAGE_TYPE);
- bool page_compressed = (page_type == FIL_PAGE_PAGE_COMPRESSED_ENCRYPTED ||
- page_type == FIL_PAGE_PAGE_COMPRESSED);
+ const bool page_compressed
+ = page_type == FIL_PAGE_PAGE_COMPRESSED_ENCRYPTED
+ || page_type == FIL_PAGE_PAGE_COMPRESSED;
/* If tablespace is encrypted, we need to decrypt
the page. Note that tablespaces are not in
@@ -7096,8 +7055,7 @@ fil_iterate(
0,/* FIXME: compression level */
512,/* FIXME: use proper block size */
encrypted,
- &len,
- NULL);
+ &len);
updated = true;
}
@@ -7165,7 +7123,7 @@ fil_tablespace_iterate(
PageCallback& callback)
{
dberr_t err;
- os_file_t file;
+ pfs_os_file_t file;
char* filepath;
ut_a(n_io_buffers > 0);
diff --git a/storage/xtradb/fil/fil0pagecompress.cc b/storage/xtradb/fil/fil0pagecompress.cc
index 8b2449983df..2b6ae95640f 100644
--- a/storage/xtradb/fil/fil0pagecompress.cc
+++ b/storage/xtradb/fil/fil0pagecompress.cc
@@ -99,17 +99,16 @@ fil_compress_page(
ulint level, /* in: compression level */
ulint block_size, /*!< in: block size */
bool encrypted, /*!< in: is page also encrypted */
- ulint* out_len, /*!< out: actual length of compressed
+ ulint* out_len) /*!< out: actual length of compressed
page */
- byte* lzo_mem) /*!< in: temporal memory used by LZO */
{
int err = Z_OK;
int comp_level = level;
ulint header_len = FIL_PAGE_DATA + FIL_PAGE_COMPRESSED_SIZE;
- ulint write_size=0;
+ ulint write_size = 0;
/* Cache to avoid change during function execution */
ulint comp_method = innodb_compression_algorithm;
- bool allocated=false;
+ bool allocated = false;
/* page_compression does not apply to tables or tablespaces
that use ROW_FORMAT=COMPRESSED */
@@ -121,13 +120,23 @@ fil_compress_page(
if (!out_buf) {
allocated = true;
- out_buf = static_cast<byte *>(ut_malloc(UNIV_PAGE_SIZE));
-#ifdef HAVE_LZO
+ ulint size = UNIV_PAGE_SIZE;
+
+		/* Both the snappy and lzo compression methods require the
+		output buffer used for compression to be bigger than the
+		input buffer. Increase the allocated buffer size accordingly. */
+#if HAVE_SNAPPY
+ if (comp_method == PAGE_SNAPPY_ALGORITHM) {
+ size = snappy_max_compressed_length(size);
+ }
+#endif
+#if HAVE_LZO
if (comp_method == PAGE_LZO_ALGORITHM) {
- lzo_mem = static_cast<byte *>(ut_malloc(LZO1X_1_15_MEM_COMPRESS));
- memset(lzo_mem, 0, LZO1X_1_15_MEM_COMPRESS);
+ size += LZO1X_1_15_MEM_COMPRESS;
}
#endif
+
+ out_buf = static_cast<byte *>(ut_malloc(size));
}
ut_ad(buf);
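
The allocation above grows the scratch buffer because some codecs need more room than one page: snappy can expand incompressible input, with the worst case reported by snappy_max_compressed_length(), and LZO1X-1-15 needs LZO1X_1_15_MEM_COMPRESS bytes of work memory, which the patch now carves out of the same allocation instead of a separate lzo_mem argument. A minimal sizing sketch, assuming only the snappy C API is available:

#include <snappy-c.h>   /* snappy_max_compressed_length() */
#include <cstdlib>
#include <cstdio>

/* Illustrative sizing only; the real code additionally reserves
LZO1X_1_15_MEM_COMPRESS work memory when the LZO algorithm is selected. */
static unsigned char* alloc_compress_buffer(size_t page_size, bool use_snappy)
{
	size_t size = page_size;
	if (use_snappy) {
		/* Worst case exceeds the input size for incompressible pages. */
		size = snappy_max_compressed_length(page_size);
	}
	return static_cast<unsigned char*>(std::malloc(size));
}

int main()
{
	unsigned char* buf = alloc_compress_buffer(16384, true);
	std::printf("allocated %p\n", static_cast<void*>(buf));
	std::free(buf);
}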
@@ -163,8 +172,14 @@ fil_compress_page(
switch(comp_method) {
#ifdef HAVE_LZ4
case PAGE_LZ4_ALGORITHM:
+
+#ifdef HAVE_LZ4_COMPRESS_DEFAULT
+ err = LZ4_compress_default((const char *)buf,
+ (char *)out_buf+header_len, len, write_size);
+#else
err = LZ4_compress_limitedOutput((const char *)buf,
(char *)out_buf+header_len, len, write_size);
+#endif /* HAVE_LZ4_COMPRESS_DEFAULT */
write_size = err;
if (err == 0) {
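
LZ4_compress_default() is the non-deprecated replacement for LZ4_compress_limitedOutput(); both take the destination capacity and return the number of bytes written, or 0 when the output does not fit, which is why the surrounding code treats err == 0 as a reason to store the page uncompressed. A small standalone illustration of that contract, assuming liblz4 is installed:

#include <lz4.h>
#include <cstdio>
#include <cstring>

int main()
{
	char src[4096];
	std::memset(src, 'a', sizeof src);       /* highly compressible input */
	char dst[4096];

	/* Returns the compressed size, or 0 if dst (capacity given as the
	last argument) is too small to hold the result. */
	int written = LZ4_compress_default(src, dst,
	                                   static_cast<int>(sizeof src),
	                                   static_cast<int>(sizeof dst));
	if (written == 0) {
		std::puts("did not fit; keep the page uncompressed");
	} else {
		std::printf("compressed %zu -> %d bytes\n", sizeof src, written);
	}
}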
@@ -192,7 +207,7 @@ fil_compress_page(
#ifdef HAVE_LZO
case PAGE_LZO_ALGORITHM:
err = lzo1x_1_15_compress(
- buf, len, out_buf+header_len, &write_size, lzo_mem);
+ buf, len, out_buf+header_len, &write_size, out_buf+UNIV_PAGE_SIZE);
if (err != LZO_E_OK || write_size > UNIV_PAGE_SIZE-header_len) {
if (space && !space->printed_compression_failure) {
@@ -283,6 +298,7 @@ fil_compress_page(
case PAGE_SNAPPY_ALGORITHM:
{
snappy_status cstatus;
+ write_size = snappy_max_compressed_length(UNIV_PAGE_SIZE);
cstatus = snappy_compress(
(const char *)buf,
@@ -438,11 +454,6 @@ fil_compress_page(
err_exit:
if (allocated) {
ut_free(out_buf);
-#ifdef HAVE_LZO
- if (comp_method == PAGE_LZO_ALGORITHM) {
- ut_free(lzo_mem);
- }
-#endif
}
return (buf);
@@ -504,7 +515,7 @@ fil_decompress_page(
ptype != FIL_PAGE_PAGE_COMPRESSED_ENCRYPTED)) {
ib_logf(IB_LOG_LEVEL_ERROR,
"Corruption: We try to uncompress corrupted page"
- " CRC %lu type %lu len %lu.",
+ " CRC " ULINTPF " type " ULINTPF " len " ULINTPF ".",
mach_read_from_4(buf+FIL_PAGE_SPACE_OR_CHKSUM),
mach_read_from_2(buf+FIL_PAGE_TYPE), len);
@@ -528,7 +539,7 @@ fil_decompress_page(
if (actual_size == 0 || actual_size > UNIV_PAGE_SIZE) {
ib_logf(IB_LOG_LEVEL_ERROR,
"Corruption: We try to uncompress corrupted page"
- " actual size %lu compression %s.",
+ " actual size " ULINTPF " compression %s.",
actual_size, fil_get_compression_alg_name(compression_alg));
fflush(stderr);
if (return_error) {
@@ -543,12 +554,9 @@ fil_decompress_page(
*write_size = actual_size;
}
-#ifdef UNIV_PAGECOMPRESS_DEBUG
- ib_logf(IB_LOG_LEVEL_INFO,
- "Preparing for decompress for len %lu\n",
- actual_size);
-#endif /* UNIV_PAGECOMPRESS_DEBUG */
-
+ DBUG_PRINT("compress",
+ ("Preparing for decompress for len " ULINTPF ".",
+ actual_size));
switch(compression_alg) {
case PAGE_ZLIB_ALGORITHM:
@@ -560,7 +568,7 @@ fil_decompress_page(
ib_logf(IB_LOG_LEVEL_ERROR,
"Corruption: Page is marked as compressed"
" but uncompress failed with error %d "
- " size %lu len %lu.",
+ " size " ULINTPF " len " ULINTPF ".",
err, actual_size, len);
fflush(stderr);
@@ -579,9 +587,10 @@ fil_decompress_page(
if (err != (int)actual_size) {
ib_logf(IB_LOG_LEVEL_ERROR,
"Corruption: Page is marked as compressed"
- " but decompression read only %d bytes "
- " size %lu len %lu.",
+ " but uncompress failed with error %d "
+ " size " ULINTPF " len " ULINTPF ".",
err, actual_size, len);
+
fflush(stderr);
if (return_error) {
@@ -593,16 +602,17 @@ fil_decompress_page(
#endif /* HAVE_LZ4 */
#ifdef HAVE_LZO
case PAGE_LZO_ALGORITHM: {
- ulint olen=0;
+ ulint olen = 0;
err = lzo1x_decompress((const unsigned char *)buf+header_len,
actual_size,(unsigned char *)in_buf, &olen, NULL);
if (err != LZO_E_OK || (olen == 0 || olen > UNIV_PAGE_SIZE)) {
ib_logf(IB_LOG_LEVEL_ERROR,
"Corruption: Page is marked as compressed"
- " but decompression read only %ld bytes"
- " size %lu len %lu.",
- olen, actual_size, len);
+ " but uncompress failed with error %d "
+ " size " ULINTPF " len " ULINTPF ".",
+ err, actual_size, len);
+
fflush(stderr);
if (return_error) {
@@ -637,7 +647,7 @@ fil_decompress_page(
ib_logf(IB_LOG_LEVEL_ERROR,
"Corruption: Page is marked as compressed"
" but decompression read only %ld bytes"
- " size %lu len %lu.",
+			" size " ULINTPF " len " ULINTPF ".",
dst_pos, actual_size, len);
fflush(stderr);
@@ -666,7 +676,7 @@ fil_decompress_page(
ib_logf(IB_LOG_LEVEL_ERROR,
"Corruption: Page is marked as compressed"
" but decompression read only %du bytes"
- " size %lu len %lu err %d.",
+ " size " ULINTPF " len " ULINTPF " err %d.",
dst_pos, actual_size, len, err);
fflush(stderr);
@@ -682,7 +692,7 @@ fil_decompress_page(
case PAGE_SNAPPY_ALGORITHM:
{
snappy_status cstatus;
- ulint olen = 0;
+ ulint olen = UNIV_PAGE_SIZE;
cstatus = snappy_uncompress(
(const char *)(buf+header_len),
@@ -690,11 +700,11 @@ fil_decompress_page(
(char *)in_buf,
(size_t*)&olen);
- if (cstatus != SNAPPY_OK || (olen == 0 || olen > UNIV_PAGE_SIZE)) {
+ if (cstatus != SNAPPY_OK || olen != UNIV_PAGE_SIZE) {
ib_logf(IB_LOG_LEVEL_ERROR,
"Corruption: Page is marked as compressed"
- " but decompression read only %lu bytes"
- " size %lu len %lu err %d.",
+ " but decompression read only " ULINTPF " bytes"
+ " size " ULINTPF " len " ULINTPF " err %d.",
olen, actual_size, len, (int)cstatus);
fflush(stderr);
@@ -703,6 +713,7 @@ fil_decompress_page(
}
ut_error;
}
+
break;
}
#endif /* HAVE_SNAPPY */
@@ -728,8 +739,7 @@ fil_decompress_page(
memcpy(buf, in_buf, len);
error_return:
- // Need to free temporal buffer if no buffer was given
- if (page_buf == NULL) {
+ if (page_buf != in_buf) {
ut_free(in_buf);
}
}
diff --git a/storage/xtradb/fsp/fsp0fsp.cc b/storage/xtradb/fsp/fsp0fsp.cc
index 934824c6462..df8c6ffe222 100644
--- a/storage/xtradb/fsp/fsp0fsp.cc
+++ b/storage/xtradb/fsp/fsp0fsp.cc
@@ -673,16 +673,13 @@ fsp_header_init_fields(
}
#ifndef UNIV_HOTBACKUP
-/**********************************************************************//**
-Initializes the space header of a new created space and creates also the
-insert buffer tree root if space == 0. */
+/** Initialize a tablespace header.
+@param[in] space_id space id
+@param[in] size current size in blocks
+@param[in,out] mtr mini-transaction */
UNIV_INTERN
void
-fsp_header_init(
-/*============*/
- ulint space_id, /*!< in: space id */
- ulint size, /*!< in: current size in blocks */
- mtr_t* mtr) /*!< in/out: mini-transaction */
+fsp_header_init(ulint space_id, ulint size, mtr_t* mtr)
{
fsp_header_t* header;
buf_block_t* block;
@@ -725,14 +722,8 @@ fsp_header_init(
flst_init(header + FSP_SEG_INODES_FREE, mtr);
mlog_write_ull(header + FSP_SEG_ID, 1, mtr);
- if (space_id == 0) {
- fsp_fill_free_list(FALSE, space_id, header, mtr);
- btr_create(DICT_CLUSTERED | DICT_UNIVERSAL | DICT_IBUF,
- 0, 0, DICT_IBUF_ID_MIN + space_id,
- dict_ind_redundant, mtr);
- } else {
- fsp_fill_free_list(TRUE, space_id, header, mtr);
- }
+
+ fsp_fill_free_list(space_id != TRX_SYS_SPACE, space_id, header, mtr);
fil_space_t* space = fil_space_acquire(space_id);
ut_ad(space);
@@ -2075,7 +2066,6 @@ fseg_create_general(
inode = fsp_alloc_seg_inode(space_header, mtr);
if (inode == NULL) {
-
goto funct_exit;
}
@@ -2750,7 +2740,6 @@ fsp_reserve_free_extents(
ibool success;
ulint n_pages_added;
size_t total_reserved = 0;
- ulint rounds = 0;
ut_ad(mtr);
*n_reserved = n_ext;
@@ -2829,17 +2818,7 @@ try_to_extend:
success = fsp_try_extend_data_file(&n_pages_added, space,
space_header, mtr);
if (success && n_pages_added > 0) {
-
- rounds++;
total_reserved += n_pages_added;
-
- if (rounds > 50) {
- ib_logf(IB_LOG_LEVEL_INFO,
- "Space id %lu trying to reserve %lu extents actually reserved %lu "
- " reserve %lu free %lu size %lu rounds %lu total_reserved %llu",
- space, n_ext, n_pages_added, reserve, n_free, size, rounds, (ullint) total_reserved);
- }
-
goto try_again;
}
@@ -4160,20 +4139,8 @@ ulint
fsp_header_get_crypt_offset(
const ulint zip_size)
{
- ulint pageno = 0;
- /* compute first page_no that will have xdes stored on page != 0*/
- for (ulint i = 0;
- (pageno = xdes_calc_descriptor_page(zip_size, i)) == 0; )
- i++;
-
- /* use pageno prior to this...i.e last page on page 0 */
- ut_ad(pageno > 0);
- pageno--;
-
- ulint iv_offset = XDES_ARR_OFFSET +
- XDES_SIZE * (1 + xdes_calc_descriptor_index(zip_size, pageno));
-
- return FSP_HEADER_OFFSET + iv_offset;
+ return (FSP_HEADER_OFFSET + (XDES_ARR_OFFSET + XDES_SIZE *
+ (zip_size ? zip_size : UNIV_PAGE_SIZE) / FSP_EXTENT_SIZE));
}
/**********************************************************************//**
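
The simplified fsp_header_get_crypt_offset() places the encryption metadata directly after the extent-descriptor (XDES) array on page 0: one descriptor of XDES_SIZE bytes per extent of FSP_EXTENT_SIZE pages described by that page, added to the array's own offset. The sketch below restates the formula symbolically; the real constants come from fsp0fsp.h and depend on the page size, so no concrete byte values are implied here.

#include <cstddef>
#include <cstdio>

/* Symbolic model of the new fsp_header_get_crypt_offset(); the real
FSP_HEADER_OFFSET, XDES_ARR_OFFSET, XDES_SIZE and FSP_EXTENT_SIZE come
from fsp0fsp.h and depend on the page size. */
static size_t crypt_offset(size_t fsp_header_offset, size_t xdes_arr_offset,
                           size_t xdes_size, size_t page_size, size_t extent_size)
{
	const size_t descriptors_on_page_0 = page_size / extent_size;
	return fsp_header_offset + xdes_arr_offset
		+ xdes_size * descriptors_on_page_0;
}

int main()
{
	/* Purely symbolic call: zero placeholders instead of real constants. */
	std::printf("%zu\n", crypt_offset(0, 0, 0, 16384, 64));
}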
diff --git a/storage/xtradb/fts/fts0que.cc b/storage/xtradb/fts/fts0que.cc
index 2e335c1c255..f24973e26fb 100644
--- a/storage/xtradb/fts/fts0que.cc
+++ b/storage/xtradb/fts/fts0que.cc
@@ -953,6 +953,18 @@ fts_query_free_doc_ids(
query->total_size -= SIZEOF_RBT_CREATE;
}
+/**
+Free the query intersection
+@param[in] query query instance */
+static
+void
+fts_query_free_intersection(
+ fts_query_t* query)
+{
+ fts_query_free_doc_ids(query, query->intersection);
+ query->intersection = NULL;
+}
+
/*******************************************************************//**
Add the word to the documents "list" of matching words from
the query. We make a copy of the word from the query heap. */
@@ -1311,6 +1323,7 @@ fts_query_intersect(
/* error is passed by 'query->error' */
if (query->error != DB_SUCCESS) {
ut_ad(query->error == DB_FTS_EXCEED_RESULT_CACHE_LIMIT);
+ fts_query_free_intersection(query);
return(query->error);
}
@@ -1339,6 +1352,8 @@ fts_query_intersect(
ut_a(!query->multi_exist || (query->multi_exist
&& rbt_size(query->doc_ids) <= n_doc_ids));
+ } else if (query->intersection != NULL) {
+ fts_query_free_intersection(query);
}
}
@@ -1557,6 +1572,11 @@ fts_merge_doc_ids(
query, ranking->doc_id, ranking->rank);
if (query->error != DB_SUCCESS) {
+ if (query->intersection != NULL)
+ {
+ ut_a(query->oper == FTS_EXIST);
+ fts_query_free_intersection(query);
+ }
DBUG_RETURN(query->error);
}
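
The new fts_query_free_intersection() exists so that every early-error path, such as the result-cache overflow in fts_query_intersect() or a failure inside fts_merge_doc_ids(), releases the temporary doc-id tree and clears the dangling pointer instead of leaking it. The shape of the fix as a tiny standalone sketch with illustrative types (this is not the FTS code itself):

#include <set>
#include <cstdint>

struct query_t {
	std::set<uint64_t>* intersection = nullptr;  /* stands in for the ib_rbt_t */
	int error = 0;
};

/* Models fts_query_free_intersection(): free and clear in one place. */
static void free_intersection(query_t* q)
{
	delete q->intersection;
	q->intersection = nullptr;                   /* never leave a dangling pointer */
}

static int intersect(query_t* q)
{
	q->intersection = new std::set<uint64_t>;
	/* ... populate the intersection from the matching documents ... */
	if (q->error != 0) {                 /* e.g. result cache limit exceeded */
		free_intersection(q);        /* this is the leak the patch plugs */
		return q->error;
	}
	/* On success the real code swaps the intersection into query->doc_ids;
	here it is simply dropped again to keep the sketch self-contained. */
	free_intersection(q);
	return 0;
}

int main()
{
	query_t q;
	return intersect(&q);
}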
diff --git a/storage/xtradb/handler/ha_innodb.cc b/storage/xtradb/handler/ha_innodb.cc
index 49b0fedb3b4..28f5acb2dcc 100644
--- a/storage/xtradb/handler/ha_innodb.cc
+++ b/storage/xtradb/handler/ha_innodb.cc
@@ -1,6 +1,6 @@
/*****************************************************************************
-Copyright (c) 2000, 2016, Oracle and/or its affiliates.
+Copyright (c) 2000, 2017, Oracle and/or its affiliates. All Rights Reserved.
Copyright (c) 2013, 2017, MariaDB Corporation.
Copyright (c) 2008, 2009 Google Inc.
Copyright (c) 2009, Percona Inc.
@@ -876,17 +876,31 @@ innobase_purge_changed_page_bitmaps(
/*================================*/
ulonglong lsn) __attribute__((unused)); /*!< in: LSN to purge files up to */
+/** Empty free list algorithm.
+Checks if buffer pool is big enough to enable backoff algorithm.
+InnoDB empty free list algorithm backoff requires free pages
+from LRU for the best performance.
+buf_LRU_buf_pool_running_out cancels query if 1/4 of
+buffer pool belongs to LRU or freelist.
+At the same time buf_flush_LRU_list_batch
+keeps up to BUF_LRU_MIN_LEN in LRU.
+In order to avoid deadlock backoff requires buffer pool
+to be at least 4*BUF_LRU_MIN_LEN,
+but flush performance is bad because of thrashing
+and additional BUF_LRU_MIN_LEN pages are requested.
+@param[in] algorithm desired algorithm from srv_empty_free_list_t
+@return true if it's possible to enable backoff. */
+static inline
+bool
+innodb_empty_free_list_algorithm_allowed(
+ srv_empty_free_list_t algorithm)
+{
+ long long buf_pool_pages = srv_buf_pool_size / srv_page_size
+ / srv_buf_pool_instances;
-/*****************************************************************//**
-Check whether this is a fake change transaction.
-@return TRUE if a fake change transaction */
-static
-my_bool
-innobase_is_fake_change(
-/*====================*/
- handlerton *hton, /*!< in: InnoDB handlerton */
- THD* thd) __attribute__((unused)); /*!< in: MySQL thread handle of the user for
- whom the transaction is being committed */
+ return(buf_pool_pages >= BUF_LRU_MIN_LEN * (4 + 1)
+ || algorithm != SRV_EMPTY_FREE_LIST_BACKOFF);
+}
/** Get the list of foreign keys referencing a specified table
table.
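
The helper above divides the buffer pool size by the page size and by the number of instances before comparing against BUF_LRU_MIN_LEN * (4 + 1), so the backoff algorithm is refused when any single instance is too small rather than only when the total pool is. Assuming the stock BUF_LRU_MIN_LEN of 256 pages and 16 KiB pages (both assumptions, not stated in this patch), the threshold works out to the 20MB per instance mentioned in the warning further down:

#include <cstdio>

int main()
{
	const long long page_size = 16 * 1024;    /* assumed innodb_page_size */
	const long long buf_lru_min_len = 256;    /* assumed BUF_LRU_MIN_LEN */
	const long long min_pages = buf_lru_min_len * (4 + 1);      /* 1280 pages */
	const long long min_bytes_per_instance = min_pages * page_size;

	std::printf("backoff needs >= %lld pages, i.e. %lld MiB per instance\n",
	            min_pages, min_bytes_per_instance / (1024 * 1024));
	/* Prints: backoff needs >= 1280 pages, i.e. 20 MiB per instance */
}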
@@ -1601,14 +1615,11 @@ innobase_drop_database(
the path is used as the database name:
for example, in 'mysql/data/test' the
database name is 'test' */
-/*******************************************************************//**
-Closes an InnoDB database. */
+/** Shut down the InnoDB storage engine.
+@return 0 */
static
int
-innobase_end(
-/*=========*/
- handlerton* hton, /* in: Innodb handlerton */
- ha_panic_function type);
+innobase_end(handlerton*, ha_panic_function);
#if NOT_USED
/*****************************************************************//**
@@ -1703,28 +1714,6 @@ normalize_table_name_low(
ibool set_lower_case); /* in: TRUE if we want to set
name to lower case */
-/*************************************************************//**
-Checks if buffer pool is big enough to enable backoff algorithm.
-InnoDB empty free list algorithm backoff requires free pages
-from LRU for the best performance.
-buf_LRU_buf_pool_running_out cancels query if 1/4 of
-buffer pool belongs to LRU or freelist.
-At the same time buf_flush_LRU_list_batch
-keeps up to BUF_LRU_MIN_LEN in LRU.
-In order to avoid deadlock baclkoff requires buffer pool
-to be at least 4*BUF_LRU_MIN_LEN,
-but flush peformance is bad because of trashing
-and additional BUF_LRU_MIN_LEN pages are requested.
-@return true if it's possible to enable backoff. */
-static
-bool
-innodb_empty_free_list_algorithm_backoff_allowed(
- srv_empty_free_list_t
- algorithm, /*!< in: desired algorithm
- from srv_empty_free_list_t */
- long long buf_pool_pages); /*!< in: total number
- of pages inside buffer pool */
-
#ifdef NOT_USED
/*************************************************************//**
Removes old archived transaction log files.
@@ -2596,16 +2585,11 @@ innobase_get_stmt(
THD* thd, /*!< in: MySQL thread handle */
size_t* length) /*!< out: length of the SQL statement */
{
- const char* query = NULL;
- LEX_STRING *stmt = NULL;
- if (thd) {
- stmt = thd_query_string(thd);
- if (stmt) {
- *length = stmt->length;
- query = stmt->str;
- }
+ if (const LEX_STRING *stmt = thd_query_string(thd)) {
+ *length = stmt->length;
+ return stmt->str;
}
- return (query);
+ return NULL;
}
/**********************************************************************//**
@@ -3505,13 +3489,13 @@ innobase_convert_identifier(
ibool file_id)/*!< in: TRUE=id is a table or database name;
FALSE=id is an UTF-8 string */
{
+ char nz2[MAX_TABLE_NAME_LEN + 1];
const char* s = id;
int q;
if (file_id) {
char nz[MAX_TABLE_NAME_LEN + 1];
- char nz2[MAX_TABLE_NAME_LEN + 1];
/* Decode the table name. The MySQL function expects
a NUL-terminated string. The input and output strings
@@ -4374,10 +4358,9 @@ innobase_change_buffering_inited_ok:
srv_use_posix_fallocate = (ibool) innobase_use_fallocate;
#endif
/* Do not enable backoff algorithm for small buffer pool. */
- if (!innodb_empty_free_list_algorithm_backoff_allowed(
+ if (!innodb_empty_free_list_algorithm_allowed(
static_cast<srv_empty_free_list_t>(
- srv_empty_free_list_algorithm),
- innobase_buffer_pool_size / srv_page_size)) {
+ srv_empty_free_list_algorithm))) {
sql_print_information(
"InnoDB: innodb_empty_free_list_algorithm "
"has been changed to legacy "
@@ -4512,21 +4495,13 @@ error:
DBUG_RETURN(TRUE);
}
-/*******************************************************************//**
-Closes an InnoDB database.
-@return TRUE if error */
+/** Shut down the InnoDB storage engine.
+@return 0 */
static
int
-innobase_end(
-/*=========*/
- handlerton* hton, /*!< in/out: InnoDB handlerton */
- ha_panic_function type MY_ATTRIBUTE((unused)))
- /*!< in: ha_panic() parameter */
+innobase_end(handlerton*, ha_panic_function)
{
- int err= 0;
-
DBUG_ENTER("innobase_end");
- DBUG_ASSERT(hton == innodb_hton_ptr);
if (innodb_inited) {
@@ -4543,9 +4518,7 @@ innobase_end(
innodb_inited = 0;
hash_table_free(innobase_open_tables);
innobase_open_tables = NULL;
- if (innobase_shutdown_for_mysql() != DB_SUCCESS) {
- err = 1;
- }
+ innodb_shutdown();
srv_free_paths_and_sizes();
my_free(internal_innobase_data_file_path);
mysql_mutex_destroy(&innobase_share_mutex);
@@ -4554,7 +4527,7 @@ innobase_end(
mysql_mutex_destroy(&pending_checkpoint_mutex);
}
- DBUG_RETURN(err);
+ DBUG_RETURN(0);
}
/****************************************************************//**
@@ -4609,22 +4582,6 @@ innobase_purge_changed_page_bitmaps(
}
/*****************************************************************//**
-Check whether this is a fake change transaction.
-@return TRUE if a fake change transaction */
-static
-my_bool
-innobase_is_fake_change(
-/*====================*/
- handlerton *hton MY_ATTRIBUTE((unused)),
- /*!< in: InnoDB handlerton */
- THD* thd) /*!< in: MySQL thread handle of the user for
- whom the transaction is being committed */
-{
- trx_t* trx = check_trx_exists(thd);
- return UNIV_UNLIKELY(trx->fake_changes);
-}
-
-/*****************************************************************//**
Commits a transaction in an InnoDB database. */
static
void
@@ -8172,17 +8129,31 @@ build_template_field(
templ->rec_field_is_prefix = FALSE;
if (dict_index_is_clust(index)) {
+ templ->rec_field_is_prefix = false;
templ->rec_field_no = templ->clust_rec_field_no;
templ->rec_prefix_field_no = ULINT_UNDEFINED;
} else {
- /* If we're in a secondary index, keep track
- * of the original index position even if this
- * is just a prefix index; we will use this
- * later to avoid a cluster index lookup in
- * some cases.*/
+ /* If we're in a secondary index, keep track of the original
+ index position even if this is just a prefix index; we will use
+ this later to avoid a cluster index lookup in some cases.*/
templ->rec_field_no = dict_index_get_nth_col_pos(index, i,
&templ->rec_prefix_field_no);
+ templ->rec_field_is_prefix
+ = (templ->rec_field_no == ULINT_UNDEFINED)
+ && (templ->rec_prefix_field_no != ULINT_UNDEFINED);
+#ifdef UNIV_DEBUG
+ if (templ->rec_prefix_field_no != ULINT_UNDEFINED)
+ {
+ const dict_field_t* field = dict_index_get_nth_field(
+ index,
+ templ->rec_prefix_field_no);
+ ut_ad(templ->rec_field_is_prefix
+ == (field->prefix_len != 0));
+ } else {
+ ut_ad(!templ->rec_field_is_prefix);
+ }
+#endif
}
if (field->real_maybe_null()) {
@@ -8572,8 +8543,8 @@ ha_innobase::innobase_lock_autoinc(void)
break;
}
}
- /* Fall through to old style locking. */
-
+ /* Use old style locking. */
+ /* fall through */
case AUTOINC_OLD_STYLE_LOCKING:
DBUG_EXECUTE_IF("die_if_autoinc_old_lock_style_used",
ut_ad(0););
@@ -9150,8 +9121,8 @@ calc_row_difference(
}
}
- if (o_len != n_len || (o_len != UNIV_SQL_NULL &&
- 0 != memcmp(o_ptr, n_ptr, o_len))) {
+ if (o_len != n_len || (o_len != 0 && o_len != UNIV_SQL_NULL
+ && 0 != memcmp(o_ptr, n_ptr, o_len))) {
/* The field has changed */
ufield = uvect->fields + n_changed;
@@ -11875,7 +11846,8 @@ create_options_are_invalid(
case ROW_TYPE_DYNAMIC:
CHECK_ERROR_ROW_TYPE_NEEDS_FILE_PER_TABLE(use_tablespace);
CHECK_ERROR_ROW_TYPE_NEEDS_GT_ANTELOPE;
- /* fall through since dynamic also shuns KBS */
+ /* ROW_FORMAT=DYNAMIC also shuns KEY_BLOCK_SIZE */
+ /* fall through */
case ROW_TYPE_COMPACT:
case ROW_TYPE_REDUNDANT:
if (kbs_specified) {
@@ -12285,7 +12257,8 @@ index_bad:
break; /* Correct row_format */
}
zip_allowed = FALSE;
- /* fall through to set row_format = COMPACT */
+ /* Set ROW_FORMAT = COMPACT */
+ /* fall through */
case ROW_TYPE_NOT_USED:
case ROW_TYPE_FIXED:
case ROW_TYPE_PAGE:
@@ -12294,6 +12267,7 @@ index_bad:
thd, Sql_condition::WARN_LEVEL_WARN,
ER_ILLEGAL_HA_CREATE_OPTION,
"InnoDB: assuming ROW_FORMAT=COMPACT.");
+ /* fall through */
case ROW_TYPE_DEFAULT:
/* If we fell through, set row format to Compact. */
row_format = ROW_TYPE_COMPACT;
@@ -13109,7 +13083,8 @@ ha_innobase::delete_table(
extension, in contrast to ::create */
normalize_table_name(norm_name, name);
- if (srv_read_only_mode) {
+ if (srv_read_only_mode
+ || srv_force_recovery >= SRV_FORCE_NO_UNDO_LOG_SCAN) {
DBUG_RETURN(HA_ERR_TABLE_READONLY);
} else if (row_is_magic_monitor_table(norm_name)
&& check_global_access(thd, PROCESS_ACL)) {
@@ -15078,7 +15053,8 @@ fill_foreign_key_list(THD* thd,
{
ut_ad(mutex_own(&dict_sys->mutex));
- for (dict_foreign_set::iterator it = table->referenced_set.begin();
+ for (dict_foreign_set::const_iterator it
+ = table->referenced_set.begin();
it != table->referenced_set.end(); ++it) {
dict_foreign_t* foreign = *it;
@@ -18633,15 +18609,17 @@ innodb_buffer_pool_evict_uncompressed(void)
ut_ad(block->page.in_LRU_list);
mutex_enter(&block->mutex);
- if (!buf_LRU_free_page(&block->page, false)) {
- mutex_exit(&block->mutex);
- all_evicted = false;
- } else {
- mutex_exit(&block->mutex);
+ all_evicted = buf_LRU_free_page(&block->page, false);
+ mutex_exit(&block->mutex);
+
+ if (all_evicted) {
+
mutex_enter(&buf_pool->LRU_list_mutex);
- }
+ block = UT_LIST_GET_LAST(buf_pool->unzip_LRU);
+ } else {
- block = prev_block;
+ block = prev_block;
+ }
}
mutex_exit(&buf_pool->LRU_list_mutex);
@@ -18832,6 +18810,10 @@ innodb_sched_priority_cleaner_update(
const void* save) /*!< in: immediate result
from check function */
{
+ if (srv_read_only_mode) {
+ return;
+ }
+
ulint priority = *static_cast<const ulint *>(save);
ulint actual_priority;
ulint nice = 0;
@@ -18858,10 +18840,6 @@ innodb_sched_priority_cleaner_update(
}
/* Set the priority for the page cleaner thread */
- if (srv_read_only_mode) {
-
- return;
- }
ut_ad(buf_page_cleaner_is_active);
nice = os_thread_get_priority(srv_cleaner_tid);
@@ -19252,8 +19230,15 @@ checkpoint_now_set(
log_make_checkpoint_at(LSN_MAX, TRUE);
fil_flush_file_spaces(FIL_LOG);
}
- fil_write_flushed_lsn_to_data_files(log_sys->lsn, 0);
- fil_flush_file_spaces(FIL_TABLESPACE);
+
+ dberr_t err = fil_write_flushed_lsn(log_sys->lsn);
+
+ if (err != DB_SUCCESS) {
+ ib_logf(IB_LOG_LEVEL_ERROR,
+ "Failed to write flush lsn to the "
+ "system tablespace at checkpoint err=%s",
+ ut_strerr(err));
+ }
}
}
@@ -19855,32 +19840,6 @@ wsrep_fake_trx_id(
/*************************************************************//**
-Empty free list algorithm.
-Checks if buffer pool is big enough to enable backoff algorithm.
-InnoDB empty free list algorithm backoff requires free pages
-from LRU for the best performance.
-buf_LRU_buf_pool_running_out cancels query if 1/4 of
-buffer pool belongs to LRU or freelist.
-At the same time buf_flush_LRU_list_batch
-keeps up to BUF_LRU_MIN_LEN in LRU.
-In order to avoid deadlock baclkoff requires buffer pool
-to be at least 4*BUF_LRU_MIN_LEN,
-but flush peformance is bad because of trashing
-and additional BUF_LRU_MIN_LEN pages are requested.
-@return true if it's possible to enable backoff. */
-static
-bool
-innodb_empty_free_list_algorithm_backoff_allowed(
- srv_empty_free_list_t algorithm, /*!< in: desired algorithm
- from srv_empty_free_list_t */
- long long buf_pool_pages) /*!< in: total number
- of pages inside buffer pool */
-{
- return(buf_pool_pages >= BUF_LRU_MIN_LEN * (4 + 1)
- || algorithm != SRV_EMPTY_FREE_LIST_BACKOFF);
-}
-
-/*************************************************************//**
Empty free list algorithm. This function is registered as
a callback with MySQL.
@return 0 for valid algorithm */
@@ -19921,13 +19880,11 @@ innodb_srv_empty_free_list_algorithm_validate(
return(1);
algorithm = static_cast<srv_empty_free_list_t>(algo);
- if (!innodb_empty_free_list_algorithm_backoff_allowed(
- algorithm,
- innobase_buffer_pool_size / srv_page_size)) {
+ if (!innodb_empty_free_list_algorithm_allowed(algorithm)) {
sql_print_warning(
"InnoDB: innodb_empty_free_list_algorithm "
"= 'backoff' requires at least"
- " 20MB buffer pool.\n");
+			" 20MB per buffer pool instance.\n");
return(1);
}
diff --git a/storage/xtradb/handler/ha_innodb.h b/storage/xtradb/handler/ha_innodb.h
index f6f2f1b0eee..3bb67532954 100644
--- a/storage/xtradb/handler/ha_innodb.h
+++ b/storage/xtradb/handler/ha_innodb.h
@@ -540,7 +540,7 @@ innobase_index_name_is_reserved(
const KEY* key_info, /*!< in: Indexes to be created */
ulint num_of_keys) /*!< in: Number of indexes to
be created. */
- MY_ATTRIBUTE((nonnull, warn_unused_result));
+ MY_ATTRIBUTE((nonnull(1), warn_unused_result));
/*****************************************************************//**
#ifdef WITH_WSREP
diff --git a/storage/xtradb/handler/handler0alter.cc b/storage/xtradb/handler/handler0alter.cc
index ef8c62849ae..0e7cc9a655b 100644
--- a/storage/xtradb/handler/handler0alter.cc
+++ b/storage/xtradb/handler/handler0alter.cc
@@ -2307,10 +2307,10 @@ online_retry_drop_indexes_with_trx(
@param drop_fk constraints being dropped
@param n_drop_fk number of constraints that are being dropped
@return whether the constraint is being dropped */
-inline MY_ATTRIBUTE((pure, nonnull, warn_unused_result))
+MY_ATTRIBUTE((pure, nonnull(1), warn_unused_result))
+inline
bool
innobase_dropping_foreign(
-/*======================*/
const dict_foreign_t* foreign,
dict_foreign_t** drop_fk,
ulint n_drop_fk)
@@ -2334,10 +2334,10 @@ column that is being dropped or modified to NOT NULL.
@retval true Not allowed (will call my_error())
@retval false Allowed
*/
-static MY_ATTRIBUTE((pure, nonnull, warn_unused_result))
+MY_ATTRIBUTE((pure, nonnull(1,4), warn_unused_result))
+static
bool
innobase_check_foreigns_low(
-/*========================*/
const dict_table_t* user_table,
dict_foreign_t** drop_fk,
ulint n_drop_fk,
@@ -2434,10 +2434,10 @@ column that is being dropped or modified to NOT NULL.
@retval true Not allowed (will call my_error())
@retval false Allowed
*/
-static MY_ATTRIBUTE((pure, nonnull, warn_unused_result))
+MY_ATTRIBUTE((pure, nonnull(1,2,3,4), warn_unused_result))
+static
bool
innobase_check_foreigns(
-/*====================*/
Alter_inplace_info* ha_alter_info,
const TABLE* altered_table,
const TABLE* old_table,
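
Narrowing MY_ATTRIBUTE((nonnull)) to nonnull(1) with explicit argument lists in the hunks above matters because these functions are legitimately called with drop_fk == NULL when n_drop_fk == 0; with the blanket attribute a compiler may assume every pointer argument is non-NULL and drop the corresponding checks. A tiny illustration of the GCC/Clang attribute semantics, using throwaway names unrelated to InnoDB:

#include <cstddef>

/* Only the first argument is promised to be non-NULL; 'optional' may be NULL,
so the compiler must keep the explicit NULL check below. */
__attribute__((nonnull(1)))
static int count_matches(const int* required, const int* optional, size_t n)
{
	int matches = 0;
	for (size_t i = 0; i < n; i++) {
		if (optional != NULL && required[i] == optional[i]) {
			matches++;
		}
	}
	return matches;
}

int main()
{
	int a[2] = {1, 2};
	/* Valid with nonnull(1); with a blanket nonnull the NULL argument
	would violate the declared contract. */
	return count_matches(a, NULL, 0);
}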
diff --git a/storage/xtradb/handler/i_s.cc b/storage/xtradb/handler/i_s.cc
index 086d5642dbb..9cef04c4244 100644
--- a/storage/xtradb/handler/i_s.cc
+++ b/storage/xtradb/handler/i_s.cc
@@ -1,7 +1,7 @@
/*****************************************************************************
-Copyright (c) 2007, 2016, Oracle and/or its affiliates.
-Copyrigth (c) 2014, 2017, MariaDB Corporation
+Copyright (c) 2007, 2016, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 2014, 2017, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -2966,14 +2966,16 @@ i_s_fts_deleted_generic_fill(
fields = table->field;
+ int ret = 0;
+
for (ulint j = 0; j < ib_vector_size(deleted->doc_ids); ++j) {
doc_id_t doc_id;
doc_id = *(doc_id_t*) ib_vector_get_const(deleted->doc_ids, j);
- OK(fields[I_S_FTS_DOC_ID]->store((longlong) doc_id, true));
+ BREAK_IF(ret = fields[I_S_FTS_DOC_ID]->store(doc_id, true));
- OK(schema_table_store_record(thd, table));
+ BREAK_IF(ret = schema_table_store_record(thd, table));
}
trx_free_for_background(trx);
@@ -2984,7 +2986,7 @@ i_s_fts_deleted_generic_fill(
rw_lock_s_unlock(&dict_operation_lock);
- DBUG_RETURN(0);
+ DBUG_RETURN(ret);
}
/*******************************************************************//**
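
Replacing OK(...) with BREAK_IF(ret = ...) in the fill functions above lets a failure from Field::store() or schema_table_store_record() fall out of the loop, so the cleanup code (trx_free_for_background(), releasing dict_operation_lock) still runs exactly once and the error is finally returned through DBUG_RETURN(ret). Assuming BREAK_IF is the usual "if ((expr)) break" helper, the control flow reduces to this sketch with stand-in functions:

#include <cstdio>

#define BREAK_IF(expr) if ((expr)) break   /* assumed definition of the macro */

static int store_field(int i)  { return i == 3 ? 1 : 0; }  /* pretend the 4th store fails */
static int store_record(int)   { return 0; }

static int fill_table()
{
	int ret = 0;
	for (int i = 0; i < 10; i++) {
		BREAK_IF(ret = store_field(i));     /* on failure: leave the loop, */
		BREAK_IF(ret = store_record(i));    /* do not return mid-function  */
	}
	/* ... release locks / free the transaction here, exactly once ... */
	return ret;                                 /* mirrors DBUG_RETURN(ret) */
}

int main()
{
	std::printf("fill_table() = %d\n", fill_table());   /* prints 1 */
}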
@@ -3222,13 +3224,13 @@ i_s_fts_index_cache_fill_one_index(
/*===============================*/
fts_index_cache_t* index_cache, /*!< in: FTS index cache */
THD* thd, /*!< in: thread */
+ fts_string_t* conv_str, /*!< in/out: buffer */
TABLE_LIST* tables) /*!< in/out: tables to fill */
{
TABLE* table = (TABLE*) tables->table;
Field** fields;
CHARSET_INFO* index_charset;
const ib_rbt_node_t* rbt_node;
- fts_string_t conv_str;
uint dummy_errors;
char* word_str;
@@ -3237,10 +3239,9 @@ i_s_fts_index_cache_fill_one_index(
fields = table->field;
index_charset = index_cache->charset;
- conv_str.f_len = system_charset_info->mbmaxlen
- * FTS_MAX_WORD_LEN_IN_CHAR;
- conv_str.f_str = static_cast<byte*>(ut_malloc(conv_str.f_len));
- conv_str.f_n_char = 0;
+ conv_str->f_n_char = 0;
+
+ int ret = 0;
/* Go through each word in the index cache */
for (rbt_node = rbt_first(index_cache->words);
@@ -3252,16 +3253,16 @@ i_s_fts_index_cache_fill_one_index(
/* Convert word from index charset to system_charset_info */
if (index_charset->cset != system_charset_info->cset) {
- conv_str.f_n_char = my_convert(
- reinterpret_cast<char*>(conv_str.f_str),
- static_cast<uint32>(conv_str.f_len),
+ conv_str->f_n_char = my_convert(
+ reinterpret_cast<char*>(conv_str->f_str),
+ static_cast<uint32>(conv_str->f_len),
system_charset_info,
reinterpret_cast<char*>(word->text.f_str),
static_cast<uint32>(word->text.f_len),
index_charset, &dummy_errors);
- ut_ad(conv_str.f_n_char <= conv_str.f_len);
- conv_str.f_str[conv_str.f_n_char] = 0;
- word_str = reinterpret_cast<char*>(conv_str.f_str);
+ ut_ad(conv_str->f_n_char <= conv_str->f_len);
+ conv_str->f_str[conv_str->f_n_char] = 0;
+ word_str = reinterpret_cast<char*>(conv_str->f_str);
} else {
word_str = reinterpret_cast<char*>(word->text.f_str);
}
@@ -3319,9 +3320,7 @@ i_s_fts_index_cache_fill_one_index(
}
}
- ut_free(conv_str.f_str);
-
- DBUG_RETURN(0);
+ DBUG_RETURN(ret);
}
/*******************************************************************//**
Fill the dynamic table INFORMATION_SCHEMA.INNODB_FT_INDEX_CACHED
@@ -3365,18 +3364,27 @@ i_s_fts_index_cache_fill(
ut_a(cache);
+ int ret = 0;
+ fts_string_t conv_str;
+ conv_str.f_len = system_charset_info->mbmaxlen
+ * FTS_MAX_WORD_LEN_IN_CHAR;
+ conv_str.f_str = static_cast<byte*>(ut_malloc(conv_str.f_len));
+
for (ulint i = 0; i < ib_vector_size(cache->indexes); i++) {
fts_index_cache_t* index_cache;
index_cache = static_cast<fts_index_cache_t*> (
ib_vector_get(cache->indexes, i));
- i_s_fts_index_cache_fill_one_index(index_cache, thd, tables);
+ BREAK_IF(ret = i_s_fts_index_cache_fill_one_index(
+ index_cache, thd, &conv_str, tables));
}
+ ut_free(conv_str.f_str);
+
dict_table_close(user_table, FALSE, FALSE);
- DBUG_RETURN(0);
+ DBUG_RETURN(ret);
}
/*******************************************************************//**
@@ -3679,8 +3687,6 @@ i_s_fts_index_table_fill_one_fetch(
}
}
- i_s_fts_index_table_free_one_fetch(words);
-
DBUG_RETURN(ret);
}
@@ -3694,13 +3700,13 @@ i_s_fts_index_table_fill_one_index(
/*===============================*/
dict_index_t* index, /*!< in: FTS index */
THD* thd, /*!< in: thread */
+ fts_string_t* conv_str, /*!< in/out: buffer */
TABLE_LIST* tables) /*!< in/out: tables to fill */
{
ib_vector_t* words;
mem_heap_t* heap;
fts_string_t word;
CHARSET_INFO* index_charset;
- fts_string_t conv_str;
dberr_t error;
int ret = 0;
@@ -3717,10 +3723,6 @@ i_s_fts_index_table_fill_one_index(
word.f_n_char = 0;
index_charset = fts_index_get_charset(index);
- conv_str.f_len = system_charset_info->mbmaxlen
- * FTS_MAX_WORD_LEN_IN_CHAR;
- conv_str.f_str = static_cast<byte*>(ut_malloc(conv_str.f_len));
- conv_str.f_n_char = 0;
/* Iterate through each auxiliary table as described in
fts_index_selector */
@@ -3754,17 +3756,17 @@ i_s_fts_index_table_fill_one_index(
/* Fill into tables */
ret = i_s_fts_index_table_fill_one_fetch(
- index_charset, thd, tables, words, &conv_str, has_more);
+ index_charset, thd, tables, words, conv_str,
+ has_more);
+ i_s_fts_index_table_free_one_fetch(words);
if (ret != 0) {
- i_s_fts_index_table_free_one_fetch(words);
goto func_exit;
}
} while (has_more);
}
func_exit:
- ut_free(conv_str.f_str);
mem_heap_free(heap);
DBUG_RETURN(ret);
@@ -3806,10 +3808,17 @@ i_s_fts_index_table_fill(
DBUG_RETURN(0);
}
+ int ret = 0;
+ fts_string_t conv_str;
+ conv_str.f_len = system_charset_info->mbmaxlen
+ * FTS_MAX_WORD_LEN_IN_CHAR;
+ conv_str.f_str = static_cast<byte*>(ut_malloc(conv_str.f_len));
+
for (index = dict_table_get_first_index(user_table);
index; index = dict_table_get_next_index(index)) {
if (index->type & DICT_FTS) {
- i_s_fts_index_table_fill_one_index(index, thd, tables);
+ BREAK_IF(ret = i_s_fts_index_table_fill_one_index(
+ index, thd, &conv_str, tables));
}
}
@@ -3817,7 +3826,9 @@ i_s_fts_index_table_fill(
rw_lock_s_unlock(&dict_operation_lock);
- DBUG_RETURN(0);
+ ut_free(conv_str.f_str);
+
+ DBUG_RETURN(ret);
}
/*******************************************************************//**
@@ -3982,6 +3993,8 @@ i_s_fts_config_fill(
DBUG_ASSERT(!dict_index_is_online_ddl(index));
}
+ int ret = 0;
+
while (fts_config_key[i]) {
fts_string_t value;
char* key_name;
@@ -4006,13 +4019,14 @@ i_s_fts_config_fill(
ut_free(key_name);
}
- OK(field_store_string(
- fields[FTS_CONFIG_KEY], fts_config_key[i]));
+ BREAK_IF(ret = field_store_string(
+ fields[FTS_CONFIG_KEY], fts_config_key[i]));
- OK(field_store_string(
- fields[FTS_CONFIG_VALUE], (const char*) value.f_str));
+ BREAK_IF(ret = field_store_string(
+ fields[FTS_CONFIG_VALUE],
+ reinterpret_cast<const char*>(value.f_str)));
- OK(schema_table_store_record(thd, table));
+ BREAK_IF(ret = schema_table_store_record(thd, table));
i++;
}
@@ -4025,7 +4039,7 @@ i_s_fts_config_fill(
rw_lock_s_unlock(&dict_operation_lock);
- DBUG_RETURN(0);
+ DBUG_RETURN(ret);
}
/*******************************************************************//**
@@ -4864,34 +4878,29 @@ i_s_innodb_buffer_page_fill(
state_str = NULL;
OK(fields[IDX_BUFFER_POOL_ID]->store(
- static_cast<double>(page_info->pool_id)));
+ page_info->pool_id, true));
OK(fields[IDX_BUFFER_BLOCK_ID]->store(
- static_cast<double>(page_info->block_id)));
+ page_info->block_id, true));
OK(fields[IDX_BUFFER_PAGE_SPACE]->store(
- static_cast<double>(page_info->space_id)));
+ page_info->space_id, true));
OK(fields[IDX_BUFFER_PAGE_NUM]->store(
- static_cast<double>(page_info->page_num)));
+ page_info->page_num, true));
OK(field_store_string(
fields[IDX_BUFFER_PAGE_TYPE],
i_s_page_type[page_info->page_type].type_str));
OK(fields[IDX_BUFFER_PAGE_FLUSH_TYPE]->store(
- page_info->flush_type));
+ page_info->flush_type, true));
OK(fields[IDX_BUFFER_PAGE_FIX_COUNT]->store(
- page_info->fix_count));
+ page_info->fix_count, true));
- if (page_info->hashed) {
- OK(field_store_string(
- fields[IDX_BUFFER_PAGE_HASHED], "YES"));
- } else {
- OK(field_store_string(
- fields[IDX_BUFFER_PAGE_HASHED], "NO"));
- }
+ OK(field_store_string(fields[IDX_BUFFER_PAGE_HASHED],
+ page_info->hashed ? "YES" : "NO"));
OK(fields[IDX_BUFFER_PAGE_NEWEST_MOD]->store(
(longlong) page_info->newest_mod, true));
@@ -4900,7 +4909,7 @@ i_s_innodb_buffer_page_fill(
(longlong) page_info->oldest_mod, true));
OK(fields[IDX_BUFFER_PAGE_ACCESS_TIME]->store(
- page_info->access_time));
+ page_info->access_time, true));
fields[IDX_BUFFER_PAGE_TABLE_NAME]->set_null();
@@ -4909,44 +4918,48 @@ i_s_innodb_buffer_page_fill(
/* If this is an index page, fetch the index name
and table name */
if (page_info->page_type == I_S_PAGE_TYPE_INDEX) {
- const dict_index_t* index;
+ bool ret = false;
mutex_enter(&dict_sys->mutex);
- index = dict_index_get_if_in_cache_low(
- page_info->index_id);
-
- if (index) {
+ if (const dict_index_t* index =
+ dict_index_get_if_in_cache_low(
+ page_info->index_id)) {
table_name_end = innobase_convert_name(
table_name, sizeof(table_name),
index->table_name,
strlen(index->table_name),
thd, TRUE);
- OK(fields[IDX_BUFFER_PAGE_TABLE_NAME]->store(
- table_name,
- static_cast<uint>(table_name_end - table_name),
- system_charset_info));
- fields[IDX_BUFFER_PAGE_TABLE_NAME]->set_notnull();
-
- OK(field_store_index_name(
- fields[IDX_BUFFER_PAGE_INDEX_NAME],
- index->name));
+ ret = fields[IDX_BUFFER_PAGE_TABLE_NAME]
+ ->store(table_name,
+ static_cast<uint>(
+ table_name_end
+ - table_name),
+ system_charset_info)
+ || field_store_index_name(
+ fields
+ [IDX_BUFFER_PAGE_INDEX_NAME],
+ index->name);
}
mutex_exit(&dict_sys->mutex);
+
+ OK(ret);
+
+ fields[IDX_BUFFER_PAGE_TABLE_NAME]->set_notnull();
}
OK(fields[IDX_BUFFER_PAGE_NUM_RECS]->store(
- page_info->num_recs));
+ page_info->num_recs, true));
OK(fields[IDX_BUFFER_PAGE_DATA_SIZE]->store(
- page_info->data_size));
+ page_info->data_size, true));
OK(fields[IDX_BUFFER_PAGE_ZIP_SIZE]->store(
- page_info->zip_ssize
- ? (UNIV_ZIP_SIZE_MIN >> 1) << page_info->zip_ssize
- : 0));
+ page_info->zip_ssize
+ ? (UNIV_ZIP_SIZE_MIN >> 1) << page_info->zip_ssize
+ : 0, true));
#if BUF_PAGE_STATE_BITS > 3
# error "BUF_PAGE_STATE_BITS > 3, please ensure that all 1<<BUF_PAGE_STATE_BITS values are checked for"
@@ -4984,32 +4997,29 @@ i_s_innodb_buffer_page_fill(
switch (page_info->io_fix) {
case BUF_IO_NONE:
- OK(field_store_string(fields[IDX_BUFFER_PAGE_IO_FIX],
- "IO_NONE"));
+ state_str = "IO_NONE";
break;
case BUF_IO_READ:
- OK(field_store_string(fields[IDX_BUFFER_PAGE_IO_FIX],
- "IO_READ"));
+ state_str = "IO_READ";
break;
case BUF_IO_WRITE:
- OK(field_store_string(fields[IDX_BUFFER_PAGE_IO_FIX],
- "IO_WRITE"));
+ state_str = "IO_WRITE";
break;
case BUF_IO_PIN:
- OK(field_store_string(fields[IDX_BUFFER_PAGE_IO_FIX],
- "IO_PIN"));
+ state_str = "IO_PIN";
break;
}
+ OK(field_store_string(fields[IDX_BUFFER_PAGE_IO_FIX],
+ state_str));
+
OK(field_store_string(fields[IDX_BUFFER_PAGE_IS_OLD],
(page_info->is_old) ? "YES" : "NO"));
OK(fields[IDX_BUFFER_PAGE_FREE_CLOCK]->store(
page_info->freed_page_clock));
- if (schema_table_store_record(thd, table)) {
- DBUG_RETURN(1);
- }
+ OK(schema_table_store_record(thd, table));
}
DBUG_RETURN(0);
@@ -5550,17 +5560,10 @@ i_s_innodb_buf_page_lru_fill(
ulint num_page) /*!< in: number of page info
cached */
{
- TABLE* table;
- Field** fields;
- mem_heap_t* heap;
-
DBUG_ENTER("i_s_innodb_buf_page_lru_fill");
- table = tables->table;
-
- fields = table->field;
-
- heap = mem_heap_create(1000);
+ TABLE* table = tables->table;
+ Field** fields = table->field;
/* Iterate through the cached array and fill the I_S table rows */
for (ulint i = 0; i < num_page; i++) {
@@ -5575,43 +5578,37 @@ i_s_innodb_buf_page_lru_fill(
page_info = info_array + i;
OK(fields[IDX_BUF_LRU_POOL_ID]->store(
- static_cast<double>(page_info->pool_id)));
-
+ page_info->pool_id, true));
OK(fields[IDX_BUF_LRU_POS]->store(
- static_cast<double>(page_info->block_id)));
+ page_info->block_id, true));
OK(fields[IDX_BUF_LRU_PAGE_SPACE]->store(
- static_cast<double>(page_info->space_id)));
+ page_info->space_id, true));
OK(fields[IDX_BUF_LRU_PAGE_NUM]->store(
- static_cast<double>(page_info->page_num)));
+ page_info->page_num, true));
OK(field_store_string(
- fields[IDX_BUF_LRU_PAGE_TYPE],
- i_s_page_type[page_info->page_type].type_str));
+ fields[IDX_BUF_LRU_PAGE_TYPE],
+ i_s_page_type[page_info->page_type].type_str));
OK(fields[IDX_BUF_LRU_PAGE_FLUSH_TYPE]->store(
- static_cast<double>(page_info->flush_type)));
+ page_info->flush_type, true));
OK(fields[IDX_BUF_LRU_PAGE_FIX_COUNT]->store(
- static_cast<double>(page_info->fix_count)));
+ page_info->fix_count, true));
- if (page_info->hashed) {
- OK(field_store_string(
- fields[IDX_BUF_LRU_PAGE_HASHED], "YES"));
- } else {
- OK(field_store_string(
- fields[IDX_BUF_LRU_PAGE_HASHED], "NO"));
- }
+ OK(field_store_string(fields[IDX_BUF_LRU_PAGE_HASHED],
+ page_info->hashed ? "YES" : "NO"));
OK(fields[IDX_BUF_LRU_PAGE_NEWEST_MOD]->store(
- page_info->newest_mod, true));
+ page_info->newest_mod, true));
OK(fields[IDX_BUF_LRU_PAGE_OLDEST_MOD]->store(
- page_info->oldest_mod, true));
+ page_info->oldest_mod, true));
OK(fields[IDX_BUF_LRU_PAGE_ACCESS_TIME]->store(
- page_info->access_time));
+ page_info->access_time, true));
fields[IDX_BUF_LRU_PAGE_TABLE_NAME]->set_null();
@@ -5620,43 +5617,47 @@ i_s_innodb_buf_page_lru_fill(
/* If this is an index page, fetch the index name
and table name */
if (page_info->page_type == I_S_PAGE_TYPE_INDEX) {
- const dict_index_t* index;
+ bool ret = false;
mutex_enter(&dict_sys->mutex);
- index = dict_index_get_if_in_cache_low(
- page_info->index_id);
-
- if (index) {
+ if (const dict_index_t* index =
+ dict_index_get_if_in_cache_low(
+ page_info->index_id)) {
table_name_end = innobase_convert_name(
table_name, sizeof(table_name),
index->table_name,
strlen(index->table_name),
thd, TRUE);
- OK(fields[IDX_BUF_LRU_PAGE_TABLE_NAME]->store(
- table_name,
- static_cast<uint>(table_name_end - table_name),
- system_charset_info));
- fields[IDX_BUF_LRU_PAGE_TABLE_NAME]->set_notnull();
-
- OK(field_store_index_name(
- fields[IDX_BUF_LRU_PAGE_INDEX_NAME],
- index->name));
+ ret = fields[IDX_BUF_LRU_PAGE_TABLE_NAME]
+ ->store(table_name,
+ static_cast<uint>(
+ table_name_end
+ - table_name),
+ system_charset_info)
+ || field_store_index_name(
+ fields
+ [IDX_BUF_LRU_PAGE_INDEX_NAME],
+ index->name);
}
mutex_exit(&dict_sys->mutex);
+
+ OK(ret);
+
+ fields[IDX_BUF_LRU_PAGE_TABLE_NAME]->set_notnull();
}
OK(fields[IDX_BUF_LRU_PAGE_NUM_RECS]->store(
- page_info->num_recs));
+ page_info->num_recs, true));
OK(fields[IDX_BUF_LRU_PAGE_DATA_SIZE]->store(
- page_info->data_size));
+ page_info->data_size, true));
OK(fields[IDX_BUF_LRU_PAGE_ZIP_SIZE]->store(
- page_info->zip_ssize ?
- 512 << page_info->zip_ssize : 0));
+ page_info->zip_ssize
+ ? 512 << page_info->zip_ssize : 0, true));
state = static_cast<enum buf_page_state>(page_info->page_state);
@@ -5685,35 +5686,31 @@ i_s_innodb_buf_page_lru_fill(
switch (page_info->io_fix) {
case BUF_IO_NONE:
- OK(field_store_string(fields[IDX_BUF_LRU_PAGE_IO_FIX],
- "IO_NONE"));
+ state_str = "IO_NONE";
break;
case BUF_IO_READ:
- OK(field_store_string(fields[IDX_BUF_LRU_PAGE_IO_FIX],
- "IO_READ"));
+ state_str = "IO_READ";
break;
case BUF_IO_WRITE:
- OK(field_store_string(fields[IDX_BUF_LRU_PAGE_IO_FIX],
- "IO_WRITE"));
+ state_str = "IO_WRITE";
+ break;
+ case BUF_IO_PIN:
+ state_str = "IO_PIN";
break;
}
+ OK(field_store_string(fields[IDX_BUF_LRU_PAGE_IO_FIX],
+ state_str));
+
OK(field_store_string(fields[IDX_BUF_LRU_PAGE_IS_OLD],
- (page_info->is_old) ? "YES" : "NO"));
+ page_info->is_old ? "YES" : "NO"));
OK(fields[IDX_BUF_LRU_PAGE_FREE_CLOCK]->store(
- page_info->freed_page_clock));
-
- if (schema_table_store_record(thd, table)) {
- mem_heap_free(heap);
- DBUG_RETURN(1);
- }
+ page_info->freed_page_clock, true));
- mem_heap_empty(heap);
+ OK(schema_table_store_record(thd, table));
}
- mem_heap_free(heap);
-
DBUG_RETURN(0);
}
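In the i_s.cc hunks above, the per-index helpers no longer allocate and free their own word-conversion buffer; the caller sizes it once, passes a pointer down, reuses it for every index, and frees it after the loop, while the first non-zero return code is propagated instead of being discarded. A stand-alone sketch of that shape, using stand-in types rather than the real fts_string_t/ut_malloc, might look like this:

#include <cstdlib>
#include <cstddef>

struct conv_buf { unsigned char* str; std::size_t len; std::size_t n_char; };

// Per-index helper: reuses the caller's buffer instead of allocating its own.
static int fill_one_index(int index_id, conv_buf* conv)
{
    conv->n_char = 0;
    (void) index_id;                 // word conversion and row storing would go here
    return 0;                        // 0 = success, as in the I_S fill functions
}

// Caller: allocate once, pass down, free once, propagate the first error.
static int fill_all_indexes(const int* ids, std::size_t n,
                            std::size_t mbmaxlen, std::size_t max_word_chars)
{
    int ret = 0;
    conv_buf conv;
    conv.len = mbmaxlen * max_word_chars;
    conv.str = static_cast<unsigned char*>(std::malloc(conv.len));
    if (!conv.str) return 1;

    for (std::size_t i = 0; i < n; i++) {
        if ((ret = fill_one_index(ids[i], &conv))) break;
    }

    std::free(conv.str);
    return ret;
}

int main()
{
    const int ids[] = { 1, 2, 3 };
    return fill_all_indexes(ids, 3, 4, 84);   // 4 and 84 are illustrative sizes only
}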
diff --git a/storage/xtradb/handler/i_s.h b/storage/xtradb/handler/i_s.h
index 55ef6e7ea42..4bb3ea33462 100644
--- a/storage/xtradb/handler/i_s.h
+++ b/storage/xtradb/handler/i_s.h
@@ -1,7 +1,7 @@
/*****************************************************************************
Copyright (c) 2007, 2013, Oracle and/or its affiliates. All Rights Reserved.
-Copyrigth (c) 2014, 2015, MariaDB Corporation
+Copyright (c) 2014, 2017, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -77,6 +77,8 @@ extern struct st_mysql_plugin i_s_innodb_changed_page_bitmaps;
DBUG_RETURN(1); \
}
+#define BREAK_IF(expr) if ((expr)) break
+
#define RETURN_IF_INNODB_NOT_STARTED(plugin_name) \
do { \
if (!srv_was_started) { \
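The BREAK_IF() macro added here is what the i_s.cc hunks use in place of OK(), which returns 1 from the middle of a row-filling loop. Breaking out instead lets the cleanup after the loop (freeing buffers, closing tables, releasing latches) run exactly once while the first error code is still reported. A minimal model, with stand-in definitions rather than the real InnoDB ones:

#include <cstdio>

#define BREAK_IF(expr) if ((expr)) break    // same shape as the macro above

static int store_row(int i) { return i == 3 ? 42 : 0; }   // pretend row 3 fails

static int fill_table(int nrows)
{
    int ret = 0;
    for (int i = 0; i < nrows; i++) {
        BREAK_IF(ret = store_row(i));       // stop at the first failure
    }
    /* cleanup that must always run would go here */
    return ret;                             // 42: the error is still propagated
}

int main()
{
    std::printf("%d\n", fill_table(5));
    return 0;
}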
diff --git a/storage/xtradb/ibuf/ibuf0ibuf.cc b/storage/xtradb/ibuf/ibuf0ibuf.cc
index e66568565e1..0445bb557e1 100644
--- a/storage/xtradb/ibuf/ibuf0ibuf.cc
+++ b/storage/xtradb/ibuf/ibuf0ibuf.cc
@@ -2963,8 +2963,7 @@ ibuf_get_volume_buffered_hash(
fold = ut_fold_binary(data, len);
hash += (fold / (CHAR_BIT * sizeof *hash)) % size;
- bitmask = static_cast<ulint>(
- 1 << (fold % (CHAR_BIT * sizeof(*hash))));
+ bitmask = static_cast<ulint>(1) << (fold % (CHAR_BIT * sizeof(*hash)));
if (*hash & bitmask) {
@@ -3733,7 +3732,7 @@ fail_exit:
if (mode == BTR_MODIFY_PREV) {
err = btr_cur_optimistic_insert(
- BTR_NO_LOCKING_FLAG,
+ BTR_NO_LOCKING_FLAG | BTR_NO_UNDO_LOG_FLAG,
cursor, &offsets, &offsets_heap,
ibuf_entry, &ins_rec,
&dummy_big_rec, 0, thr, &mtr);
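The bitmask change above is more than a reformat: in the old form the shift is evaluated on a plain int 1 before the cast, which is undefined once the shift count reaches 32, while sizeof(*hash) allows counts up to 63 on 64-bit builds. Widening first makes the shift happen in the wide type. A stand-alone illustration, with ulint modelled as a 64-bit integer:

#include <cassert>
#include <cstdint>

int main()
{
    typedef std::uint64_t ulint_t;          // stands in for ulint on a 64-bit build
    unsigned shift = 40;                    // fold % (CHAR_BIT * sizeof *hash) may reach 63

    // Old form: the shift happens on a 32-bit int, so it is undefined behaviour
    // for shift >= 32, and the cast to 64 bits comes too late to help.
    // ulint_t bad = static_cast<ulint_t>(1 << shift);

    // New form: widen first, then shift in the 64-bit type.
    ulint_t good = static_cast<ulint_t>(1) << shift;

    assert(good == UINT64_C(0x10000000000));   // bit 40 is set
    return 0;
}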
diff --git a/storage/xtradb/include/btr0cur.h b/storage/xtradb/include/btr0cur.h
index 960bd55d3d9..e478b33bf8e 100644
--- a/storage/xtradb/include/btr0cur.h
+++ b/storage/xtradb/include/btr0cur.h
@@ -1,6 +1,7 @@
/*****************************************************************************
Copyright (c) 1994, 2016, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 2017, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -220,15 +221,17 @@ btr_cur_optimistic_insert(
btr_cur_t* cursor, /*!< in: cursor on page after which to insert;
cursor stays valid */
ulint** offsets,/*!< out: offsets on *rec */
- mem_heap_t** heap, /*!< in/out: pointer to memory heap, or NULL */
+ mem_heap_t** heap, /*!< in/out: pointer to memory heap */
dtuple_t* entry, /*!< in/out: entry to insert */
rec_t** rec, /*!< out: pointer to inserted record if
succeed */
big_rec_t** big_rec,/*!< out: big rec vector whose fields have to
- be stored externally by the caller, or
- NULL */
+ be stored externally by the caller */
ulint n_ext, /*!< in: number of externally stored columns */
- que_thr_t* thr, /*!< in: query thread or NULL */
+ que_thr_t* thr, /*!< in/out: query thread; can be NULL if
+ !(~flags
+ & (BTR_NO_LOCKING_FLAG
+ | BTR_NO_UNDO_LOG_FLAG)) */
mtr_t* mtr) /*!< in/out: mini-transaction;
if this function returns DB_SUCCESS on
a leaf page of a secondary index in a
@@ -256,15 +259,17 @@ btr_cur_pessimistic_insert(
cursor stays valid */
ulint** offsets,/*!< out: offsets on *rec */
mem_heap_t** heap, /*!< in/out: pointer to memory heap
- that can be emptied, or NULL */
+ that can be emptied */
dtuple_t* entry, /*!< in/out: entry to insert */
rec_t** rec, /*!< out: pointer to inserted record if
succeed */
big_rec_t** big_rec,/*!< out: big rec vector whose fields have to
- be stored externally by the caller, or
- NULL */
+ be stored externally by the caller */
ulint n_ext, /*!< in: number of externally stored columns */
- que_thr_t* thr, /*!< in: query thread or NULL */
+ que_thr_t* thr, /*!< in/out: query thread; can be NULL if
+ !(~flags
+ & (BTR_NO_LOCKING_FLAG
+ | BTR_NO_UNDO_LOG_FLAG)) */
mtr_t* mtr) /*!< in/out: mini-transaction */
MY_ATTRIBUTE((nonnull(2,3,4,5,6,7,10), warn_unused_result));
/*************************************************************//**
@@ -392,12 +397,12 @@ btr_cur_pessimistic_update(
ulint** offsets,/*!< out: offsets on cursor->page_cur.rec */
mem_heap_t** offsets_heap,
/*!< in/out: pointer to memory heap
- that can be emptied, or NULL */
+ that can be emptied */
mem_heap_t* entry_heap,
/*!< in/out: memory heap for allocating
big_rec and the index tuple */
big_rec_t** big_rec,/*!< out: big rec vector whose fields have to
- be stored externally by the caller, or NULL */
+ be stored externally by the caller */
const upd_t* update, /*!< in: update vector; this is allowed also
contain trx id and roll ptr fields, but
the values in update vector have no effect */
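The reworded thr comments above encode "thr may be NULL only when both BTR_NO_LOCKING_FLAG and BTR_NO_UNDO_LOG_FLAG are set" as !(~flags & (BTR_NO_LOCKING_FLAG | BTR_NO_UNDO_LOG_FLAG)). The brute-force check below (with made-up flag values, not the real btr0cur constants) confirms the two readings are equivalent:

#include <cassert>

int main()
{
    const unsigned BTR_NO_UNDO_LOG_FLAG = 1;   // illustrative values only
    const unsigned BTR_NO_LOCKING_FLAG  = 2;
    const unsigned MASK = BTR_NO_UNDO_LOG_FLAG | BTR_NO_LOCKING_FLAG;

    for (unsigned flags = 0; flags < 8; flags++) {
        bool thr_may_be_null = !(~flags & MASK);              // form used in the comment
        assert(thr_may_be_null == ((flags & MASK) == MASK));  // "both flags are set"
    }
    return 0;
}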
diff --git a/storage/xtradb/include/btr0defragment.h b/storage/xtradb/include/btr0defragment.h
index 5c54b898e37..477824c1a35 100644
--- a/storage/xtradb/include/btr0defragment.h
+++ b/storage/xtradb/include/btr0defragment.h
@@ -1,7 +1,7 @@
/*****************************************************************************
Copyright (C) 2013, 2014 Facebook, Inc. All Rights Reserved.
-Copyright (C) 2014, 2015, MariaDB Corporation.
+Copyright (C) 2014, 2017, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -89,15 +89,14 @@ UNIV_INTERN
void
btr_defragment_save_defrag_stats_if_needed(
dict_index_t* index); /*!< in: index */
-/******************************************************************//**
-Thread that merges consecutive b-tree pages into fewer pages to defragment
-the index. */
+
+/** Merge consecutive b-tree pages into fewer pages to defragment indexes */
extern "C" UNIV_INTERN
os_thread_ret_t
-DECLARE_THREAD(btr_defragment_thread)(
-/*==========================================*/
- void* arg); /*!< in: a dummy parameter required by
- os_thread_create */
+DECLARE_THREAD(btr_defragment_thread)(void*);
+
+/** Whether btr_defragment_thread is active */
+extern bool btr_defragment_thread_active;
#endif /* !UNIV_HOTBACKUP */
#endif
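The new btr_defragment_thread_active flag follows the usual shutdown handshake for background threads: the thread marks itself active on entry and clears the flag on exit, so shutdown code can wait for it to drain. The sketch below shows that general pattern only; the names, timing, and use of std::atomic/std::thread are illustrative, not the actual btr0defragment code.

#include <atomic>
#include <chrono>
#include <thread>

static std::atomic<bool> defragment_thread_active(false);
static std::atomic<bool> shutdown_requested(false);

static void defragment_thread()
{
    defragment_thread_active = true;
    while (!shutdown_requested) {
        /* merge a batch of consecutive B-tree pages here */
        std::this_thread::sleep_for(std::chrono::milliseconds(10));
    }
    defragment_thread_active = false;      // tells shutdown the thread is gone
}

static void wait_for_defragment_shutdown()
{
    shutdown_requested = true;
    while (defragment_thread_active)
        std::this_thread::sleep_for(std::chrono::milliseconds(1));
}

int main()
{
    std::thread t(defragment_thread);
    std::this_thread::sleep_for(std::chrono::milliseconds(50));
    wait_for_defragment_shutdown();
    t.join();
    return 0;
}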
diff --git a/storage/xtradb/include/buf0buf.h b/storage/xtradb/include/buf0buf.h
index 9b4276efaa8..1899165ace0 100644
--- a/storage/xtradb/include/buf0buf.h
+++ b/storage/xtradb/include/buf0buf.h
@@ -1577,20 +1577,13 @@ directory (buf) to see it. Do not use from outside! */
typedef struct {
bool reserved; /*!< true if this slot is reserved
*/
-#ifdef HAVE_LZO
- byte* lzo_mem; /*!< Temporal memory used by LZO */
-#endif
byte* crypt_buf; /*!< for encryption the data needs to be
copied to a separate buffer before it's
encrypted&written. this as a page can be
read while it's being flushed */
- byte* crypt_buf_free; /*!< for encryption, allocated buffer
- that is then alligned */
byte* comp_buf; /*!< for compression we need
temporal buffer because page
can be read while it's being flushed */
- byte* comp_buf_free; /*!< for compression, allocated
- buffer that is then alligned */
byte* out_buf; /*!< resulting buffer after
encryption/compression. This is a
pointer and not allocated. */
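One plausible reading of the removed crypt_buf_free/comp_buf_free members is that each slot used to keep both the raw allocation and a hand-aligned view of it, and only needs a single pointer once the buffer comes from an aligned allocator. The contrast below is purely illustrative and is not the actual buf0buf allocation code:

#include <stdint.h>
#include <stdlib.h>

enum { ALIGN = 512 };   /* must be a power of two for the mask trick below */

/* Old shape: over-allocate, remember the raw pointer only so it can be freed
   later, and hand out an aligned view of it. */
static unsigned char* alloc_aligned_by_hand(size_t size, void** raw_out)
{
    void* raw = malloc(size + ALIGN);
    if (raw == NULL) return NULL;
    *raw_out = raw;                           /* the extra "*_free" pointer */
    uintptr_t p = (uintptr_t) raw;
    return (unsigned char*) ((p + ALIGN - 1) & ~(uintptr_t) (ALIGN - 1));
}

/* New shape: the allocator returns an aligned pointer that can be freed
   directly, so no second pointer needs to live in the slot structure. */
static unsigned char* alloc_aligned(size_t size)
{
    void* p = NULL;
    return posix_memalign(&p, ALIGN, size) == 0 ? (unsigned char*) p : NULL;
}

int main()
{
    void* raw = NULL;
    unsigned char* a = alloc_aligned_by_hand(4096, &raw);
    unsigned char* b = alloc_aligned(4096);
    free(raw);     /* old style frees the raw pointer */
    free(b);       /* new style frees the aligned pointer itself */
    (void) a;
    return 0;
}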
diff --git a/storage/xtradb/include/buf0dblwr.h b/storage/xtradb/include/buf0dblwr.h
index 5582778825c..7b7464761cc 100644
--- a/storage/xtradb/include/buf0dblwr.h
+++ b/storage/xtradb/include/buf0dblwr.h
@@ -1,7 +1,7 @@
/*****************************************************************************
-Copyright (c) 1995, 2014, Oracle and/or its affiliates. All Rights Reserved.
-Copyright (c) 2017, MariaDB Corporation. All Rights Reserved.
+Copyright (c) 1995, 2017, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 2017, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -39,13 +39,15 @@ extern buf_dblwr_t* buf_dblwr;
/** Set to TRUE when the doublewrite buffer is being created */
extern ibool buf_dblwr_being_created;
-/****************************************************************//**
-Creates the doublewrite buffer to a new InnoDB installation. The header of the
-doublewrite buffer is placed on the trx system header page. */
+/** Create the doublewrite buffer if the doublewrite buffer header
+is not present in the TRX_SYS page.
+@return whether the operation succeeded
+@retval true if the doublewrite buffer exists or was created
+@retval false if the creation failed (too small first data file) */
UNIV_INTERN
-void
-buf_dblwr_create(void);
-/*==================*/
+bool
+buf_dblwr_create()
+ MY_ATTRIBUTE((warn_unused_result));
/****************************************************************//**
At a database startup initializes the doublewrite buffer memory structure if
@@ -56,7 +58,7 @@ recovery, this function loads the pages from double write buffer into memory. */
void
buf_dblwr_init_or_load_pages(
/*=========================*/
- os_file_t file,
+ pfs_os_file_t file,
char* path,
bool load_corrupt_pages);
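buf_dblwr_create() changing from void to a bool tagged warn_unused_result means callers can no longer ignore the "first data file too small" failure. A hypothetical caller, with a stub standing in for the real function, would look like:

#include <cstdio>

// Stub standing in for the real buf_dblwr_create(); returns false to mimic the
// "too small first data file" failure path described above.
static bool buf_dblwr_create() { return false; }

static bool init_doublewrite()
{
    if (!buf_dblwr_create()) {       // the return value must now be checked
        std::fprintf(stderr, "cannot create the doublewrite buffer\n");
        return false;
    }
    return true;
}

int main() { return init_doublewrite() ? 0 : 1; }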
diff --git a/storage/xtradb/include/buf0flu.h b/storage/xtradb/include/buf0flu.h
index af50a5498ef..6089baf81e8 100644
--- a/storage/xtradb/include/buf0flu.h
+++ b/storage/xtradb/include/buf0flu.h
@@ -34,7 +34,7 @@ Created 11/5/1995 Heikki Tuuri
#include "buf0types.h"
/** Flag indicating if the page_cleaner is in active state. */
-extern ibool buf_page_cleaner_is_active;
+extern bool buf_page_cleaner_is_active;
/** Flag indicating if the lru_manager is in active state. */
extern bool buf_lru_manager_is_active;
diff --git a/storage/xtradb/include/data0type.ic b/storage/xtradb/include/data0type.ic
index 555852474aa..8f5cee0fd5f 100644
--- a/storage/xtradb/include/data0type.ic
+++ b/storage/xtradb/include/data0type.ic
@@ -576,7 +576,8 @@ dtype_get_fixed_size_low(
#else /* !UNIV_HOTBACKUP */
return(len);
#endif /* !UNIV_HOTBACKUP */
- /* fall through for variable-length charsets */
+ /* Treat as variable-length. */
+ /* Fall through */
case DATA_VARCHAR:
case DATA_BINARY:
case DATA_DECIMAL:
diff --git a/storage/xtradb/include/dict0dict.h b/storage/xtradb/include/dict0dict.h
index 6da8eb892d9..0290b884ece 100644
--- a/storage/xtradb/include/dict0dict.h
+++ b/storage/xtradb/include/dict0dict.h
@@ -1192,7 +1192,7 @@ dict_index_get_nth_col_pos(
const dict_index_t* index, /*!< in: index */
ulint n, /*!< in: column number */
ulint* prefix_col_pos) /*!< out: col num if prefix */
- __attribute__((nonnull(1), warn_unused_result));
+ MY_ATTRIBUTE((nonnull(1), warn_unused_result));
/********************************************************************//**
Looks for column n in an index.
@return position in internal representation of the index;
@@ -1207,7 +1207,7 @@ dict_index_get_nth_col_or_prefix_pos(
column prefixes too */
ulint* prefix_col_pos) /*!< out: col num if prefix */
- __attribute__((nonnull(1), warn_unused_result));
+ MY_ATTRIBUTE((nonnull(1), warn_unused_result));
/********************************************************************//**
Returns TRUE if the index contains a column or a prefix of that column.
@return TRUE if contains the column or its prefix */
diff --git a/storage/xtradb/include/dict0stats_bg.h b/storage/xtradb/include/dict0stats_bg.h
index d5f0870718d..8f3385eb22b 100644
--- a/storage/xtradb/include/dict0stats_bg.h
+++ b/storage/xtradb/include/dict0stats_bg.h
@@ -1,7 +1,7 @@
/*****************************************************************************
Copyright (c) 2012, 2016, Oracle and/or its affiliates. All Rights Reserved.
-Copyright (c) 2017, MariaDB Corporation. All Rights Reserved.
+Copyright (c) 2017, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -144,6 +144,10 @@ DECLARE_THREAD(dict_stats_thread)(
void* arg); /*!< in: a dummy parameter
required by os_thread_create */
+/** Shut down the dict_stats_thread. */
+void
+dict_stats_shutdown();
+
# ifndef UNIV_NONINL
# include "dict0stats_bg.ic"
# endif
diff --git a/storage/xtradb/include/fil0fil.h b/storage/xtradb/include/fil0fil.h
index 2f03d2aa0f5..a09833c3a73 100644
--- a/storage/xtradb/include/fil0fil.h
+++ b/storage/xtradb/include/fil0fil.h
@@ -1,6 +1,6 @@
/*****************************************************************************
-Copyright (c) 1995, 2016, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 1995, 2017, Oracle and/or its affiliates. All Rights Reserved.
Copyright (c) 2013, 2017, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify it under
@@ -210,9 +210,8 @@ struct fsp_open_info {
ibool success; /*!< Has the tablespace been opened? */
const char* check_msg; /*!< fil_check_first_page() message */
ibool valid; /*!< Is the tablespace valid? */
- os_file_t file; /*!< File handle */
+ pfs_os_file_t file; /*!< File handle */
char* filepath; /*!< File path to open */
- lsn_t lsn; /*!< Flushed LSN from header page */
ulint id; /*!< Space ID */
ulint flags; /*!< Tablespace flags */
ulint encryption_error; /*!< if an encryption error occurs */
@@ -228,7 +227,7 @@ struct fil_node_t {
belongs */
char* name; /*!< path to the file */
ibool open; /*!< TRUE if file open */
- os_file_t handle; /*!< OS handle to the file, if file open */
+ pfs_os_file_t handle; /*!< OS handle to the file, if file open */
os_event_t sync_event;/*!< Condition event to group and
serialize calls to fsync;
os_event_set() and os_event_reset()
@@ -351,9 +350,6 @@ struct fil_space_t {
compression failure */
fil_space_crypt_t* crypt_data;
/*!< tablespace crypt data or NULL */
- bool page_0_crypt_read;
- /*!< tablespace crypt data has been
- read */
ulint file_block_size;
/*!< file system block size */
@@ -643,17 +639,17 @@ void
fil_set_max_space_id_if_bigger(
/*===========================*/
ulint max_id);/*!< in: maximum known id */
+
#ifndef UNIV_HOTBACKUP
-/****************************************************************//**
-Writes the flushed lsn and the latest archived log number to the page
-header of the first page of each data file in the system tablespace.
-@return DB_SUCCESS or error number */
-UNIV_INTERN
+
+/** Write the flushed LSN to the page header of the first page in the
+system tablespace.
+@param[in] lsn flushed LSN
+@return DB_SUCCESS or error number */
dberr_t
-fil_write_flushed_lsn_to_data_files(
-/*================================*/
- lsn_t lsn, /*!< in: lsn to write */
- ulint arch_log_no); /*!< in: latest archived log file number */
+fil_write_flushed_lsn(
+ lsn_t lsn)
+ MY_ATTRIBUTE((warn_unused_result));
/** Acquire a tablespace when it could be dropped concurrently.
Used by background threads that do not necessarily hold proper locks
@@ -799,28 +795,28 @@ private:
fil_space_t* m_space;
};
-/*******************************************************************//**
-Reads the flushed lsn, arch no, and tablespace flag fields from a data
-file at database startup.
+/** Reads the flushed lsn, arch no, space_id and tablespace flag fields from
+the first page of the first data file at database startup.
+@param[in] data_file open data file
+@param[in]	one_read_already	true if the first data file is already
+					read
+@param[out] flags FSP_SPACE_FLAGS
+@param[out]	space_id	tablespace ID
+@param[out] flushed_lsn flushed lsn value
+@param[out] crypt_data encryption crypt data
@retval NULL on success, or if innodb_force_recovery is set
@return pointer to an error message string */
UNIV_INTERN
const char*
fil_read_first_page(
-/*================*/
- os_file_t data_file, /*!< in: open data file */
- ibool one_read_already, /*!< in: TRUE if min and max
- parameters below already
- contain sensible data */
- ulint* flags, /*!< out: FSP_SPACE_FLAGS */
- ulint* space_id, /*!< out: tablespace ID */
- lsn_t* min_flushed_lsn, /*!< out: min of flushed
- lsn values in data files */
- lsn_t* max_flushed_lsn, /*!< out: max of flushed
- lsn values in data files */
- fil_space_crypt_t** crypt_data) /*!< out: crypt data */
-
- __attribute__((warn_unused_result));
+ pfs_os_file_t data_file,
+ ibool one_read_already,
+ ulint* flags,
+ ulint* space_id,
+ lsn_t* flushed_lsn,
+ fil_space_crypt_t** crypt_data)
+ MY_ATTRIBUTE((warn_unused_result));
+
#endif /* !UNIV_HOTBACKUP */
/*******************************************************************//**
Parses the body of a log record written about an .ibd file operation. That is,
@@ -1006,7 +1002,7 @@ fil_create_new_single_table_tablespace(
must be >= FIL_IBD_FILE_INITIAL_SIZE */
fil_encryption_t mode, /*!< in: encryption mode */
ulint key_id) /*!< in: encryption key_id */
- __attribute__((nonnull, warn_unused_result));
+ MY_ATTRIBUTE((nonnull(2), warn_unused_result));
#ifndef UNIV_HOTBACKUP
/** Try to adjust FSP_SPACE_FLAGS if they differ from the expectations.
(Typically when upgrading from MariaDB 10.1.0..10.1.20.)
@@ -1340,12 +1336,12 @@ struct PageCallback {
Called for every page in the tablespace. If the page was not
updated then its state must be set to BUF_PAGE_NOT_USED. For
compressed tables the page descriptor memory will be at offset:
- block->frame + UNIV_PAGE_SIZE;
+ block->frame + UNIV_PAGE_SIZE;
@param offset - physical offset within the file
@param block - block read from file, note it is not from the buffer pool
@retval DB_SUCCESS or error code. */
virtual dberr_t operator()(
- os_offset_t offset,
+ os_offset_t offset,
buf_block_t* block) UNIV_NOTHROW = 0;
/**
@@ -1353,7 +1349,7 @@ struct PageCallback {
to open it for the file that is being iterated over.
@param filename - then physical name of the tablespace file.
@param file - OS file handle */
- void set_file(const char* filename, os_file_t file) UNIV_NOTHROW
+ void set_file(const char* filename, pfs_os_file_t file) UNIV_NOTHROW
{
m_file = file;
m_filepath = filename;
@@ -1389,7 +1385,7 @@ struct PageCallback {
ulint m_page_size;
/** File handle to the tablespace */
- os_file_t m_file;
+ pfs_os_file_t m_file;
/** Physical file path. */
const char* m_filepath;
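The fil0fil.h changes replace the min/max pair of flushed LSNs collected from every system tablespace data file with a single value read from, and written to, only the first page of the first data file. The toy reader below mirrors that interface shape; it is an illustration, not the real fil_read_first_page():

#include <cassert>
#include <cstddef>
#include <cstdint>

typedef std::uint64_t lsn_t;

struct datafile { lsn_t flushed_lsn; };

// New-style reader: only the first data file carries a meaningful flushed LSN.
static lsn_t read_flushed_lsn(const datafile* files, std::size_t n)
{
    assert(n > 0);
    (void) n;
    return files[0].flushed_lsn;
}

int main()
{
    const datafile files[] = { {100}, {0}, {0} };   // later files no longer store one
    assert(read_flushed_lsn(files, 3) == 100);
    return 0;
}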
diff --git a/storage/xtradb/include/fil0pagecompress.h b/storage/xtradb/include/fil0pagecompress.h
index 73667c5420e..03e16699ce3 100644
--- a/storage/xtradb/include/fil0pagecompress.h
+++ b/storage/xtradb/include/fil0pagecompress.h
@@ -65,9 +65,8 @@ fil_compress_page(
ulint level, /* in: compression level */
ulint block_size, /*!< in: block size */
bool encrypted, /*!< in: is page also encrypted */
- ulint* out_len, /*!< out: actual length of compressed
+ ulint* out_len); /*!< out: actual length of compressed
page */
- byte* lzo_mem); /*!< in: temporal memory used by LZO */
/****************************************************************//**
For page compressed pages decompress the page after actual read
diff --git a/storage/xtradb/include/fsp0fsp.h b/storage/xtradb/include/fsp0fsp.h
index 6ed78eba6f9..715572199ab 100644
--- a/storage/xtradb/include/fsp0fsp.h
+++ b/storage/xtradb/include/fsp0fsp.h
@@ -519,16 +519,14 @@ fsp_header_init_fields(
ulint space_id, /*!< in: space id */
ulint flags); /*!< in: tablespace flags (FSP_SPACE_FLAGS):
0, or table->flags if newer than COMPACT */
-/**********************************************************************//**
-Initializes the space header of a new created space and creates also the
-insert buffer tree root if space == 0. */
+/** Initialize a tablespace header.
+@param[in] space_id space id
+@param[in] size current size in blocks
+@param[in,out] mtr mini-transaction */
UNIV_INTERN
void
-fsp_header_init(
-/*============*/
- ulint space, /*!< in: space id */
- ulint size, /*!< in: current size in blocks */
- mtr_t* mtr); /*!< in/out: mini-transaction */
+fsp_header_init(ulint space_id, ulint size, mtr_t* mtr);
+
/**********************************************************************//**
Increases the space size field of a space. */
UNIV_INTERN
diff --git a/storage/xtradb/include/ha0ha.h b/storage/xtradb/include/ha0ha.h
index 7351b407e8c..58eb581e76a 100644
--- a/storage/xtradb/include/ha0ha.h
+++ b/storage/xtradb/include/ha0ha.h
@@ -1,6 +1,6 @@
/*****************************************************************************
-Copyright (c) 1994, 2011, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 1994, 2016, Oracle and/or its affiliates. All Rights Reserved.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -107,7 +107,7 @@ chosen to be a slightly bigger prime number.
@param level in: level of the mutexes in the latching order
@param n_m in: number of mutexes to protect the hash table;
must be a power of 2, or 0 */
-# define ha_create(n_c,n_m,type,level) ha_create_func(n_c,level,n_m,type)
+# define ib_create(n_c,n_m,type,level) ha_create_func(n_c,level,n_m,type)
#else /* UNIV_SYNC_DEBUG */
/** Creates a hash table.
@return own: created table
@@ -116,7 +116,7 @@ chosen to be a slightly bigger prime number.
@param level in: level of the mutexes in the latching order
@param n_m in: number of mutexes to protect the hash table;
must be a power of 2, or 0 */
-# define ha_create(n_c,n_m,type,level) ha_create_func(n_c,n_m,type)
+# define ib_create(n_c,n_m,type,level) ha_create_func(n_c,n_m,type)
#endif /* UNIV_SYNC_DEBUG */
/*************************************************************//**
diff --git a/storage/xtradb/include/ha_prototypes.h b/storage/xtradb/include/ha_prototypes.h
index a161ec8c06c..b053be9e61d 100644
--- a/storage/xtradb/include/ha_prototypes.h
+++ b/storage/xtradb/include/ha_prototypes.h
@@ -1,6 +1,7 @@
/*****************************************************************************
Copyright (c) 2006, 2016, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 2017, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -144,7 +145,7 @@ enum durability_properties
thd_requested_durability(
/*=====================*/
const THD* thd) /*!< in: thread handle */
- MY_ATTRIBUTE((nonnull, warn_unused_result));
+ MY_ATTRIBUTE((warn_unused_result));
/******************************************************************//**
Returns true if the transaction this thread is processing has edited
diff --git a/storage/xtradb/include/log0online.h b/storage/xtradb/include/log0online.h
index 722336dd6b4..5c3e7d07fd9 100644
--- a/storage/xtradb/include/log0online.h
+++ b/storage/xtradb/include/log0online.h
@@ -130,7 +130,7 @@ log_online_bitmap_iterator_next(
/** Struct for single bitmap file information */
struct log_online_bitmap_file_struct {
char name[FN_REFLEN]; /*!< Name with full path */
- os_file_t file; /*!< Handle to opened file */
+ pfs_os_file_t file; /*!< Handle to opened file */
ib_uint64_t size; /*!< Size of the file */
os_offset_t offset; /*!< Offset of the next read,
or count of already-read bytes
diff --git a/storage/xtradb/include/log0recv.h b/storage/xtradb/include/log0recv.h
index e7b6a937f01..73d53d2ddab 100644
--- a/storage/xtradb/include/log0recv.h
+++ b/storage/xtradb/include/log0recv.h
@@ -137,26 +137,25 @@ a freshly read page)
*/
# define recv_recover_page(jri, block) recv_recover_page_func(block)
#endif /* !UNIV_HOTBACKUP */
-/********************************************************//**
-Recovers from a checkpoint. When this function returns, the database is able
+
+/** Recovers from a checkpoint. When this function returns, the database is able
to start processing of new user transactions, but the function
recv_recovery_from_checkpoint_finish should be called later to complete
the recovery and free the resources used in it.
+@param[in] type LOG_CHECKPOINT or LOG_ARCHIVE
+@param[in] limit_lsn recover up to this lsn if possible
+@param[in] flushed_lsn flushed lsn from first data file
@return error code or DB_SUCCESS */
UNIV_INTERN
dberr_t
recv_recovery_from_checkpoint_start_func(
-/*=====================================*/
#ifdef UNIV_LOG_ARCHIVE
- ulint type, /*!< in: LOG_CHECKPOINT or
- LOG_ARCHIVE */
- lsn_t limit_lsn, /*!< in: recover up to this lsn
- if possible */
+ ulint type,
+ lsn_t limit_lsn,
#endif /* UNIV_LOG_ARCHIVE */
- lsn_t min_flushed_lsn,/*!< in: min flushed lsn from
- data files */
- lsn_t max_flushed_lsn);/*!< in: max flushed lsn from
- data files */
+ lsn_t flushed_lsn)
+ MY_ATTRIBUTE((warn_unused_result));
+
#ifdef UNIV_LOG_ARCHIVE
/** Wrapper for recv_recovery_from_checkpoint_start_func().
Recovers from a checkpoint. When this function returns, the database is able
@@ -165,11 +164,10 @@ recv_recovery_from_checkpoint_finish should be called later to complete
the recovery and free the resources used in it.
@param type in: LOG_CHECKPOINT or LOG_ARCHIVE
@param lim in: recover up to this log sequence number if possible
-@param min in: minimum flushed log sequence number from data files
-@param max in: maximum flushed log sequence number from data files
+@param lsn in: flushed log sequence number from first data file
@return error code or DB_SUCCESS */
-# define recv_recovery_from_checkpoint_start(type,lim,min,max) \
- recv_recovery_from_checkpoint_start_func(type,lim,min,max)
+# define recv_recovery_from_checkpoint_start(type,lim,lsn) \
+ recv_recovery_from_checkpoint_start_func(type,lim,lsn)
#else /* UNIV_LOG_ARCHIVE */
/** Wrapper for recv_recovery_from_checkpoint_start_func().
Recovers from a checkpoint. When this function returns, the database is able
@@ -178,12 +176,12 @@ recv_recovery_from_checkpoint_finish should be called later to complete
the recovery and free the resources used in it.
@param type ignored: LOG_CHECKPOINT or LOG_ARCHIVE
@param lim ignored: recover up to this log sequence number if possible
-@param min in: minimum flushed log sequence number from data files
-@param max in: maximum flushed log sequence number from data files
+@param lsn in: flushed log sequence number from first data file
@return error code or DB_SUCCESS */
-# define recv_recovery_from_checkpoint_start(type,lim,min,max) \
- recv_recovery_from_checkpoint_start_func(min,max)
+# define recv_recovery_from_checkpoint_start(type,lim,lsn) \
+ recv_recovery_from_checkpoint_start_func(lsn)
#endif /* UNIV_LOG_ARCHIVE */
+
/********************************************************//**
Completes recovery from a checkpoint. */
UNIV_INTERN
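The recv_recovery_from_checkpoint_start() wrapper now forwards a single flushed LSN, and without UNIV_LOG_ARCHIVE it silently drops the type/limit arguments so call sites stay identical across both builds. A compilable miniature of that macro arrangement (names shortened, not the real log0recv declarations):

#include <cstdio>

typedef unsigned long long lsn_t;

#ifdef UNIV_LOG_ARCHIVE
static int recv_start_func(int type, lsn_t limit_lsn, lsn_t flushed_lsn)
{
    std::printf("type=%d limit=%llu lsn=%llu\n", type, limit_lsn, flushed_lsn);
    return 0;
}
# define recv_start(type, lim, lsn) recv_start_func(type, lim, lsn)
#else
static int recv_start_func(lsn_t flushed_lsn)
{
    std::printf("lsn=%llu\n", flushed_lsn);
    return 0;
}
# define recv_start(type, lim, lsn) recv_start_func(lsn)   // type and lim are dropped
#endif

int main()
{
    return recv_start(0, 0, 1234);   // one call site serves both configurations
}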
diff --git a/storage/xtradb/include/mach0data.ic b/storage/xtradb/include/mach0data.ic
index 3904d96c09f..3b1cf9c0378 100644
--- a/storage/xtradb/include/mach0data.ic
+++ b/storage/xtradb/include/mach0data.ic
@@ -1,6 +1,7 @@
/*****************************************************************************
Copyright (c) 1995, 2009, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 2017, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -779,13 +780,13 @@ mach_swap_byte_order(
dest += len;
switch (len & 0x7) {
- case 0: *--dest = *from++;
- case 7: *--dest = *from++;
- case 6: *--dest = *from++;
- case 5: *--dest = *from++;
- case 4: *--dest = *from++;
- case 3: *--dest = *from++;
- case 2: *--dest = *from++;
+ case 0: *--dest = *from++; /* fall through */
+ case 7: *--dest = *from++; /* fall through */
+ case 6: *--dest = *from++; /* fall through */
+ case 5: *--dest = *from++; /* fall through */
+ case 4: *--dest = *from++; /* fall through */
+ case 3: *--dest = *from++; /* fall through */
+ case 2: *--dest = *from++; /* fall through */
case 1: *--dest = *from;
}
}
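The mach_swap_byte_order() hunk only adds the /* fall through */ markers that keep -Wimplicit-fallthrough quiet; the deliberately cascading switch itself is unchanged. A self-contained copy of the same technique, for a buffer of 1 to 8 bytes:

#include <cassert>
#include <cstddef>

// Copies `len` bytes (1..8; len == 8 maps to case 0) in reverse order.
static void swap_byte_order(unsigned char* dest, const unsigned char* from,
                            std::size_t len)
{
    dest += len;
    switch (len & 0x7) {
    case 0: *--dest = *from++; /* fall through */
    case 7: *--dest = *from++; /* fall through */
    case 6: *--dest = *from++; /* fall through */
    case 5: *--dest = *from++; /* fall through */
    case 4: *--dest = *from++; /* fall through */
    case 3: *--dest = *from++; /* fall through */
    case 2: *--dest = *from++; /* fall through */
    case 1: *--dest = *from;
    }
}

int main()
{
    const unsigned char in[4] = { 1, 2, 3, 4 };
    unsigned char out[4] = { 0, 0, 0, 0 };
    swap_byte_order(out, in, 4);
    assert(out[0] == 4 && out[1] == 3 && out[2] == 2 && out[3] == 1);
    return 0;
}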
diff --git a/storage/xtradb/include/os0file.h b/storage/xtradb/include/os0file.h
index 06bb6a6fbac..b17e09cf0fa 100644
--- a/storage/xtradb/include/os0file.h
+++ b/storage/xtradb/include/os0file.h
@@ -1,6 +1,6 @@
/***********************************************************************
-Copyright (c) 1995, 2016, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 1995, 2017, Oracle and/or its affiliates. All Rights Reserved.
Copyright (c) 2009, Percona Inc.
Copyright (c) 2013, 2017, MariaDB Corporation.
@@ -65,26 +65,54 @@ extern ibool os_aio_print_debug;
/** File offset in bytes */
typedef ib_uint64_t os_offset_t;
-#ifdef __WIN__
-#define SRV_PATH_SEPARATOR '\\'
+#ifdef _WIN32
+# define SRV_PATH_SEPARATOR '\\'
/** File handle */
-# define os_file_t HANDLE
-# define os_file_invalid INVALID_HANDLE_VALUE
+typedef HANDLE os_file_t;
/** Convert a C file descriptor to a native file handle
@param fd file descriptor
@return native file handle */
-# define OS_FILE_FROM_FD(fd) (HANDLE) _get_osfhandle(fd)
+# define OS_FILE_FROM_FD(fd) reinterpret_cast<HANDLE>(_get_osfhandle(fd))
#else
-#define SRV_PATH_SEPARATOR '/'
+# define SRV_PATH_SEPARATOR '/'
/** File handle */
typedef int os_file_t;
-# define os_file_invalid (-1)
/** Convert a C file descriptor to a native file handle
@param fd file descriptor
@return native file handle */
# define OS_FILE_FROM_FD(fd) fd
#endif
+/** File descriptor with optional PERFORMANCE_SCHEMA instrumentation */
+struct pfs_os_file_t
+{
+ /** Default constructor */
+ pfs_os_file_t() : m_file(
+#ifdef _WIN32
+ INVALID_HANDLE_VALUE
+#else
+ -1
+#endif
+ )
+#ifdef UNIV_PFS_IO
+ , m_psi(NULL)
+#endif
+ {}
+
+ /** The wrapped file handle */
+ os_file_t m_file;
+#ifdef UNIV_PFS_IO
+ /** PERFORMANCE_SCHEMA descriptor */
+ struct PSI_file *m_psi;
+#endif
+ /** Implicit type conversion.
+ @return the wrapped file handle */
+ operator os_file_t() const { return m_file; }
+ /** Assignment operator.
+ @param[in] file file handle to be assigned */
+ void operator=(os_file_t file) { m_file = file; }
+};
+
/** Umask for creating files */
extern ulint os_innodb_umask;
@@ -120,6 +148,21 @@ enum os_file_create_t {
ON_ERROR_NO_EXIT is set */
};
+/** Options for os_file_advise_func @{ */
+enum os_file_advise_t {
+ OS_FILE_ADVISE_NORMAL = 1, /*!< no advice on access pattern
+ (default) */
+ OS_FILE_ADVISE_RANDOM = 2, /*!< access in random order */
+ OS_FILE_ADVISE_SEQUENTIAL = 4, /*!< access the specified data
+ sequentially (with lower offsets read
+ before higher ones) */
+ OS_FILE_ADVISE_WILLNEED = 8, /*!< specified data will be accessed
+ in the near future */
+ OS_FILE_ADVISE_DONTNEED = 16, /*!< specified data will not be
+ accessed in the near future */
+ OS_FILE_ADVISE_NOREUSE = 32 /*!< access only once */
+};
+
#define OS_FILE_READ_ONLY 333
#define OS_FILE_READ_WRITE 444
#define OS_FILE_READ_ALLOW_DELETE 555 /* for mysqlbackup */
@@ -221,6 +264,8 @@ extern mysql_pfs_key_t innodb_file_bmp_key;
various file I/O operations with performance schema.
1) register_pfs_file_open_begin() and register_pfs_file_open_end() are
used to register file creation, opening, closing and renaming.
+2) register_pfs_file_rename_begin() and register_pfs_file_rename_end()
+are used to register file renaming
2) register_pfs_file_io_begin() and register_pfs_file_io_end() are
used to register actual file read, write and flush
3) register_pfs_file_close_begin() and register_pfs_file_close_end()
@@ -230,17 +275,30 @@ are used to register file deletion operations*/
do { \
locker = PSI_FILE_CALL(get_thread_file_name_locker)( \
state, key, op, name, &locker); \
- if (UNIV_LIKELY(locker != NULL)) { \
+ if (locker != NULL) { \
PSI_FILE_CALL(start_file_open_wait)( \
locker, src_file, src_line); \
} \
} while (0)
-# define register_pfs_file_open_end(locker, file) \
+# define register_pfs_file_open_end(locker, file, result) \
do { \
- if (UNIV_LIKELY(locker != NULL)) { \
- PSI_FILE_CALL(end_file_open_wait_and_bind_to_descriptor)(\
- locker, file); \
+ if (locker != NULL) { \
+ file.m_psi = PSI_FILE_CALL( \
+ end_file_open_wait)( \
+ locker, result); \
+ } \
+} while (0)
+
+# define register_pfs_file_rename_begin(state, locker, key, op, name, \
+ src_file, src_line) \
+ register_pfs_file_open_begin(state, locker, key, op, name, \
+ src_file, src_line) \
+
+# define register_pfs_file_rename_end(locker, result) \
+do { \
+ if (locker != NULL) { \
+ PSI_FILE_CALL(end_file_open_wait)(locker, result); \
} \
} while (0)
@@ -266,9 +324,9 @@ do { \
# define register_pfs_file_io_begin(state, locker, file, count, op, \
src_file, src_line) \
do { \
- locker = PSI_FILE_CALL(get_thread_file_descriptor_locker)( \
- state, file, op); \
- if (UNIV_LIKELY(locker != NULL)) { \
+ locker = PSI_FILE_CALL(get_thread_file_stream_locker)( \
+ state, file.m_psi, op); \
+ if (locker != NULL) { \
PSI_FILE_CALL(start_file_wait)( \
locker, count, src_file, src_line); \
} \
@@ -276,7 +334,7 @@ do { \
# define register_pfs_file_io_end(locker, count) \
do { \
- if (UNIV_LIKELY(locker != NULL)) { \
+ if (locker != NULL) { \
PSI_FILE_CALL(end_file_wait)(locker, count); \
} \
} while (0)
@@ -290,11 +348,16 @@ os_file_create
os_file_create_simple
os_file_create_simple_no_error_handling
os_file_close
+os_file_close_no_error_handling
os_file_rename
os_aio
os_file_read
os_file_read_no_error_handling
+os_file_read_no_error_handling_int_fd
os_file_write
+os_file_write_int_fd
+os_file_set_eof_at
+os_file_allocate
The wrapper functions have the prefix of "innodb_". */
@@ -315,6 +378,9 @@ The wrapper functions have the prefix of "innodb_". */
# define os_file_close(file) \
pfs_os_file_close_func(file, __FILE__, __LINE__)
+# define os_file_close_no_error_handling(file) \
+ pfs_os_file_close_no_error_handling_func(file, __FILE__, __LINE__)
+
# define os_aio(type, is_log, mode, name, file, buf, offset, \
n, page_size, message1, message2, space_id, \
trx, write_size) \
@@ -334,9 +400,18 @@ The wrapper functions have the prefix of "innodb_". */
pfs_os_file_read_no_error_handling_func(file, buf, offset, n, \
__FILE__, __LINE__)
-# define os_file_write(name, file, buf, offset, n) \
- pfs_os_file_write_func(name, file, buf, offset, n, \
- __FILE__, __LINE__)
+# define os_file_read_no_error_handling_int_fd( \
+ file, buf, offset, n) \
+ pfs_os_file_read_no_error_handling_int_fd_func( \
+ file, buf, offset, n, __FILE__, __LINE__)
+
+# define os_file_write(name, file, buf, offset, n) \
+ pfs_os_file_write_func(name, file, buf, offset, \
+ n, __FILE__, __LINE__)
+
+# define os_file_write_int_fd(name, file, buf, offset, n) \
+ pfs_os_file_write_int_fd_func(name, file, buf, offset, \
+ n, __FILE__, __LINE__)
# define os_file_flush(file) \
pfs_os_file_flush_func(file, __FILE__, __LINE__)
@@ -349,6 +424,15 @@ The wrapper functions have the prefix of "innodb_". */
# define os_file_delete_if_exists(key, name) \
pfs_os_file_delete_if_exists_func(key, name, __FILE__, __LINE__)
+
+# define os_file_set_eof_at(file, new_len) \
+ pfs_os_file_set_eof_at_func(file, new_len, __FILE__, __LINE__)
+
+# ifdef HAVE_POSIX_FALLOCATE
+# define os_file_allocate(file, offset, len) \
+ pfs_os_file_allocate_func(file, offset, len, __FILE__, __LINE__)
+# endif
+
#else /* UNIV_PFS_IO */
/* If UNIV_PFS_IO is not defined, these I/O APIs point
@@ -364,7 +448,11 @@ to original un-instrumented file I/O APIs */
os_file_create_simple_no_error_handling_func( \
name, create_mode, access, success, atomic_writes)
-# define os_file_close(file) os_file_close_func(file)
+# define os_file_close(file) \
+ os_file_close_func(file)
+
+# define os_file_close_no_error_handling(file) \
+ os_file_close_no_error_handling_func(file)
# define os_aio(type, is_log, mode, name, file, buf, offset, n, page_size, message1, \
message2, space_id, trx, write_size) \
@@ -379,11 +467,17 @@ to original un-instrumented file I/O APIs */
# define os_file_read_no_error_handling(file, buf, offset, n) \
os_file_read_no_error_handling_func(file, buf, offset, n)
+# define os_file_read_no_error_handling_int_fd( \
+ file, buf, offset, n) \
+ os_file_read_no_error_handling_func(file, buf, offset, n)
+# define os_file_write_int_fd(name, file, buf, offset, n) \
+ os_file_write_func(name, file, buf, offset, n)
# define os_file_write(name, file, buf, offset, n) \
os_file_write_func(name, file, buf, offset, n)
-# define os_file_flush(file) os_file_flush_func(file)
+
+# define os_file_flush(file) os_file_flush_func(file)
# define os_file_rename(key, oldpath, newpath) \
os_file_rename_func(oldpath, newpath)
@@ -393,6 +487,9 @@ to original un-instrumented file I/O APIs */
# define os_file_delete_if_exists(key, name) \
os_file_delete_if_exists_func(name)
+# define os_file_set_eof_at(file, new_len) \
+ os_file_set_eof_at_func(file, new_len)
+
#endif /* UNIV_PFS_IO */
/* File types for directory entry data type */
@@ -530,7 +627,7 @@ A simple function to open or create a file.
@return own: handle to the file, not defined if error, error number
can be retrieved with os_file_get_last_error */
UNIV_INTERN
-os_file_t
+pfs_os_file_t
os_file_create_simple_no_error_handling_func(
/*=========================================*/
const char* name, /*!< in: name of the file or path as a
@@ -565,7 +662,7 @@ Opens an existing file or creates a new.
@return own: handle to the file, not defined if error, error number
can be retrieved with os_file_get_last_error */
UNIV_INTERN
-os_file_t
+pfs_os_file_t
os_file_create_func(
/*================*/
const char* name, /*!< in: name of the file or path as a
@@ -626,6 +723,42 @@ ibool
os_file_close_func(
/*===============*/
os_file_t file); /*!< in, own: handle to a file */
+/***********************************************************************//**
+NOTE! Use the corresponding macro os_file_close(), not directly this
+function!
+Closes a file handle. In case of error, error number can be retrieved with
+os_file_get_last_error.
+@return TRUE if success */
+UNIV_INTERN
+bool
+os_file_close_no_error_handling_func(
+/*===============*/
+ os_file_t file); /*!< in, own: handle to a file */
+
+/***********************************************************************//**
+NOTE! Please use the corresponding macro os_file_set_eof_at(), not
+directly this function!
+Truncates a file at the specified position.
+@return TRUE if success */
+UNIV_INTERN
+bool
+os_file_set_eof_at_func(
+ os_file_t file, /*!< in: handle to a file */
+ ib_uint64_t new_len);/*!< in: new file length */
+
+#ifdef HAVE_POSIX_FALLOCATE
+/***********************************************************************//**
+NOTE! Please use the corresponding macro os_file_allocate(), not
+directly this function!
+Ensures that disk space is allocated for the file.
+@return TRUE if success */
+UNIV_INTERN
+bool
+os_file_allocate_func(
+ os_file_t file, /*!< in, own: handle to a file */
+ os_offset_t offset, /*!< in: file region offset */
+ os_offset_t len); /*!< in: file region length */
+#endif
#ifdef UNIV_PFS_IO
/****************************************************************//**
@@ -636,7 +769,7 @@ os_file_create_simple() which opens or creates a file.
@return own: handle to the file, not defined if error, error number
can be retrieved with os_file_get_last_error */
UNIV_INLINE
-os_file_t
+pfs_os_file_t
pfs_os_file_create_simple_func(
/*===========================*/
mysql_pfs_key_t key, /*!< in: Performance Schema Key */
@@ -661,7 +794,7 @@ monitor file creation/open.
@return own: handle to the file, not defined if error, error number
can be retrieved with os_file_get_last_error */
UNIV_INLINE
-os_file_t
+pfs_os_file_t
pfs_os_file_create_simple_no_error_handling_func(
/*=============================================*/
mysql_pfs_key_t key, /*!< in: Performance Schema Key */
@@ -687,7 +820,7 @@ Add instrumentation to monitor file creation/open.
@return own: handle to the file, not defined if error, error number
can be retrieved with os_file_get_last_error */
UNIV_INLINE
-os_file_t
+pfs_os_file_t
pfs_os_file_create_func(
/*====================*/
mysql_pfs_key_t key, /*!< in: Performance Schema Key */
@@ -718,7 +851,20 @@ UNIV_INLINE
ibool
pfs_os_file_close_func(
/*===================*/
- os_file_t file, /*!< in, own: handle to a file */
+ pfs_os_file_t file, /*!< in, own: handle to a file */
+ const char* src_file,/*!< in: file name where func invoked */
+ ulint src_line);/*!< in: line where the func invoked */
+/***********************************************************************//**
+NOTE! Please use the corresponding macro os_file_close_no_error_handling(),
+not directly this function!
+A performance schema instrumented wrapper function for
+os_file_close_no_error_handling().
+@return TRUE if success */
+UNIV_INLINE
+bool
+pfs_os_file_close_no_error_handling_func(
+/*===================*/
+ pfs_os_file_t file, /*!< in, own: handle to a file */
const char* src_file,/*!< in: file name where func invoked */
ulint src_line);/*!< in: line where the func invoked */
/*******************************************************************//**
@@ -731,7 +877,7 @@ UNIV_INLINE
ibool
pfs_os_file_read_func(
/*==================*/
- os_file_t file, /*!< in: handle to a file */
+ pfs_os_file_t file, /*!< in: handle to a file */
void* buf, /*!< in: buffer where to read */
os_offset_t offset, /*!< in: file offset where to read */
ulint n, /*!< in: number of bytes to read */
@@ -750,7 +896,7 @@ UNIV_INLINE
ibool
pfs_os_file_read_no_error_handling_func(
/*====================================*/
- os_file_t file, /*!< in: handle to a file */
+ pfs_os_file_t file, /*!< in: handle to a file */
void* buf, /*!< in: buffer where to read */
os_offset_t offset, /*!< in: file offset where to read */
ulint n, /*!< in: number of bytes to read */
@@ -772,7 +918,7 @@ pfs_os_aio_func(
ulint mode, /*!< in: OS_AIO_NORMAL etc. I/O mode */
const char* name, /*!< in: name of the file or path as a
null-terminated string */
- os_file_t file, /*!< in: handle to a file */
+ pfs_os_file_t file, /*!< in: handle to a file */
void* buf, /*!< in: buffer where to read or from which
to write */
os_offset_t offset, /*!< in: file offset where to read or write */
@@ -807,7 +953,7 @@ pfs_os_file_write_func(
/*===================*/
const char* name, /*!< in: name of the file or path as a
null-terminated string */
- os_file_t file, /*!< in: handle to a file */
+ pfs_os_file_t file, /*!< in: handle to a file */
const void* buf, /*!< in: buffer from which to write */
os_offset_t offset, /*!< in: file offset where to write */
ulint n, /*!< in: number of bytes to write */
@@ -824,7 +970,7 @@ UNIV_INLINE
ibool
pfs_os_file_flush_func(
/*===================*/
- os_file_t file, /*!< in, own: handle to a file */
+ pfs_os_file_t file, /*!< in, own: handle to a file */
const char* src_file,/*!< in: file name where func invoked */
ulint src_line);/*!< in: line where the func invoked */
@@ -876,16 +1022,66 @@ pfs_os_file_delete_if_exists_func(
string */
const char* src_file,/*!< in: file name where func invoked */
ulint src_line);/*!< in: line where the func invoked */
+
+/***********************************************************************//**
+NOTE! Please use the corresponding macro os_file_set_eof_at(), not
+directly this function!
+This is the performance schema instrumented wrapper function for
+os_file_set_eof_at()
+@return TRUE if success */
+UNIV_INLINE
+bool
+pfs_os_file_set_eof_at_func(
+ pfs_os_file_t file, /*!< in: handle to a file */
+ ib_uint64_t new_len,/*!< in: new file length */
+ const char* src_file,/*!< in: file name where func invoked */
+ ulint src_line);/*!< in: line where the func invoked */
+
+#ifdef HAVE_POSIX_FALLOCATE
+/***********************************************************************//**
+NOTE! Please use the corresponding macro os_file_allocate(), not
+directly this function!
+Ensures that disk space is allocated for the file.
+@return TRUE if success */
+UNIV_INLINE
+bool
+pfs_os_file_allocate_func(
+ pfs_os_file_t file, /*!< in, own: handle to a file */
+ os_offset_t offset, /*!< in: file region offset */
+ os_offset_t len, /*!< in: file region length */
+ const char* src_file,/*!< in: file name where func invoked */
+ ulint src_line);/*!< in: line where the func invoked */
+#endif
+
#endif /* UNIV_PFS_IO */
/***********************************************************************//**
-Closes a file handle.
-@return TRUE if success */
+Checks if the file is marked as invalid.
+@return TRUE if invalid */
UNIV_INTERN
-ibool
-os_file_close_no_error_handling(
-/*============================*/
- os_file_t file); /*!< in, own: handle to a file */
+bool
+os_file_is_invalid(
+ pfs_os_file_t file); /*!< in, own: handle to a file */
+
+/***********************************************************************//**
+Marks the file as invalid. */
+UNIV_INTERN
+void
+os_file_mark_invalid(
+ pfs_os_file_t* file); /*!< out: pointer to a handle to a file */
+
+/***********************************************************************//**
+Announces an intention to access file data in a specific pattern in the
+future.
+@return TRUE if success */
+UNIV_INTERN
+bool
+os_file_advise(
+ pfs_os_file_t file, /*!< in, own: handle to a file */
+ os_offset_t offset, /*!< in: file region offset */
+ os_offset_t len, /*!< in: file region length */
+ ulint advice);/*!< in: advice for access pattern */
+
/***********************************************************************//**
Gets a file size.
@return file size, or (os_offset_t) -1 on failure */
@@ -893,7 +1089,7 @@ UNIV_INTERN
os_offset_t
os_file_get_size(
/*=============*/
- os_file_t file) /*!< in: handle to a file */
+ pfs_os_file_t file) /*!< in: handle to a file */
MY_ATTRIBUTE((warn_unused_result));
/** Set the size of a newly created file.
@param[in] name file name
@@ -905,7 +1101,7 @@ UNIV_INTERN
bool
os_file_set_size(
const char* name,
- os_file_t file,
+ pfs_os_file_t file,
os_offset_t size,
bool is_sparse = false)
MY_ATTRIBUTE((nonnull, warn_unused_result));
@@ -918,14 +1114,6 @@ os_file_set_eof(
/*============*/
FILE* file); /*!< in: file to be truncated */
/***********************************************************************//**
-Truncates a file at the specified position.
-@return TRUE if success */
-UNIV_INTERN
-ibool
-os_file_set_eof_at(
- os_file_t file, /*!< in: handle to a file */
- ib_uint64_t new_len);/*!< in: new file length */
-/***********************************************************************//**
NOTE! Use the corresponding macro os_file_flush(), not directly this function!
Flushes the write buffers of a given file to the disk.
@return TRUE if success */
@@ -1155,7 +1343,7 @@ os_aio_func(
caution! */
const char* name, /*!< in: name of the file or path as a
null-terminated string */
- os_file_t file, /*!< in: handle to a file */
+ pfs_os_file_t file, /*!< in: handle to a file */
void* buf, /*!< in: buffer where to read or from which
to write */
os_offset_t offset, /*!< in: file offset where to read or write */
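
The os0file.h block above adds os_file_is_invalid(), os_file_mark_invalid() and os_file_advise() so that callers stop poking at the raw descriptor inside the instrumented handle; the log0online.cc hunks further down switch to exactly these helpers. A minimal, self-contained sketch of the invalid-handle part of that pattern follows — handle_t, my_is_invalid() and my_mark_invalid() are hypothetical stand-ins for the InnoDB names, not code from this patch:

// Editorial sketch only: handle_t, my_is_invalid() and my_mark_invalid() are
// hypothetical stand-ins for pfs_os_file_t, os_file_is_invalid() and
// os_file_mark_invalid().
#include <cstdio>

struct handle_t {                 // stands in for pfs_os_file_t
    int m_file;                   // raw descriptor kept inside the handle
};

static const int invalid_fd = -1;         // stands in for os_file_invalid

static bool my_is_invalid(handle_t h)     { return h.m_file == invalid_fd; }
static void my_mark_invalid(handle_t* h)  { h->m_file = invalid_fd; }

int main()
{
    handle_t out = { 42 };            // pretend an open call returned this
    if (!my_is_invalid(out)) {        // mirrors: if (!os_file_is_invalid(file))
        /* ... close the file here ... */
        my_mark_invalid(&out);        // mirrors: os_file_mark_invalid(&file)
    }
    std::printf("invalid now: %d\n", (int) my_is_invalid(out));
    return 0;
}
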
diff --git a/storage/xtradb/include/os0file.ic b/storage/xtradb/include/os0file.ic
index b284d7ea9ac..72ac9d9dd6a 100644
--- a/storage/xtradb/include/os0file.ic
+++ b/storage/xtradb/include/os0file.ic
@@ -1,7 +1,7 @@
/*****************************************************************************
-Copyright (c) 2010, 2011, Oracle and/or its affiliates. All Rights Reserved.
-Copyright (c) 2013, SkySQL Ab. All Rights Reserved.
+Copyright (c) 2010, 2017, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 2013, 2017, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -35,7 +35,7 @@ os_file_create_simple() which opens or creates a file.
@return own: handle to the file, not defined if error, error number
can be retrieved with os_file_get_last_error */
UNIV_INLINE
-os_file_t
+pfs_os_file_t
pfs_os_file_create_simple_func(
/*===========================*/
mysql_pfs_key_t key, /*!< in: Performance Schema Key */
@@ -50,7 +50,7 @@ pfs_os_file_create_simple_func(
const char* src_file,/*!< in: file name where func invoked */
ulint src_line)/*!< in: line where the func invoked */
{
- os_file_t file;
+ pfs_os_file_t file;
struct PSI_file_locker* locker = NULL;
PSI_file_locker_state state;
@@ -64,8 +64,9 @@ pfs_os_file_create_simple_func(
file = os_file_create_simple_func(name, create_mode,
access_type, success, atomic_writes);
- /* Regsiter the returning "file" value with the system */
- register_pfs_file_open_end(locker, file);
+ /* Register psi value for the file */
+ register_pfs_file_open_end(locker, file,
+ (*success == TRUE ? success : 0));
return(file);
}
@@ -79,7 +80,7 @@ monitor file creation/open.
@return own: handle to the file, not defined if error, error number
can be retrieved with os_file_get_last_error */
UNIV_INLINE
-os_file_t
+pfs_os_file_t
pfs_os_file_create_simple_no_error_handling_func(
/*=============================================*/
mysql_pfs_key_t key, /*!< in: Performance Schema Key */
@@ -96,7 +97,7 @@ pfs_os_file_create_simple_no_error_handling_func(
const char* src_file,/*!< in: file name where func invoked */
ulint src_line)/*!< in: line where the func invoked */
{
- os_file_t file;
+ pfs_os_file_t file;
struct PSI_file_locker* locker = NULL;
PSI_file_locker_state state;
@@ -108,9 +109,10 @@ pfs_os_file_create_simple_no_error_handling_func(
name, src_file, src_line);
file = os_file_create_simple_no_error_handling_func(
- name, create_mode, access_type, success, atomic_writes);
+ name, create_mode, access_type, success, atomic_writes);
- register_pfs_file_open_end(locker, file);
+ register_pfs_file_open_end(locker, file,
+ (*success == TRUE ? success : 0));
return(file);
}
@@ -123,7 +125,7 @@ Add instrumentation to monitor file creation/open.
@return own: handle to the file, not defined if error, error number
can be retrieved with os_file_get_last_error */
UNIV_INLINE
-os_file_t
+pfs_os_file_t
pfs_os_file_create_func(
/*====================*/
mysql_pfs_key_t key, /*!< in: Performance Schema Key */
@@ -144,7 +146,7 @@ pfs_os_file_create_func(
const char* src_file,/*!< in: file name where func invoked */
ulint src_line)/*!< in: line where the func invoked */
{
- os_file_t file;
+ pfs_os_file_t file;
struct PSI_file_locker* locker = NULL;
PSI_file_locker_state state;
@@ -158,7 +160,8 @@ pfs_os_file_create_func(
file = os_file_create_func(name, create_mode, purpose, type,
success, atomic_writes);
- register_pfs_file_open_end(locker, file);
+ register_pfs_file_open_end(locker, file,
+ (*success == TRUE ? success : 0));
return(file);
}
@@ -172,7 +175,7 @@ UNIV_INLINE
ibool
pfs_os_file_close_func(
/*===================*/
- os_file_t file, /*!< in, own: handle to a file */
+ pfs_os_file_t file, /*!< in, own: handle to a file */
const char* src_file,/*!< in: file name where func invoked */
ulint src_line)/*!< in: line where the func invoked */
{
@@ -190,6 +193,34 @@ pfs_os_file_close_func(
return(result);
}
+/***********************************************************************//**
+NOTE! Please use the corresponding macro os_file_close_no_error_handling(),
+not directly this function!
+A performance schema instrumented wrapper function for
+os_file_close_no_error_handling().
+@return TRUE if success */
+UNIV_INLINE
+bool
+pfs_os_file_close_no_error_handling_func(
+/*===================*/
+ pfs_os_file_t file, /*!< in, own: handle to a file */
+ const char* src_file,/*!< in: file name where func invoked */
+ ulint src_line)/*!< in: line where the func invoked */
+{
+ bool result;
+ struct PSI_file_locker* locker = NULL;
+ PSI_file_locker_state state;
+
+ /* register the file close */
+ register_pfs_file_io_begin(&state, locker, file, 0, PSI_FILE_CLOSE,
+ src_file, src_line);
+
+ result = os_file_close_no_error_handling_func(file);
+
+ register_pfs_file_io_end(locker, 0);
+
+ return(result);
+}
/*******************************************************************//**
NOTE! Please use the corresponding macro os_aio(), not directly this
@@ -206,7 +237,7 @@ pfs_os_aio_func(
ulint mode, /*!< in: OS_AIO_NORMAL etc. I/O mode */
const char* name, /*!< in: name of the file or path as a
null-terminated string */
- os_file_t file, /*!< in: handle to a file */
+ pfs_os_file_t file, /*!< in: handle to a file */
void* buf, /*!< in: buffer where to read or from which
to write */
os_offset_t offset, /*!< in: file offset where to read or write */
@@ -260,7 +291,7 @@ UNIV_INLINE
ibool
pfs_os_file_read_func(
/*==================*/
- os_file_t file, /*!< in: handle to a file */
+ pfs_os_file_t file, /*!< in: handle to a file */
void* buf, /*!< in: buffer where to read */
os_offset_t offset, /*!< in: file offset where to read */
ulint n, /*!< in: number of bytes to read */
@@ -294,7 +325,7 @@ UNIV_INLINE
ibool
pfs_os_file_read_no_error_handling_func(
/*====================================*/
- os_file_t file, /*!< in: handle to a file */
+ pfs_os_file_t file, /*!< in: handle to a file */
void* buf, /*!< in: buffer where to read */
os_offset_t offset, /*!< in: file offset where to read */
ulint n, /*!< in: number of bytes to read */
@@ -315,6 +346,42 @@ pfs_os_file_read_no_error_handling_func(
return(result);
}
+/** NOTE! Please use the corresponding macro
+os_file_read_no_error_handling_int_fd(), not directly this function!
+This is the performance schema instrumented wrapper function for
+os_file_read_no_error_handling_int_fd_func() which requests a
+synchronous read operation.
+@return TRUE if request was successful, FALSE if fail */
+UNIV_INLINE
+ibool
+pfs_os_file_read_no_error_handling_int_fd_func(
+ int file, /*!< in: handle to a file */
+ void* buf, /*!< in: buffer where to read */
+ os_offset_t offset, /*!< in: file offset where to read */
+ ulint n, /*!< in: number of bytes to read */
+ const char* src_file,/*!< in: file name where func invoked */
+ ulint src_line)/*!< in: line where the func invoked */
+{
+ PSI_file_locker_state state;
+ struct PSI_file_locker* locker;
+
+ locker = PSI_FILE_CALL(get_thread_file_descriptor_locker)(
+ &state, file, PSI_FILE_READ);
+ if (locker != NULL) {
+ PSI_FILE_CALL(start_file_wait)(
+ locker, n,
+ __FILE__, __LINE__);
+ }
+ ibool result = os_file_read_no_error_handling_func(
+ OS_FILE_FROM_FD(file), buf, offset, n);
+
+ if (locker != NULL) {
+ PSI_FILE_CALL(end_file_wait)(locker, n);
+ }
+
+ return(result);
+}
+
/*******************************************************************//**
NOTE! Please use the corresponding macro os_file_write(), not directly
this function!
@@ -327,7 +394,7 @@ pfs_os_file_write_func(
/*===================*/
const char* name, /*!< in: name of the file or path as a
null-terminated string */
- os_file_t file, /*!< in: handle to a file */
+ pfs_os_file_t file, /*!< in: handle to a file */
const void* buf, /*!< in: buffer from which to write */
os_offset_t offset, /*!< in: file offset where to write */
ulint n, /*!< in: number of bytes to write */
@@ -348,6 +415,43 @@ pfs_os_file_write_func(
return(result);
}
+/** NOTE! Please use the corresponding macro os_file_write(), not
+directly this function!
+This is the performance schema instrumented wrapper function for
+os_file_write() which requests a synchronous write operation.
+@return TRUE if request was successful, FALSE if fail */
+UNIV_INLINE
+ibool
+pfs_os_file_write_int_fd_func(
+ const char* name, /*!< in: name of the file or path as a
+ null-terminated string */
+ int file, /*!< in: handle to a file */
+ const void* buf, /*!< in: buffer from which to write */
+ os_offset_t offset, /*!< in: file offset where to write */
+ ulint n, /*!< in: number of bytes to write */
+ const char* src_file,/*!< in: file name where func invoked */
+ ulint src_line)/*!< in: line where the func invoked */
+{
+ PSI_file_locker_state state;
+ struct PSI_file_locker* locker = NULL;
+
+ locker = PSI_FILE_CALL(get_thread_file_descriptor_locker)(
+ &state, file, PSI_FILE_WRITE);
+ if (locker != NULL) {
+ PSI_FILE_CALL(start_file_wait)(
+ locker, n,
+ __FILE__, __LINE__);
+ }
+ ibool result = os_file_write_func(
+ name, OS_FILE_FROM_FD(file), buf, offset, n);
+
+ if (locker != NULL) {
+ PSI_FILE_CALL(end_file_wait)(locker, n);
+ }
+
+ return(result);
+}
+
/***********************************************************************//**
NOTE! Please use the corresponding macro os_file_flush(), not directly
this function!
@@ -358,7 +462,7 @@ UNIV_INLINE
ibool
pfs_os_file_flush_func(
/*===================*/
- os_file_t file, /*!< in, own: handle to a file */
+ pfs_os_file_t file, /*!< in, own: handle to a file */
const char* src_file,/*!< in: file name where func invoked */
ulint src_line)/*!< in: line where the func invoked */
{
@@ -396,12 +500,12 @@ pfs_os_file_rename_func(
struct PSI_file_locker* locker = NULL;
PSI_file_locker_state state;
- register_pfs_file_open_begin(&state, locker, key, PSI_FILE_RENAME, newpath,
+ register_pfs_file_rename_begin(&state, locker, key, PSI_FILE_RENAME, newpath,
src_file, src_line);
result = os_file_rename_func(oldpath, newpath);
- register_pfs_file_open_end(locker, 0);
+ register_pfs_file_rename_end(locker, 0);
return(result);
}
@@ -465,4 +569,61 @@ pfs_os_file_delete_if_exists_func(
return(result);
}
+
+/***********************************************************************//**
+NOTE! Please use the corresponding macro os_file_set_eof_at(), not
+directly this function!
+This is the performance schema instrumented wrapper function for
+os_file_set_eof_at()
+@return TRUE if success */
+UNIV_INLINE
+bool
+pfs_os_file_set_eof_at_func(
+ pfs_os_file_t file, /*!< in: handle to a file */
+ ib_uint64_t new_len,/*!< in: new file length */
+ const char* src_file,/*!< in: file name where func invoked */
+ ulint src_line)/*!< in: line where the func invoked */
+{
+ bool result;
+ struct PSI_file_locker* locker = NULL;
+ PSI_file_locker_state state;
+
+ register_pfs_file_io_begin(&state, locker, file, 0, PSI_FILE_CHSIZE,
+ src_file, src_line);
+ result = os_file_set_eof_at_func(file, new_len);
+
+ register_pfs_file_io_end(locker, 0);
+
+ return(result);
+}
+
+#ifdef HAVE_POSIX_FALLOCATE
+/***********************************************************************//**
+NOTE! Please use the corresponding macro os_file_allocate(), not
+directly this function!
+Ensures that disk space is allocated for the file.
+@return TRUE if success */
+UNIV_INLINE
+bool
+pfs_os_file_allocate_func(
+ pfs_os_file_t file, /*!< in, own: handle to a file */
+ os_offset_t offset, /*!< in: file region offset */
+ os_offset_t len, /*!< in: file region length */
+ const char* src_file,/*!< in: file name where func invoked */
+ ulint src_line)/*!< in: line where the func invoked */
+{
+ bool result;
+ struct PSI_file_locker* locker = NULL;
+ PSI_file_locker_state state;
+
+ register_pfs_file_io_begin(&state, locker, file, 0, PSI_FILE_CHSIZE,
+ src_file, src_line);
+ result = os_file_allocate_func(file, offset, len);
+
+ register_pfs_file_io_end(locker, 0);
+
+ return(result);
+}
+#endif
+
#endif /* UNIV_PFS_IO */
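
Every pfs_*_func wrapper added above has the same shape: register the start of a Performance Schema I/O wait, call the plain *_func() implementation, then register the end of the wait; the os_file_*() names that the doc comments tell callers to use are macros that forward here together with __FILE__ and __LINE__ when UNIV_PFS_IO is defined. A hedged, self-contained sketch of that forwarding idiom, using hypothetical names (pfs_begin, pfs_end, my_flush) rather than the real register_pfs_* helpers:

// Editorial sketch of the "public name is a macro that forwards to the
// instrumented wrapper" idiom; only the shape matches the wrappers above.
#include <cstdio>

static void pfs_begin(const char* file, int line)
{
    std::printf("I/O wait starts at %s:%d\n", file, line);
}
static void pfs_end()
{
    std::printf("I/O wait ends\n");
}

static bool my_flush_func(int fd)          // the uninstrumented work
{
    (void) fd;
    return true;
}

static bool pfs_my_flush_func(int fd, const char* src_file, int src_line)
{
    pfs_begin(src_file, src_line);         // register_pfs_file_io_begin(...)
    bool ok = my_flush_func(fd);
    pfs_end();                             // register_pfs_file_io_end(...)
    return ok;
}

// With instrumentation compiled in, callers keep writing my_flush(fd):
#define my_flush(fd) pfs_my_flush_func(fd, __FILE__, __LINE__)

int main()
{
    return my_flush(3) ? 0 : 1;
}
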
diff --git a/storage/xtradb/include/os0sync.h b/storage/xtradb/include/os0sync.h
index 62f651413e1..ce03f6a2124 100644
--- a/storage/xtradb/include/os0sync.h
+++ b/storage/xtradb/include/os0sync.h
@@ -959,7 +959,14 @@ struct MY_ALIGNED(CACHE_LINE_SIZE) simple_counter
{
compile_time_assert(!atomic || sizeof(Type) == sizeof(ulint));
if (atomic) {
- return os_atomic_increment_ulint(&m_counter, i);
+ /* GCC would perform a type check in this code
+ also in case the template is instantiated with
+ simple_counter<Type=not_ulint, atomic=false>.
+ On Solaris, os_atomic_increment_ulint() maps
+ to atomic_add_long_nv(), which expects the
+ parameter to be correctly typed. */
+ return os_atomic_increment_ulint(
+ reinterpret_cast<ulint*>(&m_counter), i);
} else {
return m_counter += i;
}
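
The os0sync.h hunk only adds a reinterpret_cast plus the comment explaining it: both branches of the if must type-check for every instantiation of the template, so the call to the ulint-typed atomic helper has to compile even when the counter is instantiated with some other Type and atomic == false. A small standalone illustration of that C++ rule, with __sync_add_and_fetch standing in for os_atomic_increment_ulint():

// Editorial sketch of why the cast is needed; atomic_add_ul and the counter
// template here are simplified stand-ins, not the InnoDB simple_counter.
#include <cstdio>

static unsigned long atomic_add_ul(unsigned long* ptr, unsigned long n)
{
    return __sync_add_and_fetch(ptr, n);
}

template <typename Type, bool atomic>
struct counter {
    Type m_counter;

    Type add(Type i)
    {
        if (atomic) {
            // Without the reinterpret_cast this line would not compile
            // for counter<unsigned char, false>, even though the branch
            // is never taken at run time.
            return (Type) atomic_add_ul(
                reinterpret_cast<unsigned long*>(&m_counter), i);
        } else {
            return m_counter += i;
        }
    }
};

int main()
{
    counter<unsigned long, true>  a = { 0 };
    counter<unsigned char, false> b = { 0 };
    std::printf("%lu %u\n", a.add(5), (unsigned) b.add(3));
    return 0;
}
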
diff --git a/storage/xtradb/include/page0zip.ic b/storage/xtradb/include/page0zip.ic
index 6c7d8cd32c7..9a583086925 100644
--- a/storage/xtradb/include/page0zip.ic
+++ b/storage/xtradb/include/page0zip.ic
@@ -2,6 +2,7 @@
Copyright (c) 2005, 2013, Oracle and/or its affiliates. All Rights Reserved.
Copyright (c) 2012, Facebook Inc.
+Copyright (c) 2017, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -172,7 +173,8 @@ page_zip_rec_needs_ext(
ignored if zip_size == 0 */
ulint zip_size) /*!< in: compressed page size in bytes, or 0 */
{
- ut_ad(rec_size > comp ? REC_N_NEW_EXTRA_BYTES : REC_N_OLD_EXTRA_BYTES);
+ ut_ad(rec_size
+ > (comp ? REC_N_NEW_EXTRA_BYTES : REC_N_OLD_EXTRA_BYTES));
ut_ad(ut_is_2pow(zip_size));
ut_ad(comp || !zip_size);
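
The page0zip.ic change is a precedence fix rather than a reformat: '>' binds tighter than '?:', so the old assertion parsed as (rec_size > comp) ? REC_N_NEW_EXTRA_BYTES : REC_N_OLD_EXTRA_BYTES and always evaluated to one of the two non-zero constants, meaning it could never fire. A tiny standalone illustration of the pitfall (the constants here are made up):

// Editorial illustration of the ternary-precedence bug fixed above.
#include <cstdio>

int main()
{
    unsigned rec_size = 10;
    unsigned comp = 1;
    const unsigned NEW_EXTRA = 5;
    const unsigned OLD_EXTRA = 6;

    // Old form: parsed as (rec_size > comp) ? NEW_EXTRA : OLD_EXTRA,
    // i.e. always 5 or 6, so asserting on it could never fail.
    unsigned old_parse = rec_size > comp ? NEW_EXTRA : OLD_EXTRA;

    // Fixed form: rec_size is compared with the constant chosen by comp.
    bool fixed = rec_size > (comp ? NEW_EXTRA : OLD_EXTRA);

    std::printf("old parse yields %u, fixed comparison is %d\n",
                old_parse, (int) fixed);
    return 0;
}
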
diff --git a/storage/xtradb/include/row0mysql.h b/storage/xtradb/include/row0mysql.h
index 2bd17980896..a8503a5cfda 100644
--- a/storage/xtradb/include/row0mysql.h
+++ b/storage/xtradb/include/row0mysql.h
@@ -614,7 +614,7 @@ struct mysql_row_templ_t {
Innobase record in the current index;
not defined if template_type is
ROW_MYSQL_WHOLE_ROW */
- ibool rec_field_is_prefix; /* is this field in a prefix index? */
+ bool rec_field_is_prefix; /* is this field in a prefix index? */
ulint rec_prefix_field_no; /* record field, even if just a
prefix; same as rec_field_no when not a
prefix, otherwise rec_field_no is
diff --git a/storage/xtradb/include/srv0srv.h b/storage/xtradb/include/srv0srv.h
index cf7824d91e7..4e98ce0f1cb 100644
--- a/storage/xtradb/include/srv0srv.h
+++ b/storage/xtradb/include/srv0srv.h
@@ -1,6 +1,6 @@
/*****************************************************************************
-Copyright (c) 1995, 2016, Oracle and/or its affiliates. All rights reserved.
+Copyright (c) 1995, 2017, Oracle and/or its affiliates. All rights reserved.
Copyright (c) 2008, 2009, Google Inc.
Copyright (c) 2009, Percona Inc.
Copyright (c) 2013, 2017, MariaDB Corporation.
@@ -1096,6 +1096,13 @@ UNIV_INTERN
void
srv_purge_wakeup();
+/** Check whether given space id is undo tablespace id
+@param[in] space_id space id to check
+@return true if it is undo tablespace else false. */
+bool
+srv_is_undo_tablespace(
+ ulint space_id);
+
/** Status variables to be passed to MySQL */
struct export_var_t{
ulint innodb_adaptive_hash_hash_searches;
diff --git a/storage/xtradb/include/srv0start.h b/storage/xtradb/include/srv0start.h
index 963b767f0fb..b055a9d834f 100644
--- a/storage/xtradb/include/srv0start.h
+++ b/storage/xtradb/include/srv0start.h
@@ -1,6 +1,7 @@
/*****************************************************************************
-Copyright (c) 1995, 2016, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 1995, 2017, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 2017, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -75,22 +76,12 @@ are not found and the user wants.
@return DB_SUCCESS or error code */
UNIV_INTERN
dberr_t
-innobase_start_or_create_for_mysql(void);
-/*====================================*/
-/****************************************************************//**
-Shuts down the Innobase database.
-@return DB_SUCCESS or error code */
-UNIV_INTERN
-dberr_t
-innobase_shutdown_for_mysql(void);
+innobase_start_or_create_for_mysql();
-/********************************************************************
-Signal all per-table background threads to shutdown, and wait for them to do
-so. */
+/** Shut down InnoDB. */
UNIV_INTERN
void
-srv_shutdown_table_bg_threads(void);
-/*=============================*/
+innodb_shutdown();
/*************************************************************//**
Copy the file path component of the physical file to parameter. It will
@@ -139,6 +130,8 @@ extern ibool srv_startup_is_before_trx_rollback_phase;
/** TRUE if a raw partition is in use */
extern ibool srv_start_raw_disk_in_use;
+/** Undo tablespaces starts with space_id. */
+extern ulint srv_undo_space_id_start;
/** Shutdown state */
enum srv_shutdown_state {
@@ -156,6 +149,9 @@ enum srv_shutdown_state {
SRV_SHUTDOWN_EXIT_THREADS/*!< Exit all threads */
};
+/** Whether any undo log records can be generated */
+extern bool srv_undo_sources;
+
/** At a shutdown this value climbs from SRV_SHUTDOWN_NONE to
SRV_SHUTDOWN_CLEANUP and then to SRV_SHUTDOWN_LAST_PHASE, and so on */
extern enum srv_shutdown_state srv_shutdown_state;
diff --git a/storage/xtradb/include/trx0rec.h b/storage/xtradb/include/trx0rec.h
index 359937e3583..a6e202d04e4 100644
--- a/storage/xtradb/include/trx0rec.h
+++ b/storage/xtradb/include/trx0rec.h
@@ -1,6 +1,7 @@
/*****************************************************************************
Copyright (c) 1996, 2016, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 2017, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -212,10 +213,6 @@ UNIV_INTERN
dberr_t
trx_undo_report_row_operation(
/*==========================*/
- ulint flags, /*!< in: if BTR_NO_UNDO_LOG_FLAG bit is
- set, does nothing */
- ulint op_type, /*!< in: TRX_UNDO_INSERT_OP or
- TRX_UNDO_MODIFY_OP */
que_thr_t* thr, /*!< in: query thread */
dict_index_t* index, /*!< in: clustered index */
const dtuple_t* clust_entry, /*!< in: in the case of an insert,
@@ -233,7 +230,7 @@ trx_undo_report_row_operation(
inserted undo log record,
0 if BTR_NO_UNDO_LOG
flag was specified */
- MY_ATTRIBUTE((nonnull(3,4,10), warn_unused_result));
+ MY_ATTRIBUTE((nonnull(1,2,8), warn_unused_result));
/******************************************************************//**
Copies an undo record to heap. This function can be called if we know that
the undo log record exists.
@@ -313,10 +310,6 @@ record */
storage fields: used by purge to
free the external storage */
-/* Operation type flags used in trx_undo_report_row_operation */
-#define TRX_UNDO_INSERT_OP 1
-#define TRX_UNDO_MODIFY_OP 2
-
#ifndef UNIV_NONINL
#include "trx0rec.ic"
#endif
diff --git a/storage/xtradb/include/trx0rseg.h b/storage/xtradb/include/trx0rseg.h
index b9c84ef2b06..e2853df7045 100644
--- a/storage/xtradb/include/trx0rseg.h
+++ b/storage/xtradb/include/trx0rseg.h
@@ -1,6 +1,7 @@
/*****************************************************************************
Copyright (c) 1996, 2011, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 2017, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -124,13 +125,13 @@ trx_rseg_mem_free(
/*==============*/
trx_rseg_t* rseg); /*!< in, own: instance to free */
-/*********************************************************************
-Creates a rollback segment. */
+/** Create a rollback segment.
+@param[in] space undo tablespace ID
+@return pointer to new rollback segment
+@retval NULL on failure */
UNIV_INTERN
trx_rseg_t*
-trx_rseg_create(
-/*============*/
- ulint space); /*!< in: id of UNDO tablespace */
+trx_rseg_create(ulint space);
/********************************************************************
Get the number of unique rollback tablespaces in use except space id 0.
diff --git a/storage/xtradb/include/trx0trx.h b/storage/xtradb/include/trx0trx.h
index 766d61039b4..1b490eca2af 100644
--- a/storage/xtradb/include/trx0trx.h
+++ b/storage/xtradb/include/trx0trx.h
@@ -1,8 +1,7 @@
/*****************************************************************************
Copyright (c) 1996, 2016, Oracle and/or its affiliates. All Rights Reserved.
-Copyright (c) 2015, 2016, MariaDB Corporation. All Rights Reserved.
-
+Copyright (c) 2015, 2017, MariaDB Corporation
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -334,6 +333,24 @@ trx_print_low(
/*!< in: mem_heap_get_size(trx->lock.lock_heap) */
MY_ATTRIBUTE((nonnull));
+#ifdef WITH_WSREP
+/**********************************************************************//**
+Prints info about a transaction.
+Transaction information may be retrieved without having trx_sys->mutex acquired
+so it may not be completely accurate. The caller must own lock_sys->mutex
+and the trx must have some locks to make sure that it does not escape
+without locking lock_sys->mutex. */
+UNIV_INTERN
+void
+wsrep_trx_print_locking(
+/*==============*/
+ FILE* f, /*!< in: output stream */
+ const trx_t* trx, /*!< in: transaction */
+ ulint max_query_len) /*!< in: max query length to print,
+ or 0 to use the default max length */
+ MY_ATTRIBUTE((nonnull));
+#endif /* WITH_WSREP */
+
/**********************************************************************//**
Prints info about a transaction.
The caller must hold lock_sys->mutex and trx_sys->mutex.
diff --git a/storage/xtradb/include/trx0xa.h b/storage/xtradb/include/trx0xa.h
index 7caddfb7ba4..4d5adc68dcd 100644
--- a/storage/xtradb/include/trx0xa.h
+++ b/storage/xtradb/include/trx0xa.h
@@ -1,6 +1,6 @@
/*****************************************************************************
-Copyright (c) 1995, 2009, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 1995, 2016, Oracle and/or its affiliates. All Rights Reserved.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -24,6 +24,8 @@ this program; if not, write to the Free Software Foundation, Inc.,
#ifndef XA_H
#define XA_H
+#include "handler.h"
+
/*
* Transaction branch identification: XID and NULLXID:
*/
@@ -35,17 +37,6 @@ this program; if not, write to the Free Software Foundation, Inc.,
#define MAXGTRIDSIZE 64 /*!< maximum size in bytes of gtrid */
#define MAXBQUALSIZE 64 /*!< maximum size in bytes of bqual */
-/** X/Open XA distributed transaction identifier */
-struct xid_t {
- long formatID; /*!< format identifier; -1
- means that the XID is null */
- long gtrid_length; /*!< value from 1 through 64 */
- long bqual_length; /*!< value from 1 through 64 */
- char data[XIDDATASIZE]; /*!< distributed transaction
- identifier */
-};
-/** X/Open XA distributed transaction identifier */
-typedef struct xid_t XID;
#endif
/** X/Open XA distributed transaction status codes */
/* @{ */
diff --git a/storage/xtradb/include/univ.i b/storage/xtradb/include/univ.i
index 310053b9145..23c8c0a659d 100644
--- a/storage/xtradb/include/univ.i
+++ b/storage/xtradb/include/univ.i
@@ -45,10 +45,10 @@ Created 1/20/1994 Heikki Tuuri
#define INNODB_VERSION_MAJOR 5
#define INNODB_VERSION_MINOR 6
-#define INNODB_VERSION_BUGFIX 35
+#define INNODB_VERSION_BUGFIX 36
#ifndef PERCONA_INNODB_VERSION
-#define PERCONA_INNODB_VERSION 80.0
+#define PERCONA_INNODB_VERSION 82.0
#endif
/* Enable UNIV_LOG_ARCHIVE in XtraDB */
@@ -146,14 +146,8 @@ HAVE_PSI_INTERFACE is defined. */
#if defined HAVE_PSI_INTERFACE && !defined UNIV_HOTBACKUP
# define UNIV_PFS_MUTEX
# define UNIV_PFS_RWLOCK
-/* For I/O instrumentation, performance schema rely
-on a native descriptor to identify the file, this
-descriptor could conflict with our OS level descriptor.
-Disable IO instrumentation on Windows until this is
-resolved */
-# ifndef __WIN__
-# define UNIV_PFS_IO
-# endif
+
+# define UNIV_PFS_IO
# define UNIV_PFS_THREAD
/* There are mutexes/rwlocks that we want to exclude from
diff --git a/storage/xtradb/include/ut0rnd.ic b/storage/xtradb/include/ut0rnd.ic
index 024c59e553b..987dfac03c1 100644
--- a/storage/xtradb/include/ut0rnd.ic
+++ b/storage/xtradb/include/ut0rnd.ic
@@ -1,6 +1,7 @@
/*****************************************************************************
Copyright (c) 1994, 2009, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 2017, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -237,16 +238,22 @@ ut_fold_binary(
switch (len & 0x7) {
case 7:
fold = ut_fold_ulint_pair(fold, (ulint)(*str++));
+ /* fall through */
case 6:
fold = ut_fold_ulint_pair(fold, (ulint)(*str++));
+ /* fall through */
case 5:
fold = ut_fold_ulint_pair(fold, (ulint)(*str++));
+ /* fall through */
case 4:
fold = ut_fold_ulint_pair(fold, (ulint)(*str++));
+ /* fall through */
case 3:
fold = ut_fold_ulint_pair(fold, (ulint)(*str++));
+ /* fall through */
case 2:
fold = ut_fold_ulint_pair(fold, (ulint)(*str++));
+ /* fall through */
case 1:
fold = ut_fold_ulint_pair(fold, (ulint)(*str++));
}
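
The ut0rnd.ic hunk (like the row0import.cc and row0log.cc hunks later in this diff) only inserts /* fall through */ comments on intentionally falling-through cases, presumably to document the intent and to satisfy compilers whose implicit-fallthrough warning recognises such comments. A minimal sketch of the idiom outside InnoDB; fold_tail() is a made-up reduction, not the real hash fold:

// Editorial sketch of the documented fall-through idiom used in this patch.
#include <cstdio>

static unsigned fold_tail(const unsigned char* p, unsigned len, unsigned fold)
{
    switch (len & 3) {
    case 3:
        fold += *p++;
        /* fall through */
    case 2:
        fold += *p++;
        /* fall through */
    case 1:
        fold += *p++;
    }
    return fold;
}

int main()
{
    const unsigned char buf[3] = { 1, 2, 3 };
    std::printf("%u\n", fold_tail(buf, 3, 0));   /* prints 6 */
    return 0;
}
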
diff --git a/storage/xtradb/lock/lock0lock.cc b/storage/xtradb/lock/lock0lock.cc
index 717fbf02536..71612f66fcd 100644
--- a/storage/xtradb/lock/lock0lock.cc
+++ b/storage/xtradb/lock/lock0lock.cc
@@ -921,12 +921,18 @@ lock_reset_lock_and_trx_wait(
const char* stmt2=NULL;
size_t stmt_len;
trx_id_t trx_id = 0;
- stmt = innobase_get_stmt(lock->trx->mysql_thd, &stmt_len);
+ stmt = lock->trx->mysql_thd
+ ? innobase_get_stmt(lock->trx->mysql_thd, &stmt_len)
+ : NULL;
if (lock->trx->lock.wait_lock &&
lock->trx->lock.wait_lock->trx) {
trx_id = lock->trx->lock.wait_lock->trx->id;
- stmt2 = innobase_get_stmt(lock->trx->lock.wait_lock->trx->mysql_thd, &stmt_len);
+ stmt2 = lock->trx->lock.wait_lock->trx->mysql_thd
+ ? innobase_get_stmt(
+ lock->trx->lock.wait_lock
+ ->trx->mysql_thd, &stmt_len)
+ : NULL;
}
ib_logf(IB_LOG_LEVEL_INFO,
@@ -5636,13 +5642,11 @@ lock_rec_unlock(
trx_mutex_exit(trx);
stmt = innobase_get_stmt(trx->mysql_thd, &stmt_len);
- ut_print_timestamp(stderr);
- fprintf(stderr,
- " InnoDB: Error: unlock row could not"
- " find a %lu mode lock on the record\n",
- (ulong) lock_mode);
- ut_print_timestamp(stderr);
- fprintf(stderr, " InnoDB: current statement: %.*s\n",
+
+ ib_logf(IB_LOG_LEVEL_ERROR,
+ "unlock row could not find a %u mode lock on the record;"
+ " statement=%.*s",
+ lock_mode,
(int) stmt_len, stmt);
return;
diff --git a/storage/xtradb/log/log0log.cc b/storage/xtradb/log/log0log.cc
index 309de7daaf8..3252cd793c9 100644
--- a/storage/xtradb/log/log0log.cc
+++ b/storage/xtradb/log/log0log.cc
@@ -1,6 +1,6 @@
/*****************************************************************************
-Copyright (c) 1995, 2016, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 1995, 2017, Oracle and/or its affiliates. All Rights Reserved.
Copyright (c) 2009, Google Inc.
Copyright (c) 2014, 2017, MariaDB Corporation.
@@ -55,12 +55,13 @@ Created 12/9/1995 Heikki Tuuri
#include "mem0mem.h"
#include "buf0buf.h"
#include "buf0flu.h"
-#include "srv0srv.h"
#include "lock0lock.h"
#include "log0recv.h"
#include "fil0fil.h"
#include "dict0boot.h"
-#include "dict0stats_bg.h" /* dict_stats_event */
+#include "dict0stats_bg.h"
+#include "dict0stats_bg.h"
+#include "btr0defragment.h"
#include "srv0srv.h"
#include "srv0start.h"
#include "trx0sys.h"
@@ -2804,7 +2805,7 @@ log_group_archive(
/*==============*/
log_group_t* group) /*!< in: log group */
{
- os_file_t file_handle;
+ pfs_os_file_t file_handle;
lsn_t start_lsn;
lsn_t end_lsn;
char name[OS_FILE_MAX_PATH];
@@ -3618,6 +3619,8 @@ loop:
thread_name = "lock_wait_timeout_thread";
} else if (srv_buf_dump_thread_active) {
thread_name = "buf_dump_thread";
+ } else if (btr_defragment_thread_active) {
+ thread_name = "btr_defragment_thread";
} else if (srv_fast_shutdown != 2 && trx_rollback_or_clean_is_active) {
thread_name = "rollback of recovered transactions";
} else {
@@ -3639,8 +3642,8 @@ wait_suspend_loop:
switch (srv_get_active_thread_type()) {
case SRV_NONE:
- srv_shutdown_state = SRV_SHUTDOWN_FLUSH_PHASE;
if (!srv_n_fil_crypt_threads_started) {
+ srv_shutdown_state = SRV_SHUTDOWN_FLUSH_PHASE;
break;
}
os_event_set(fil_crypt_threads_event);
@@ -3820,7 +3823,8 @@ wait_suspend_loop:
ut_a(freed);
ut_a(lsn == log_sys->lsn);
- ut_ad(lsn == log_sys->last_checkpoint_lsn);
+ ut_ad(srv_force_recovery >= SRV_FORCE_NO_LOG_REDO
+ || lsn == log_sys->last_checkpoint_lsn);
if (lsn < srv_start_lsn) {
ib_logf(IB_LOG_LEVEL_ERROR,
@@ -3832,9 +3836,14 @@ wait_suspend_loop:
srv_shutdown_lsn = lsn;
if (!srv_read_only_mode) {
- fil_write_flushed_lsn_to_data_files(lsn, 0);
+ dberr_t err = fil_write_flushed_lsn(lsn);
- fil_flush_file_spaces(FIL_TABLESPACE);
+ if (err != DB_SUCCESS) {
+ ib_logf(IB_LOG_LEVEL_ERROR,
+ "Failed to write flush lsn to the "
+ "system tablespace at shutdown err=%s",
+ ut_strerr(err));
+ }
}
fil_close_all_files();
diff --git a/storage/xtradb/log/log0online.cc b/storage/xtradb/log/log0online.cc
index 74f2e2360a8..27382977e5c 100644
--- a/storage/xtradb/log/log0online.cc
+++ b/storage/xtradb/log/log0online.cc
@@ -329,7 +329,7 @@ log_online_read_last_tracked_lsn(void)
lsn_t result;
os_offset_t read_offset = log_bmp_sys->out.offset;
- while (!checksum_ok && read_offset > 0 && !is_last_page)
+ while ((!checksum_ok || !is_last_page) && read_offset > 0)
{
read_offset -= MODIFIED_PAGE_BLOCK_SIZE;
log_bmp_sys->out.offset = read_offset;
@@ -554,9 +554,9 @@ log_online_rotate_bitmap_file(
lsn_t next_file_start_lsn) /*!<in: the start LSN name
part */
{
- if (log_bmp_sys->out.file != os_file_invalid) {
+ if (!os_file_is_invalid(log_bmp_sys->out.file)) {
os_file_close(log_bmp_sys->out.file);
- log_bmp_sys->out.file = os_file_invalid;
+ os_file_mark_invalid(&log_bmp_sys->out.file);
}
log_bmp_sys->out_seq_num++;
log_online_make_bitmap_name(next_file_start_lsn);
@@ -723,7 +723,11 @@ log_online_read_init(void)
}
last_tracked_lsn = log_online_read_last_tracked_lsn();
+ /* Do not rotate if we truncated the file to zero length - we
+ can just start writing there */
+ const bool need_rotate = (last_tracked_lsn != 0);
if (!last_tracked_lsn) {
+
last_tracked_lsn = last_file_start_lsn;
}
@@ -735,7 +739,8 @@ log_online_read_init(void)
} else {
file_start_lsn = tracking_start_lsn;
}
- ut_a(log_online_rotate_bitmap_file(file_start_lsn));
+ ut_a(!need_rotate
+ || log_online_rotate_bitmap_file(file_start_lsn));
if (last_tracked_lsn < tracking_start_lsn) {
@@ -773,9 +778,9 @@ log_online_read_shutdown(void)
ib_rbt_node_t *free_list_node = log_bmp_sys->page_free_list;
- if (log_bmp_sys->out.file != os_file_invalid) {
+ if (!os_file_is_invalid(log_bmp_sys->out.file)) {
os_file_close(log_bmp_sys->out.file);
- log_bmp_sys->out.file = os_file_invalid;
+ os_file_mark_invalid(&log_bmp_sys->out.file);
}
rbt_free(log_bmp_sys->modified_pages);
@@ -1114,6 +1119,18 @@ log_online_write_bitmap_page(
}
});
+ /* A crash injection site that ensures last checkpoint LSN > last
+ tracked LSN, so that LSN tracking for this interval is tested. */
+ DBUG_EXECUTE_IF("crash_before_bitmap_write",
+ {
+ ulint space_id
+ = mach_read_from_4(block
+ + MODIFIED_PAGE_SPACE_ID);
+ if (space_id > 0)
+ DBUG_SUICIDE();
+ });
+
+
ibool success = os_file_write(log_bmp_sys->out.name,
log_bmp_sys->out.file, block,
log_bmp_sys->out.offset,
@@ -1137,10 +1154,8 @@ log_online_write_bitmap_page(
return FALSE;
}
-#ifdef UNIV_LINUX
- posix_fadvise(log_bmp_sys->out.file, log_bmp_sys->out.offset,
- MODIFIED_PAGE_BLOCK_SIZE, POSIX_FADV_DONTNEED);
-#endif
+ os_file_advise(log_bmp_sys->out.file, log_bmp_sys->out.offset,
+ MODIFIED_PAGE_BLOCK_SIZE, OS_FILE_ADVISE_DONTNEED);
log_bmp_sys->out.offset += MODIFIED_PAGE_BLOCK_SIZE;
return TRUE;
@@ -1262,10 +1277,6 @@ log_online_follow_redo_log(void)
group = UT_LIST_GET_NEXT(log_groups, group);
}
- /* A crash injection site that ensures last checkpoint LSN > last
- tracked LSN, so that LSN tracking for this interval is tested. */
- DBUG_EXECUTE_IF("crash_before_bitmap_write", DBUG_SUICIDE(););
-
result = log_online_write_bitmap();
log_bmp_sys->start_lsn = log_bmp_sys->end_lsn;
log_set_tracked_lsn(log_bmp_sys->start_lsn);
@@ -1433,6 +1444,7 @@ log_online_setup_bitmap_file_range(
if (UNIV_UNLIKELY(array_pos >= bitmap_files->count)) {
log_online_diagnose_inconsistent_dir(bitmap_files);
+ os_file_closedir(bitmap_dir);
return FALSE;
}
@@ -1535,10 +1547,8 @@ log_online_open_bitmap_file_read_only(
bitmap_file->size = os_file_get_size(bitmap_file->file);
bitmap_file->offset = 0;
-#ifdef UNIV_LINUX
- posix_fadvise(bitmap_file->file, 0, 0, POSIX_FADV_SEQUENTIAL);
- posix_fadvise(bitmap_file->file, 0, 0, POSIX_FADV_NOREUSE);
-#endif
+ os_file_advise(bitmap_file->file, 0, 0, OS_FILE_ADVISE_SEQUENTIAL);
+ os_file_advise(bitmap_file->file, 0, 0, OS_FILE_ADVISE_NOREUSE);
return TRUE;
}
@@ -1624,7 +1634,7 @@ log_online_bitmap_iterator_init(
/* Empty range */
i->in_files.count = 0;
i->in_files.files = NULL;
- i->in.file = os_file_invalid;
+ os_file_mark_invalid(&i->in.file);
i->page = NULL;
i->failed = FALSE;
return TRUE;
@@ -1642,7 +1652,7 @@ log_online_bitmap_iterator_init(
if (i->in_files.count == 0) {
/* Empty range */
- i->in.file = os_file_invalid;
+ os_file_mark_invalid(&i->in.file);
i->page = NULL;
i->failed = FALSE;
return TRUE;
@@ -1681,10 +1691,10 @@ log_online_bitmap_iterator_release(
{
ut_a(i);
- if (i->in.file != os_file_invalid) {
+ if (!os_file_is_invalid(i->in.file)) {
os_file_close(i->in.file);
- i->in.file = os_file_invalid;
+ os_file_mark_invalid(&i->in.file);
}
if (i->in_files.files) {
@@ -1738,8 +1748,9 @@ log_online_bitmap_iterator_next(
/* Advance file */
i->in_i++;
- success = os_file_close_no_error_handling(i->in.file);
- i->in.file = os_file_invalid;
+ success = os_file_close_no_error_handling(
+ i->in.file);
+ os_file_mark_invalid(&i->in.file);
if (UNIV_UNLIKELY(!success)) {
os_file_get_last_error(TRUE);
@@ -1848,7 +1859,7 @@ log_online_purge_changed_page_bitmaps(
/* If we have to delete the current output file, close it
first. */
os_file_close(log_bmp_sys->out.file);
- log_bmp_sys->out.file = os_file_invalid;
+ os_file_mark_invalid(&log_bmp_sys->out.file);
}
for (i = 0; i < bitmap_files.count; i++) {
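
Both log0online.cc call sites above drop the Linux-only posix_fadvise() blocks in favour of os_file_advise(), which (per the os0file.cc hunk later in this diff) maps the portable OS_FILE_ADVISE_* flags to POSIX_FADV_* on Linux and simply reports success elsewhere. A self-contained sketch of a wrapper in that spirit — my_file_advise and the MY_ADVISE_* flags are stand-ins, and each call passes a single flag, as the patched call sites do:

// Editorial sketch of a portable fadvise wrapper in the spirit of
// os_file_advise(); names and flag values here are invented.
#include <cstdio>
#ifdef __linux__
# include <fcntl.h>
#endif

enum { MY_ADVISE_SEQUENTIAL = 1, MY_ADVISE_DONTNEED = 2 };

static bool my_file_advise(int fd, long long off, long long len, unsigned advice)
{
#ifdef __linux__
    int native = POSIX_FADV_NORMAL;

    if (advice == MY_ADVISE_SEQUENTIAL) {
        native = POSIX_FADV_SEQUENTIAL;
    } else if (advice == MY_ADVISE_DONTNEED) {
        native = POSIX_FADV_DONTNEED;
    }

    return posix_fadvise(fd, (off_t) off, (off_t) len, native) == 0;
#else
    (void) fd; (void) off; (void) len; (void) advice;
    return true;   /* advice is only a hint; lack of support is not an error */
#endif
}

int main()
{
    /* One flag per call, as the patched log0online.cc call sites do. */
    bool ok = my_file_advise(0, 0, 0, MY_ADVISE_SEQUENTIAL);
    std::printf("advise ok: %d\n", (int) ok);
    return 0;
}
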
diff --git a/storage/xtradb/log/log0recv.cc b/storage/xtradb/log/log0recv.cc
index 978e6051711..fb64309cee4 100644
--- a/storage/xtradb/log/log0recv.cc
+++ b/storage/xtradb/log/log0recv.cc
@@ -1,6 +1,6 @@
/*****************************************************************************
-Copyright (c) 1997, 2016, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 1997, 2017, Oracle and/or its affiliates. All Rights Reserved.
Copyright (c) 2012, Facebook Inc.
Copyright (c) 2013, 2017, MariaDB Corporation.
@@ -332,6 +332,7 @@ DECLARE_THREAD(recv_writer_thread)(
/*!< in: a dummy parameter required by
os_thread_create */
{
+ my_thread_init();
ut_ad(!srv_read_only_mode);
#ifdef UNIV_PFS_THREAD
@@ -362,6 +363,7 @@ DECLARE_THREAD(recv_writer_thread)(
recv_writer_thread_active = false;
+ my_thread_end();
/* We count the number of threads in os_thread_exit().
A created thread should always use that to exit and not
use return() to exit. */
@@ -3002,11 +3004,6 @@ recv_init_crash_recovery(void)
possible */
if (srv_force_recovery < SRV_FORCE_NO_LOG_REDO) {
-
- ib_logf(IB_LOG_LEVEL_INFO,
- "Restoring possible half-written data pages "
- "from the doublewrite buffer...");
-
buf_dblwr_process();
/* Spawn the background thread to flush dirty pages
@@ -3017,22 +3014,22 @@ recv_init_crash_recovery(void)
}
}
-/********************************************************//**
-Recovers from a checkpoint. When this function returns, the database is able
+/** Recovers from a checkpoint. When this function returns, the database is able
to start processing of new user transactions, but the function
recv_recovery_from_checkpoint_finish should be called later to complete
the recovery and free the resources used in it.
+@param[in] type LOG_CHECKPOINT or LOG_ARCHIVE
+@param[in] limit_lsn recover up to this lsn if possible
+@param[in] flushed_lsn flushed lsn from first data file
@return error code or DB_SUCCESS */
UNIV_INTERN
dberr_t
recv_recovery_from_checkpoint_start_func(
-/*=====================================*/
#ifdef UNIV_LOG_ARCHIVE
- ulint type, /*!< in: LOG_CHECKPOINT or LOG_ARCHIVE */
- lsn_t limit_lsn, /*!< in: recover up to this lsn if possible */
+ ulint type,
+ lsn_t limit_lsn,
#endif /* UNIV_LOG_ARCHIVE */
- lsn_t min_flushed_lsn,/*!< in: min flushed lsn from data files */
- lsn_t max_flushed_lsn)/*!< in: max flushed lsn from data files */
+ lsn_t flushed_lsn)
{
log_group_t* group;
log_group_t* max_cp_group;
@@ -3260,6 +3257,7 @@ recv_recovery_from_checkpoint_start_func(
group = UT_LIST_GET_NEXT(log_groups, group);
}
+
/* Done with startup scan. Clear the flag. */
recv_log_scan_is_startup_type = FALSE;
@@ -3272,38 +3270,16 @@ recv_recovery_from_checkpoint_start_func(
there is something wrong we will print a message to the
user about recovery: */
- if (checkpoint_lsn != max_flushed_lsn
- || checkpoint_lsn != min_flushed_lsn) {
-
- if (checkpoint_lsn < max_flushed_lsn) {
-
- ib_logf(IB_LOG_LEVEL_WARN,
- "The log sequence number "
- "in the ibdata files is higher "
- "than the log sequence number "
- "in the ib_logfiles! Are you sure "
- "you are using the right "
- "ib_logfiles to start up the database. "
- "Log sequence number in the "
- "ib_logfiles is " LSN_PF ", log"
- "sequence numbers stamped "
- "to ibdata file headers are between "
- "" LSN_PF " and " LSN_PF ".",
- checkpoint_lsn,
- min_flushed_lsn,
- max_flushed_lsn);
- }
-
+ if (checkpoint_lsn != flushed_lsn) {
if (!recv_needed_recovery) {
ib_logf(IB_LOG_LEVEL_INFO,
- "The log sequence numbers "
- LSN_PF " and " LSN_PF
- " in ibdata files do not match"
+ "The log sequence number "
+ LSN_PF
+ " in ibdata file do not match"
" the log sequence number "
LSN_PF
" in the ib_logfiles!",
- min_flushed_lsn,
- max_flushed_lsn,
+ flushed_lsn,
checkpoint_lsn);
if (!srv_read_only_mode) {
diff --git a/storage/xtradb/mysql-test/storage_engine/suite.pm b/storage/xtradb/mysql-test/storage_engine/suite.pm
new file mode 100644
index 00000000000..e186a532dcc
--- /dev/null
+++ b/storage/xtradb/mysql-test/storage_engine/suite.pm
@@ -0,0 +1,8 @@
+package My::Suite::SE::XtraDB;
+
+@ISA = qw(My::Suite);
+
+return "Need XtraDB engine";
+
+bless { };
+
diff --git a/storage/xtradb/mysql-test/storage_engine/tbl_opt_data_index_dir.rdiff b/storage/xtradb/mysql-test/storage_engine/tbl_opt_index_dir.rdiff
index e09e50b17ec..e09e50b17ec 100644
--- a/storage/xtradb/mysql-test/storage_engine/tbl_opt_data_index_dir.rdiff
+++ b/storage/xtradb/mysql-test/storage_engine/tbl_opt_index_dir.rdiff
diff --git a/storage/xtradb/os/os0file.cc b/storage/xtradb/os/os0file.cc
index 4f219b18428..20b202506f5 100644
--- a/storage/xtradb/os/os0file.cc
+++ b/storage/xtradb/os/os0file.cc
@@ -1,6 +1,6 @@
/***********************************************************************
-Copyright (c) 1995, 2016, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 1995, 2017, Oracle and/or its affiliates. All Rights Reserved.
Copyright (c) 2009, Percona Inc.
Copyright (c) 2013, 2017, MariaDB Corporation.
@@ -114,10 +114,12 @@ my_umask */
#ifndef __WIN__
/** Umask for creating files */
UNIV_INTERN ulint os_innodb_umask = S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP;
+# define os_file_invalid (-1)
#else
/** Umask for creating files */
UNIV_INTERN ulint os_innodb_umask = 0;
-#define ECANCELED 125
+# define ECANCELED 125
+# define os_file_invalid INVALID_HANDLE_VALUE
#endif /* __WIN__ */
#ifndef UNIV_HOTBACKUP
@@ -221,7 +223,7 @@ struct os_aio_slot_t{
ulint page_size; /*!< UNIV_PAGE_SIZE or zip_size */
os_offset_t offset; /*!< file offset in bytes */
- os_file_t file; /*!< file where to read or write */
+ pfs_os_file_t file; /*!< file where to read or write */
const char* name; /*!< file name or path */
ibool io_already_done;/*!< used only in simulated aio:
TRUE if the physical i/o already
@@ -1568,7 +1570,7 @@ A simple function to open or create a file.
@return own: handle to the file, not defined if error, error number
can be retrieved with os_file_get_last_error */
UNIV_INTERN
-os_file_t
+pfs_os_file_t
os_file_create_simple_no_error_handling_func(
/*=========================================*/
const char* name, /*!< in: name of the file or path as a
@@ -1584,7 +1586,7 @@ os_file_create_simple_no_error_handling_func(
ulint atomic_writes) /*! in: atomic writes table option
value */
{
- os_file_t file;
+ pfs_os_file_t file;
atomic_writes_t awrites = (atomic_writes_t) atomic_writes;
*success = FALSE;
@@ -1593,7 +1595,6 @@ os_file_create_simple_no_error_handling_func(
DWORD create_flag;
DWORD attributes = 0;
DWORD share_mode = FILE_SHARE_READ;
-
ut_a(name);
ut_a(!(create_mode & OS_FILE_ON_ERROR_SILENT));
@@ -1610,8 +1611,8 @@ os_file_create_simple_no_error_handling_func(
ib_logf(IB_LOG_LEVEL_ERROR,
"Unknown file create mode (%lu) for file '%s'",
create_mode, name);
-
- return((os_file_t) -1);
+ file = INVALID_HANDLE_VALUE;
+ return(file);
}
if (access_type == OS_FILE_READ_ONLY) {
@@ -1635,8 +1636,8 @@ os_file_create_simple_no_error_handling_func(
ib_logf(IB_LOG_LEVEL_ERROR,
"Unknown file access type (%lu) for file '%s'",
access_type, name);
-
- return((os_file_t) -1);
+ file = INVALID_HANDLE_VALUE;
+ return(file);
}
if (IS_XTRABACKUP()) {
@@ -1668,11 +1669,10 @@ os_file_create_simple_no_error_handling_func(
}
}
- *success = (file != INVALID_HANDLE_VALUE);
+ *success = file != INVALID_HANDLE_VALUE;
#else /* __WIN__ */
int create_flag;
const char* mode_str = NULL;
-
ut_a(name);
if (create_mode != OS_FILE_OPEN && create_mode != OS_FILE_OPEN_RAW)
WAIT_ALLOW_WRITES();
@@ -1717,13 +1717,13 @@ os_file_create_simple_no_error_handling_func(
ib_logf(IB_LOG_LEVEL_ERROR,
"Unknown file create mode (%lu) for file '%s'",
create_mode, name);
-
- return((os_file_t) -1);
+ file = -1;
+ return(file);
}
- file = ::open(name, create_flag, os_innodb_umask);
+ file = open(name, create_flag, os_innodb_umask);
- *success = file == -1 ? FALSE : TRUE;
+ *success = file != -1;
/* This function is always called for data files, we should disable
OS caching (O_DIRECT) here as we do in os_file_create_func(), so
@@ -1872,7 +1872,7 @@ Opens an existing file or creates a new.
@return own: handle to the file, not defined if error, error number
can be retrieved with os_file_get_last_error */
UNIV_INTERN
-os_file_t
+pfs_os_file_t
os_file_create_func(
/*================*/
const char* name, /*!< in: name of the file or path as a
@@ -1890,7 +1890,7 @@ os_file_create_func(
ulint atomic_writes) /*! in: atomic writes table option
value */
{
- os_file_t file;
+ pfs_os_file_t file;
ibool retry;
ibool on_error_no_exit;
ibool on_error_silent;
@@ -1901,14 +1901,16 @@ os_file_create_func(
"ib_create_table_fail_disk_full",
*success = FALSE;
SetLastError(ERROR_DISK_FULL);
- return((os_file_t) -1);
+ file = INVALID_HANDLE_VALUE;
+ return(file);
);
#else /* __WIN__ */
DBUG_EXECUTE_IF(
"ib_create_table_fail_disk_full",
*success = FALSE;
errno = ENOSPC;
- return((os_file_t) -1);
+ file = -1;
+ return(file);
);
#endif /* __WIN__ */
@@ -1962,7 +1964,8 @@ os_file_create_func(
"Unknown file create mode (%lu) for file '%s'",
create_mode, name);
- return((os_file_t) -1);
+ file = INVALID_HANDLE_VALUE;
+ return(file);
}
DWORD attributes = 0;
@@ -1986,8 +1989,8 @@ os_file_create_func(
ib_logf(IB_LOG_LEVEL_ERROR,
"Unknown purpose flag (%lu) while opening file '%s'",
purpose, name);
-
- return((os_file_t)(-1));
+ file = INVALID_HANDLE_VALUE;
+ return(file);
}
#ifdef UNIV_NON_BUFFERED_IO
@@ -2113,7 +2116,8 @@ os_file_create_func(
"Unknown file create mode (%lu) for file '%s'",
create_mode, name);
- return((os_file_t) -1);
+ file = -1;
+ return(file);
}
ut_a(type == OS_LOG_FILE || type == OS_DATA_FILE);
@@ -2133,7 +2137,7 @@ os_file_create_func(
#endif /* O_SYNC */
do {
- file = ::open(name, create_flag, os_innodb_umask);
+ file = open(name, create_flag, os_innodb_umask);
if (file == -1) {
const char* operation;
@@ -2442,8 +2446,8 @@ os_file_close_func(
Closes a file handle.
@return TRUE if success */
UNIV_INTERN
-ibool
-os_file_close_no_error_handling(
+bool
+os_file_close_no_error_handling_func(
/*============================*/
os_file_t file) /*!< in, own: handle to a file */
{
@@ -2453,10 +2457,10 @@ os_file_close_no_error_handling(
ret = CloseHandle(file);
if (ret) {
- return(TRUE);
+ return(true);
}
- return(FALSE);
+ return(false);
#else
int ret;
@@ -2464,10 +2468,83 @@ os_file_close_no_error_handling(
if (ret == -1) {
- return(FALSE);
+ return(false);
}
- return(TRUE);
+ return(true);
+#endif /* __WIN__ */
+}
+
+#ifdef HAVE_POSIX_FALLOCATE
+/***********************************************************************//**
+Ensures that disk space is allocated for the file.
+@return TRUE if success */
+UNIV_INTERN
+bool
+os_file_allocate_func(
+ os_file_t file, /*!< in, own: handle to a file */
+ os_offset_t offset, /*!< in: file region offset */
+ os_offset_t len) /*!< in: file region length */
+{
+ return(posix_fallocate(file, offset, len) == 0);
+}
+#endif
+
+/***********************************************************************//**
+Checks if the file is marked as invalid.
+@return TRUE if invalid */
+UNIV_INTERN
+bool
+os_file_is_invalid(
+ pfs_os_file_t file) /*!< in, own: handle to a file */
+{
+ return(file == os_file_invalid);
+}
+
+/***********************************************************************//**
+Marks the file as invalid. */
+UNIV_INTERN
+void
+os_file_mark_invalid(
+ pfs_os_file_t* file) /*!< out: pointer to a handle to a file */
+{
+ file->m_file = os_file_invalid;
+}
+
+/***********************************************************************//**
+Announces an intention to access file data in a specific pattern in the
+future.
+@return TRUE if success */
+UNIV_INTERN
+bool
+os_file_advise(
+ pfs_os_file_t file, /*!< in, own: handle to a file */
+ os_offset_t offset, /*!< in: file region offset */
+ os_offset_t len, /*!< in: file region length */
+ ulint advice)/*!< in: advice for access pattern */
+{
+#ifdef __WIN__
+ return(true);
+#else
+#ifdef UNIV_LINUX
+ int native_advice = 0;
+ if ((advice & OS_FILE_ADVISE_NORMAL) != 0)
+ native_advice |= POSIX_FADV_NORMAL;
+ if ((advice & OS_FILE_ADVISE_RANDOM) != 0)
+ native_advice |= POSIX_FADV_RANDOM;
+ if ((advice & OS_FILE_ADVISE_SEQUENTIAL) != 0)
+ native_advice |= POSIX_FADV_SEQUENTIAL;
+ if ((advice & OS_FILE_ADVISE_WILLNEED) != 0)
+ native_advice |= POSIX_FADV_WILLNEED;
+ if ((advice & OS_FILE_ADVISE_DONTNEED) != 0)
+ native_advice |= POSIX_FADV_DONTNEED;
+ if ((advice & OS_FILE_ADVISE_NOREUSE) != 0)
+ native_advice |= POSIX_FADV_NOREUSE;
+
+ return(posix_fadvise(file, offset, len, native_advice) == 0);
+#else
+ return(true);
+#endif
#endif /* __WIN__ */
}
@@ -2478,7 +2555,7 @@ UNIV_INTERN
os_offset_t
os_file_get_size(
/*=============*/
- os_file_t file) /*!< in: handle to a file */
+ pfs_os_file_t file) /*!< in: handle to a file */
{
#ifdef __WIN__
os_offset_t offset;
@@ -2496,6 +2573,7 @@ os_file_get_size(
return(offset);
#else
return((os_offset_t) lseek(file, 0, SEEK_END));
+
#endif /* __WIN__ */
}
@@ -2509,7 +2587,7 @@ UNIV_INTERN
bool
os_file_set_size(
const char* name,
- os_file_t file,
+ pfs_os_file_t file,
os_offset_t size,
bool is_sparse)
{
@@ -2618,8 +2696,8 @@ os_file_set_eof(
Truncates a file at the specified position.
@return TRUE if success */
UNIV_INTERN
-ibool
-os_file_set_eof_at(
+bool
+os_file_set_eof_at_func(
os_file_t file, /*!< in: handle to a file */
ib_uint64_t new_len)/*!< in: new file length */
{
@@ -4553,7 +4631,7 @@ os_aio_array_reserve_slot(
the aio operation */
void* message2,/*!< in: message to be passed along with
the aio operation */
- os_file_t file, /*!< in: file handle */
+ pfs_os_file_t file, /*!< in: file handle */
const char* name, /*!< in: name of the file or path as a
null-terminated string */
void* buf, /*!< in: buffer where to read or from which
@@ -4928,7 +5006,7 @@ os_aio_func(
caution! */
const char* name, /*!< in: name of the file or path as a
null-terminated string */
- os_file_t file, /*!< in: handle to a file */
+ pfs_os_file_t file, /*!< in: handle to a file */
void* buf, /*!< in: buffer where to read or from which
to write */
os_offset_t offset, /*!< in: file offset where to read or write */
@@ -4958,7 +5036,6 @@ os_aio_func(
BOOL ret;
#endif
ulint wake_later;
-
ut_ad(buf);
ut_ad(n > 0);
ut_ad(n % OS_MIN_LOG_BLOCK_SIZE == 0);
@@ -5205,7 +5282,6 @@ os_aio_windows_handle(
break;
}
}
-
*message1 = slot->message1;
*message2 = slot->message2;
@@ -5229,12 +5305,14 @@ os_aio_windows_handle(
switch (slot->type) {
case OS_FILE_WRITE:
- ret_val = os_file_write(slot->name, slot->file, slot->buf,
- slot->offset, slot->len);
+ ret_val = os_file_write(
+ slot->name, slot->file, slot->buf,
+ slot->offset, slot->len);
break;
case OS_FILE_READ:
- ret_val = os_file_read(slot->file, slot->buf,
- slot->offset, slot->len);
+ ret_val = os_file_read(
+ slot->file, slot->buf,
+ slot->offset, slot->len);
break;
default:
ut_error;
@@ -5503,12 +5581,14 @@ found:
iocb = &(slot->control);
if (slot->type == OS_FILE_READ) {
- io_prep_pread(&slot->control, slot->file, slot->buf,
- slot->len, (off_t) slot->offset);
+ io_prep_pread(&slot->control, slot->file,
+ slot->buf, slot->len,
+ (off_t) slot->offset);
} else {
ut_a(slot->type == OS_FILE_WRITE);
- io_prep_pwrite(&slot->control, slot->file, slot->buf,
- slot->len, (off_t) slot->offset);
+ io_prep_pwrite(&slot->control, slot->file,
+ slot->buf, slot->len,
+ (off_t) slot->offset);
}
/* Resubmit an I/O request */
submit_ret = io_submit(array->aio_ctx[segment], 1, &iocb);
@@ -5742,7 +5822,6 @@ consecutive_loop:
os_aio_slot_t* slot;
slot = os_aio_array_get_nth_slot(array, i + segment * n);
-
if (slot->reserved
&& slot != aio_slot
&& slot->offset == aio_slot->offset + aio_slot->len
@@ -6296,7 +6375,9 @@ os_file_trim(
#ifdef __linux__
#if defined(HAVE_FALLOC_PUNCH_HOLE_AND_KEEP_SIZE)
- int ret = fallocate(slot->file, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE, off, trim_len);
+ int ret = fallocate(slot->file,
+ FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
+ off, trim_len);
if (ret) {
/* After first failure do not try to trim again */
@@ -6342,22 +6423,27 @@ os_file_trim(
flt.Ranges[0].Offset = off;
flt.Ranges[0].Length = trim_len;
+ OVERLAPPED overlapped = { 0 };
+ overlapped.hEvent = win_get_syncio_event();
BOOL ret = DeviceIoControl(slot->file, FSCTL_FILE_LEVEL_TRIM,
- &flt, sizeof(flt), NULL, NULL, NULL, NULL);
-
+ &flt, sizeof(flt), NULL, NULL, NULL, &overlapped);
+ DWORD tmp;
+ if (ret) {
+ ret = GetOverlappedResult(slot->file, &overlapped, &tmp, FALSE);
+ }
+ else if (GetLastError() == ERROR_IO_PENDING) {
+ ret = GetOverlappedResult(slot->file, &overlapped, &tmp, TRUE);
+ }
if (!ret) {
+ DWORD last_error = GetLastError();
/* After first failure do not try to trim again */
os_fallocate_failed = true;
srv_use_trim = FALSE;
ut_print_timestamp(stderr);
- fprintf(stderr,
- " InnoDB: Warning: fallocate call failed with error.\n"
- " InnoDB: start: %lu len: %lu payload: %lu\n"
- " InnoDB: Disabling fallocate for now.\n", off, trim_len, len);
- os_file_handle_error_no_exit(slot->name,
- " DeviceIOControl(FSCTL_FILE_LEVEL_TRIM) ",
- FALSE, __FILE__, __LINE__);
+ fprintf(stderr,
+ " InnoDB: Warning: DeviceIoControl(FSCTL_FILE_LEVEL_TRIM) call failed with error %u%s. Disabling trimming.\n",
+ last_error, last_error == ERROR_NOT_SUPPORTED ? "(ERROR_NOT_SUPPORTED)" : "");
if (slot->write_size) {
*slot->write_size = 0;
diff --git a/storage/xtradb/rem/rem0rec.cc b/storage/xtradb/rem/rem0rec.cc
index 6770748c38b..c62e8c90434 100644
--- a/storage/xtradb/rem/rem0rec.cc
+++ b/storage/xtradb/rem/rem0rec.cc
@@ -1293,8 +1293,10 @@ rec_convert_dtuple_to_rec_comp(
}
}
- memcpy(end, dfield_get_data(field), len);
- end += len;
+ if (len) {
+ memcpy(end, dfield_get_data(field), len);
+ end += len;
+ }
}
}
diff --git a/storage/xtradb/row/row0ftsort.cc b/storage/xtradb/row/row0ftsort.cc
index 4542aa31a6c..7ffcc59dc5f 100644
--- a/storage/xtradb/row/row0ftsort.cc
+++ b/storage/xtradb/row/row0ftsort.cc
@@ -249,9 +249,6 @@ row_fts_psort_info_init(
each parallel sort thread. Each "sort bucket" holds records for
a particular "FTS index partition" */
for (j = 0; j < fts_sort_pll_degree; j++) {
-
- UT_LIST_INIT(psort_info[j].fts_doc_list);
-
for (i = 0; i < FTS_NUM_AUX_INDEX; i++) {
psort_info[j].merge_file[i] =
diff --git a/storage/xtradb/row/row0import.cc b/storage/xtradb/row/row0import.cc
index 81d6fda9e53..2f7aece665a 100644
--- a/storage/xtradb/row/row0import.cc
+++ b/storage/xtradb/row/row0import.cc
@@ -1995,6 +1995,7 @@ PageConverter::update_page(
case FIL_PAGE_TYPE_XDES:
err = set_current_xdes(
buf_block_get_page_no(block), get_frame(block));
+ /* fall through */
case FIL_PAGE_INODE:
case FIL_PAGE_TYPE_TRX_SYS:
case FIL_PAGE_IBUF_FREE_LIST:
diff --git a/storage/xtradb/row/row0ins.cc b/storage/xtradb/row/row0ins.cc
index f4f96d32c50..6072b303d3a 100644
--- a/storage/xtradb/row/row0ins.cc
+++ b/storage/xtradb/row/row0ins.cc
@@ -1,6 +1,7 @@
/*****************************************************************************
Copyright (c) 1996, 2016, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 2017, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -2166,14 +2167,10 @@ for a clustered index!
@retval DB_SUCCESS if no error
@retval DB_DUPLICATE_KEY if error,
@retval DB_LOCK_WAIT if we have to wait for a lock on a possible duplicate
-record
-@retval DB_SUCCESS_LOCKED_REC if an exact match of the record was found
-in online table rebuild (flags & (BTR_KEEP_SYS_FLAG | BTR_NO_LOCKING_FLAG)) */
+record */
static MY_ATTRIBUTE((nonnull, warn_unused_result))
dberr_t
row_ins_duplicate_error_in_clust(
-/*=============================*/
- ulint flags, /*!< in: undo logging and locking flags */
btr_cur_t* cursor, /*!< in: B-tree cursor */
const dtuple_t* entry, /*!< in: entry to insert */
que_thr_t* thr, /*!< in: query thread */
@@ -2454,7 +2451,7 @@ row_ins_clust_index_entry_low(
DB_LOCK_WAIT */
err = row_ins_duplicate_error_in_clust(
- flags, &cursor, entry, thr, &mtr);
+ &cursor, entry, thr, &mtr);
}
if (err != DB_SUCCESS) {
diff --git a/storage/xtradb/row/row0log.cc b/storage/xtradb/row/row0log.cc
index 666b59b42db..2cd663fd600 100644
--- a/storage/xtradb/row/row0log.cc
+++ b/storage/xtradb/row/row0log.cc
@@ -1,6 +1,7 @@
/*****************************************************************************
Copyright (c) 2011, 2016, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 2017, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -367,9 +368,9 @@ row_log_online_op(
goto err_exit;
}
- ret = os_file_write(
+ ret = os_file_write_int_fd(
"(modification log)",
- OS_FILE_FROM_FD(log->fd),
+ log->fd,
log->tail.block, byte_offset, srv_sort_buf_size);
log->tail.blocks++;
if (!ret) {
@@ -483,9 +484,9 @@ row_log_table_close_func(
goto err_exit;
}
- ret = os_file_write(
+ ret = os_file_write_int_fd(
"(modification log)",
- OS_FILE_FROM_FD(log->fd),
+ log->fd,
log->tail.block, byte_offset, srv_sort_buf_size);
log->tail.blocks++;
if (!ret) {
@@ -1880,6 +1881,7 @@ row_log_table_apply_update(
When applying the subsequent ROW_T_DELETE, no matching
record will be found. */
+ /* fall through */
case DB_SUCCESS:
ut_ad(row != NULL);
break;
@@ -2617,11 +2619,10 @@ all_done:
goto func_exit;
}
- success = os_file_read_no_error_handling(
- OS_FILE_FROM_FD(index->online_log->fd),
+ success = os_file_read_no_error_handling_int_fd(
+ index->online_log->fd,
index->online_log->head.block, ofs,
srv_sort_buf_size);
-
if (!success) {
fprintf(stderr, "InnoDB: unable to read temporary file"
" for table %s\n", index->table_name);
@@ -3444,8 +3445,8 @@ all_done:
goto func_exit;
}
- success = os_file_read_no_error_handling(
- OS_FILE_FROM_FD(index->online_log->fd),
+ success = os_file_read_no_error_handling_int_fd(
+ index->online_log->fd,
index->online_log->head.block, ofs,
srv_sort_buf_size);
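
The row0log.cc hunks above (and the row0merge.cc ones that follow) replace os_file_write(..., OS_FILE_FROM_FD(fd), ...) with os_file_write_int_fd(...) and the matching _int_fd read variant, so that plain int descriptors returned by innobase_mysql_tmpfile() are no longer cast into the os_file_t handle type, which is not a plain int on Windows. The sketch below is only a hypothetical illustration of what such an int-fd write helper can look like on POSIX (a full-write pwrite loop); it is not the actual InnoDB implementation, and write_int_fd is an invented name:

    #include <sys/types.h>
    #include <unistd.h>
    #include <cerrno>
    #include <cstddef>

    // Hypothetical helper: write exactly `len` bytes at `offset` through a plain int fd.
    static bool write_int_fd(int fd, const void* buf, off_t offset, std::size_t len)
    {
        const char* p = static_cast<const char*>(buf);
        while (len > 0) {
            ssize_t n = pwrite(fd, p, len, offset);
            if (n < 0) {
                if (errno == EINTR) continue;   // retry after signal interruption
                return false;                   // hard I/O error
            }
            p += n;
            offset += n;
            len -= static_cast<std::size_t>(n);
        }
        return true;
    }
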
diff --git a/storage/xtradb/row/row0merge.cc b/storage/xtradb/row/row0merge.cc
index 57b08801225..6a1298087eb 100644
--- a/storage/xtradb/row/row0merge.cc
+++ b/storage/xtradb/row/row0merge.cc
@@ -967,8 +967,8 @@ row_merge_read(
}
#endif /* UNIV_DEBUG */
- success = os_file_read_no_error_handling(OS_FILE_FROM_FD(fd), buf,
- ofs, srv_sort_buf_size);
+ success = os_file_read_no_error_handling_int_fd(fd, buf,
+ ofs, srv_sort_buf_size);
/* For encrypted tables, decrypt data after reading and copy data */
if (crypt_data && crypt_buf) {
@@ -1023,7 +1023,7 @@ row_merge_write(
mach_write_to_4((byte *)out_buf, 0);
}
- ret = os_file_write("(merge)", OS_FILE_FROM_FD(fd), out_buf, ofs, buf_len);
+ ret = os_file_write_int_fd("(merge)", fd, out_buf, ofs, buf_len);
#ifdef UNIV_DEBUG
if (row_merge_print_block_write) {
@@ -3427,14 +3427,21 @@ row_merge_file_create_low(
performance schema */
struct PSI_file_locker* locker = NULL;
PSI_file_locker_state state;
- register_pfs_file_open_begin(&state, locker, innodb_file_temp_key,
- PSI_FILE_OPEN,
- "Innodb Merge Temp File",
- __FILE__, __LINE__);
+ locker = PSI_FILE_CALL(get_thread_file_name_locker)(
+ &state, innodb_file_temp_key, PSI_FILE_OPEN,
+ "Innodb Merge Temp File", &locker);
+ if (locker != NULL) {
+ PSI_FILE_CALL(start_file_open_wait)(locker,
+ __FILE__,
+ __LINE__);
+ }
#endif
fd = innobase_mysql_tmpfile(path);
#ifdef UNIV_PFS_IO
- register_pfs_file_open_end(locker, fd);
+ if (locker != NULL) {
+ PSI_FILE_CALL(end_file_open_wait_and_bind_to_descriptor)(
+ locker, fd);
+ }
#endif
if (fd < 0) {
@@ -3481,15 +3488,20 @@ row_merge_file_destroy_low(
#ifdef UNIV_PFS_IO
struct PSI_file_locker* locker = NULL;
PSI_file_locker_state state;
- register_pfs_file_io_begin(&state, locker,
- fd, 0, PSI_FILE_CLOSE,
- __FILE__, __LINE__);
+ locker = PSI_FILE_CALL(get_thread_file_descriptor_locker)(
+ &state, fd, PSI_FILE_CLOSE);
+ if (locker != NULL) {
+ PSI_FILE_CALL(start_file_wait)(
+ locker, 0, __FILE__, __LINE__);
+ }
#endif
if (fd >= 0) {
close(fd);
}
#ifdef UNIV_PFS_IO
- register_pfs_file_io_end(locker, 0);
+ if (locker != NULL) {
+ PSI_FILE_CALL(end_file_wait)(locker, 0);
+ }
#endif
}
/*********************************************************************//**
@@ -4025,6 +4037,7 @@ row_merge_build_indexes(
for (i = 0; i < n_indexes; i++) {
merge_files[i].fd = -1;
+ merge_files[i].offset = 0;
}
total_static_cost = COST_BUILD_INDEX_STATIC * n_indexes + COST_READ_CLUSTERED_INDEX;
diff --git a/storage/xtradb/row/row0mysql.cc b/storage/xtradb/row/row0mysql.cc
index 463981f51dd..59568f5c91b 100644
--- a/storage/xtradb/row/row0mysql.cc
+++ b/storage/xtradb/row/row0mysql.cc
@@ -5542,7 +5542,8 @@ loop:
fputs(" InnoDB: Warning: CHECK TABLE on ", stderr);
dict_index_name_print(stderr, prebuilt->trx, index);
fprintf(stderr, " returned %lu\n", ret);
- /* fall through (this error is ignored by CHECK TABLE) */
+ /* (this error is ignored by CHECK TABLE) */
+ /* fall through */
case DB_END_OF_INDEX:
func_exit:
mem_free(buf);
diff --git a/storage/xtradb/row/row0purge.cc b/storage/xtradb/row/row0purge.cc
index 8a1dbd6f69f..333677edf21 100644
--- a/storage/xtradb/row/row0purge.cc
+++ b/storage/xtradb/row/row0purge.cc
@@ -1,6 +1,7 @@
/*****************************************************************************
Copyright (c) 1997, 2016, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 2017, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -488,8 +489,9 @@ row_purge_remove_sec_if_poss_leaf(
success = false;
}
}
- /* fall through (the index entry is still needed,
+ /* (The index entry is still needed,
or the deletion succeeded) */
+ /* fall through */
case ROW_NOT_DELETED_REF:
/* The index entry is still needed. */
case ROW_BUFFERED:
diff --git a/storage/xtradb/row/row0sel.cc b/storage/xtradb/row/row0sel.cc
index 7d4435eba5b..8e3ed3d1a4e 100644
--- a/storage/xtradb/row/row0sel.cc
+++ b/storage/xtradb/row/row0sel.cc
@@ -66,6 +66,8 @@ Created 12/19/1997 Heikki Tuuri
#include "my_compare.h" /* enum icp_result */
+#include <vector>
+
/* Maximum number of rows to prefetch; MySQL interface has another parameter */
#define SEL_MAX_N_PREFETCH 16
@@ -2715,7 +2717,8 @@ row_sel_field_store_in_mysql_format_func(
|| !(templ->mysql_col_len % templ->mbmaxlen));
ut_ad(len * templ->mbmaxlen >= templ->mysql_col_len
|| (field_no == templ->icp_rec_field_no
- && field->prefix_len > 0));
+ && field->prefix_len > 0)
+ || templ->rec_field_is_prefix);
ut_ad(!(field->prefix_len % templ->mbmaxlen));
if (templ->mbminlen == 1 && templ->mbmaxlen != 1) {
@@ -2757,27 +2760,32 @@ row_sel_field_store_in_mysql_format_func(
# define row_sel_store_mysql_field(m,p,r,i,o,f,t) \
row_sel_store_mysql_field_func(m,p,r,o,f,t)
#endif /* UNIV_DEBUG */
-/**************************************************************//**
-Convert a field in the Innobase format to a field in the MySQL format. */
+/** Convert a field in the Innobase format to a field in the MySQL format.
+@param[out] mysql_rec record in the MySQL format
+@param[in,out] prebuilt prebuilt struct
+@param[in] rec InnoDB record; must be protected
+ by a page latch
+@param[in] index index of rec
+@param[in] offsets array returned by rec_get_offsets()
+@param[in] field_no templ->rec_field_no or
+ templ->clust_rec_field_no
+ or templ->icp_rec_field_no
+ or sec field no if clust_templ_for_sec
+ is TRUE
+@param[in] templ row template
+*/
static MY_ATTRIBUTE((warn_unused_result))
ibool
row_sel_store_mysql_field_func(
-/*===========================*/
- byte* mysql_rec, /*!< out: record in the
- MySQL format */
- row_prebuilt_t* prebuilt, /*!< in/out: prebuilt struct */
- const rec_t* rec, /*!< in: InnoDB record;
- must be protected by
- a page latch */
+ byte* mysql_rec,
+ row_prebuilt_t* prebuilt,
+ const rec_t* rec,
#ifdef UNIV_DEBUG
- const dict_index_t* index, /*!< in: index of rec */
+ const dict_index_t* index,
#endif
- const ulint* offsets, /*!< in: array returned by
- rec_get_offsets() */
- ulint field_no, /*!< in: templ->rec_field_no or
- templ->clust_rec_field_no or
- templ->icp_rec_field_no */
- const mysql_row_templ_t*templ) /*!< in: row template */
+ const ulint* offsets,
+ ulint field_no,
+ const mysql_row_templ_t*templ)
{
const byte* data;
ulint len;
@@ -2906,31 +2914,31 @@ row_sel_store_mysql_field_func(
return(TRUE);
}
-/**************************************************************//**
-Convert a row in the Innobase format to a row in the MySQL format.
+/** Convert a row in the Innobase format to a row in the MySQL format.
Note that the template in prebuilt may advise us to copy only a few
columns to mysql_rec, other columns are left blank. All columns may not
be needed in the query.
+@param[out] mysql_rec row in the MySQL format
+@param[in] prebuilt prebuilt structure
+@param[in] rec Innobase record in the index
+ which was described in prebuilt's
+ template, or in the clustered index;
+ must be protected by a page latch
+@param[in] rec_clust TRUE if the rec in the clustered index
+@param[in] index index of rec
+@param[in] offsets array returned by rec_get_offsets(rec)
@return TRUE on success, FALSE if not all columns could be retrieved */
static MY_ATTRIBUTE((warn_unused_result))
ibool
row_sel_store_mysql_rec(
-/*====================*/
- byte* mysql_rec, /*!< out: row in the MySQL format */
- row_prebuilt_t* prebuilt, /*!< in: prebuilt struct */
- const rec_t* rec, /*!< in: Innobase record in the index
- which was described in prebuilt's
- template, or in the clustered index;
- must be protected by a page latch */
- ibool rec_clust, /*!< in: TRUE if rec is in the
- clustered index instead of
- prebuilt->index */
- const dict_index_t* index, /*!< in: index of rec */
- const ulint* offsets) /*!< in: array returned by
- rec_get_offsets(rec) */
+ byte* mysql_rec,
+ row_prebuilt_t* prebuilt,
+ const rec_t* rec,
+ ibool rec_clust,
+ const dict_index_t* index,
+ const ulint* offsets)
{
ulint i;
-
ut_ad(rec_clust || index == prebuilt->index);
ut_ad(!rec_clust || dict_index_is_clust(index));
@@ -2946,12 +2954,14 @@ row_sel_store_mysql_rec(
? templ->clust_rec_field_no
: templ->rec_field_no;
/* We should never deliver column prefixes to MySQL,
- except for evaluating innobase_index_cond(). */
+ except for evaluating innobase_index_cond() and if the prefix
+ index is longer than the actual row data. */
/* ...actually, we do want to do this in order to
support the prefix query optimization.
ut_ad(dict_index_get_nth_field(index, field_no)->prefix_len
- == 0);
+ == 0 || templ->rec_field_is_prefix);
+
...so we disable this assert. */
@@ -3681,7 +3691,7 @@ row_search_for_mysql(
trx_t* trx = prebuilt->trx;
dict_index_t* clust_index;
que_thr_t* thr;
- const rec_t* rec;
+ const rec_t* rec = NULL;
const rec_t* result_rec = NULL;
const rec_t* clust_rec;
dberr_t err = DB_SUCCESS;
@@ -3706,7 +3716,7 @@ row_search_for_mysql(
ulint* offsets = offsets_;
ibool table_lock_waited = FALSE;
byte* next_buf = 0;
- ibool use_clustered_index = FALSE;
+ bool use_clustered_index = false;
rec_offs_init(offsets_);
@@ -3966,7 +3976,8 @@ row_search_for_mysql(
if (!row_sel_store_mysql_rec(
buf, prebuilt,
- rec, FALSE, index, offsets)) {
+ rec, FALSE, index,
+ offsets)) {
/* Only fresh inserts may contain
incomplete externally stored
columns. Pretend that such
@@ -4248,7 +4259,6 @@ rec_loop:
}
if (page_rec_is_supremum(rec)) {
-
if (set_also_gap_locks
&& !(srv_locks_unsafe_for_binlog
|| trx->isolation_level <= TRX_ISO_READ_COMMITTED)
@@ -4777,17 +4787,17 @@ locks_ok:
indexes are shorter than the prefix size
This optimization can avoid many IOs for certain schemas.
*/
- ibool row_contains_all_values = TRUE;
- int i;
+ bool row_contains_all_values = true;
+ unsigned int i;
for (i = 0; i < prebuilt->n_template; i++) {
/* Condition (1) from above: is the field in the
index (prefix or not)? */
- mysql_row_templ_t* templ =
+ const mysql_row_templ_t* templ =
prebuilt->mysql_template + i;
ulint secondary_index_field_no =
templ->rec_prefix_field_no;
if (secondary_index_field_no == ULINT_UNDEFINED) {
- row_contains_all_values = FALSE;
+ row_contains_all_values = false;
break;
}
/* Condition (2) from above: if this is a
@@ -4802,8 +4812,9 @@ locks_ok:
index,
secondary_index_field_no);
ut_a(field->prefix_len > 0);
- if (record_size >= field->prefix_len) {
- row_contains_all_values = FALSE;
+ if (record_size >= field->prefix_len
+ / templ->mbmaxlen) {
+ row_contains_all_values = false;
break;
}
}
@@ -4819,7 +4830,7 @@ locks_ok:
templ->rec_prefix_field_no;
ut_a(templ->rec_field_no != ULINT_UNDEFINED);
}
- use_clustered_index = FALSE;
+ use_clustered_index = false;
srv_stats.n_sec_rec_cluster_reads_avoided.inc();
}
}
diff --git a/storage/xtradb/srv/srv0srv.cc b/storage/xtradb/srv/srv0srv.cc
index bf4b9124da7..cc5d1320142 100644
--- a/storage/xtradb/srv/srv0srv.cc
+++ b/storage/xtradb/srv/srv0srv.cc
@@ -1,6 +1,6 @@
/*****************************************************************************
-Copyright (c) 1995, 2016, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 1995, 2017, Oracle and/or its affiliates. All Rights Reserved.
Copyright (c) 2008, 2009 Google Inc.
Copyright (c) 2009, Percona Inc.
Copyright (c) 2013, 2017, MariaDB Corporation.
@@ -171,7 +171,8 @@ UNIV_INTERN unsigned long long srv_online_max_size;
OS (provided we compiled Innobase with it in), otherwise we will
use simulated aio we build below with threads.
Currently we support native aio on windows and linux */
-UNIV_INTERN my_bool srv_use_native_aio = TRUE;
+/* Make srv_use_native_aio visible to other plugins. */
+my_bool srv_use_native_aio = TRUE;
UNIV_INTERN my_bool srv_numa_interleave = FALSE;
/* Default compression level if page compression is used and no compression
@@ -450,7 +451,7 @@ UNIV_INTERN my_bool srv_cleaner_thread_priority = FALSE;
UNIV_INTERN my_bool srv_master_thread_priority = FALSE;
/* The number of purge threads to use.*/
-UNIV_INTERN ulong srv_n_purge_threads = 1;
+UNIV_INTERN ulong srv_n_purge_threads;
/* the number of pages to purge in one batch */
UNIV_INTERN ulong srv_purge_batch_size = 20;
@@ -689,16 +690,16 @@ UNIV_INTERN ulong srv_buf_dump_status_frequency = 0;
/** Acquire the system_mutex. */
#define srv_sys_mutex_enter() do { \
- mutex_enter(&srv_sys->mutex); \
+ mutex_enter(&srv_sys.mutex); \
} while (0)
/** Test if the system mutex is owned. */
-#define srv_sys_mutex_own() (mutex_own(&srv_sys->mutex) \
+#define srv_sys_mutex_own() (mutex_own(&srv_sys.mutex) \
&& !srv_read_only_mode)
/** Release the system mutex. */
#define srv_sys_mutex_exit() do { \
- mutex_exit(&srv_sys->mutex); \
+ mutex_exit(&srv_sys.mutex); \
} while (0)
#define fetch_lock_wait_timeout(trx) \
@@ -793,7 +794,7 @@ struct srv_sys_t{
ulint n_sys_threads; /*!< size of the sys_threads
array */
- srv_slot_t* sys_threads; /*!< server thread table;
+ srv_slot_t sys_threads[32 + 1]; /*!< server thread table;
os_event_set() and
os_event_reset() on
sys_threads[]->event are
@@ -817,7 +818,7 @@ struct srv_sys_t{
UNIV_INTERN ib_mutex_t server_mutex;
#endif /* !HAVE_ATOMIC_BUILTINS */
-static srv_sys_t* srv_sys = NULL;
+static srv_sys_t srv_sys;
/** Event to signal srv_monitor_thread. Not protected by a mutex.
Set after setting srv_print_innodb_monitor. */
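
The hunk above, together with the srv_sys-> to srv_sys. substitutions in the rest of this file, turns the heap-allocated srv_sys_t singleton into a statically allocated object with a fixed-size sys_threads[32 + 1] slot table. That removes the mem_zalloc()/mem_free() pair in srv_init()/srv_free(), and shutdown can simply memset the object so that the same process (Mariabackup) can start the subsystem again. A minimal generic sketch of the same simplification; all names here are illustrative, not the InnoDB ones:

    #include <cstring>

    struct server_sys_t {
        unsigned  n_sys_threads;
        int       sys_threads[32 + 1];  /* fixed upper bound replaces the trailing heap array */
    };

    static server_sys_t server_sys;     /* static storage: zero-initialized, never freed */

    static void server_init(unsigned n_purge_threads, bool read_only)
    {
        /* Only configuration-dependent fields need to be set explicitly. */
        server_sys.n_sys_threads = read_only ? 0 : n_purge_threads + 1;
    }

    static void server_free()
    {
        /* Reset so the subsystem can be started again within the same process. */
        std::memset(&server_sys, 0, sizeof server_sys);
    }
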
@@ -839,10 +840,10 @@ and/or load it during startup. */
UNIV_INTERN char srv_buffer_pool_dump_at_shutdown = FALSE;
UNIV_INTERN char srv_buffer_pool_load_at_startup = FALSE;
-/** Slot index in the srv_sys->sys_threads array for the purge thread. */
+/** Slot index in the srv_sys.sys_threads array for the purge thread. */
static const ulint SRV_PURGE_SLOT = 1;
-/** Slot index in the srv_sys->sys_threads array for the master thread. */
+/** Slot index in the srv_sys.sys_threads array for the master thread. */
static const ulint SRV_MASTER_SLOT = 0;
UNIV_INTERN os_event_t srv_checkpoint_completed_event;
@@ -952,21 +953,21 @@ srv_reserve_slot(
switch (type) {
case SRV_MASTER:
- slot = &srv_sys->sys_threads[SRV_MASTER_SLOT];
+ slot = &srv_sys.sys_threads[SRV_MASTER_SLOT];
break;
case SRV_PURGE:
- slot = &srv_sys->sys_threads[SRV_PURGE_SLOT];
+ slot = &srv_sys.sys_threads[SRV_PURGE_SLOT];
break;
case SRV_WORKER:
/* Find an empty slot, skip the master and purge slots. */
- for (slot = &srv_sys->sys_threads[2];
+ for (slot = &srv_sys.sys_threads[2];
slot->in_use;
++slot) {
- ut_a(slot < &srv_sys->sys_threads[
- srv_sys->n_sys_threads]);
+ ut_a(slot < &srv_sys.sys_threads[
+ srv_sys.n_sys_threads]);
}
break;
@@ -982,7 +983,7 @@ srv_reserve_slot(
ut_ad(srv_slot_get_type(slot) == type);
- ++srv_sys->n_threads_active[type];
+ ++srv_sys.n_threads_active[type];
srv_sys_mutex_exit();
@@ -1012,27 +1013,27 @@ srv_suspend_thread_low(
case SRV_MASTER:
/* We have only one master thread and it
should be the first entry always. */
- ut_a(srv_sys->n_threads_active[type] == 1);
+ ut_a(srv_sys.n_threads_active[type] == 1);
break;
case SRV_PURGE:
/* We have only one purge coordinator thread
and it should be the second entry always. */
- ut_a(srv_sys->n_threads_active[type] == 1);
+ ut_a(srv_sys.n_threads_active[type] == 1);
break;
case SRV_WORKER:
ut_a(srv_n_purge_threads > 1);
- ut_a(srv_sys->n_threads_active[type] > 0);
+ ut_a(srv_sys.n_threads_active[type] > 0);
break;
}
ut_a(!slot->suspended);
slot->suspended = TRUE;
- ut_a(srv_sys->n_threads_active[type] > 0);
+ ut_a(srv_sys.n_threads_active[type] > 0);
- srv_sys->n_threads_active[type]--;
+ srv_sys.n_threads_active[type]--;
return(os_event_reset(slot->event));
}
@@ -1087,7 +1088,7 @@ srv_resume_thread(srv_slot_t* slot, ib_int64_t sig_count = 0, bool wait = true,
ut_ad(slot->suspended);
slot->suspended = FALSE;
- ++srv_sys->n_threads_active[slot->type];
+ ++srv_sys.n_threads_active[slot->type];
srv_sys_mutex_exit();
return(timeout);
}
@@ -1109,8 +1110,8 @@ srv_release_threads(enum srv_thread_type type, ulint n)
srv_sys_mutex_enter();
- for (ulint i = 0; i < srv_sys->n_sys_threads; i++) {
- srv_slot_t* slot = &srv_sys->sys_threads[i];
+ for (ulint i = 0; i < srv_sys.n_sys_threads; i++) {
+ srv_slot_t* slot = &srv_sys.sys_threads[i];
if (!slot->in_use || srv_slot_get_type(slot) != type) {
continue;
@@ -1130,7 +1131,7 @@ srv_release_threads(enum srv_thread_type type, ulint n)
should be the first entry always. */
ut_a(n == 1);
ut_a(i == SRV_MASTER_SLOT);
- ut_a(srv_sys->n_threads_active[type] == 0);
+ ut_a(srv_sys.n_threads_active[type] == 0);
break;
case SRV_PURGE:
@@ -1139,12 +1140,12 @@ srv_release_threads(enum srv_thread_type type, ulint n)
ut_a(n == 1);
ut_a(i == SRV_PURGE_SLOT);
ut_a(srv_n_purge_threads > 0);
- ut_a(srv_sys->n_threads_active[type] == 0);
+ ut_a(srv_sys.n_threads_active[type] == 0);
break;
case SRV_WORKER:
ut_a(srv_n_purge_threads > 1);
- ut_a(srv_sys->n_threads_active[type]
+ ut_a(srv_sys.n_threads_active[type]
< srv_n_purge_threads - 1);
break;
}
@@ -1182,9 +1183,6 @@ void
srv_init(void)
/*==========*/
{
- ulint n_sys_threads = 0;
- ulint srv_sys_sz = sizeof(*srv_sys);
-
#ifndef HAVE_ATOMIC_BUILTINS
mutex_create(server_mutex_key, &server_mutex, SYNC_ANY_LATCH);
#endif /* !HAVE_ATOMIC_BUILTINS */
@@ -1192,29 +1190,19 @@ srv_init(void)
mutex_create(srv_innodb_monitor_mutex_key,
&srv_innodb_monitor_mutex, SYNC_NO_ORDER_CHECK);
- if (!srv_read_only_mode) {
-
- /* Number of purge threads + master thread */
- n_sys_threads = srv_n_purge_threads + 1;
-
- srv_sys_sz += n_sys_threads * sizeof(*srv_sys->sys_threads);
- }
-
- srv_sys = static_cast<srv_sys_t*>(mem_zalloc(srv_sys_sz));
-
- srv_sys->n_sys_threads = n_sys_threads;
+ srv_sys.n_sys_threads = srv_read_only_mode
+ ? 0
+ : srv_n_purge_threads + 1/* purge coordinator */;
if (!srv_read_only_mode) {
- mutex_create(srv_sys_mutex_key, &srv_sys->mutex, SYNC_THREADS);
+ mutex_create(srv_sys_mutex_key, &srv_sys.mutex, SYNC_THREADS);
mutex_create(srv_sys_tasks_mutex_key,
- &srv_sys->tasks_mutex, SYNC_ANY_LATCH);
-
- srv_sys->sys_threads = (srv_slot_t*) &srv_sys[1];
+ &srv_sys.tasks_mutex, SYNC_ANY_LATCH);
- for (ulint i = 0; i < srv_sys->n_sys_threads; ++i) {
- srv_slot_t* slot = &srv_sys->sys_threads[i];
+ for (ulint i = 0; i < srv_sys.n_sys_threads; ++i) {
+ srv_slot_t* slot = &srv_sys.sys_threads[i];
slot->event = os_event_create();
@@ -1234,8 +1222,6 @@ srv_init(void)
if (srv_track_changed_pages) {
os_event_set(srv_redo_log_tracked_event);
}
-
- UT_LIST_INIT(srv_sys->tasks);
}
/* page_zip_stat_per_index_mutex is acquired from:
@@ -1283,8 +1269,8 @@ srv_free(void)
if (!srv_read_only_mode) {
- for (ulint i = 0; i < srv_sys->n_sys_threads; i++)
- os_event_free(srv_sys->sys_threads[i].event);
+ for (ulint i = 0; i < srv_sys.n_sys_threads; i++)
+ os_event_free(srv_sys.sys_threads[i].event);
os_event_free(srv_error_event);
srv_error_event = NULL;
@@ -1296,8 +1282,8 @@ srv_free(void)
srv_checkpoint_completed_event = NULL;
os_event_free(srv_redo_log_tracked_event);
srv_redo_log_tracked_event = NULL;
- mutex_free(&srv_sys->mutex);
- mutex_free(&srv_sys->tasks_mutex);
+ mutex_free(&srv_sys.mutex);
+ mutex_free(&srv_sys.tasks_mutex);
}
#ifdef WITH_INNODB_DISALLOW_WRITES
@@ -1311,10 +1297,10 @@ srv_free(void)
mutex_free(&srv_innodb_monitor_mutex);
mutex_free(&page_zip_stat_per_index_mutex);
- mem_free(srv_sys);
- srv_sys = NULL;
-
trx_i_s_cache_free(trx_i_s_cache);
+
+ /* This is needed for Mariabackup. */
+ memset(&srv_sys, 0, sizeof srv_sys);
}
/*********************************************************************//**
@@ -1793,8 +1779,10 @@ srv_export_innodb_status(void)
buf_get_total_stat(&stat);
buf_get_total_list_len(&LRU_len, &free_len, &flush_list_len);
buf_get_total_list_size_in_bytes(&buf_pools_list_size);
- fil_crypt_total_stat(&crypt_stat);
- btr_scrub_total_stat(&scrub_stat);
+ if (!srv_read_only_mode) {
+ fil_crypt_total_stat(&crypt_stat);
+ btr_scrub_total_stat(&scrub_stat);
+ }
mem_adaptive_hash = 0;
@@ -2108,6 +2096,7 @@ srv_export_innodb_status(void)
export_vars.innodb_sec_rec_cluster_reads_avoided =
srv_stats.n_sec_rec_cluster_reads_avoided;
+ if (!srv_read_only_mode) {
export_vars.innodb_encryption_rotation_pages_read_from_cache =
crypt_stat.pages_read_from_cache;
export_vars.innodb_encryption_rotation_pages_read_from_disk =
@@ -2135,6 +2124,7 @@ srv_export_innodb_status(void)
scrub_stat.page_split_failures_missing_index;
export_vars.innodb_scrub_page_split_failures_unknown =
scrub_stat.page_split_failures_unknown;
+ }
mutex_exit(&srv_innodb_monitor_mutex);
}
@@ -2289,7 +2279,7 @@ loop:
}
}
- if (srv_shutdown_state >= SRV_SHUTDOWN_CLEANUP) {
+ if (srv_shutdown_state != SRV_SHUTDOWN_NONE) {
goto exit_func;
}
@@ -2427,7 +2417,7 @@ loop:
os_event_wait_time_low(srv_error_event, 1000000, sig_count);
- if (srv_shutdown_state < SRV_SHUTDOWN_CLEANUP) {
+ if (srv_shutdown_state == SRV_SHUTDOWN_NONE) {
goto loop;
}
@@ -2452,9 +2442,9 @@ srv_inc_activity_count(
is caused by the background
change buffer merge */
{
- srv_sys->activity_count.inc();
+ srv_sys.activity_count.inc();
if (ibuf_merge_activity)
- srv_sys->ibuf_merge_activity_count.inc();
+ srv_sys.ibuf_merge_activity_count.inc();
}
/**********************************************************************//**
@@ -2476,7 +2466,7 @@ srv_get_active_thread_type(void)
srv_sys_mutex_enter();
for (ulint i = SRV_WORKER; i <= SRV_MASTER; ++i) {
- if (srv_sys->n_threads_active[i] != 0) {
+ if (srv_sys.n_threads_active[i] != 0) {
ret = static_cast<srv_thread_type>(i);
break;
}
@@ -2599,7 +2589,8 @@ purge_archived_logs(
if (dirnamelen + strlen(fileinfo.name) + 2 > OS_FILE_MAX_PATH)
continue;
- snprintf(archived_log_filename + dirnamelen, OS_FILE_MAX_PATH,
+ snprintf(archived_log_filename + dirnamelen,
+ OS_FILE_MAX_PATH - dirnamelen - 1,
"%s", fileinfo.name);
if (before_no) {
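
The snprintf change above is the usual remaining-space computation when formatting into the tail of a fixed buffer: once dirnamelen bytes of the directory prefix are in place, only OS_FILE_MAX_PATH - dirnamelen - 1 bytes may still be written (the patch keeps an extra byte of slack), not the full buffer size, or a long file name could overflow the array. A small self-contained illustration of the pattern; the names append_name and MAX_PATH_LEN are stand-ins, not InnoDB identifiers:

    #include <cstdio>
    #include <cstring>

    enum { MAX_PATH_LEN = 512 };        /* stand-in for OS_FILE_MAX_PATH */

    /* Append `name` after the directory prefix already stored in `path`. */
    static void append_name(char (&path)[MAX_PATH_LEN], const char* name)
    {
        std::size_t dirlen = std::strlen(path);
        if (dirlen >= MAX_PATH_LEN - 1) {
            return;                     /* no room left for anything */
        }
        /* Pass only the space that remains, never the full buffer size. */
        std::snprintf(path + dirlen, MAX_PATH_LEN - dirlen, "%s", name);
    }
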
@@ -2695,12 +2686,12 @@ srv_active_wake_master_thread(void)
srv_inc_activity_count();
- if (srv_sys->n_threads_active[SRV_MASTER] == 0) {
+ if (srv_sys.n_threads_active[SRV_MASTER] == 0) {
srv_slot_t* slot;
srv_sys_mutex_enter();
- slot = &srv_sys->sys_threads[SRV_MASTER_SLOT];
+ slot = &srv_sys.sys_threads[SRV_MASTER_SLOT];
/* Only if the master thread has been started. */
@@ -2727,7 +2718,7 @@ srv_wake_purge_thread_if_not_active(void)
ut_ad(!srv_sys_mutex_own());
if (purge_sys->state == PURGE_STATE_RUN
- && srv_sys->n_threads_active[SRV_PURGE] == 0) {
+ && srv_sys.n_threads_active[SRV_PURGE] == 0) {
srv_release_threads(SRV_PURGE, 1);
}
@@ -2756,7 +2747,7 @@ ulint
srv_get_activity_count(void)
/*========================*/
{
- return(srv_sys->activity_count);
+ return(srv_sys.activity_count);
}
/** Get current server ibuf merge activity count.
@@ -2765,7 +2756,7 @@ static
ulint
srv_get_ibuf_merge_activity_count(void)
{
- return(srv_sys->ibuf_merge_activity_count);
+ return(srv_sys.ibuf_merge_activity_count);
}
/*******************************************************************//**
@@ -2784,14 +2775,14 @@ srv_check_activity(
ULINT_UNDEFINED */
ulint old_ibuf_merge_activity_count)
{
- ulint new_activity_count = srv_sys->activity_count;
+ ulint new_activity_count = srv_sys.activity_count;
if (old_ibuf_merge_activity_count == ULINT_UNDEFINED)
return(new_activity_count != old_activity_count);
/* If we care about ibuf merge activity, then the server is considered
idle if all activity, if any, was due to ibuf merge. */
ulint new_ibuf_merge_activity_count
- = srv_sys->ibuf_merge_activity_count;
+ = srv_sys.ibuf_merge_activity_count;
ut_ad(new_ibuf_merge_activity_count <= new_activity_count);
ut_ad(new_ibuf_merge_activity_count >= old_ibuf_merge_activity_count);
@@ -2871,7 +2862,7 @@ srv_shutdown_print_master_pending(
time_elapsed = ut_difftime(current_time, *last_print_time);
if (time_elapsed > 60) {
- *last_print_time = ut_time();
+ *last_print_time = current_time;
if (n_tables_to_drop) {
ut_print_timestamp(stderr);
@@ -2924,7 +2915,7 @@ srv_master_do_active_tasks(void)
MONITOR_INC_TIME_IN_MICRO_SECS(
MONITOR_SRV_BACKGROUND_DROP_TABLE_MICROSECOND, counter_time);
- if (srv_shutdown_state > 0) {
+ if (srv_shutdown_state != SRV_SHUTDOWN_NONE) {
return;
}
@@ -2958,11 +2949,7 @@ srv_master_do_active_tasks(void)
MONITOR_SRV_MEM_VALIDATE_MICROSECOND, counter_time);
}
#endif
- if (srv_shutdown_state > 0) {
- return;
- }
-
- if (srv_shutdown_state > 0) {
+ if (srv_shutdown_state != SRV_SHUTDOWN_NONE) {
return;
}
@@ -2975,7 +2962,7 @@ srv_master_do_active_tasks(void)
MONITOR_SRV_DICT_LRU_MICROSECOND, counter_time);
}
- if (srv_shutdown_state > 0) {
+ if (srv_shutdown_state != SRV_SHUTDOWN_NONE) {
return;
}
@@ -3019,7 +3006,7 @@ srv_master_do_idle_tasks(void)
MONITOR_SRV_BACKGROUND_DROP_TABLE_MICROSECOND,
counter_time);
- if (srv_shutdown_state > 0) {
+ if (srv_shutdown_state != SRV_SHUTDOWN_NONE) {
return;
}
@@ -3035,7 +3022,7 @@ srv_master_do_idle_tasks(void)
MONITOR_INC_TIME_IN_MICRO_SECS(
MONITOR_SRV_IBUF_MERGE_MICROSECOND, counter_time);
- if (srv_shutdown_state > 0) {
+ if (srv_shutdown_state != SRV_SHUTDOWN_NONE) {
return;
}
@@ -3051,7 +3038,7 @@ srv_master_do_idle_tasks(void)
MONITOR_INC_TIME_IN_MICRO_SECS(
MONITOR_SRV_LOG_FLUSH_MICROSECOND, counter_time);
- if (srv_shutdown_state > 0) {
+ if (srv_shutdown_state != SRV_SHUTDOWN_NONE) {
return;
}
@@ -3072,70 +3059,42 @@ srv_master_do_idle_tasks(void)
}
}
-/*********************************************************************//**
-Perform the tasks during shutdown. The tasks that we do at shutdown
-depend on srv_fast_shutdown:
-2 => very fast shutdown => do no book keeping
-1 => normal shutdown => clear drop table queue and make checkpoint
-0 => slow shutdown => in addition to above do complete purge and ibuf
-merge
-@return TRUE if some work was done. FALSE otherwise */
+/** Perform shutdown tasks.
+@param[in] ibuf_merge whether to complete the change buffer merge */
static
-ibool
-srv_master_do_shutdown_tasks(
-/*=========================*/
- ib_time_t* last_print_time)/*!< last time the function
- print the message */
+void
+srv_shutdown(bool ibuf_merge)
{
- ulint n_bytes_merged = 0;
- ulint n_tables_to_drop = 0;
+ ulint n_bytes_merged = 0;
+ ulint n_tables_to_drop;
+ ib_time_t now = ut_time();
- ut_ad(!srv_read_only_mode);
-
- ++srv_main_shutdown_loops;
-
- ut_a(srv_shutdown_state > 0);
-
- /* In very fast shutdown none of the following is necessary */
- if (srv_fast_shutdown == 2) {
- return(FALSE);
- }
-
- /* ALTER TABLE in MySQL requires on Unix that the table handler
- can drop tables lazily after there no longer are SELECT
- queries to them. */
- srv_main_thread_op_info = "doing background drop tables";
- n_tables_to_drop = row_drop_tables_for_mysql_in_background();
-
- /* make sure that there is enough reusable space in the redo
- log files */
- srv_main_thread_op_info = "checking free log space";
- log_free_check();
-
- /* In case of normal shutdown we don't do ibuf merge or purge */
- if (srv_fast_shutdown == 1) {
- goto func_exit;
- }
-
- /* Do an ibuf merge */
- srv_main_thread_op_info = "doing insert buffer merge";
- n_bytes_merged = ibuf_merge_in_background(true);
-
- /* Flush logs if needed */
- srv_sync_log_buffer_in_background();
-
-func_exit:
- /* Make a new checkpoint about once in 10 seconds */
- srv_main_thread_op_info = "making checkpoint";
- log_checkpoint(TRUE, FALSE, FALSE);
-
- /* Print progress message every 60 seconds during shutdown */
- if (srv_shutdown_state > 0 && srv_print_verbose_log) {
- srv_shutdown_print_master_pending(
- last_print_time, n_tables_to_drop, n_bytes_merged);
- }
+ do {
+ ut_ad(!srv_read_only_mode);
+ ut_ad(srv_shutdown_state == SRV_SHUTDOWN_CLEANUP);
+ ++srv_main_shutdown_loops;
+
+ /* FIXME: Remove the background DROP TABLE queue; it is not
+ crash-safe and breaks ACID. */
+ srv_main_thread_op_info = "doing background drop tables";
+ n_tables_to_drop = row_drop_tables_for_mysql_in_background();
+
+ if (ibuf_merge) {
+ srv_main_thread_op_info = "checking free log space";
+ log_free_check();
+ srv_main_thread_op_info = "doing insert buffer merge";
+ n_bytes_merged = ibuf_merge_in_background(true);
+
+ /* Flush logs if needed */
+ srv_sync_log_buffer_in_background();
+ }
- return(n_bytes_merged || n_tables_to_drop);
+ /* Print progress message every 60 seconds during shutdown */
+ if (srv_print_verbose_log) {
+ srv_shutdown_print_master_pending(
+ &now, n_tables_to_drop, n_bytes_merged);
+ }
+ } while (n_bytes_merged || n_tables_to_drop);
}
/*********************************************************************//**
@@ -3163,11 +3122,12 @@ DECLARE_THREAD(srv_master_thread)(
/*!< in: a dummy parameter required by
os_thread_create */
{
+ my_thread_init();
+
srv_slot_t* slot;
ulint old_activity_count = srv_get_activity_count();
ulint old_ibuf_merge_activity_count
= srv_get_ibuf_merge_activity_count();
- ib_time_t last_print_time;
ut_ad(!srv_read_only_mode);
@@ -3188,9 +3148,8 @@ DECLARE_THREAD(srv_master_thread)(
srv_main_thread_id = os_thread_pf(os_thread_get_curr_id());
slot = srv_reserve_slot(SRV_MASTER);
- ut_a(slot == srv_sys->sys_threads);
+ ut_a(slot == srv_sys.sys_threads);
- last_print_time = ut_time();
loop:
if (srv_force_recovery >= SRV_FORCE_NO_BACKGROUND) {
goto suspend_thread;
@@ -3216,13 +3175,26 @@ loop:
}
}
- while (srv_master_do_shutdown_tasks(&last_print_time)) {
-
- /* Shouldn't loop here in case of very fast shutdown */
- ut_ad(srv_fast_shutdown < 2);
+suspend_thread:
+ switch (srv_shutdown_state) {
+ case SRV_SHUTDOWN_NONE:
+ break;
+ case SRV_SHUTDOWN_FLUSH_PHASE:
+ case SRV_SHUTDOWN_LAST_PHASE:
+ ut_ad(0);
+ /* fall through */
+ case SRV_SHUTDOWN_EXIT_THREADS:
+ /* srv_init_abort() must have been invoked */
+ case SRV_SHUTDOWN_CLEANUP:
+ if (srv_shutdown_state == SRV_SHUTDOWN_CLEANUP
+ && srv_fast_shutdown < 2) {
+ srv_shutdown(srv_fast_shutdown == 0);
+ }
+ srv_suspend_thread(slot);
+ my_thread_end();
+ os_thread_exit(NULL);
}
-suspend_thread:
srv_main_thread_op_info = "suspending";
srv_suspend_thread(slot);
@@ -3234,41 +3206,32 @@ suspend_thread:
srv_main_thread_op_info = "waiting for server activity";
srv_resume_thread(slot);
-
- if (srv_shutdown_state == SRV_SHUTDOWN_EXIT_THREADS) {
- os_thread_exit(NULL);
- }
-
goto loop;
-
- OS_THREAD_DUMMY_RETURN; /* Not reached, avoid compiler warning */
}
-/*********************************************************************//**
-Check if purge should stop.
-@return true if it should shutdown. */
+/** Check if purge should stop.
+@param[in] n_purged pages purged in the last batch
+@return whether purge should exit */
static
bool
-srv_purge_should_exit(
-/*==============*/
- ulint n_purged) /*!< in: pages purged in last batch */
+srv_purge_should_exit(ulint n_purged)
{
- switch (srv_shutdown_state) {
- case SRV_SHUTDOWN_NONE:
- /* Normal operation. */
- break;
+ ut_ad(srv_shutdown_state == SRV_SHUTDOWN_NONE
+ || srv_shutdown_state == SRV_SHUTDOWN_CLEANUP);
- case SRV_SHUTDOWN_CLEANUP:
- case SRV_SHUTDOWN_EXIT_THREADS:
- /* Exit unless slow shutdown requested or all done. */
- return(srv_fast_shutdown != 0 || n_purged == 0);
-
- case SRV_SHUTDOWN_LAST_PHASE:
- case SRV_SHUTDOWN_FLUSH_PHASE:
- ut_error;
+ if (srv_undo_sources) {
+ return(false);
}
-
- return(false);
+ if (srv_fast_shutdown) {
+ return(true);
+ }
+ /* Slow shutdown was requested. */
+ if (n_purged) {
+ /* The previous round still did some work. */
+ return(false);
+ }
+ /* Exit if there are no active transactions to roll back. */
+ return(trx_sys_any_active_transactions() == 0);
}
/*********************************************************************//**
@@ -3284,18 +3247,18 @@ srv_task_execute(void)
ut_ad(!srv_read_only_mode);
ut_a(srv_force_recovery < SRV_FORCE_NO_BACKGROUND);
- mutex_enter(&srv_sys->tasks_mutex);
+ mutex_enter(&srv_sys.tasks_mutex);
- if (UT_LIST_GET_LEN(srv_sys->tasks) > 0) {
+ if (UT_LIST_GET_LEN(srv_sys.tasks) > 0) {
- thr = UT_LIST_GET_FIRST(srv_sys->tasks);
+ thr = UT_LIST_GET_FIRST(srv_sys.tasks);
ut_a(que_node_get_type(thr->child) == QUE_NODE_PURGE);
- UT_LIST_REMOVE(queue, srv_sys->tasks, thr);
+ UT_LIST_REMOVE(queue, srv_sys.tasks, thr);
}
- mutex_exit(&srv_sys->tasks_mutex);
+ mutex_exit(&srv_sys.tasks_mutex);
if (thr != NULL) {
@@ -3322,6 +3285,8 @@ DECLARE_THREAD(srv_worker_thread)(
void* arg MY_ATTRIBUTE((unused))) /*!< in: a dummy parameter
required by os_thread_create */
{
+ my_thread_init();
+
srv_slot_t* slot;
ulint tid_i = os_atomic_increment_ulint(&purge_tid_i, 1);
@@ -3345,7 +3310,7 @@ DECLARE_THREAD(srv_worker_thread)(
srv_sys_mutex_enter();
- ut_a(srv_sys->n_threads_active[SRV_WORKER] < srv_n_purge_threads);
+ ut_a(srv_sys.n_threads_active[SRV_WORKER] < srv_n_purge_threads);
srv_sys_mutex_exit();
@@ -3387,6 +3352,7 @@ DECLARE_THREAD(srv_worker_thread)(
os_thread_pf(os_thread_get_curr_id()));
#endif /* UNIV_DEBUG_THREAD_CREATION */
+ my_thread_end();
/* We count the number of threads in os_thread_exit(). A created
thread should always use that to exit and not use return() to exit. */
os_thread_exit(NULL);
@@ -3540,7 +3506,7 @@ srv_purge_coordinator_suspend(
}
rw_lock_x_unlock(&purge_sys->latch);
- } while (stop);
+ } while (stop && srv_undo_sources);
srv_resume_thread(slot, 0, false);
}
@@ -3555,6 +3521,8 @@ DECLARE_THREAD(srv_purge_coordinator_thread)(
void* arg MY_ATTRIBUTE((unused))) /*!< in: a dummy parameter
required by os_thread_create */
{
+ my_thread_init();
+
srv_slot_t* slot;
ulint n_total_purged = ULINT_UNDEFINED;
@@ -3592,6 +3560,7 @@ DECLARE_THREAD(srv_purge_coordinator_thread)(
purge didn't purge any records then wait for activity. */
if (srv_shutdown_state == SRV_SHUTDOWN_NONE
+ && srv_undo_sources
&& (purge_sys->state == PURGE_STATE_STOP
|| n_total_purged == 0)) {
@@ -3612,36 +3581,8 @@ DECLARE_THREAD(srv_purge_coordinator_thread)(
srv_n_purge_threads, &n_total_purged);
srv_inc_activity_count();
-
} while (!srv_purge_should_exit(n_total_purged));
- /* Ensure that we don't jump out of the loop unless the
- exit condition is satisfied. */
-
- ut_a(srv_purge_should_exit(n_total_purged));
-
- ulint n_pages_purged = ULINT_MAX;
-
- /* Ensure that all records are purged if it is not a fast shutdown.
- This covers the case where a record can be added after we exit the
- loop above. */
- while (srv_fast_shutdown == 0 && n_pages_purged > 0) {
- n_pages_purged = trx_purge(1, srv_purge_batch_size, false);
- }
-
- /* This trx_purge is called to remove any undo records (added by
- background threads) after completion of the above loop. When
- srv_fast_shutdown != 0, a large batch size can cause significant
- delay in shutdown ,so reducing the batch size to magic number 20
- (which was default in 5.5), which we hope will be sufficient to
- remove all the undo records */
- const uint temp_batch_size = 20;
-
- n_pages_purged = trx_purge(1, srv_purge_batch_size <= temp_batch_size
- ? srv_purge_batch_size : temp_batch_size,
- true);
- ut_a(n_pages_purged == 0 || srv_fast_shutdown != 0);
-
/* The task queue should always be empty, independent of fast
shutdown state. */
ut_a(srv_get_task_queue_length() == 0);
@@ -3668,6 +3609,7 @@ DECLARE_THREAD(srv_purge_coordinator_thread)(
srv_release_threads(SRV_WORKER, srv_n_purge_threads - 1);
}
+ my_thread_end();
/* We count the number of threads in os_thread_exit(). A created
thread should always use that to exit and not use return() to exit. */
os_thread_exit(NULL);
@@ -3685,11 +3627,11 @@ srv_que_task_enqueue_low(
que_thr_t* thr) /*!< in: query thread */
{
ut_ad(!srv_read_only_mode);
- mutex_enter(&srv_sys->tasks_mutex);
+ mutex_enter(&srv_sys.tasks_mutex);
- UT_LIST_ADD_LAST(queue, srv_sys->tasks, thr);
+ UT_LIST_ADD_LAST(queue, srv_sys.tasks, thr);
- mutex_exit(&srv_sys->tasks_mutex);
+ mutex_exit(&srv_sys.tasks_mutex);
srv_release_threads(SRV_WORKER, 1);
}
@@ -3706,11 +3648,11 @@ srv_get_task_queue_length(void)
ut_ad(!srv_read_only_mode);
- mutex_enter(&srv_sys->tasks_mutex);
+ mutex_enter(&srv_sys.tasks_mutex);
- n_tasks = UT_LIST_GET_LEN(srv_sys->tasks);
+ n_tasks = UT_LIST_GET_LEN(srv_sys.tasks);
- mutex_exit(&srv_sys->tasks_mutex);
+ mutex_exit(&srv_sys.tasks_mutex);
return(n_tasks);
}
@@ -3733,3 +3675,19 @@ srv_purge_wakeup()
}
}
}
+
+/** Check whether given space id is undo tablespace id
+@param[in] space_id space id to check
+@return true if it is an undo tablespace, else false. */
+bool
+srv_is_undo_tablespace(
+ ulint space_id)
+{
+ if (srv_undo_space_id_start == 0) {
+ return (false);
+ }
+
+ return(space_id >= srv_undo_space_id_start
+ && space_id < (srv_undo_space_id_start
+ + srv_undo_tablespaces_open));
+}
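
srv_is_undo_tablespace(), added above, classifies a space id by testing it against the contiguous range [srv_undo_space_id_start, srv_undo_space_id_start + srv_undo_tablespaces_open), with srv_undo_space_id_start == 0 meaning that no undo tablespaces exist. A hedged usage sketch: the caller is_user_tablespace is hypothetical, and ulint is InnoDB's unsigned integer typedef; only srv_is_undo_tablespace itself comes from the hunk above.

    /* Hypothetical call site: exclude system and undo tablespaces. */
    static bool is_user_tablespace(ulint space_id)
    {
        if (space_id == 0) {
            return(false);              /* system tablespace */
        }
        return(!srv_is_undo_tablespace(space_id));
    }
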
diff --git a/storage/xtradb/srv/srv0start.cc b/storage/xtradb/srv/srv0start.cc
index aa51012816d..fd129c3e55f 100644
--- a/storage/xtradb/srv/srv0start.cc
+++ b/storage/xtradb/srv/srv0start.cc
@@ -1,6 +1,6 @@
/*****************************************************************************
-Copyright (c) 1996, 2016, Oracle and/or its affiliates. All rights reserved.
+Copyright (c) 1996, 2017, Oracle and/or its affiliates. All rights reserved.
Copyright (c) 2008, Google Inc.
Copyright (c) 2009, Percona Inc.
Copyright (c) 2013, 2017, MariaDB Corporation
@@ -121,6 +121,9 @@ UNIV_INTERN ibool srv_have_fullfsync = FALSE;
/** TRUE if a raw partition is in use */
UNIV_INTERN ibool srv_start_raw_disk_in_use = FALSE;
+/** Space id of the first undo tablespace (0 if there are none). */
+ulint srv_undo_space_id_start;
+
/** TRUE if the server is being started, before rolling back any
incomplete transactions */
UNIV_INTERN ibool srv_startup_is_before_trx_rollback_phase = FALSE;
@@ -129,7 +132,11 @@ UNIV_INTERN ibool srv_is_being_started = FALSE;
/** TRUE if the server was successfully started */
UNIV_INTERN ibool srv_was_started = FALSE;
/** TRUE if innobase_start_or_create_for_mysql() has been called */
-static ibool srv_start_has_been_called = FALSE;
+static ibool srv_start_has_been_called;
+
+/** Whether any undo log records can be generated */
+UNIV_INTERN bool srv_undo_sources;
+
#ifdef UNIV_DEBUG
/** InnoDB system tablespace to set during recovery */
UNIV_INTERN uint srv_sys_space_size_debug;
@@ -139,8 +146,8 @@ UNIV_INTERN uint srv_sys_space_size_debug;
SRV_SHUTDOWN_CLEANUP and then to SRV_SHUTDOWN_LAST_PHASE, and so on */
UNIV_INTERN enum srv_shutdown_state srv_shutdown_state = SRV_SHUTDOWN_NONE;
-/** Files comprising the system tablespace */
-os_file_t files[1000];
+/** Files comprising the system tablespace. Also used by Mariabackup. */
+UNIV_INTERN pfs_os_file_t files[1000];
/** io_handler_thread parameters for thread identification */
static ulint n[SRV_MAX_N_IO_THREADS];
@@ -203,6 +210,39 @@ UNIV_INTERN mysql_pfs_key_t srv_purge_thread_key;
UNIV_INTERN mysql_pfs_key_t srv_log_tracking_thread_key;
#endif /* UNIV_PFS_THREAD */
+/** Innobase start-up aborted. Perform cleanup actions.
+@param[in] create_new_db TRUE if new db is being created
+@param[in] file File name
+@param[in] line Line number
+@param[in] err Reason for aborting InnoDB startup
+@return DB_SUCCESS or error code. */
+static
+dberr_t
+srv_init_abort(
+ bool create_new_db,
+ const char* file,
+ ulint line,
+ dberr_t err)
+{
+ if (create_new_db) {
+ ib_logf(IB_LOG_LEVEL_ERROR,
+ "Database creation was aborted"
+ " at %s [" ULINTPF "]"
+ " with error %s. You may need"
+ " to delete the ibdata1 file before trying to start"
+ " up again.",
+ file, line, ut_strerr(err));
+ } else {
+ ib_logf(IB_LOG_LEVEL_ERROR,
+ "Plugin initialization aborted"
+ " at %s [" ULINTPF "]"
+ " with error %s.",
+ file, line, ut_strerr(err));
+ }
+
+ return(err);
+}
+
/*********************************************************************//**
Convert a numeric string that optionally ends in G or M or K, to a number
containing megabytes.
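
srv_init_abort(), added above, centralizes the start-up failure report: a caller returns srv_init_abort(create_new_db, __FILE__, __LINE__, err) so the log records exactly where initialization was abandoned and, for a freshly created database, reminds the user to remove ibdata1 before retrying; the function only logs and passes the error code through. The call-site shape looks roughly as follows; some_subsystem_init is a hypothetical stand-in for the real start-up steps that use this pattern later in the patch:

        dberr_t err = some_subsystem_init();    /* hypothetical start-up step */

        if (err != DB_SUCCESS) {
            /* Log the failing file/line once, then propagate the error code. */
            return(srv_init_abort(create_new_db, __FILE__, __LINE__, err));
        }
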
@@ -584,7 +624,7 @@ static MY_ATTRIBUTE((nonnull, warn_unused_result))
dberr_t
create_log_file(
/*============*/
- os_file_t* file, /*!< out: file handle */
+ pfs_os_file_t* file, /*!< out: file handle */
const char* name) /*!< in: log file name */
{
ibool ret;
@@ -802,7 +842,7 @@ static MY_ATTRIBUTE((nonnull, warn_unused_result))
dberr_t
open_log_file(
/*==========*/
- os_file_t* file, /*!< out: file handle */
+ pfs_os_file_t* file, /*!< out: file handle */
const char* name, /*!< in: log file name */
os_offset_t* size) /*!< out: file size */
{
@@ -823,32 +863,32 @@ open_log_file(
return(DB_SUCCESS);
}
-/*********************************************************************//**
-Creates or opens database data files and closes them.
+
+/** Creates or opens database data files and closes them.
+@param[out] create_new_db true = create new database
+@param[out] min_arch_log_no min of archived log numbers in
+ data files
+@param[out] max_arch_log_no max of archived log numbers in
+ data files
+@param[out]	flushed_lsn	flushed lsn in the first datafile
+@param[out] sum_of_new_sizes sum of sizes of the new files
+ added
@return DB_SUCCESS or error code */
MY_ATTRIBUTE((nonnull, warn_unused_result))
dberr_t
open_or_create_data_files(
-/*======================*/
- ibool* create_new_db, /*!< out: TRUE if new database should be
- created */
+ bool* create_new_db,
#ifdef UNIV_LOG_ARCHIVE
- lsn_t* min_arch_log_no,/*!< out: min of archived log
- numbers in data files */
- lsn_t* max_arch_log_no,/*!< out: max of archived log
- numbers in data files */
+ lsn_t* min_arch_log_no,
+ lsn_t* max_arch_log_no,
#endif /* UNIV_LOG_ARCHIVE */
- lsn_t* min_flushed_lsn,/*!< out: min of flushed lsn
- values in data files */
- lsn_t* max_flushed_lsn,/*!< out: max of flushed lsn
- values in data files */
- ulint* sum_of_new_sizes)/*!< out: sum of sizes of the
- new files added */
+ lsn_t* flushed_lsn,
+ ulint* sum_of_new_sizes)
{
ibool ret;
ulint i;
- ibool one_opened = FALSE;
- ibool one_created = FALSE;
+ bool one_opened = false;
+ bool one_created = false;
os_offset_t size;
ulint flags;
ulint space;
@@ -867,7 +907,7 @@ open_or_create_data_files(
*sum_of_new_sizes = 0;
- *create_new_db = FALSE;
+ *create_new_db = false;
srv_normalize_path_for_win(srv_data_home);
@@ -919,7 +959,7 @@ open_or_create_data_files(
&& os_file_get_last_error(false)
!= OS_FILE_ALREADY_EXISTS
#ifdef UNIV_AIX
- /* AIX 5.1 after security patch ML7 may have
+ /* AIX 5.1 after security patch ML7 may have
errno set to 0 here, which causes our
function to return 100; work around that
AIX problem */
@@ -955,9 +995,10 @@ open_or_create_data_files(
}
const char* check_msg;
+
check_msg = fil_read_first_page(
files[i], FALSE, &flags, &space,
- min_flushed_lsn, max_flushed_lsn, NULL);
+ flushed_lsn, NULL);
/* If first page is valid, don't overwrite DB.
It prevents overwriting DB when mysql_install_db
@@ -988,6 +1029,7 @@ open_or_create_data_files(
name);
return(DB_ERROR);
}
+
if (srv_data_file_is_raw_partition[i] == SRV_OLD_RAW) {
ut_a(!srv_read_only_mode);
files[i] = os_file_create(
@@ -1007,7 +1049,6 @@ open_or_create_data_files(
}
if (!ret) {
-
os_file_get_last_error(true);
ib_logf(IB_LOG_LEVEL_ERROR,
@@ -1017,7 +1058,6 @@ open_or_create_data_files(
}
if (srv_data_file_is_raw_partition[i] == SRV_OLD_RAW) {
-
goto skip_size_check;
}
@@ -1044,16 +1084,15 @@ size_check:
"auto-extending "
"data file %s is "
"of a different size "
- "%lu pages (rounded "
+ ULINTPF " pages (rounded "
"down to MB) than specified "
"in the .cnf file: "
- "initial %lu pages, "
- "max %lu (relevant if "
+ "initial " ULINTPF " pages, "
+ "max " ULINTPF " (relevant if "
"non-zero) pages!",
name,
- (ulong) rounded_size_pages,
- (ulong) srv_data_file_sizes[i],
- (ulong)
+ rounded_size_pages,
+ srv_data_file_sizes[i],
srv_last_file_size_max);
return(DB_ERROR);
@@ -1066,12 +1105,12 @@ size_check:
ib_logf(IB_LOG_LEVEL_ERROR,
"Data file %s is of a different "
- "size %lu pages (rounded down to MB) "
+ "size " ULINTPF " pages (rounded down to MB) "
"than specified in the .cnf file "
- "%lu pages!",
+ ULINTPF " pages!",
name,
- (ulong) rounded_size_pages,
- (ulong) srv_data_file_sizes[i]);
+ rounded_size_pages,
+ srv_data_file_sizes[i]);
return(DB_ERROR);
}
@@ -1090,7 +1129,7 @@ skip_size_check:
check_first_page:
check_msg = fil_read_first_page(
files[i], one_opened, &flags, &space,
- min_flushed_lsn, max_flushed_lsn, &crypt_data);
+ flushed_lsn, &crypt_data);
if (check_msg) {
@@ -1127,9 +1166,9 @@ check_first_page:
!= fsp_flags_get_page_size(flags)) {
ib_logf(IB_LOG_LEVEL_ERROR,
- "Data file \"%s\" uses page size %lu,"
+ "Data file \"%s\" uses page size " ULINTPF " ,"
"but the start-up parameter "
- "is --innodb-page-size=%lu",
+ "is --innodb-page-size=" ULINTPF " .",
name,
fsp_flags_get_page_size(flags),
UNIV_PAGE_SIZE);
@@ -1160,9 +1199,9 @@ check_first_page:
}
ib_logf(IB_LOG_LEVEL_INFO,
- "Setting file %s size to %lu MB",
+ "Setting file %s size to " ULINTPF " MB",
name,
- (ulong) (srv_data_file_sizes[i]
+ (srv_data_file_sizes[i]
>> (20 - UNIV_PAGE_SIZE_SHIFT)));
ret = os_file_set_size(
@@ -1221,7 +1260,7 @@ srv_undo_tablespace_create(
const char* name, /*!< in: tablespace name */
ulint size) /*!< in: tablespace size in pages */
{
- os_file_t fh;
+ pfs_os_file_t fh;
ibool ret;
dberr_t err = DB_SUCCESS;
@@ -1299,7 +1338,7 @@ srv_undo_tablespace_open(
const char* name, /*!< in: tablespace name */
ulint space) /*!< in: tablespace id */
{
- os_file_t fh;
+ pfs_os_file_t fh;
dberr_t err = DB_ERROR;
ibool ret;
ulint flags;
@@ -1404,13 +1443,23 @@ srv_undo_tablespaces_init(
for (i = 0; create_new_db && i < n_conf_tablespaces; ++i) {
char name[OS_FILE_MAX_PATH];
+ ulint space_id = i + 1;
+
+ DBUG_EXECUTE_IF("innodb_undo_upgrade",
+ space_id = i + 3;);
ut_snprintf(
name, sizeof(name),
"%s%cundo%03lu",
- srv_undo_dir, SRV_PATH_SEPARATOR, i + 1);
+ srv_undo_dir, SRV_PATH_SEPARATOR, space_id);
+
+ if (i == 0) {
+ srv_undo_space_id_start = space_id;
+ prev_space_id = srv_undo_space_id_start - 1;
+ }
+
+ undo_tablespace_ids[i] = space_id;
- /* Undo space ids start from 1. */
err = srv_undo_tablespace_create(
name, SRV_UNDO_TABLESPACE_SIZE_IN_PAGES);
@@ -1432,14 +1481,16 @@ srv_undo_tablespaces_init(
if (!create_new_db && !backup_mode) {
n_undo_tablespaces = trx_rseg_get_n_undo_tablespaces(
undo_tablespace_ids);
- } else {
- n_undo_tablespaces = n_conf_tablespaces;
- for (i = 1; i <= n_undo_tablespaces; ++i) {
- undo_tablespace_ids[i - 1] = i;
+ if (n_undo_tablespaces != 0) {
+ srv_undo_space_id_start = undo_tablespace_ids[0];
+ prev_space_id = srv_undo_space_id_start - 1;
}
- undo_tablespace_ids[i] = ULINT_UNDEFINED;
+ } else {
+ n_undo_tablespaces = n_conf_tablespaces;
+
+ undo_tablespace_ids[n_conf_tablespaces] = ULINT_UNDEFINED;
}
/* Open all the undo tablespaces that are currently in use. If we
@@ -1463,8 +1514,6 @@ srv_undo_tablespaces_init(
ut_a(undo_tablespace_ids[i] != 0);
ut_a(undo_tablespace_ids[i] != ULINT_UNDEFINED);
- /* Undo space ids start from 1. */
-
err = srv_undo_tablespace_open(name, undo_tablespace_ids[i]);
if (err != DB_SUCCESS) {
@@ -1499,11 +1548,23 @@ srv_undo_tablespaces_init(
break;
}
+	/** Note the first undo tablespace id in case there is
+	no active undo tablespace. */
+ if (n_undo_tablespaces == 0) {
+ srv_undo_space_id_start = i;
+ }
+
++n_undo_tablespaces;
++*n_opened;
}
+	/** Explicitly specify the srv_undo_space_id_start
+ as zero when there are no undo tablespaces. */
+ if (n_undo_tablespaces == 0) {
+ srv_undo_space_id_start = 0;
+ }
+
/* If the user says that there are fewer than what we find we
tolerate that discrepancy but not the inverse. Because there could
be unused undo tablespaces for future use. */
@@ -1548,10 +1609,11 @@ srv_undo_tablespaces_init(
mtr_start(&mtr);
/* The undo log tablespace */
- for (i = 1; i <= n_undo_tablespaces; ++i) {
+ for (i = 0; i < n_undo_tablespaces; ++i) {
fsp_header_init(
- i, SRV_UNDO_TABLESPACE_SIZE_IN_PAGES, &mtr);
+ undo_tablespace_ids[i],
+ SRV_UNDO_TABLESPACE_SIZE_IN_PAGES, &mtr);
}
mtr_commit(&mtr);
@@ -1626,12 +1688,10 @@ are not found and the user wants.
@return DB_SUCCESS or error code */
UNIV_INTERN
dberr_t
-innobase_start_or_create_for_mysql(void)
-/*====================================*/
+innobase_start_or_create_for_mysql()
{
- ibool create_new_db;
- lsn_t min_flushed_lsn;
- lsn_t max_flushed_lsn;
+ bool create_new_db;
+ lsn_t flushed_lsn;
#ifdef UNIV_LOG_ARCHIVE
lsn_t min_arch_log_no = LSN_MAX;
lsn_t max_arch_log_no = LSN_MAX;
@@ -1665,6 +1725,10 @@ innobase_start_or_create_for_mysql(void)
/* This should be initialized early */
ut_init_timer();
+ if (srv_force_recovery == SRV_FORCE_NO_LOG_REDO) {
+ srv_read_only_mode = 1;
+ }
+
high_level_read_only = srv_read_only_mode
|| srv_force_recovery > SRV_FORCE_NO_TRX_UNDO;
@@ -2176,7 +2240,7 @@ innobase_start_or_create_for_mysql(void)
#ifdef UNIV_LOG_ARCHIVE
&min_arch_log_no, &max_arch_log_no,
#endif /* UNIV_LOG_ARCHIVE */
- &min_flushed_lsn, &max_flushed_lsn,
+ &flushed_lsn,
&sum_of_new_sizes);
if (err == DB_FAIL) {
@@ -2220,12 +2284,12 @@ innobase_start_or_create_for_mysql(void)
bool success = buf_flush_list(ULINT_MAX, LSN_MAX, NULL);
ut_a(success);
- min_flushed_lsn = max_flushed_lsn = log_get_lsn();
+ flushed_lsn = log_get_lsn();
buf_flush_wait_batch_end(NULL, BUF_FLUSH_LIST);
err = create_log_files(create_new_db, logfilename, dirnamelen,
- max_flushed_lsn, logfile0);
+ flushed_lsn, logfile0);
if (err != DB_SUCCESS) {
return(err);
@@ -2245,19 +2309,8 @@ innobase_start_or_create_for_mysql(void)
if (err == DB_NOT_FOUND) {
if (i == 0) {
- if (max_flushed_lsn
- != min_flushed_lsn) {
- ib_logf(IB_LOG_LEVEL_ERROR,
- "Cannot create"
- " log files because"
- " data files are"
- " corrupt or"
- " not in sync"
- " with each other");
- return(DB_ERROR);
- }
- if (max_flushed_lsn < (lsn_t) 1000) {
+ if (flushed_lsn < (lsn_t) 1000) {
ib_logf(IB_LOG_LEVEL_ERROR,
"Cannot create"
" log files because"
@@ -2272,14 +2325,14 @@ innobase_start_or_create_for_mysql(void)
err = create_log_files(
create_new_db, logfilename,
- dirnamelen, max_flushed_lsn,
+ dirnamelen, flushed_lsn,
logfile0);
if (err == DB_SUCCESS) {
err = create_log_files_rename(
logfilename,
dirnamelen,
- max_flushed_lsn,
+ flushed_lsn,
logfile0);
}
@@ -2289,8 +2342,7 @@ innobase_start_or_create_for_mysql(void)
/* Suppress the message about
crash recovery. */
- max_flushed_lsn = min_flushed_lsn
- = log_get_lsn();
+ flushed_lsn = log_get_lsn();
goto files_checked;
} else if (i < 2 && !IS_XTRABACKUP()) {
/* must have at least 2 log files */
@@ -2420,9 +2472,23 @@ files_checked:
mtr_start(&mtr);
fsp_header_init(0, sum_of_new_sizes, &mtr);
+ compile_time_assert(TRX_SYS_SPACE == 0);
+ compile_time_assert(IBUF_SPACE_ID == 0);
+
+ ulint ibuf_root = btr_create(
+ DICT_CLUSTERED | DICT_UNIVERSAL | DICT_IBUF,
+ 0, 0, DICT_IBUF_ID_MIN,
+ dict_ind_redundant, &mtr);
mtr_commit(&mtr);
+ if (ibuf_root == FIL_NULL) {
+ return(srv_init_abort(true, __FILE__, __LINE__,
+ DB_ERROR));
+ }
+
+ ut_ad(ibuf_root == IBUF_TREE_ROOT_PAGE_NO);
+
/* To maintain backward compatibility we create only
the first rollback segment before the double write buffer.
All the remaining rollback segments will be created later,
@@ -2448,17 +2514,19 @@ files_checked:
bool success = buf_flush_list(ULINT_MAX, LSN_MAX, NULL);
ut_a(success);
- min_flushed_lsn = max_flushed_lsn = log_get_lsn();
+ flushed_lsn = log_get_lsn();
buf_flush_wait_batch_end(NULL, BUF_FLUSH_LIST);
/* Stamp the LSN to the data files. */
- fil_write_flushed_lsn_to_data_files(max_flushed_lsn, 0);
+ err = fil_write_flushed_lsn(flushed_lsn);
- fil_flush_file_spaces(FIL_TABLESPACE);
+ if (err != DB_SUCCESS) {
+ return(err);
+ }
err = create_log_files_rename(logfilename, dirnamelen,
- max_flushed_lsn, logfile0);
+ flushed_lsn, logfile0);
if (err != DB_SUCCESS) {
return(err);
@@ -2513,7 +2581,7 @@ files_checked:
err = recv_recovery_from_checkpoint_start(
LOG_CHECKPOINT, LSN_MAX,
- min_flushed_lsn, max_flushed_lsn);
+ flushed_lsn);
if (err != DB_SUCCESS) {
return(err);
@@ -2696,7 +2764,7 @@ files_checked:
DBUG_EXECUTE_IF("innodb_log_abort_1",
return(DB_ERROR););
- min_flushed_lsn = max_flushed_lsn = log_get_lsn();
+ flushed_lsn = log_get_lsn();
ib_logf(IB_LOG_LEVEL_WARN,
"Resizing redo log from %u*%u to %u*%u pages"
@@ -2705,7 +2773,7 @@ files_checked:
(unsigned) srv_log_file_size,
(unsigned) srv_n_log_files,
(unsigned) srv_log_file_size_requested,
- max_flushed_lsn);
+ flushed_lsn);
buf_flush_wait_batch_end(NULL, BUF_FLUSH_LIST);
@@ -2715,7 +2783,7 @@ files_checked:
we need to explicitly flush the log buffers. */
fil_flush(SRV_LOG_SPACE_FIRST_ID);
- ut_ad(max_flushed_lsn == log_get_lsn());
+ ut_ad(flushed_lsn == log_get_lsn());
/* Prohibit redo log writes from any other
threads until creating a log checkpoint at the
@@ -2727,8 +2795,7 @@ files_checked:
return(DB_ERROR););
/* Stamp the LSN to the data files. */
- fil_write_flushed_lsn_to_data_files(
- max_flushed_lsn, 0);
+ err = fil_write_flushed_lsn(flushed_lsn);
DBUG_EXECUTE_IF("innodb_log_abort_4", err = DB_ERROR;);
@@ -2736,8 +2803,6 @@ files_checked:
return(err);
}
- fil_flush_file_spaces(FIL_TABLESPACE);
-
/* Close and free the redo log files, so that
we can replace them. */
fil_close_log_files(true);
@@ -2754,28 +2819,23 @@ files_checked:
srv_log_file_size = srv_log_file_size_requested;
err = create_log_files(create_new_db, logfilename,
- dirnamelen, max_flushed_lsn,
+ dirnamelen, flushed_lsn,
logfile0);
if (err != DB_SUCCESS) {
return(err);
}
- /* create_log_files() can increase system lsn that is
- why FIL_PAGE_FILE_FLUSH_LSN have to be updated */
- min_flushed_lsn = max_flushed_lsn = log_get_lsn();
- fil_write_flushed_lsn_to_data_files(min_flushed_lsn, 0);
- fil_flush_file_spaces(FIL_TABLESPACE);
-
err = create_log_files_rename(logfilename, dirnamelen,
log_get_lsn(), logfile0);
+
if (err != DB_SUCCESS) {
return(err);
}
}
- srv_startup_is_before_trx_rollback_phase = FALSE;
recv_recovery_rollback_active();
+ srv_startup_is_before_trx_rollback_phase = FALSE;
/* It is possible that file_format tag has never
been set. In this case we initialize it to minimum
@@ -2815,10 +2875,9 @@ files_checked:
/* fprintf(stderr, "Max allowed record size %lu\n",
page_get_free_space_of_empty() / 2); */
- if (buf_dblwr == NULL) {
- /* Create the doublewrite buffer to a new tablespace */
-
- buf_dblwr_create();
+ if (!buf_dblwr_create()) {
+ return(srv_init_abort(create_new_db, __FILE__, __LINE__,
+ DB_ERROR));
}
/* Here the double write buffer has already been created and so
@@ -2848,6 +2907,9 @@ files_checked:
/* Can only happen if server is read only. */
ut_a(srv_read_only_mode);
srv_undo_logs = ULONG_UNDEFINED;
+ } else if (srv_available_undo_logs < srv_undo_logs) {
+		/* This should only happen due to running out of file space. */
+ return (srv_init_abort(create_new_db, __FILE__, __LINE__, DB_ERROR));
}
if (!srv_read_only_mode) {
@@ -2905,6 +2967,16 @@ files_checked:
srv_master_thread,
NULL, thread_ids + (1 + SRV_MAX_N_IO_THREADS));
thread_started[1 + SRV_MAX_N_IO_THREADS] = true;
+
+ srv_undo_sources = true;
+ /* Create the dict stats gathering thread */
+ srv_dict_stats_thread_active = true;
+ dict_stats_thread_handle = os_thread_create(
+ dict_stats_thread, NULL, NULL);
+ dict_stats_thread_started = true;
+
+ /* Create the thread that will optimize the FTS sub-system. */
+ fts_optimize_init();
}
if (!srv_read_only_mode
@@ -2949,12 +3021,16 @@ files_checked:
}
- buf_flush_page_cleaner_thread_handle = os_thread_create(buf_flush_page_cleaner_thread, NULL, NULL);
+ buf_page_cleaner_is_active = true;
+ buf_flush_page_cleaner_thread_handle = os_thread_create(
+ buf_flush_page_cleaner_thread, NULL, NULL);
buf_flush_page_cleaner_thread_started = true;
- }
- buf_flush_lru_manager_thread_handle = os_thread_create(buf_flush_lru_manager_thread, NULL, NULL);
- buf_flush_lru_manager_thread_started = true;
+ buf_lru_manager_is_active = true;
+ buf_flush_lru_manager_thread_handle = os_thread_create(
+ buf_flush_lru_manager_thread, NULL, NULL);
+ buf_flush_lru_manager_thread_started = true;
+ }
if (!srv_file_per_table && srv_pass_corrupt_table) {
fprintf(stderr, "InnoDB: Warning:"
@@ -3002,10 +3078,10 @@ files_checked:
if (!wsrep_recovery) {
#endif /* WITH_WSREP */
/* Create the buffer pool dump/load thread */
+ srv_buf_dump_thread_active = true;
buf_dump_thread_handle=
os_thread_create(buf_dump_thread, NULL, NULL);
- srv_buf_dump_thread_active = true;
buf_dump_thread_started = true;
#ifdef WITH_WSREP
} else {
@@ -3015,26 +3091,19 @@ files_checked:
}
#endif /* WITH_WSREP */
- /* Create the dict stats gathering thread */
- dict_stats_thread_handle = os_thread_create(
- dict_stats_thread, NULL, NULL);
- srv_dict_stats_thread_active = true;
- dict_stats_thread_started = true;
-
- /* Create the thread that will optimize the FTS sub-system. */
- fts_optimize_init();
-
/* Create thread(s) that handles key rotation */
fil_system_enter();
fil_crypt_threads_init();
fil_system_exit();
- }
- /* Init data for datafile scrub threads */
- btr_scrub_init();
+ /* Init data for datafile scrub threads */
+ btr_scrub_init();
- /* Initialize online defragmentation. */
- btr_defragment_init();
+ /* Initialize online defragmentation. */
+ btr_defragment_init();
+ btr_defragment_thread_active = true;
+ os_thread_create(btr_defragment_thread, NULL, NULL);
+ }
srv_was_started = TRUE;
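Note on the hunks above: the `*_is_active` assignments (buf_page_cleaner_is_active, buf_lru_manager_is_active, srv_buf_dump_thread_active, srv_dict_stats_thread_active, btr_defragment_thread_active) are now set before the corresponding os_thread_create() call rather than after it. A minimal standalone sketch of why that ordering matters, using hypothetical names instead of the InnoDB primitives:

#include <atomic>
#include <thread>

// Hypothetical stand-ins for the InnoDB flag and thread body.
std::atomic<bool> cleaner_is_active{false};

static void cleaner_thread()
{
    /* ... flushing work would happen here ... */
    cleaner_is_active = false;          // the thread clears the flag on exit
}

int main()
{
    // Publish the flag BEFORE the thread exists; a concurrent shutdown
    // that polls the flag can then never miss a thread that has been
    // created but not yet scheduled.
    cleaner_is_active = true;
    std::thread cleaner(cleaner_thread);

    while (cleaner_is_active)           // shutdown-style wait on the flag
        std::this_thread::yield();
    cleaner.join();
    return 0;
}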
@@ -3071,13 +3140,10 @@ srv_fts_close(void)
}
#endif
-/****************************************************************//**
-Shuts down the InnoDB database.
-@return DB_SUCCESS or error code */
+/** Shut down InnoDB. */
UNIV_INTERN
-dberr_t
-innobase_shutdown_for_mysql(void)
-/*=============================*/
+void
+innodb_shutdown()
{
ulint i;
@@ -3087,15 +3153,20 @@ innobase_shutdown_for_mysql(void)
"Shutting down an improperly started, "
"or created database!");
}
-
- return(DB_SUCCESS);
}
- if (!srv_read_only_mode) {
+ if (srv_undo_sources) {
+ ut_ad(!srv_read_only_mode);
/* Shutdown the FTS optimize sub system. */
fts_optimize_start_shutdown();
fts_optimize_end();
+ dict_stats_shutdown();
+ while (row_get_background_drop_list_len_low()) {
+ srv_wake_master_thread();
+ os_thread_yield();
+ }
+ srv_undo_sources = false;
}
/* 1. Flush the buffer pool to disk, write the current lsn to
@@ -3199,11 +3270,10 @@ innobase_shutdown_for_mysql(void)
if (!srv_read_only_mode) {
dict_stats_thread_deinit();
fil_crypt_threads_cleanup();
+ btr_scrub_cleanup();
+ btr_defragment_shutdown();
}
- /* Cleanup data for datafile scrubbing */
- btr_scrub_cleanup();
-
#ifdef __WIN__
/* MDEV-361: ha_innodb.dll leaks handles on Windows
MDEV-7403: should not pass recv_writer_thread_handle to
@@ -3311,88 +3381,9 @@ innobase_shutdown_for_mysql(void)
srv_start_has_been_called = FALSE;
/* Reset io_tid_i, in case the current process starts InnoDB a second time (xtrabackup might do that). */
io_tid_i = 0;
- return(DB_SUCCESS);
}
#endif /* !UNIV_HOTBACKUP */
-
-/********************************************************************
-Signal all per-table background threads to shutdown, and wait for them to do
-so. */
-UNIV_INTERN
-void
-srv_shutdown_table_bg_threads(void)
-/*===============================*/
-{
- dict_table_t* table;
- dict_table_t* first;
- dict_table_t* last = NULL;
-
- mutex_enter(&dict_sys->mutex);
-
- /* Signal all threads that they should stop. */
- table = UT_LIST_GET_FIRST(dict_sys->table_LRU);
- first = table;
- while (table) {
- dict_table_t* next;
- fts_t* fts = table->fts;
-
- if (fts != NULL) {
- fts_start_shutdown(table, fts);
- }
-
- next = UT_LIST_GET_NEXT(table_LRU, table);
-
- if (!next) {
- last = table;
- }
-
- table = next;
- }
-
- /* We must release dict_sys->mutex here; if we hold on to it in the
- loop below, we will deadlock if any of the background threads try to
- acquire it (for example, the FTS thread by calling que_eval_sql).
-
- Releasing it here and going through dict_sys->table_LRU without
- holding it is safe because:
-
- a) MySQL only starts the shutdown procedure after all client
- threads have been disconnected and no new ones are accepted, so no
- new tables are added or old ones dropped.
-
- b) Despite its name, the list is not LRU, and the order stays
- fixed.
-
- To safeguard against the above assumptions ever changing, we store
- the first and last items in the list above, and then check that
- they've stayed the same below. */
-
- mutex_exit(&dict_sys->mutex);
-
- /* Wait for the threads of each table to stop. This is not inside
- the above loop, because by signaling all the threads first we can
- overlap their shutting down delays. */
- table = UT_LIST_GET_FIRST(dict_sys->table_LRU);
- ut_a(first == table);
- while (table) {
- dict_table_t* next;
- fts_t* fts = table->fts;
-
- if (fts != NULL) {
- fts_shutdown(table, fts);
- }
-
- next = UT_LIST_GET_NEXT(table_LRU, table);
-
- if (table == last) {
- ut_a(!next);
- }
-
- table = next;
- }
-}
-
/*****************************************************************//**
Get the meta-data filename from the table name. */
UNIV_INTERN
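The innodb_shutdown() hunk above drains the background DROP TABLE queue before clearing srv_undo_sources, by repeatedly waking the master thread and yielding. A minimal sketch of that wake-and-yield drain pattern, with illustrative names standing in for srv_wake_master_thread() and row_get_background_drop_list_len_low() (in the sketch the "worker" runs inline rather than in its own thread):

#include <atomic>
#include <thread>

// Illustrative stand-ins, not the InnoDB symbols.
std::atomic<int> background_drop_list_len{3};

static void wake_master_thread()
{
    // Sketch only: process one queued entry directly instead of signalling
    // a separate master thread.
    if (background_drop_list_len.load() > 0)
        background_drop_list_len.fetch_sub(1);
}

static void drain_background_drop_list()
{
    while (background_drop_list_len.load() > 0) {
        wake_master_thread();           // nudge the worker
        std::this_thread::yield();      // let it run before re-checking
    }
}

int main()
{
    drain_background_drop_list();
    return background_drop_list_len.load();   // 0 once the queue is drained
}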
diff --git a/storage/xtradb/sync/sync0sync.cc b/storage/xtradb/sync/sync0sync.cc
index 6692eef9fb0..37ac3c56fff 100644
--- a/storage/xtradb/sync/sync0sync.cc
+++ b/storage/xtradb/sync/sync0sync.cc
@@ -1236,6 +1236,7 @@ sync_thread_add_level(
upgrading in innobase_start_or_create_for_mysql(). */
break;
}
+ /* fall through */
case SYNC_MEM_POOL:
case SYNC_MEM_HASH:
case SYNC_RECV:
@@ -1299,9 +1300,9 @@ sync_thread_add_level(
}
}
ut_ad(found_current);
-
- /* fallthrough */
}
+
+ /* fall through */
case SYNC_BUF_FLUSH_LIST:
case SYNC_BUF_LRU_LIST:
case SYNC_BUF_FREE_LIST:
diff --git a/storage/xtradb/trx/trx0i_s.cc b/storage/xtradb/trx/trx0i_s.cc
index eacd9212d2f..0c9618d98eb 100644
--- a/storage/xtradb/trx/trx0i_s.cc
+++ b/storage/xtradb/trx/trx0i_s.cc
@@ -507,7 +507,9 @@ fill_trx_row(
row->trx_mysql_thread_id = thd_get_thread_id(trx->mysql_thd);
- stmt = innobase_get_stmt(trx->mysql_thd, &stmt_len);
+ stmt = trx->mysql_thd
+ ? innobase_get_stmt(trx->mysql_thd, &stmt_len)
+ : NULL;
if (stmt != NULL) {
char query[TRX_I_S_TRX_QUERY_MAX_LEN + 1];
diff --git a/storage/xtradb/trx/trx0purge.cc b/storage/xtradb/trx/trx0purge.cc
index 7d35bb12093..df4a3217820 100644
--- a/storage/xtradb/trx/trx0purge.cc
+++ b/storage/xtradb/trx/trx0purge.cc
@@ -1,6 +1,6 @@
/*****************************************************************************
-Copyright (c) 1996, 2016, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 1996, 2017, Oracle and/or its affiliates. All Rights Reserved.
Copyright (c) 2017, MariaDB Corporation. All Rights Reserved.
This program is free software; you can redistribute it and/or modify it under
@@ -247,6 +247,19 @@ trx_purge_add_update_undo_to_history(
hist_size + undo->size, MLOG_4BYTES, mtr);
}
+ /* Before any transaction-generating background threads or the
+ purge have been started, recv_recovery_rollback_active() can
+ start transactions in row_merge_drop_temp_indexes() and
+ fts_drop_orphaned_tables(), and roll back recovered transactions.
+ After the purge thread has been given permission to exit,
+ in fast shutdown, we may roll back transactions (trx->undo_no==0)
+ in THD::cleanup() invoked from unlink_thd(). */
+ ut_ad(srv_undo_sources
+ || ((srv_startup_is_before_trx_rollback_phase
+ || trx_rollback_or_clean_is_active)
+ && purge_sys->state == PURGE_STATE_INIT)
+ || (trx->undo_no == 0 && srv_fast_shutdown));
+
/* Add the log as the first in the history list */
flst_add_first(rseg_header + TRX_RSEG_HISTORY,
undo_header + TRX_UNDO_HISTORY_NODE, mtr);
@@ -685,7 +698,8 @@ trx_purge_get_rseg_with_min_trx_id(
/* We assume in purge of externally stored fields that space id is
in the range of UNDO tablespace space ids */
- ut_a(purge_sys->rseg->space <= srv_undo_tablespaces_open);
+ ut_a(purge_sys->rseg->space == 0
+ || srv_is_undo_tablespace(purge_sys->rseg->space));
zip_size = purge_sys->rseg->zip_size;
diff --git a/storage/xtradb/trx/trx0rec.cc b/storage/xtradb/trx/trx0rec.cc
index 74a63b60286..8c0904dd57b 100644
--- a/storage/xtradb/trx/trx0rec.cc
+++ b/storage/xtradb/trx/trx0rec.cc
@@ -1186,10 +1186,6 @@ UNIV_INTERN
dberr_t
trx_undo_report_row_operation(
/*==========================*/
- ulint flags, /*!< in: if BTR_NO_UNDO_LOG_FLAG bit is
- set, does nothing */
- ulint op_type, /*!< in: TRX_UNDO_INSERT_OP or
- TRX_UNDO_MODIFY_OP */
que_thr_t* thr, /*!< in: query thread */
dict_index_t* index, /*!< in: clustered index */
const dtuple_t* clust_entry, /*!< in: in the case of an insert,
@@ -1223,16 +1219,8 @@ trx_undo_report_row_operation(
ut_a(dict_index_is_clust(index));
ut_ad(!rec || rec_offs_validate(rec, index, offsets));
- if (flags & BTR_NO_UNDO_LOG_FLAG) {
-
- *roll_ptr = 0;
-
- return(DB_SUCCESS);
- }
-
ut_ad(thr);
- ut_ad((op_type != TRX_UNDO_INSERT_OP)
- || (clust_entry && !update && !rec));
+ ut_ad(!clust_entry || (!update && !rec));
trx = thr_get_trx(thr);
@@ -1253,8 +1241,7 @@ trx_undo_report_row_operation(
/* If the undo log is not assigned yet, assign one */
- switch (op_type) {
- case TRX_UNDO_INSERT_OP:
+ if (clust_entry) {
undo = trx->insert_undo;
if (undo == NULL) {
@@ -1270,10 +1257,7 @@ trx_undo_report_row_operation(
ut_ad(err == DB_SUCCESS);
}
- break;
- default:
- ut_ad(op_type == TRX_UNDO_MODIFY_OP);
-
+ } else {
undo = trx->update_undo;
if (undo == NULL) {
@@ -1297,23 +1281,15 @@ trx_undo_report_row_operation(
buf_block_dbg_add_level(undo_block, SYNC_TRX_UNDO_PAGE);
do {
- page_t* undo_page;
- ulint offset;
-
- undo_page = buf_block_get_frame(undo_block);
ut_ad(page_no == buf_block_get_page_no(undo_block));
- switch (op_type) {
- case TRX_UNDO_INSERT_OP:
- offset = trx_undo_page_report_insert(
- undo_page, trx, index, clust_entry, &mtr);
- break;
- default:
- ut_ad(op_type == TRX_UNDO_MODIFY_OP);
- offset = trx_undo_page_report_modify(
+ page_t* undo_page = buf_block_get_frame(undo_block);
+ ulint offset = clust_entry
+ ? trx_undo_page_report_insert(
+ undo_page, trx, index, clust_entry, &mtr)
+ : trx_undo_page_report_modify(
undo_page, trx, index, rec, offsets, update,
cmpl_info, &mtr);
- }
if (UNIV_UNLIKELY(offset == 0)) {
/* The record did not fit on the page. We erase the
@@ -1364,7 +1340,7 @@ trx_undo_report_row_operation(
mutex_exit(&trx->undo_mutex);
*roll_ptr = trx_undo_build_roll_ptr(
- op_type == TRX_UNDO_INSERT_OP,
+ clust_entry != NULL,
rseg->id, page_no, offset);
return(DB_SUCCESS);
}
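The trx_undo_report_row_operation() hunks above drop the flags/op_type parameters: the operation is now inferred from whether clust_entry is non-NULL (insert) or NULL (update/delete-mark). A tiny sketch of that dispatch style, with opaque stand-in types rather than the real InnoDB ones:

#include <cstddef>

struct dtuple_t;                        // opaque stand-ins for InnoDB types
struct rec_t;

// Returns the value the roll pointer's "is insert" flag would carry.
static bool report_row_operation(const dtuple_t *clust_entry, const rec_t *rec)
{
    if (clust_entry != NULL) {
        /* insert path: would call trx_undo_page_report_insert() */
        return true;
    }
    /* modify path: rec identifies the record being updated or delete-marked;
       would call trx_undo_page_report_modify() */
    (void) rec;
    return false;
}

int main()
{
    return report_row_operation(NULL, NULL) ? 1 : 0;   // 0: modify path taken
}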
diff --git a/storage/xtradb/trx/trx0roll.cc b/storage/xtradb/trx/trx0roll.cc
index d228743d300..335ef8859c4 100644
--- a/storage/xtradb/trx/trx0roll.cc
+++ b/storage/xtradb/trx/trx0roll.cc
@@ -1,6 +1,6 @@
/*****************************************************************************
-Copyright (c) 1996, 2016, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 1996, 2017, Oracle and/or its affiliates. All Rights Reserved.
Copyright (c) 2016, 2017, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify it under
@@ -818,6 +818,7 @@ DECLARE_THREAD(trx_rollback_or_clean_all_recovered)(
/*!< in: a dummy parameter required by
os_thread_create */
{
+ my_thread_init();
ut_ad(!srv_read_only_mode);
#ifdef UNIV_PFS_THREAD
@@ -828,6 +829,7 @@ DECLARE_THREAD(trx_rollback_or_clean_all_recovered)(
trx_rollback_or_clean_is_active = false;
+ my_thread_end();
/* We count the number of threads in os_thread_exit(). A created
thread should always use that to exit and not use return() to exit. */
diff --git a/storage/xtradb/trx/trx0rseg.cc b/storage/xtradb/trx/trx0rseg.cc
index 003d1036a8c..16fa334872b 100644
--- a/storage/xtradb/trx/trx0rseg.cc
+++ b/storage/xtradb/trx/trx0rseg.cc
@@ -1,6 +1,7 @@
/*****************************************************************************
Copyright (c) 1996, 2011, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 2017, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -293,14 +294,13 @@ trx_rseg_create_instance(
}
}
-/*********************************************************************
-Creates a rollback segment.
-@return pointer to new rollback segment if create successful */
+/** Create a rollback segment.
+@param[in] space undo tablespace ID
+@return pointer to new rollback segment
+@retval NULL on failure */
UNIV_INTERN
trx_rseg_t*
-trx_rseg_create(
-/*============*/
- ulint space) /*!< in: id of UNDO tablespace */
+trx_rseg_create(ulint space)
{
mtr_t mtr;
ulint slot_no;
@@ -323,22 +323,21 @@ trx_rseg_create(
page_no = trx_rseg_header_create(
space, 0, ULINT_MAX, slot_no, &mtr);
- ut_a(page_no != FIL_NULL);
-
- sys_header = trx_sysf_get(&mtr);
+ if (page_no != FIL_NULL) {
+ sys_header = trx_sysf_get(&mtr);
- id = trx_sysf_rseg_get_space(sys_header, slot_no, &mtr);
- ut_a(id == space);
+ id = trx_sysf_rseg_get_space(sys_header, slot_no, &mtr);
+ ut_a(id == space);
- zip_size = space ? fil_space_get_zip_size(space) : 0;
+ zip_size = space ? fil_space_get_zip_size(space) : 0;
- rseg = trx_rseg_mem_create(
- slot_no, space, zip_size, page_no,
- purge_sys->ib_bh, &mtr);
+ rseg = trx_rseg_mem_create(
+ slot_no, space, zip_size, page_no,
+ purge_sys->ib_bh, &mtr);
+ }
}
mtr_commit(&mtr);
-
return(rseg);
}
diff --git a/storage/xtradb/trx/trx0sys.cc b/storage/xtradb/trx/trx0sys.cc
index 558fe8a2c49..9accb4ef303 100644
--- a/storage/xtradb/trx/trx0sys.cc
+++ b/storage/xtradb/trx/trx0sys.cc
@@ -1,6 +1,6 @@
/*****************************************************************************
-Copyright (c) 1996, 2015, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 1996, 2017, Oracle and/or its affiliates. All Rights Reserved.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -1064,18 +1064,12 @@ trx_sys_create_rsegs(
ulint new_rsegs = n_rsegs - n_used;
for (i = 0; i < new_rsegs; ++i) {
- ulint space;
+ ulint space_id;
+ space_id = (n_spaces == 0) ? 0
+ : (srv_undo_space_id_start + i % n_spaces);
- /* Tablespace 0 is the system tablespace. All UNDO
- log tablespaces start from 1. */
-
- if (n_spaces > 0) {
- space = (i % n_spaces) + 1;
- } else {
- space = 0; /* System tablespace */
- }
-
- if (trx_rseg_create(space) != NULL) {
+ /* Tablespace 0 is the system tablespace. */
+ if (trx_rseg_create(space_id) != NULL) {
++n_used;
} else {
break;
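The trx_sys_create_rsegs() hunk above replaces the old `(i % n_spaces) + 1` mapping with `srv_undo_space_id_start + i % n_spaces`, so undo tablespace IDs are no longer assumed to start at 1. A small worked example with illustrative values for the two variables:

#include <cstdio>

int main()
{
    const unsigned long srv_undo_space_id_start = 1;    // assumed value
    const unsigned long n_spaces = 2;                    // assumed value

    for (unsigned long i = 0; i < 4; i++) {
        unsigned long space_id = (n_spaces == 0)
            ? 0                                          // system tablespace
            : (srv_undo_space_id_start + i % n_spaces);
        printf("rseg %lu -> undo space %lu\n", i, space_id);
        // prints spaces 1, 2, 1, 2: round-robin over the undo tablespaces
    }
    return 0;
}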
diff --git a/storage/xtradb/trx/trx0trx.cc b/storage/xtradb/trx/trx0trx.cc
index d0cb4a883cc..1d2f7ada54e 100644
--- a/storage/xtradb/trx/trx0trx.cc
+++ b/storage/xtradb/trx/trx0trx.cc
@@ -1,6 +1,7 @@
/*****************************************************************************
Copyright (c) 1996, 2016, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 2015, 2017, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -2283,6 +2284,7 @@ state_ok:
}
}
#endif /* WITH_WSREP */
+
/**********************************************************************//**
Prints info about a transaction.
Acquires and releases lock_sys->mutex and trx_sys->mutex. */
@@ -2744,4 +2746,3 @@ trx_start_for_ddl_low(
ut_error;
}
-
diff --git a/storage/xtradb/usr/usr0sess.cc b/storage/xtradb/usr/usr0sess.cc
index ab7ba6bea09..e1bd71ff1a0 100644
--- a/storage/xtradb/usr/usr0sess.cc
+++ b/storage/xtradb/usr/usr0sess.cc
@@ -1,6 +1,7 @@
/*****************************************************************************
Copyright (c) 1996, 2011, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 2017, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -48,8 +49,6 @@ sess_open(void)
sess->trx = trx_allocate_for_background();
sess->trx->sess = sess;
- UT_LIST_INIT(sess->graphs);
-
return(sess);
}
diff --git a/strings/ctype-utf8.c b/strings/ctype-utf8.c
index 683c80f643d..56330bd68cb 100644
--- a/strings/ctype-utf8.c
+++ b/strings/ctype-utf8.c
@@ -1,5 +1,5 @@
/* Copyright (c) 2000, 2013, Oracle and/or its affiliates.
- Copyright (c) 2009, 2016, MariaDB
+ Copyright (c) 2009, 2017, MariaDB
This library is free software; you can redistribute it and/or
modify it under the terms of the GNU Library General Public
@@ -5114,7 +5114,9 @@ static int my_uni_utf8_no_range(CHARSET_INFO *cs __attribute__((unused)),
{
/* Fall through all cases!!! */
case 3: r[2]= (uchar) (0x80 | (wc & 0x3f)); wc= wc >> 6; wc |= 0x800;
+ /* fall through */
case 2: r[1]= (uchar) (0x80 | (wc & 0x3f)); wc= wc >> 6; wc |= 0xc0;
+ /* fall through */
case 1: r[0]= (uchar) wc;
}
return count;
@@ -7497,8 +7499,11 @@ my_wc_mb_utf8mb4(CHARSET_INFO *cs __attribute__((unused)),
switch (count) {
/* Fall through all cases!!! */
case 4: r[3] = (uchar) (0x80 | (wc & 0x3f)); wc = wc >> 6; wc |= 0x10000;
+ /* fall through */
case 3: r[2] = (uchar) (0x80 | (wc & 0x3f)); wc = wc >> 6; wc |= 0x800;
+ /* fall through */
case 2: r[1] = (uchar) (0x80 | (wc & 0x3f)); wc = wc >> 6; wc |= 0xc0;
+ /* fall through */
case 1: r[0] = (uchar) wc;
}
return count;
@@ -7529,8 +7534,11 @@ my_wc_mb_utf8mb4_no_range(CHARSET_INFO *cs __attribute__((unused)),
{
/* Fall through all cases!!! */
case 4: r[3]= (uchar) (0x80 | (wc & 0x3f)); wc= wc >> 6; wc |= 0x10000;
+ /* fall through */
case 3: r[2]= (uchar) (0x80 | (wc & 0x3f)); wc= wc >> 6; wc |= 0x800;
+ /* fall through */
case 2: r[1]= (uchar) (0x80 | (wc & 0x3f)); wc= wc >> 6; wc |= 0xc0;
+ /* fall through */
case 1: r[0]= (uchar) wc;
}
return count;
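The ctype-utf8.c hunks above only annotate intentional fall-through; the encoder itself builds a multi-byte sequence by falling from the highest case down to case 1, writing trailing bytes first. A standalone two-byte sketch of the same pattern (not the library function), encoding U+00E9:

#include <cstdio>

static int wc_to_utf8_2byte(unsigned long wc, unsigned char *r)
{
    const int count = (wc < 0x80) ? 1 : 2;   // sketch: 1- and 2-byte forms only
    switch (count) {
    case 2: r[1] = (unsigned char) (0x80 | (wc & 0x3f)); wc >>= 6; wc |= 0xc0;
        /* fall through */
    case 1: r[0] = (unsigned char) wc;
    }
    return count;
}

int main()
{
    unsigned char r[2];
    const int n = wc_to_utf8_2byte(0xE9, r);         // U+00E9 (é)
    printf("%d bytes: %02X %02X\n", n, r[0], r[1]);  // 2 bytes: C3 A9
    return 0;
}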
diff --git a/strings/ctype.c b/strings/ctype.c
index 12d511162d7..0aed6c8bf52 100644
--- a/strings/ctype.c
+++ b/strings/ctype.c
@@ -762,7 +762,7 @@ static int cs_value(MY_XML_PARSER *st,const char *attr, size_t len)
/* Rules: Context */
case _CS_CONTEXT:
- if (len < sizeof(i->context) + 1)
+ if (len < sizeof(i->context))
{
memcpy(i->context, attr, len);
i->context[len]= '\0';
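The ctype.c hunk above tightens the bound from `len < sizeof(i->context) + 1` to `len < sizeof(i->context)`: the copy writes len bytes plus a terminating NUL, so len must leave room for that final byte. A minimal sketch of the corrected bound with an illustrative 16-byte buffer:

#include <cstddef>
#include <cstring>
#include <cstdio>

static int copy_value(char *buf, size_t buf_size, const char *attr, size_t len)
{
    if (len < buf_size)                 // corrected bound: len bytes + NUL fit
    {
        memcpy(buf, attr, len);
        buf[len] = '\0';                // index len is still inside the buffer
        return 1;
    }
    return 0;                           // len == buf_size would overflow
}

int main()
{
    char context[16];
    printf("%d\n", copy_value(context, sizeof(context), "primary", 7));  // 1
    return 0;
}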
diff --git a/strings/dtoa.c b/strings/dtoa.c
index fdf7bceddfd..a16ec93d3eb 100644
--- a/strings/dtoa.c
+++ b/strings/dtoa.c
@@ -1,4 +1,5 @@
/* Copyright (c) 2007, 2012, Oracle and/or its affiliates. All rights reserved.
+ Copyright (c) 2017, MariaDB Corporation.
This library is free software; you can redistribute it and/or
modify it under the terms of the GNU Library General Public
@@ -1377,7 +1378,7 @@ static double my_strtod_int(const char *s00, char **se, int *error, char *buf, s
switch (*s) {
case '-':
sign= 1;
- /* no break */
+ /* fall through */
case '+':
s++;
goto break2;
@@ -1467,6 +1468,7 @@ static double my_strtod_int(const char *s00, char **se, int *error, char *buf, s
switch (c= *s) {
case '-':
esign= 1;
+ /* fall through */
case '+':
c= *++s;
}
@@ -2360,7 +2362,7 @@ static char *dtoa(double dd, int mode, int ndigits, int *decpt, int *sign,
break;
case 2:
leftright= 0;
- /* no break */
+ /* fall through */
case 4:
if (ndigits <= 0)
ndigits= 1;
@@ -2368,7 +2370,7 @@ static char *dtoa(double dd, int mode, int ndigits, int *decpt, int *sign,
break;
case 3:
leftright= 0;
- /* no break */
+ /* fall through */
case 5:
i= ndigits + k + 1;
ilim= i;
diff --git a/tests/mysql_client_test.c b/tests/mysql_client_test.c
index 1b98622313e..b37e0e8bc2c 100644
--- a/tests/mysql_client_test.c
+++ b/tests/mysql_client_test.c
@@ -1096,6 +1096,7 @@ static void test_wl4435_2()
\
rs_metadata= mysql_stmt_result_metadata(ps); \
fields= mysql_fetch_fields(rs_metadata); \
+ mysql_free_result(rs_metadata); \
\
rc= mysql_stmt_bind_result(ps, &psp); \
check_execute(ps, rc); \
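The mysql_client_test.c hunk above frees the result-set metadata that the test macro leaked: mysql_stmt_result_metadata() hands back a MYSQL_RES that the caller owns and must release with mysql_free_result(). A minimal sketch of that ownership rule, using the client API calls that appear in the hunk (real use requires a prepared statement against a live server):

#include <mysql.h>

// Returns the number of result columns of a prepared statement, releasing
// the metadata result set it had to allocate along the way.
unsigned int stmt_result_column_count(MYSQL_STMT *ps)
{
    MYSQL_RES *rs_metadata = mysql_stmt_result_metadata(ps);
    if (rs_metadata == NULL)
        return 0;                            // statement returns no result set

    MYSQL_FIELD *fields = mysql_fetch_fields(rs_metadata);
    unsigned int n_fields = mysql_num_fields(rs_metadata);
    (void) fields;                           // fields[0..n_fields-1] usable here

    mysql_free_result(rs_metadata);          // the call the test was missing
    return n_fields;
}

int main()
{
    return 0;   // link against libmysqlclient; see note above about real use
}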
diff --git a/unittest/mysys/ma_dyncol-t.c b/unittest/mysys/ma_dyncol-t.c
index b3fff638b65..3b43c10a6a8 100644
--- a/unittest/mysys/ma_dyncol-t.c
+++ b/unittest/mysys/ma_dyncol-t.c
@@ -687,6 +687,9 @@ void test_update_many(uint *column_numbers, uint *column_values,
err:
ok(rc, "%s", "update_many");
/* cleanup */
+ free(val);
+ free(upd);
+ free(res);
mariadb_dyncol_free(&str1);
mariadb_dyncol_free(&str2);
}