From 42802ad66c49b6de11b37c7ea4e4658ccc5a94aa Mon Sep 17 00:00:00 2001 From: Brandon Nesterenko Date: Wed, 14 Sep 2022 15:08:12 -0600 Subject: MDEV-25616 XA PREPARE event group is not binlogged when.. the only query of the XA transaction is on a non-transactional table errors out: XA BEGIN 'x'; --error ER_DUP_ENTRY INSERT INTO t1 VALUES (1),(1); XA END 'x'; XA PREPARE 'x'; The binlogging pattern is correctly started as expected with the errored-out Query or its ROW format events, but there is no empty XA_prepare_log_event group. The following XA COMMIT 'x'; therefore should not be logged either, but it is. The bug is fixed by properly maintaining a read-write binlog hton property and using it to enforce correct binlogging decisions. Specifically, in the bug-description case, XA COMMIT won't be binlogged in either situation: when it is issued in the same connection, or externally after disconnect. The same continues to apply to an empty XA transaction that does not change any data in any of the transactional engines involved. --- .../suite/binlog/r/binlog_empty_xa_prepared.result | 108 ++ .../suite/binlog/t/binlog_empty_xa_prepared.test | 52 + .../suite/rpl/include/rpl_xa_empty_transaction.inc | 10 + .../include/rpl_xa_empty_transaction_test_case.inc | 131 +++ .../suite/rpl/r/rpl_xa_empty_transaction.result | 1169 ++++++++++++++++++++ .../suite/rpl/t/rpl_xa_empty_transaction.cnf | 18 + .../suite/rpl/t/rpl_xa_empty_transaction.test | 175 +++ sql/log.cc | 52 +- 8 files changed, 1709 insertions(+), 6 deletions(-) create mode 100644 mysql-test/suite/rpl/include/rpl_xa_empty_transaction.inc create mode 100644 mysql-test/suite/rpl/include/rpl_xa_empty_transaction_test_case.inc create mode 100644 mysql-test/suite/rpl/r/rpl_xa_empty_transaction.result create mode 100644 mysql-test/suite/rpl/t/rpl_xa_empty_transaction.cnf create mode 100644 mysql-test/suite/rpl/t/rpl_xa_empty_transaction.test diff --git a/mysql-test/suite/binlog/r/binlog_empty_xa_prepared.result
b/mysql-test/suite/binlog/r/binlog_empty_xa_prepared.result index 9f998e049c0..589570d8300 100644 --- a/mysql-test/suite/binlog/r/binlog_empty_xa_prepared.result +++ b/mysql-test/suite/binlog/r/binlog_empty_xa_prepared.result @@ -108,3 +108,111 @@ master-bin.000001 # Gtid # # GTID #-#-# master-bin.000001 # Query # # use `test`; DROP SEQUENCE `s` /* generated by server */ master-bin.000001 # Gtid # # GTID #-#-# master-bin.000001 # Query # # use `test`; DROP TABLE `t1` /* generated by server */ +CREATE TABLE t1 (a INT PRIMARY KEY) ENGINE=InnoDB; +INSERT INTO t1 VALUES (1); +connect con1,localhost,root,,; +XA START '1'; +INSERT INTO t1 VALUES (2),(1); +ERROR 23000: Duplicate entry '1' for key 'PRIMARY' +SELECT * FROM t1 WHERE a = 2; +a +XA END '1'; +XA PREPARE '1'; +disconnect con1; +connection default; +XA RECOVER; +formatID gtrid_length bqual_length data +1 1 0 1 +XA COMMIT '1'; +ERROR XA100: XA_RBROLLBACK: Transaction branch was rolled back +Must be no XA PREPARE group nor XA completion one: +include/show_binlog_events.inc +Log_name Pos Event_type Server_id End_log_pos Info +master-bin.000001 # Gtid # # GTID #-#-# +master-bin.000001 # Query # # use `test`; CREATE TABLE t1 (a INT PRIMARY KEY) ENGINE=InnoDB +master-bin.000001 # Gtid # # BEGIN GTID #-#-# +master-bin.000001 # Annotate_rows # # INSERT INTO t1 VALUES (1) +master-bin.000001 # Table_map # # table_id: # (test.t1) +master-bin.000001 # Write_rows_v1 # # table_id: # flags: STMT_END_F +master-bin.000001 # Xid # # COMMIT /* XID */ +master-bin.000001 # Gtid # # GTID #-#-# +master-bin.000001 # Query # # use `test`; CREATE SEQUENCE s ENGINE=InnoDB +master-bin.000001 # Gtid # # BEGIN GTID #-#-# +master-bin.000001 # Annotate_rows # # SELECT NEXT VALUE FOR s +master-bin.000001 # Table_map # # table_id: # (test.s) +master-bin.000001 # Write_rows_v1 # # table_id: # flags: STMT_END_F +master-bin.000001 # Query # # COMMIT +master-bin.000001 # Gtid # # XA START X'32',X'',1 GTID #-#-# +master-bin.000001 # Query # # XA END 
X'32',X'',1 +master-bin.000001 # XA_prepare # # XA PREPARE X'32',X'',1 +master-bin.000001 # Gtid # # GTID #-#-# +master-bin.000001 # Query # # XA ROLLBACK X'32',X'',1 +master-bin.000001 # Gtid # # GTID #-#-# +master-bin.000001 # Query # # use `test`; DROP SEQUENCE `s` /* generated by server */ +master-bin.000001 # Gtid # # GTID #-#-# +master-bin.000001 # Query # # use `test`; DROP TABLE `t1` /* generated by server */ +master-bin.000001 # Gtid # # GTID #-#-# +master-bin.000001 # Query # # use `test`; CREATE TABLE t1 (a INT PRIMARY KEY) ENGINE=InnoDB +master-bin.000001 # Gtid # # BEGIN GTID #-#-# +master-bin.000001 # Annotate_rows # # INSERT INTO t1 VALUES (1) +master-bin.000001 # Table_map # # table_id: # (test.t1) +master-bin.000001 # Write_rows_v1 # # table_id: # flags: STMT_END_F +master-bin.000001 # Xid # # COMMIT /* XID */ +DROP TABLE t1; +connect con2,localhost,root,,; +CREATE TABLE tm (a INT PRIMARY KEY) ENGINE=MyISAM; +XA START '1'; +INSERT INTO tm VALUES (1),(1); +ERROR 23000: Duplicate entry '1' for key 'PRIMARY' +SELECT * FROM tm WHERE a = 2; +a +XA END '1'; +XA PREPARE '1'; +disconnect con2; +connection default; +XA RECOVER; +formatID gtrid_length bqual_length data +1 1 0 1 +XA ROLLBACK '1'; +ERROR XA100: XA_RBROLLBACK: Transaction branch was rolled back +Must be no XA PREPARE group nor XA completion one: +include/show_binlog_events.inc +Log_name Pos Event_type Server_id End_log_pos Info +master-bin.000001 # Gtid # # GTID #-#-# +master-bin.000001 # Query # # use `test`; CREATE TABLE t1 (a INT PRIMARY KEY) ENGINE=InnoDB +master-bin.000001 # Gtid # # BEGIN GTID #-#-# +master-bin.000001 # Annotate_rows # # INSERT INTO t1 VALUES (1) +master-bin.000001 # Table_map # # table_id: # (test.t1) +master-bin.000001 # Write_rows_v1 # # table_id: # flags: STMT_END_F +master-bin.000001 # Xid # # COMMIT /* XID */ +master-bin.000001 # Gtid # # GTID #-#-# +master-bin.000001 # Query # # use `test`; CREATE SEQUENCE s ENGINE=InnoDB +master-bin.000001 # Gtid # # BEGIN GTID 
#-#-# +master-bin.000001 # Annotate_rows # # SELECT NEXT VALUE FOR s +master-bin.000001 # Table_map # # table_id: # (test.s) +master-bin.000001 # Write_rows_v1 # # table_id: # flags: STMT_END_F +master-bin.000001 # Query # # COMMIT +master-bin.000001 # Gtid # # XA START X'32',X'',1 GTID #-#-# +master-bin.000001 # Query # # XA END X'32',X'',1 +master-bin.000001 # XA_prepare # # XA PREPARE X'32',X'',1 +master-bin.000001 # Gtid # # GTID #-#-# +master-bin.000001 # Query # # XA ROLLBACK X'32',X'',1 +master-bin.000001 # Gtid # # GTID #-#-# +master-bin.000001 # Query # # use `test`; DROP SEQUENCE `s` /* generated by server */ +master-bin.000001 # Gtid # # GTID #-#-# +master-bin.000001 # Query # # use `test`; DROP TABLE `t1` /* generated by server */ +master-bin.000001 # Gtid # # GTID #-#-# +master-bin.000001 # Query # # use `test`; CREATE TABLE t1 (a INT PRIMARY KEY) ENGINE=InnoDB +master-bin.000001 # Gtid # # BEGIN GTID #-#-# +master-bin.000001 # Annotate_rows # # INSERT INTO t1 VALUES (1) +master-bin.000001 # Table_map # # table_id: # (test.t1) +master-bin.000001 # Write_rows_v1 # # table_id: # flags: STMT_END_F +master-bin.000001 # Xid # # COMMIT /* XID */ +master-bin.000001 # Gtid # # GTID #-#-# +master-bin.000001 # Query # # use `test`; DROP TABLE `t1` /* generated by server */ +master-bin.000001 # Gtid # # GTID #-#-# +master-bin.000001 # Query # # use `test`; CREATE TABLE tm (a INT PRIMARY KEY) ENGINE=MyISAM +master-bin.000001 # Gtid # # BEGIN GTID #-#-# +master-bin.000001 # Query # # use `test`; INSERT INTO tm VALUES (1),(1) +master-bin.000001 # Query # # COMMIT +DROP TABLE tm; diff --git a/mysql-test/suite/binlog/t/binlog_empty_xa_prepared.test b/mysql-test/suite/binlog/t/binlog_empty_xa_prepared.test index 443feb60627..2890c42a087 100644 --- a/mysql-test/suite/binlog/t/binlog_empty_xa_prepared.test +++ b/mysql-test/suite/binlog/t/binlog_empty_xa_prepared.test @@ -80,3 +80,55 @@ DROP TABLE t1; --echo # Proof of correct logging incl empty XA-PREPARE --source 
include/show_binlog_events.inc + + +# MDEV-25616 Binlog event for XA COMMIT is generated without matching XA START + +CREATE TABLE t1 (a INT PRIMARY KEY) ENGINE=InnoDB; +INSERT INTO t1 VALUES (1); + +--source include/count_sessions.inc +--connect(con1,localhost,root,,) + +XA START '1'; + --error ER_DUP_ENTRY + INSERT INTO t1 VALUES (2),(1); + SELECT * FROM t1 WHERE a = 2; +XA END '1'; +XA PREPARE '1'; + +--disconnect con1 + +--connection default +--source include/wait_until_count_sessions.inc +XA RECOVER; + +--error ER_XA_RBROLLBACK +XA COMMIT '1'; +--echo Must be no XA PREPARE group nor XA completion one: +--source include/show_binlog_events.inc +DROP TABLE t1; + +--source include/count_sessions.inc + +--connect(con2,localhost,root,,) +CREATE TABLE tm (a INT PRIMARY KEY) ENGINE=MyISAM; +XA START '1'; + --error ER_DUP_ENTRY + INSERT INTO tm VALUES (1),(1); + SELECT * FROM tm WHERE a = 2; +XA END '1'; +XA PREPARE '1'; + +--disconnect con2 + +--connection default +--source include/wait_until_count_sessions.inc +XA RECOVER; + +--error ER_XA_RBROLLBACK +XA ROLLBACK '1'; +--echo Must be no XA PREPARE group nor XA completion one: +--source include/show_binlog_events.inc +DROP TABLE tm; + diff --git a/mysql-test/suite/rpl/include/rpl_xa_empty_transaction.inc b/mysql-test/suite/rpl/include/rpl_xa_empty_transaction.inc new file mode 100644 index 00000000000..4cb4fe8962f --- /dev/null +++ b/mysql-test/suite/rpl/include/rpl_xa_empty_transaction.inc @@ -0,0 +1,10 @@ +# +# Helper file to run each empty-due-to-err XA transaction test case both with +# and without detaching from the connection when the transaction is prepared. 
+# + +--let $use_disconnect=0 +--source rpl_xa_empty_transaction_test_case.inc + +--let $use_disconnect=1 +--source rpl_xa_empty_transaction_test_case.inc diff --git a/mysql-test/suite/rpl/include/rpl_xa_empty_transaction_test_case.inc b/mysql-test/suite/rpl/include/rpl_xa_empty_transaction_test_case.inc new file mode 100644 index 00000000000..6368336b8e3 --- /dev/null +++ b/mysql-test/suite/rpl/include/rpl_xa_empty_transaction_test_case.inc @@ -0,0 +1,131 @@ +# +# Helper script to create an XA transaction and validate it was not +# binlogged +# +# Parameters +# $xa_completion_action : The action to end the XA transaction, either +# COMMIT or ROLLBACK +# $trx_statements : A comma separated list specifying how to build +# the statements of the transaction. Each item in +# the list is either T (for transactional) or N +# (for non-transactional). An empty list will not +# add any statements to the transaction. +# $use_disconnect : When TRUE, disconnect after preparing the XA +# transaction to test the detach/rollback case +# + +# +# Setup +--let $generic_assert_text= should not binlog XA transaction + +--connection server_1 +--let server_1_datadir=`select @@datadir` + +--connection server_2 +--let server_2_datadir=`select @@datadir` + +--connection server_3 +--let server_3_datadir=`select @@datadir` + +--let assert_file=$MYSQLTEST_VARDIR/tmp/binlog_decoded.out + +--connection server_1 +CREATE TABLE tm (a INT PRIMARY KEY) engine=myisam; +CREATE TABLE ti (a INT PRIMARY KEY) engine=innodb; +--source include/save_master_gtid.inc + +--connection server_3 +--source include/sync_with_master_gtid.inc + +--connection server_1 + +if ($use_disconnect) +{ + --source include/count_sessions.inc + --connect(con1,localhost,root,,) +} + +XA START 'x'; +--let $_stmt_items= $trx_statements +--let $_ctr= 1 +while($_stmt_items) +{ + --let $_cur_stmt= `SELECT SUBSTRING_INDEX('$_stmt_items', ',', 1)` + --let $_stmt_items= `SELECT LTRIM(SUBSTRING('$_stmt_items', LENGTH('$_cur_stmt') + 2))` + 
+ if (`SELECT strcmp("$_cur_stmt","T") = 0`) + { + --let $target_table= ti + } + + if (`SELECT strcmp("$_cur_stmt","N") = 0`) + { + --let $target_table= tm + } + + --error ER_DUP_ENTRY + --eval INSERT INTO $target_table VALUES ($_ctr),($_ctr); + inc $_ctr; + +} +XA END 'x'; +XA PREPARE 'x'; + +if ($use_disconnect) +{ + --disconnect con1 + --connection server_1 + --source include/wait_until_count_sessions.inc + XA RECOVER; + + --error ER_XA_RBROLLBACK + --eval XA $xa_completion_action 'x'; +} +if (!$use_disconnect) +{ + --eval XA $xa_completion_action 'x'; +} + +--source include/save_master_gtid.inc + +--let binlog_filename= query_get_value(SHOW MASTER STATUS, File, 1) +FLUSH LOGS; + +--echo # MYSQL_BINLOG server_1_datadir/binlog_filename --result-file=assert_file +--exec $MYSQL_BINLOG $server_1_datadir/$binlog_filename --result-file=$assert_file + +--let assert_text= server_1 $generic_assert_text +--let assert_count= 0 +--let assert_select= XA START|XA END|XA PREPARE|XA COMMIT|XA ROLLBACK +--source include/assert_grep.inc + +--connection server_2 +--source include/sync_with_master_gtid.inc +--let binlog_filename= query_get_value(SHOW MASTER STATUS, File, 1) +FLUSH LOGS; + +--echo # MYSQL_BINLOG server_2_datadir/binlog_filename --result-file=assert_file +--exec $MYSQL_BINLOG $server_2_datadir/$binlog_filename --result-file=$assert_file + +--let assert_text= server_2 $generic_assert_text +--source include/assert_grep.inc + +--connection server_3 +--source include/sync_with_master_gtid.inc +--let binlog_filename= query_get_value(SHOW MASTER STATUS, File, 1) +FLUSH LOGS; + +--echo # MYSQL_BINLOG server_3_datadir/binlog_filename --result-file=assert_file +--exec $MYSQL_BINLOG $server_3_datadir/$binlog_filename --result-file=$assert_file + +--let assert_text= server_3 $generic_assert_text +--source include/assert_grep.inc + +# +# Cleanup +--connection server_1 +DROP TABLE ti,tm; +--source include/save_master_gtid.inc + +--connection server_3 +--source 
include/sync_with_master_gtid.inc diff --git a/mysql-test/suite/rpl/r/rpl_xa_empty_transaction.result b/mysql-test/suite/rpl/r/rpl_xa_empty_transaction.result new file mode 100644 index 00000000000..f3ea53c219a --- /dev/null +++ b/mysql-test/suite/rpl/r/rpl_xa_empty_transaction.result @@ -0,0 +1,1169 @@ +include/rpl_init.inc [topology=1->2->3] +connection server_1; +connection server_2; +connection server_3; +connection server_1; +# +# Test Case 1: An XA transaction without any statements should not be +# binlogged +connection server_1; +connection server_2; +connection server_3; +connection server_1; +CREATE TABLE tm (a INT PRIMARY KEY) engine=myisam; +CREATE TABLE ti (a INT PRIMARY KEY) engine=innodb; +include/save_master_gtid.inc +connection server_3; +include/sync_with_master_gtid.inc +connection server_1; +XA START 'x'; +XA END 'x'; +XA PREPARE 'x'; +XA COMMIT 'x';; +include/save_master_gtid.inc +FLUSH LOGS; +# MYSQL_BINLOG server_1_datadir/binlog_filename --result-file=assert_file +include/assert_grep.inc [server_1 should not binlog XA transaction] +connection server_2; +include/sync_with_master_gtid.inc +FLUSH LOGS; +# MYSQL_BINLOG server_2_datadir/binlog_filename --result-file=assert_file +include/assert_grep.inc [server_2 should not binlog XA transaction] +connection server_3; +include/sync_with_master_gtid.inc +FLUSH LOGS; +# MYSQL_BINLOG server_3_datadir/binlog_filename --result-file=assert_file +include/assert_grep.inc [server_3 should not binlog XA transaction] +connection server_1; +DROP TABLE ti,tm; +include/save_master_gtid.inc +connection server_3; +include/sync_with_master_gtid.inc +connection server_1; +connection server_2; +connection server_3; +connection server_1; +CREATE TABLE tm (a INT PRIMARY KEY) engine=myisam; +CREATE TABLE ti (a INT PRIMARY KEY) engine=innodb; +include/save_master_gtid.inc +connection server_3; +include/sync_with_master_gtid.inc +connection server_1; +connect con1,localhost,root,,; +XA START 'x'; +XA END 'x'; +XA PREPARE 
'x'; +disconnect con1; +connection server_1; +XA RECOVER; +formatID gtrid_length bqual_length data +1 1 0 x +XA COMMIT 'x';; +ERROR XA100: XA_RBROLLBACK: Transaction branch was rolled back +include/save_master_gtid.inc +FLUSH LOGS; +# MYSQL_BINLOG server_1_datadir/binlog_filename --result-file=assert_file +include/assert_grep.inc [server_1 should not binlog XA transaction] +connection server_2; +include/sync_with_master_gtid.inc +FLUSH LOGS; +# MYSQL_BINLOG server_2_datadir/binlog_filename --result-file=assert_file +include/assert_grep.inc [server_2 should not binlog XA transaction] +connection server_3; +include/sync_with_master_gtid.inc +FLUSH LOGS; +# MYSQL_BINLOG server_3_datadir/binlog_filename --result-file=assert_file +include/assert_grep.inc [server_3 should not binlog XA transaction] +connection server_1; +DROP TABLE ti,tm; +include/save_master_gtid.inc +connection server_3; +include/sync_with_master_gtid.inc +connection server_1; +connection server_2; +connection server_3; +connection server_1; +CREATE TABLE tm (a INT PRIMARY KEY) engine=myisam; +CREATE TABLE ti (a INT PRIMARY KEY) engine=innodb; +include/save_master_gtid.inc +connection server_3; +include/sync_with_master_gtid.inc +connection server_1; +XA START 'x'; +XA END 'x'; +XA PREPARE 'x'; +XA ROLLBACK 'x';; +include/save_master_gtid.inc +FLUSH LOGS; +# MYSQL_BINLOG server_1_datadir/binlog_filename --result-file=assert_file +include/assert_grep.inc [server_1 should not binlog XA transaction] +connection server_2; +include/sync_with_master_gtid.inc +FLUSH LOGS; +# MYSQL_BINLOG server_2_datadir/binlog_filename --result-file=assert_file +include/assert_grep.inc [server_2 should not binlog XA transaction] +connection server_3; +include/sync_with_master_gtid.inc +FLUSH LOGS; +# MYSQL_BINLOG server_3_datadir/binlog_filename --result-file=assert_file +include/assert_grep.inc [server_3 should not binlog XA transaction] +connection server_1; +DROP TABLE ti,tm; +include/save_master_gtid.inc +connection 
server_3; +include/sync_with_master_gtid.inc +connection server_1; +connection server_2; +connection server_3; +connection server_1; +CREATE TABLE tm (a INT PRIMARY KEY) engine=myisam; +CREATE TABLE ti (a INT PRIMARY KEY) engine=innodb; +include/save_master_gtid.inc +connection server_3; +include/sync_with_master_gtid.inc +connection server_1; +connect con1,localhost,root,,; +XA START 'x'; +XA END 'x'; +XA PREPARE 'x'; +disconnect con1; +connection server_1; +XA RECOVER; +formatID gtrid_length bqual_length data +1 1 0 x +XA ROLLBACK 'x';; +ERROR XA100: XA_RBROLLBACK: Transaction branch was rolled back +include/save_master_gtid.inc +FLUSH LOGS; +# MYSQL_BINLOG server_1_datadir/binlog_filename --result-file=assert_file +include/assert_grep.inc [server_1 should not binlog XA transaction] +connection server_2; +include/sync_with_master_gtid.inc +FLUSH LOGS; +# MYSQL_BINLOG server_2_datadir/binlog_filename --result-file=assert_file +include/assert_grep.inc [server_2 should not binlog XA transaction] +connection server_3; +include/sync_with_master_gtid.inc +FLUSH LOGS; +# MYSQL_BINLOG server_3_datadir/binlog_filename --result-file=assert_file +include/assert_grep.inc [server_3 should not binlog XA transaction] +connection server_1; +DROP TABLE ti,tm; +include/save_master_gtid.inc +connection server_3; +include/sync_with_master_gtid.inc +# +# Test Case 2: An XA transaction consisting of a successfully rolled back +# statement should not be binlogged +connection server_1; +connection server_2; +connection server_3; +connection server_1; +CREATE TABLE tm (a INT PRIMARY KEY) engine=myisam; +CREATE TABLE ti (a INT PRIMARY KEY) engine=innodb; +include/save_master_gtid.inc +connection server_3; +include/sync_with_master_gtid.inc +connection server_1; +XA START 'x'; +INSERT INTO ti VALUES (1),(1);; +ERROR 23000: Duplicate entry '1' for key 'PRIMARY' +XA END 'x'; +XA PREPARE 'x'; +XA COMMIT 'x';; +include/save_master_gtid.inc +FLUSH LOGS; +# MYSQL_BINLOG 
server_1_datadir/binlog_filename --result-file=assert_file +include/assert_grep.inc [server_1 should not binlog XA transaction] +connection server_2; +include/sync_with_master_gtid.inc +FLUSH LOGS; +# MYSQL_BINLOG server_2_datadir/binlog_filename --result-file=assert_file +include/assert_grep.inc [server_2 should not binlog XA transaction] +connection server_3; +include/sync_with_master_gtid.inc +FLUSH LOGS; +# MYSQL_BINLOG server_3_datadir/binlog_filename --result-file=assert_file +include/assert_grep.inc [server_3 should not binlog XA transaction] +connection server_1; +DROP TABLE ti,tm; +include/save_master_gtid.inc +connection server_3; +include/sync_with_master_gtid.inc +connection server_1; +connection server_2; +connection server_3; +connection server_1; +CREATE TABLE tm (a INT PRIMARY KEY) engine=myisam; +CREATE TABLE ti (a INT PRIMARY KEY) engine=innodb; +include/save_master_gtid.inc +connection server_3; +include/sync_with_master_gtid.inc +connection server_1; +connect con1,localhost,root,,; +XA START 'x'; +INSERT INTO ti VALUES (1),(1);; +ERROR 23000: Duplicate entry '1' for key 'PRIMARY' +XA END 'x'; +XA PREPARE 'x'; +disconnect con1; +connection server_1; +XA RECOVER; +formatID gtrid_length bqual_length data +1 1 0 x +XA COMMIT 'x';; +ERROR XA100: XA_RBROLLBACK: Transaction branch was rolled back +include/save_master_gtid.inc +FLUSH LOGS; +# MYSQL_BINLOG server_1_datadir/binlog_filename --result-file=assert_file +include/assert_grep.inc [server_1 should not binlog XA transaction] +connection server_2; +include/sync_with_master_gtid.inc +FLUSH LOGS; +# MYSQL_BINLOG server_2_datadir/binlog_filename --result-file=assert_file +include/assert_grep.inc [server_2 should not binlog XA transaction] +connection server_3; +include/sync_with_master_gtid.inc +FLUSH LOGS; +# MYSQL_BINLOG server_3_datadir/binlog_filename --result-file=assert_file +include/assert_grep.inc [server_3 should not binlog XA transaction] +connection server_1; +DROP TABLE ti,tm; 
+include/save_master_gtid.inc +connection server_3; +include/sync_with_master_gtid.inc +connection server_1; +connection server_2; +connection server_3; +connection server_1; +CREATE TABLE tm (a INT PRIMARY KEY) engine=myisam; +CREATE TABLE ti (a INT PRIMARY KEY) engine=innodb; +include/save_master_gtid.inc +connection server_3; +include/sync_with_master_gtid.inc +connection server_1; +XA START 'x'; +INSERT INTO ti VALUES (1),(1);; +ERROR 23000: Duplicate entry '1' for key 'PRIMARY' +XA END 'x'; +XA PREPARE 'x'; +XA ROLLBACK 'x';; +include/save_master_gtid.inc +FLUSH LOGS; +# MYSQL_BINLOG server_1_datadir/binlog_filename --result-file=assert_file +include/assert_grep.inc [server_1 should not binlog XA transaction] +connection server_2; +include/sync_with_master_gtid.inc +FLUSH LOGS; +# MYSQL_BINLOG server_2_datadir/binlog_filename --result-file=assert_file +include/assert_grep.inc [server_2 should not binlog XA transaction] +connection server_3; +include/sync_with_master_gtid.inc +FLUSH LOGS; +# MYSQL_BINLOG server_3_datadir/binlog_filename --result-file=assert_file +include/assert_grep.inc [server_3 should not binlog XA transaction] +connection server_1; +DROP TABLE ti,tm; +include/save_master_gtid.inc +connection server_3; +include/sync_with_master_gtid.inc +connection server_1; +connection server_2; +connection server_3; +connection server_1; +CREATE TABLE tm (a INT PRIMARY KEY) engine=myisam; +CREATE TABLE ti (a INT PRIMARY KEY) engine=innodb; +include/save_master_gtid.inc +connection server_3; +include/sync_with_master_gtid.inc +connection server_1; +connect con1,localhost,root,,; +XA START 'x'; +INSERT INTO ti VALUES (1),(1);; +ERROR 23000: Duplicate entry '1' for key 'PRIMARY' +XA END 'x'; +XA PREPARE 'x'; +disconnect con1; +connection server_1; +XA RECOVER; +formatID gtrid_length bqual_length data +1 1 0 x +XA ROLLBACK 'x';; +ERROR XA100: XA_RBROLLBACK: Transaction branch was rolled back +include/save_master_gtid.inc +FLUSH LOGS; +# MYSQL_BINLOG 
server_1_datadir/binlog_filename --result-file=assert_file +include/assert_grep.inc [server_1 should not binlog XA transaction] +connection server_2; +include/sync_with_master_gtid.inc +FLUSH LOGS; +# MYSQL_BINLOG server_2_datadir/binlog_filename --result-file=assert_file +include/assert_grep.inc [server_2 should not binlog XA transaction] +connection server_3; +include/sync_with_master_gtid.inc +FLUSH LOGS; +# MYSQL_BINLOG server_3_datadir/binlog_filename --result-file=assert_file +include/assert_grep.inc [server_3 should not binlog XA transaction] +connection server_1; +DROP TABLE ti,tm; +include/save_master_gtid.inc +connection server_3; +include/sync_with_master_gtid.inc +# +# Test Case 3: An XA transaction with a statement that cannot be rolled +# back should be binlogged +connection server_1; +set @sav_binlog_format = @@binlog_format; +set @@binlog_format = row; +set @@global.binlog_format = row; +connection server_1; +connection server_2; +connection server_3; +connection server_1; +CREATE TABLE tm (a INT PRIMARY KEY) engine=myisam; +CREATE TABLE ti (a INT PRIMARY KEY) engine=innodb; +include/save_master_gtid.inc +connection server_3; +include/sync_with_master_gtid.inc +connection server_1; +XA START 'x'; +INSERT INTO tm VALUES (1),(1);; +ERROR 23000: Duplicate entry '1' for key 'PRIMARY' +XA END 'x'; +XA PREPARE 'x'; +XA COMMIT 'x';; +include/save_master_gtid.inc +FLUSH LOGS; +# MYSQL_BINLOG server_1_datadir/binlog_filename --result-file=assert_file +include/assert_grep.inc [server_1 should not binlog XA transaction] +connection server_2; +include/sync_with_master_gtid.inc +FLUSH LOGS; +# MYSQL_BINLOG server_2_datadir/binlog_filename --result-file=assert_file +include/assert_grep.inc [server_2 should not binlog XA transaction] +connection server_3; +include/sync_with_master_gtid.inc +FLUSH LOGS; +# MYSQL_BINLOG server_3_datadir/binlog_filename --result-file=assert_file +include/assert_grep.inc [server_3 should not binlog XA transaction] +connection 
server_1; +DROP TABLE ti,tm; +include/save_master_gtid.inc +connection server_3; +include/sync_with_master_gtid.inc +connection server_1; +connection server_2; +connection server_3; +connection server_1; +CREATE TABLE tm (a INT PRIMARY KEY) engine=myisam; +CREATE TABLE ti (a INT PRIMARY KEY) engine=innodb; +include/save_master_gtid.inc +connection server_3; +include/sync_with_master_gtid.inc +connection server_1; +connect con1,localhost,root,,; +XA START 'x'; +INSERT INTO tm VALUES (1),(1);; +ERROR 23000: Duplicate entry '1' for key 'PRIMARY' +XA END 'x'; +XA PREPARE 'x'; +disconnect con1; +connection server_1; +XA RECOVER; +formatID gtrid_length bqual_length data +1 1 0 x +XA COMMIT 'x';; +ERROR XA100: XA_RBROLLBACK: Transaction branch was rolled back +include/save_master_gtid.inc +FLUSH LOGS; +# MYSQL_BINLOG server_1_datadir/binlog_filename --result-file=assert_file +include/assert_grep.inc [server_1 should not binlog XA transaction] +connection server_2; +include/sync_with_master_gtid.inc +FLUSH LOGS; +# MYSQL_BINLOG server_2_datadir/binlog_filename --result-file=assert_file +include/assert_grep.inc [server_2 should not binlog XA transaction] +connection server_3; +include/sync_with_master_gtid.inc +FLUSH LOGS; +# MYSQL_BINLOG server_3_datadir/binlog_filename --result-file=assert_file +include/assert_grep.inc [server_3 should not binlog XA transaction] +connection server_1; +DROP TABLE ti,tm; +include/save_master_gtid.inc +connection server_3; +include/sync_with_master_gtid.inc +connection server_1; +connection server_2; +connection server_3; +connection server_1; +CREATE TABLE tm (a INT PRIMARY KEY) engine=myisam; +CREATE TABLE ti (a INT PRIMARY KEY) engine=innodb; +include/save_master_gtid.inc +connection server_3; +include/sync_with_master_gtid.inc +connection server_1; +XA START 'x'; +INSERT INTO tm VALUES (1),(1);; +ERROR 23000: Duplicate entry '1' for key 'PRIMARY' +XA END 'x'; +XA PREPARE 'x'; +XA ROLLBACK 'x';; +Warnings: +Warning 1196 Some 
non-transactional changed tables couldn't be rolled back +include/save_master_gtid.inc +FLUSH LOGS; +# MYSQL_BINLOG server_1_datadir/binlog_filename --result-file=assert_file +include/assert_grep.inc [server_1 should not binlog XA transaction] +connection server_2; +include/sync_with_master_gtid.inc +FLUSH LOGS; +# MYSQL_BINLOG server_2_datadir/binlog_filename --result-file=assert_file +include/assert_grep.inc [server_2 should not binlog XA transaction] +connection server_3; +include/sync_with_master_gtid.inc +FLUSH LOGS; +# MYSQL_BINLOG server_3_datadir/binlog_filename --result-file=assert_file +include/assert_grep.inc [server_3 should not binlog XA transaction] +connection server_1; +DROP TABLE ti,tm; +include/save_master_gtid.inc +connection server_3; +include/sync_with_master_gtid.inc +connection server_1; +connection server_2; +connection server_3; +connection server_1; +CREATE TABLE tm (a INT PRIMARY KEY) engine=myisam; +CREATE TABLE ti (a INT PRIMARY KEY) engine=innodb; +include/save_master_gtid.inc +connection server_3; +include/sync_with_master_gtid.inc +connection server_1; +connect con1,localhost,root,,; +XA START 'x'; +INSERT INTO tm VALUES (1),(1);; +ERROR 23000: Duplicate entry '1' for key 'PRIMARY' +XA END 'x'; +XA PREPARE 'x'; +disconnect con1; +connection server_1; +XA RECOVER; +formatID gtrid_length bqual_length data +1 1 0 x +XA ROLLBACK 'x';; +ERROR XA100: XA_RBROLLBACK: Transaction branch was rolled back +include/save_master_gtid.inc +FLUSH LOGS; +# MYSQL_BINLOG server_1_datadir/binlog_filename --result-file=assert_file +include/assert_grep.inc [server_1 should not binlog XA transaction] +connection server_2; +include/sync_with_master_gtid.inc +FLUSH LOGS; +# MYSQL_BINLOG server_2_datadir/binlog_filename --result-file=assert_file +include/assert_grep.inc [server_2 should not binlog XA transaction] +connection server_3; +include/sync_with_master_gtid.inc +FLUSH LOGS; +# MYSQL_BINLOG server_3_datadir/binlog_filename --result-file=assert_file 
+include/assert_grep.inc [server_3 should not binlog XA transaction] +connection server_1; +DROP TABLE ti,tm; +include/save_master_gtid.inc +connection server_3; +include/sync_with_master_gtid.inc +connection server_1; +set @@binlog_format = @sav_binlog_format; +set @@global.binlog_format = @sav_binlog_format; +# +# Test Case 4: An XA transaction with multiple statements that can all +# be rolled back should not be binlogged +connection server_1; +connection server_2; +connection server_3; +connection server_1; +CREATE TABLE tm (a INT PRIMARY KEY) engine=myisam; +CREATE TABLE ti (a INT PRIMARY KEY) engine=innodb; +include/save_master_gtid.inc +connection server_3; +include/sync_with_master_gtid.inc +connection server_1; +XA START 'x'; +INSERT INTO ti VALUES (1),(1);; +ERROR 23000: Duplicate entry '1' for key 'PRIMARY' +INSERT INTO ti VALUES (2),(2);; +ERROR 23000: Duplicate entry '2' for key 'PRIMARY' +XA END 'x'; +XA PREPARE 'x'; +XA COMMIT 'x';; +include/save_master_gtid.inc +FLUSH LOGS; +# MYSQL_BINLOG server_1_datadir/binlog_filename --result-file=assert_file +include/assert_grep.inc [server_1 should not binlog XA transaction] +connection server_2; +include/sync_with_master_gtid.inc +FLUSH LOGS; +# MYSQL_BINLOG server_2_datadir/binlog_filename --result-file=assert_file +include/assert_grep.inc [server_2 should not binlog XA transaction] +connection server_3; +include/sync_with_master_gtid.inc +FLUSH LOGS; +# MYSQL_BINLOG server_3_datadir/binlog_filename --result-file=assert_file +include/assert_grep.inc [server_3 should not binlog XA transaction] +connection server_1; +DROP TABLE ti,tm; +include/save_master_gtid.inc +connection server_3; +include/sync_with_master_gtid.inc +connection server_1; +connection server_2; +connection server_3; +connection server_1; +CREATE TABLE tm (a INT PRIMARY KEY) engine=myisam; +CREATE TABLE ti (a INT PRIMARY KEY) engine=innodb; +include/save_master_gtid.inc +connection server_3; +include/sync_with_master_gtid.inc +connection 
server_1; +connect con1,localhost,root,,; +XA START 'x'; +INSERT INTO ti VALUES (1),(1);; +ERROR 23000: Duplicate entry '1' for key 'PRIMARY' +INSERT INTO ti VALUES (2),(2);; +ERROR 23000: Duplicate entry '2' for key 'PRIMARY' +XA END 'x'; +XA PREPARE 'x'; +disconnect con1; +connection server_1; +XA RECOVER; +formatID gtrid_length bqual_length data +1 1 0 x +XA COMMIT 'x';; +ERROR XA100: XA_RBROLLBACK: Transaction branch was rolled back +include/save_master_gtid.inc +FLUSH LOGS; +# MYSQL_BINLOG server_1_datadir/binlog_filename --result-file=assert_file +include/assert_grep.inc [server_1 should not binlog XA transaction] +connection server_2; +include/sync_with_master_gtid.inc +FLUSH LOGS; +# MYSQL_BINLOG server_2_datadir/binlog_filename --result-file=assert_file +include/assert_grep.inc [server_2 should not binlog XA transaction] +connection server_3; +include/sync_with_master_gtid.inc +FLUSH LOGS; +# MYSQL_BINLOG server_3_datadir/binlog_filename --result-file=assert_file +include/assert_grep.inc [server_3 should not binlog XA transaction] +connection server_1; +DROP TABLE ti,tm; +include/save_master_gtid.inc +connection server_3; +include/sync_with_master_gtid.inc +connection server_1; +connection server_2; +connection server_3; +connection server_1; +CREATE TABLE tm (a INT PRIMARY KEY) engine=myisam; +CREATE TABLE ti (a INT PRIMARY KEY) engine=innodb; +include/save_master_gtid.inc +connection server_3; +include/sync_with_master_gtid.inc +connection server_1; +XA START 'x'; +INSERT INTO ti VALUES (1),(1);; +ERROR 23000: Duplicate entry '1' for key 'PRIMARY' +INSERT INTO ti VALUES (2),(2);; +ERROR 23000: Duplicate entry '2' for key 'PRIMARY' +XA END 'x'; +XA PREPARE 'x'; +XA ROLLBACK 'x';; +include/save_master_gtid.inc +FLUSH LOGS; +# MYSQL_BINLOG server_1_datadir/binlog_filename --result-file=assert_file +include/assert_grep.inc [server_1 should not binlog XA transaction] +connection server_2; +include/sync_with_master_gtid.inc +FLUSH LOGS; +# MYSQL_BINLOG 
server_2_datadir/binlog_filename --result-file=assert_file +include/assert_grep.inc [server_2 should not binlog XA transaction] +connection server_3; +include/sync_with_master_gtid.inc +FLUSH LOGS; +# MYSQL_BINLOG server_3_datadir/binlog_filename --result-file=assert_file +include/assert_grep.inc [server_3 should not binlog XA transaction] +connection server_1; +DROP TABLE ti,tm; +include/save_master_gtid.inc +connection server_3; +include/sync_with_master_gtid.inc +connection server_1; +connection server_2; +connection server_3; +connection server_1; +CREATE TABLE tm (a INT PRIMARY KEY) engine=myisam; +CREATE TABLE ti (a INT PRIMARY KEY) engine=innodb; +include/save_master_gtid.inc +connection server_3; +include/sync_with_master_gtid.inc +connection server_1; +connect con1,localhost,root,,; +XA START 'x'; +INSERT INTO ti VALUES (1),(1);; +ERROR 23000: Duplicate entry '1' for key 'PRIMARY' +INSERT INTO ti VALUES (2),(2);; +ERROR 23000: Duplicate entry '2' for key 'PRIMARY' +XA END 'x'; +XA PREPARE 'x'; +disconnect con1; +connection server_1; +XA RECOVER; +formatID gtrid_length bqual_length data +1 1 0 x +XA ROLLBACK 'x';; +ERROR XA100: XA_RBROLLBACK: Transaction branch was rolled back +include/save_master_gtid.inc +FLUSH LOGS; +# MYSQL_BINLOG server_1_datadir/binlog_filename --result-file=assert_file +include/assert_grep.inc [server_1 should not binlog XA transaction] +connection server_2; +include/sync_with_master_gtid.inc +FLUSH LOGS; +# MYSQL_BINLOG server_2_datadir/binlog_filename --result-file=assert_file +include/assert_grep.inc [server_2 should not binlog XA transaction] +connection server_3; +include/sync_with_master_gtid.inc +FLUSH LOGS; +# MYSQL_BINLOG server_3_datadir/binlog_filename --result-file=assert_file +include/assert_grep.inc [server_3 should not binlog XA transaction] +connection server_1; +DROP TABLE ti,tm; +include/save_master_gtid.inc +connection server_3; +include/sync_with_master_gtid.inc +# +# Test Case 5: A mixed XA transaction consisting 
of one statement that +# can successfully be rolled back (first statement), and another that +# can not (second statement) should be binlogged +connection server_1; +set @sav_binlog_format = @@binlog_format; +set @@binlog_format = row; +set @@global.binlog_format = row; +connection server_1; +connection server_2; +connection server_3; +connection server_1; +CREATE TABLE tm (a INT PRIMARY KEY) engine=myisam; +CREATE TABLE ti (a INT PRIMARY KEY) engine=innodb; +include/save_master_gtid.inc +connection server_3; +include/sync_with_master_gtid.inc +connection server_1; +XA START 'x'; +INSERT INTO ti VALUES (1),(1);; +ERROR 23000: Duplicate entry '1' for key 'PRIMARY' +INSERT INTO tm VALUES (2),(2);; +ERROR 23000: Duplicate entry '2' for key 'PRIMARY' +XA END 'x'; +XA PREPARE 'x'; +XA COMMIT 'x';; +include/save_master_gtid.inc +FLUSH LOGS; +# MYSQL_BINLOG server_1_datadir/binlog_filename --result-file=assert_file +include/assert_grep.inc [server_1 should not binlog XA transaction] +connection server_2; +include/sync_with_master_gtid.inc +FLUSH LOGS; +# MYSQL_BINLOG server_2_datadir/binlog_filename --result-file=assert_file +include/assert_grep.inc [server_2 should not binlog XA transaction] +connection server_3; +include/sync_with_master_gtid.inc +FLUSH LOGS; +# MYSQL_BINLOG server_3_datadir/binlog_filename --result-file=assert_file +include/assert_grep.inc [server_3 should not binlog XA transaction] +connection server_1; +DROP TABLE ti,tm; +include/save_master_gtid.inc +connection server_3; +include/sync_with_master_gtid.inc +connection server_1; +connection server_2; +connection server_3; +connection server_1; +CREATE TABLE tm (a INT PRIMARY KEY) engine=myisam; +CREATE TABLE ti (a INT PRIMARY KEY) engine=innodb; +include/save_master_gtid.inc +connection server_3; +include/sync_with_master_gtid.inc +connection server_1; +connect con1,localhost,root,,; +XA START 'x'; +INSERT INTO ti VALUES (1),(1);; +ERROR 23000: Duplicate entry '1' for key 'PRIMARY' +INSERT INTO tm 
VALUES (2),(2);; +ERROR 23000: Duplicate entry '2' for key 'PRIMARY' +XA END 'x'; +XA PREPARE 'x'; +disconnect con1; +connection server_1; +XA RECOVER; +formatID gtrid_length bqual_length data +1 1 0 x +XA COMMIT 'x';; +ERROR XA100: XA_RBROLLBACK: Transaction branch was rolled back +include/save_master_gtid.inc +FLUSH LOGS; +# MYSQL_BINLOG server_1_datadir/binlog_filename --result-file=assert_file +include/assert_grep.inc [server_1 should not binlog XA transaction] +connection server_2; +include/sync_with_master_gtid.inc +FLUSH LOGS; +# MYSQL_BINLOG server_2_datadir/binlog_filename --result-file=assert_file +include/assert_grep.inc [server_2 should not binlog XA transaction] +connection server_3; +include/sync_with_master_gtid.inc +FLUSH LOGS; +# MYSQL_BINLOG server_3_datadir/binlog_filename --result-file=assert_file +include/assert_grep.inc [server_3 should not binlog XA transaction] +connection server_1; +DROP TABLE ti,tm; +include/save_master_gtid.inc +connection server_3; +include/sync_with_master_gtid.inc +connection server_1; +connection server_2; +connection server_3; +connection server_1; +CREATE TABLE tm (a INT PRIMARY KEY) engine=myisam; +CREATE TABLE ti (a INT PRIMARY KEY) engine=innodb; +include/save_master_gtid.inc +connection server_3; +include/sync_with_master_gtid.inc +connection server_1; +XA START 'x'; +INSERT INTO ti VALUES (1),(1);; +ERROR 23000: Duplicate entry '1' for key 'PRIMARY' +INSERT INTO tm VALUES (2),(2);; +ERROR 23000: Duplicate entry '2' for key 'PRIMARY' +XA END 'x'; +XA PREPARE 'x'; +XA ROLLBACK 'x';; +Warnings: +Warning 1196 Some non-transactional changed tables couldn't be rolled back +include/save_master_gtid.inc +FLUSH LOGS; +# MYSQL_BINLOG server_1_datadir/binlog_filename --result-file=assert_file +include/assert_grep.inc [server_1 should not binlog XA transaction] +connection server_2; +include/sync_with_master_gtid.inc +FLUSH LOGS; +# MYSQL_BINLOG server_2_datadir/binlog_filename --result-file=assert_file 
+include/assert_grep.inc [server_2 should not binlog XA transaction] +connection server_3; +include/sync_with_master_gtid.inc +FLUSH LOGS; +# MYSQL_BINLOG server_3_datadir/binlog_filename --result-file=assert_file +include/assert_grep.inc [server_3 should not binlog XA transaction] +connection server_1; +DROP TABLE ti,tm; +include/save_master_gtid.inc +connection server_3; +include/sync_with_master_gtid.inc +connection server_1; +connection server_2; +connection server_3; +connection server_1; +CREATE TABLE tm (a INT PRIMARY KEY) engine=myisam; +CREATE TABLE ti (a INT PRIMARY KEY) engine=innodb; +include/save_master_gtid.inc +connection server_3; +include/sync_with_master_gtid.inc +connection server_1; +connect con1,localhost,root,,; +XA START 'x'; +INSERT INTO ti VALUES (1),(1);; +ERROR 23000: Duplicate entry '1' for key 'PRIMARY' +INSERT INTO tm VALUES (2),(2);; +ERROR 23000: Duplicate entry '2' for key 'PRIMARY' +XA END 'x'; +XA PREPARE 'x'; +disconnect con1; +connection server_1; +XA RECOVER; +formatID gtrid_length bqual_length data +1 1 0 x +XA ROLLBACK 'x';; +ERROR XA100: XA_RBROLLBACK: Transaction branch was rolled back +include/save_master_gtid.inc +FLUSH LOGS; +# MYSQL_BINLOG server_1_datadir/binlog_filename --result-file=assert_file +include/assert_grep.inc [server_1 should not binlog XA transaction] +connection server_2; +include/sync_with_master_gtid.inc +FLUSH LOGS; +# MYSQL_BINLOG server_2_datadir/binlog_filename --result-file=assert_file +include/assert_grep.inc [server_2 should not binlog XA transaction] +connection server_3; +include/sync_with_master_gtid.inc +FLUSH LOGS; +# MYSQL_BINLOG server_3_datadir/binlog_filename --result-file=assert_file +include/assert_grep.inc [server_3 should not binlog XA transaction] +connection server_1; +DROP TABLE ti,tm; +include/save_master_gtid.inc +connection server_3; +include/sync_with_master_gtid.inc +connection server_1; +set @@binlog_format = @sav_binlog_format; +set @@global.binlog_format = 
@sav_binlog_format; +# +# Test Case 6: A mixed XA transaction consisting of one statement that +# cannot successfully be rolled back (first statement), and another that +# can (second statement) should be binlogged +connection server_1; +set @sav_binlog_format = @@binlog_format; +set @@binlog_format = row; +set @@global.binlog_format = row; +connection server_1; +connection server_2; +connection server_3; +connection server_1; +CREATE TABLE tm (a INT PRIMARY KEY) engine=myisam; +CREATE TABLE ti (a INT PRIMARY KEY) engine=innodb; +include/save_master_gtid.inc +connection server_3; +include/sync_with_master_gtid.inc +connection server_1; +XA START 'x'; +INSERT INTO tm VALUES (1),(1);; +ERROR 23000: Duplicate entry '1' for key 'PRIMARY' +INSERT INTO ti VALUES (2),(2);; +ERROR 23000: Duplicate entry '2' for key 'PRIMARY' +XA END 'x'; +XA PREPARE 'x'; +XA COMMIT 'x';; +include/save_master_gtid.inc +FLUSH LOGS; +# MYSQL_BINLOG server_1_datadir/binlog_filename --result-file=assert_file +include/assert_grep.inc [server_1 should not binlog XA transaction] +connection server_2; +include/sync_with_master_gtid.inc +FLUSH LOGS; +# MYSQL_BINLOG server_2_datadir/binlog_filename --result-file=assert_file +include/assert_grep.inc [server_2 should not binlog XA transaction] +connection server_3; +include/sync_with_master_gtid.inc +FLUSH LOGS; +# MYSQL_BINLOG server_3_datadir/binlog_filename --result-file=assert_file +include/assert_grep.inc [server_3 should not binlog XA transaction] +connection server_1; +DROP TABLE ti,tm; +include/save_master_gtid.inc +connection server_3; +include/sync_with_master_gtid.inc +connection server_1; +connection server_2; +connection server_3; +connection server_1; +CREATE TABLE tm (a INT PRIMARY KEY) engine=myisam; +CREATE TABLE ti (a INT PRIMARY KEY) engine=innodb; +include/save_master_gtid.inc +connection server_3; +include/sync_with_master_gtid.inc +connection server_1; +connect con1,localhost,root,,; +XA START 'x'; +INSERT INTO tm VALUES (1),(1);; 
+ERROR 23000: Duplicate entry '1' for key 'PRIMARY' +INSERT INTO ti VALUES (2),(2);; +ERROR 23000: Duplicate entry '2' for key 'PRIMARY' +XA END 'x'; +XA PREPARE 'x'; +disconnect con1; +connection server_1; +XA RECOVER; +formatID gtrid_length bqual_length data +1 1 0 x +XA COMMIT 'x';; +ERROR XA100: XA_RBROLLBACK: Transaction branch was rolled back +include/save_master_gtid.inc +FLUSH LOGS; +# MYSQL_BINLOG server_1_datadir/binlog_filename --result-file=assert_file +include/assert_grep.inc [server_1 should not binlog XA transaction] +connection server_2; +include/sync_with_master_gtid.inc +FLUSH LOGS; +# MYSQL_BINLOG server_2_datadir/binlog_filename --result-file=assert_file +include/assert_grep.inc [server_2 should not binlog XA transaction] +connection server_3; +include/sync_with_master_gtid.inc +FLUSH LOGS; +# MYSQL_BINLOG server_3_datadir/binlog_filename --result-file=assert_file +include/assert_grep.inc [server_3 should not binlog XA transaction] +connection server_1; +DROP TABLE ti,tm; +include/save_master_gtid.inc +connection server_3; +include/sync_with_master_gtid.inc +connection server_1; +connection server_2; +connection server_3; +connection server_1; +CREATE TABLE tm (a INT PRIMARY KEY) engine=myisam; +CREATE TABLE ti (a INT PRIMARY KEY) engine=innodb; +include/save_master_gtid.inc +connection server_3; +include/sync_with_master_gtid.inc +connection server_1; +XA START 'x'; +INSERT INTO tm VALUES (1),(1);; +ERROR 23000: Duplicate entry '1' for key 'PRIMARY' +INSERT INTO ti VALUES (2),(2);; +ERROR 23000: Duplicate entry '2' for key 'PRIMARY' +XA END 'x'; +XA PREPARE 'x'; +XA ROLLBACK 'x';; +Warnings: +Warning 1196 Some non-transactional changed tables couldn't be rolled back +include/save_master_gtid.inc +FLUSH LOGS; +# MYSQL_BINLOG server_1_datadir/binlog_filename --result-file=assert_file +include/assert_grep.inc [server_1 should not binlog XA transaction] +connection server_2; +include/sync_with_master_gtid.inc +FLUSH LOGS; +# MYSQL_BINLOG 
server_2_datadir/binlog_filename --result-file=assert_file +include/assert_grep.inc [server_2 should not binlog XA transaction] +connection server_3; +include/sync_with_master_gtid.inc +FLUSH LOGS; +# MYSQL_BINLOG server_3_datadir/binlog_filename --result-file=assert_file +include/assert_grep.inc [server_3 should not binlog XA transaction] +connection server_1; +DROP TABLE ti,tm; +include/save_master_gtid.inc +connection server_3; +include/sync_with_master_gtid.inc +connection server_1; +connection server_2; +connection server_3; +connection server_1; +CREATE TABLE tm (a INT PRIMARY KEY) engine=myisam; +CREATE TABLE ti (a INT PRIMARY KEY) engine=innodb; +include/save_master_gtid.inc +connection server_3; +include/sync_with_master_gtid.inc +connection server_1; +connect con1,localhost,root,,; +XA START 'x'; +INSERT INTO tm VALUES (1),(1);; +ERROR 23000: Duplicate entry '1' for key 'PRIMARY' +INSERT INTO ti VALUES (2),(2);; +ERROR 23000: Duplicate entry '2' for key 'PRIMARY' +XA END 'x'; +XA PREPARE 'x'; +disconnect con1; +connection server_1; +XA RECOVER; +formatID gtrid_length bqual_length data +1 1 0 x +XA ROLLBACK 'x';; +ERROR XA100: XA_RBROLLBACK: Transaction branch was rolled back +include/save_master_gtid.inc +FLUSH LOGS; +# MYSQL_BINLOG server_1_datadir/binlog_filename --result-file=assert_file +include/assert_grep.inc [server_1 should not binlog XA transaction] +connection server_2; +include/sync_with_master_gtid.inc +FLUSH LOGS; +# MYSQL_BINLOG server_2_datadir/binlog_filename --result-file=assert_file +include/assert_grep.inc [server_2 should not binlog XA transaction] +connection server_3; +include/sync_with_master_gtid.inc +FLUSH LOGS; +# MYSQL_BINLOG server_3_datadir/binlog_filename --result-file=assert_file +include/assert_grep.inc [server_3 should not binlog XA transaction] +connection server_1; +DROP TABLE ti,tm; +include/save_master_gtid.inc +connection server_3; +include/sync_with_master_gtid.inc +connection server_1; +set @@binlog_format = 
@sav_binlog_format; +set @@global.binlog_format = @sav_binlog_format; +# +# Test Case 7: An XA transaction consisting of two failed +# non-transactional statements should be binlogged +connection server_1; +set @sav_binlog_format = @@binlog_format; +set @@binlog_format = row; +set @@global.binlog_format = row; +connection server_1; +connection server_2; +connection server_3; +connection server_1; +CREATE TABLE tm (a INT PRIMARY KEY) engine=myisam; +CREATE TABLE ti (a INT PRIMARY KEY) engine=innodb; +include/save_master_gtid.inc +connection server_3; +include/sync_with_master_gtid.inc +connection server_1; +XA START 'x'; +INSERT INTO tm VALUES (1),(1);; +ERROR 23000: Duplicate entry '1' for key 'PRIMARY' +INSERT INTO tm VALUES (2),(2);; +ERROR 23000: Duplicate entry '2' for key 'PRIMARY' +XA END 'x'; +XA PREPARE 'x'; +XA COMMIT 'x';; +include/save_master_gtid.inc +FLUSH LOGS; +# MYSQL_BINLOG server_1_datadir/binlog_filename --result-file=assert_file +include/assert_grep.inc [server_1 should not binlog XA transaction] +connection server_2; +include/sync_with_master_gtid.inc +FLUSH LOGS; +# MYSQL_BINLOG server_2_datadir/binlog_filename --result-file=assert_file +include/assert_grep.inc [server_2 should not binlog XA transaction] +connection server_3; +include/sync_with_master_gtid.inc +FLUSH LOGS; +# MYSQL_BINLOG server_3_datadir/binlog_filename --result-file=assert_file +include/assert_grep.inc [server_3 should not binlog XA transaction] +connection server_1; +DROP TABLE ti,tm; +include/save_master_gtid.inc +connection server_3; +include/sync_with_master_gtid.inc +connection server_1; +connection server_2; +connection server_3; +connection server_1; +CREATE TABLE tm (a INT PRIMARY KEY) engine=myisam; +CREATE TABLE ti (a INT PRIMARY KEY) engine=innodb; +include/save_master_gtid.inc +connection server_3; +include/sync_with_master_gtid.inc +connection server_1; +connect con1,localhost,root,,; +XA START 'x'; +INSERT INTO tm VALUES (1),(1);; +ERROR 23000: Duplicate entry 
'1' for key 'PRIMARY' +INSERT INTO tm VALUES (2),(2);; +ERROR 23000: Duplicate entry '2' for key 'PRIMARY' +XA END 'x'; +XA PREPARE 'x'; +disconnect con1; +connection server_1; +XA RECOVER; +formatID gtrid_length bqual_length data +1 1 0 x +XA COMMIT 'x';; +ERROR XA100: XA_RBROLLBACK: Transaction branch was rolled back +include/save_master_gtid.inc +FLUSH LOGS; +# MYSQL_BINLOG server_1_datadir/binlog_filename --result-file=assert_file +include/assert_grep.inc [server_1 should not binlog XA transaction] +connection server_2; +include/sync_with_master_gtid.inc +FLUSH LOGS; +# MYSQL_BINLOG server_2_datadir/binlog_filename --result-file=assert_file +include/assert_grep.inc [server_2 should not binlog XA transaction] +connection server_3; +include/sync_with_master_gtid.inc +FLUSH LOGS; +# MYSQL_BINLOG server_3_datadir/binlog_filename --result-file=assert_file +include/assert_grep.inc [server_3 should not binlog XA transaction] +connection server_1; +DROP TABLE ti,tm; +include/save_master_gtid.inc +connection server_3; +include/sync_with_master_gtid.inc +connection server_1; +connection server_2; +connection server_3; +connection server_1; +CREATE TABLE tm (a INT PRIMARY KEY) engine=myisam; +CREATE TABLE ti (a INT PRIMARY KEY) engine=innodb; +include/save_master_gtid.inc +connection server_3; +include/sync_with_master_gtid.inc +connection server_1; +XA START 'x'; +INSERT INTO tm VALUES (1),(1);; +ERROR 23000: Duplicate entry '1' for key 'PRIMARY' +INSERT INTO tm VALUES (2),(2);; +ERROR 23000: Duplicate entry '2' for key 'PRIMARY' +XA END 'x'; +XA PREPARE 'x'; +XA ROLLBACK 'x';; +Warnings: +Warning 1196 Some non-transactional changed tables couldn't be rolled back +include/save_master_gtid.inc +FLUSH LOGS; +# MYSQL_BINLOG server_1_datadir/binlog_filename --result-file=assert_file +include/assert_grep.inc [server_1 should not binlog XA transaction] +connection server_2; +include/sync_with_master_gtid.inc +FLUSH LOGS; +# MYSQL_BINLOG server_2_datadir/binlog_filename 
--result-file=assert_file +include/assert_grep.inc [server_2 should not binlog XA transaction] +connection server_3; +include/sync_with_master_gtid.inc +FLUSH LOGS; +# MYSQL_BINLOG server_3_datadir/binlog_filename --result-file=assert_file +include/assert_grep.inc [server_3 should not binlog XA transaction] +connection server_1; +DROP TABLE ti,tm; +include/save_master_gtid.inc +connection server_3; +include/sync_with_master_gtid.inc +connection server_1; +connection server_2; +connection server_3; +connection server_1; +CREATE TABLE tm (a INT PRIMARY KEY) engine=myisam; +CREATE TABLE ti (a INT PRIMARY KEY) engine=innodb; +include/save_master_gtid.inc +connection server_3; +include/sync_with_master_gtid.inc +connection server_1; +connect con1,localhost,root,,; +XA START 'x'; +INSERT INTO tm VALUES (1),(1);; +ERROR 23000: Duplicate entry '1' for key 'PRIMARY' +INSERT INTO tm VALUES (2),(2);; +ERROR 23000: Duplicate entry '2' for key 'PRIMARY' +XA END 'x'; +XA PREPARE 'x'; +disconnect con1; +connection server_1; +XA RECOVER; +formatID gtrid_length bqual_length data +1 1 0 x +XA ROLLBACK 'x';; +ERROR XA100: XA_RBROLLBACK: Transaction branch was rolled back +include/save_master_gtid.inc +FLUSH LOGS; +# MYSQL_BINLOG server_1_datadir/binlog_filename --result-file=assert_file +include/assert_grep.inc [server_1 should not binlog XA transaction] +connection server_2; +include/sync_with_master_gtid.inc +FLUSH LOGS; +# MYSQL_BINLOG server_2_datadir/binlog_filename --result-file=assert_file +include/assert_grep.inc [server_2 should not binlog XA transaction] +connection server_3; +include/sync_with_master_gtid.inc +FLUSH LOGS; +# MYSQL_BINLOG server_3_datadir/binlog_filename --result-file=assert_file +include/assert_grep.inc [server_3 should not binlog XA transaction] +connection server_1; +DROP TABLE ti,tm; +include/save_master_gtid.inc +connection server_3; +include/sync_with_master_gtid.inc +connection server_1; +set @@binlog_format = @sav_binlog_format; +set 
@@global.binlog_format = @sav_binlog_format; +connection server_1; +include/rpl_end.inc +# End of rpl_xa_empty_transaction.test diff --git a/mysql-test/suite/rpl/t/rpl_xa_empty_transaction.cnf b/mysql-test/suite/rpl/t/rpl_xa_empty_transaction.cnf new file mode 100644 index 00000000000..92acd0c73a6 --- /dev/null +++ b/mysql-test/suite/rpl/t/rpl_xa_empty_transaction.cnf @@ -0,0 +1,18 @@ +!include include/default_mysqld.cnf + +[mysqld.1] +log-slave-updates +innodb + +[mysqld.2] +log-slave-updates +innodb + +[mysqld.3] +log-slave-updates +innodb + +[ENV] +SERVER_MYPORT_1= @mysqld.1.port +SERVER_MYPORT_2= @mysqld.2.port +SERVER_MYPORT_3= @mysqld.3.port diff --git a/mysql-test/suite/rpl/t/rpl_xa_empty_transaction.test b/mysql-test/suite/rpl/t/rpl_xa_empty_transaction.test new file mode 100644 index 00000000000..61cc0621d5a --- /dev/null +++ b/mysql-test/suite/rpl/t/rpl_xa_empty_transaction.test @@ -0,0 +1,175 @@ +# +# Purpose: +# This test ensures consistency in binlogging behavior for XA transactions +# that have all statements error and rollback, effectively leaving an "empty" +# transaction. In such cases, an empty XA transaction should be binlogged. The +# bug reported by MDEV-25616 revealed that an "empty" XA transaction would +# binlog an XA ROLLBACK or XA COMMIT event without a preceding setup, i.e. +# XA START through XA PREPARE. The bug presented differently for XA +# transactions consisting of transactional and non-transactional statements. +# Therefore, this test validates that an entire XA transaction is binlogged +# for different combinations of transactional or non-transactional statements. +# Note that the behavior changes when binlogging empty XA transactions +# depending on the binlog_row_format variables. That is, when the content of +# the transaction consists of errored transactional statements, in row format, +# an empty XA transaction will be binlogged; however, in mixed and statement +# formats, nothing will be written into the binary log. 
+# +# Methodology: +# Create XA transactions with various combinations of erroring transactional +# or non-transactional statements. The binary log is examined to ensure all +# XA components are written. Chain replication is used, i.e. +# (primary->replica->replica), to ensure replica binlogging is consistent with +# manual execution. The transactional and non-transactional tables use InnoDB +# and MyISAM, respectively. +# +# Parameters +# $expect_transactional_xa_binlog : Boolean indicating whether or not an +# errored transactional statement should result in XA statements written +# into the binary log. +# +# References: +# MDEV-25616: Binlog event for XA COMMIT is generated without matching +# XA START, replication aborts +# +--source include/have_log_bin.inc + +--let $rpl_server_count= 3 +--let $rpl_topology= 1->2->3 +--source include/rpl_init.inc + +--connection server_1 +-- source include/have_innodb.inc +--connection server_2 +-- source include/have_innodb.inc +--connection server_3 +-- source include/have_innodb.inc +--connection server_1 + +--echo # +--echo # Test Case 1: An XA transaction without any statements should not be +--echo # binlogged +--let $trx_statements= + +--let $xa_completion_action= COMMIT +--source include/rpl_xa_empty_transaction.inc + +--let $xa_completion_action= ROLLBACK +--source include/rpl_xa_empty_transaction.inc + + +--echo # +--echo # Test Case 2: An XA transaction consisting of a successfully rolled back +--echo # statement should not be binlogged +--let $trx_statements= T + +--let $xa_completion_action= COMMIT +--source include/rpl_xa_empty_transaction.inc + +--let $xa_completion_action= ROLLBACK +--source include/rpl_xa_empty_transaction.inc + + +--echo # +--echo # Test Case 3: An XA transaction with a statement that cannot be rolled +--echo # back should be binlogged + +# TODO: remove work-around MDEV-24654 when fixed. 
+--connection server_1 +set @sav_binlog_format = @@binlog_format; +set @@binlog_format = row; +set @@global.binlog_format = row; +--let $trx_statements= N + +--let $xa_completion_action= COMMIT +--source include/rpl_xa_empty_transaction.inc + +--let $xa_completion_action= ROLLBACK +--source include/rpl_xa_empty_transaction.inc + +--connection server_1 +set @@binlog_format = @sav_binlog_format; +set @@global.binlog_format = @sav_binlog_format; + + +--echo # +--echo # Test Case 4: An XA transaction with multiple statements that can all +--echo # be rolled back should not be binlogged +--let $trx_statements= T,T + +--let $xa_completion_action= COMMIT +--source include/rpl_xa_empty_transaction.inc + +--let $xa_completion_action= ROLLBACK +--source include/rpl_xa_empty_transaction.inc + + +--echo # +--echo # Test Case 5: A mixed XA transaction consisting of one statement that +--echo # can successfully be rolled back (first statement), and another that +--echo # can not (second statement) should be binlogged + +--connection server_1 +set @sav_binlog_format = @@binlog_format; +set @@binlog_format = row; +set @@global.binlog_format = row; +--let $trx_statements= T,N + +--let $xa_completion_action= COMMIT +--source include/rpl_xa_empty_transaction.inc + +--let $xa_completion_action= ROLLBACK +--source include/rpl_xa_empty_transaction.inc + +--connection server_1 +set @@binlog_format = @sav_binlog_format; +set @@global.binlog_format = @sav_binlog_format; + + +--echo # +--echo # Test Case 6: A mixed XA transaction consisting of one statement that +--echo # cannot successfully be rolled back (first statement), and another that +--echo # can (second statement) should be binlogged + +--connection server_1 +set @sav_binlog_format = @@binlog_format; +set @@binlog_format = row; +set @@global.binlog_format = row; +--let $trx_statements= N,T + +--let $xa_completion_action= COMMIT +--source include/rpl_xa_empty_transaction.inc + +--let $xa_completion_action= ROLLBACK +--source 
include/rpl_xa_empty_transaction.inc + +--connection server_1 +set @@binlog_format = @sav_binlog_format; +set @@global.binlog_format = @sav_binlog_format; + +--echo # +--echo # Test Case 7: An XA transaction consisting of two failed +--echo # non-transactional statements should be binlogged + +--connection server_1 +set @sav_binlog_format = @@binlog_format; +set @@binlog_format = row; +set @@global.binlog_format = row; +--let $trx_statements= N,N + +--let $xa_completion_action= COMMIT +--source include/rpl_xa_empty_transaction.inc + +--let $xa_completion_action= ROLLBACK +--source include/rpl_xa_empty_transaction.inc + +--connection server_1 +set @@binlog_format = @sav_binlog_format; +set @@global.binlog_format = @sav_binlog_format; + +# +# Cleanup +--connection server_1 +--source include/rpl_end.inc + +--echo # End of rpl_xa_empty_transaction.test diff --git a/sql/log.cc b/sql/log.cc index ec212825787..d15e7b5dd88 100644 --- a/sql/log.cc +++ b/sql/log.cc @@ -2003,25 +2003,56 @@ static int binlog_xa_recover_dummy(handlerton *hton __attribute__((unused)), static int binlog_commit_by_xid(handlerton *hton, XID *xid) { + int rc= 0; THD *thd= current_thd; + /* the asserted state can't be reachable with xa commit */ + DBUG_ASSERT(!thd->get_stmt_da()->is_error() || + thd->get_stmt_da()->sql_errno() != ER_XA_RBROLLBACK); + /* + This is a recovered user xa transaction commit. + Create a "temporary" binlog transaction to write the commit record + into binlog. 
+ */ + THD_TRANS trans; + trans.ha_list= NULL; + + thd->ha_data[hton->slot].ha_info[1].register_ha(&trans, hton); + thd->ha_data[binlog_hton->slot].ha_info[1].set_trx_read_write(); (void) thd->binlog_setup_trx_data(); DBUG_ASSERT(thd->lex->sql_command == SQLCOM_XA_COMMIT); - return binlog_commit(hton, thd, TRUE); + rc= binlog_commit(hton, thd, TRUE); + thd->ha_data[binlog_hton->slot].ha_info[1].reset(); + + return rc; } static int binlog_rollback_by_xid(handlerton *hton, XID *xid) { + int rc= 0; THD *thd= current_thd; + if (thd->get_stmt_da()->is_error() && + thd->get_stmt_da()->sql_errno() == ER_XA_RBROLLBACK) + return rc; + + THD_TRANS trans; + trans.ha_list= NULL; + + thd->ha_data[hton->slot].ha_info[1].register_ha(&trans, hton); + thd->ha_data[hton->slot].ha_info[1].set_trx_read_write(); (void) thd->binlog_setup_trx_data(); DBUG_ASSERT(thd->lex->sql_command == SQLCOM_XA_ROLLBACK || (thd->transaction->xid_state.get_state_code() == XA_ROLLBACK_ONLY)); - return binlog_rollback(hton, thd, TRUE); + + rc= binlog_rollback(hton, thd, TRUE); + thd->ha_data[hton->slot].ha_info[1].reset(); + + return rc; } @@ -2159,10 +2190,16 @@ static int binlog_commit(handlerton *hton, THD *thd, bool all) } if (cache_mngr->trx_cache.empty() && - thd->transaction->xid_state.get_state_code() != XA_PREPARED) + (thd->transaction->xid_state.get_state_code() != XA_PREPARED || + !(thd->ha_data[binlog_hton->slot].ha_info[1].is_started() && + thd->ha_data[binlog_hton->slot].ha_info[1].is_trx_read_write()))) { /* - we're here because cache_log was flushed in MYSQL_BIN_LOG::log_xid() + This is an empty transaction commit (both the regular and xa), + or such transaction xa-prepare or + either one's statement having no effect on the transactional cache + as any prior to it. + The empty xa-prepare sinks in only when binlog is read-only. 
*/ cache_mngr->reset(false, true); THD_STAGE_INFO(thd, org_stage); @@ -2247,10 +2284,12 @@ static int binlog_rollback(handlerton *hton, THD *thd, bool all) } if (!cache_mngr->trx_cache.has_incident() && cache_mngr->trx_cache.empty() && - thd->transaction->xid_state.get_state_code() != XA_PREPARED) + (thd->transaction->xid_state.get_state_code() != XA_PREPARED || + !(thd->ha_data[binlog_hton->slot].ha_info[1].is_started() && + thd->ha_data[binlog_hton->slot].ha_info[1].is_trx_read_write()))) { /* - we're here because cache_log was flushed in MYSQL_BIN_LOG::log_xid() + The same comments apply as in the binlog commit method's branch. */ cache_mngr->reset(false, true); thd->reset_binlog_for_next_statement(); @@ -10290,6 +10329,7 @@ int TC_LOG_BINLOG::unlog_xa_prepare(THD *thd, bool all) /* an empty XA-prepare event group is logged */ rc= write_empty_xa_prepare(thd, cache_mngr); // normally gains need_unlog trans_register_ha(thd, true, binlog_hton, 0); // do it for future commmit + thd->ha_data[binlog_hton->slot].ha_info[1].set_trx_read_write(); } if (rw_count == 0 || !cache_mngr->need_unlog) return rc; -- cgit v1.2.1 From 8c5d323326d9d527e9a5e08c69eb6085953eb130 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jan=20Lindstr=C3=B6m?= Date: Tue, 25 Oct 2022 07:33:35 +0300 Subject: Additional fixes * galera_many_rows : reduce the time used * wsrep_thd.cc : remove incorrect assertion * disabled.def : disable failing test cases --- mysql-test/suite/galera/disabled.def | 7 +++++-- mysql-test/suite/galera/r/galera_many_rows.result | 14 +++++++------- mysql-test/suite/galera/t/galera_many_rows.test | 6 +++--- sql/wsrep_thd.h | 1 - 4 files changed, 15 insertions(+), 13 deletions(-) diff --git a/mysql-test/suite/galera/disabled.def b/mysql-test/suite/galera/disabled.def index 7902f7ef537..d9f4924756e 100644 --- a/mysql-test/suite/galera/disabled.def +++ b/mysql-test/suite/galera/disabled.def @@ -25,6 +25,9 @@ galera_var_ignore_apply_errors : MDEV-26770 galera_var_ignore_apply_errors 
fails galera_var_node_address : MDEV-20485 Galera test failure galera_var_retry_autocommit: MDEV-18181 Galera test failure on galera.galera_var_retry_autocommit partition : MDEV-19958 Galera test failure on galera.partition -query_cache: MDEV-15805 Test failure on galera.query_cache -versioning_trx_id: MDEV-18590: galera.versioning_trx_id: Test failure: mysqltest: Result content mismatch +query_cache : MDEV-15805 Test failure on galera.query_cache +versioning_trx_id : MDEV-18590: galera.versioning_trx_id: Test failure: mysqltest: Result content mismatch galera_bf_abort_at_after_statement : Unstable +galera.MW-284 : MDEV-29861: Galera test case hangs +galera.galera_binlog_checksum : MDEV-29861: Galera test case hangs +galera_var_notify_ssl_ipv6 : MDEV-29861: Galera test case hangs diff --git a/mysql-test/suite/galera/r/galera_many_rows.result b/mysql-test/suite/galera/r/galera_many_rows.result index 566bc59f8ab..b34c2484aea 100644 --- a/mysql-test/suite/galera/r/galera_many_rows.result +++ b/mysql-test/suite/galera/r/galera_many_rows.result @@ -5,32 +5,32 @@ connection node_2; connection node_1; SET SESSION innodb_lock_wait_timeout=600; SET SESSION lock_wait_timeout=600; -CREATE TABLE ten (f1 INTEGER) engine=InnoDB; +CREATE TABLE ten (f1 INTEGER NOT NULL PRIMARY KEY) engine=InnoDB; INSERT INTO ten VALUES (1),(2),(3),(4),(5),(6),(7),(8),(9),(10); CREATE TABLE t1 (f1 INTEGER AUTO_INCREMENT PRIMARY KEY, f2 INTEGER) Engine=InnoDB; -INSERT INTO t1 (f2) SELECT a1.f1 FROM ten AS a1, ten AS a2, ten AS a3, ten AS a4, ten AS a5; +INSERT INTO t1 (f2) SELECT a1.f1 FROM ten AS a1, ten AS a2, ten AS a3, ten AS a4; connection node_2; SET SESSION wsrep_sync_wait = 0; SET SESSION wsrep_sync_wait = 15; SET GLOBAL wsrep_provider_options = 'repl.causal_read_timeout=PT1H'; SELECT COUNT(*) FROM t1; COUNT(*) -100000 -INSERT INTO t1 (f2) SELECT a1.f1 FROM ten AS a1, ten AS a2, ten AS a3, ten AS a4, ten AS a5; +10000 +INSERT INTO t1 (f2) SELECT a1.f1 FROM ten AS a1, ten AS a2, ten AS a3, 
ten AS a4; connection node_1; SELECT COUNT(*) FROM t1; COUNT(*) -200000 +20000 UPDATE t1 SET f2 = 1; connection node_2; SELECT COUNT(*) FROM t1 WHERE f2 = 1; COUNT(*) -200000 +20000 connection node_1; START TRANSACTION; SELECT COUNT(*) FROM t1; COUNT(*) -200000 +20000 UPDATE t1 SET f2 = 3; connection node_2; START TRANSACTION; diff --git a/mysql-test/suite/galera/t/galera_many_rows.test b/mysql-test/suite/galera/t/galera_many_rows.test index bc9e99db8da..3623b3f33b0 100644 --- a/mysql-test/suite/galera/t/galera_many_rows.test +++ b/mysql-test/suite/galera/t/galera_many_rows.test @@ -10,11 +10,11 @@ SET SESSION innodb_lock_wait_timeout=600; SET SESSION lock_wait_timeout=600; -CREATE TABLE ten (f1 INTEGER) engine=InnoDB; +CREATE TABLE ten (f1 INTEGER NOT NULL PRIMARY KEY) engine=InnoDB; INSERT INTO ten VALUES (1),(2),(3),(4),(5),(6),(7),(8),(9),(10); CREATE TABLE t1 (f1 INTEGER AUTO_INCREMENT PRIMARY KEY, f2 INTEGER) Engine=InnoDB; -INSERT INTO t1 (f2) SELECT a1.f1 FROM ten AS a1, ten AS a2, ten AS a3, ten AS a4, ten AS a5; +INSERT INTO t1 (f2) SELECT a1.f1 FROM ten AS a1, ten AS a2, ten AS a3, ten AS a4; --connection node_2 SET SESSION wsrep_sync_wait = 0; @@ -24,7 +24,7 @@ SET SESSION wsrep_sync_wait = 15; SET GLOBAL wsrep_provider_options = 'repl.causal_read_timeout=PT1H'; SELECT COUNT(*) FROM t1; -INSERT INTO t1 (f2) SELECT a1.f1 FROM ten AS a1, ten AS a2, ten AS a3, ten AS a4, ten AS a5; +INSERT INTO t1 (f2) SELECT a1.f1 FROM ten AS a1, ten AS a2, ten AS a3, ten AS a4; --connection node_1 SELECT COUNT(*) FROM t1; diff --git a/sql/wsrep_thd.h b/sql/wsrep_thd.h index 31f95510c5e..e9add662e3f 100644 --- a/sql/wsrep_thd.h +++ b/sql/wsrep_thd.h @@ -184,7 +184,6 @@ void wsrep_reset_threadvars(THD *); static inline void wsrep_override_error(THD *thd, uint error, const char *format= 0, ...) 
{ - DBUG_ASSERT(error != ER_ERROR_DURING_COMMIT); Diagnostics_area *da= thd->get_stmt_da(); if (da->is_ok() || da->is_eof() || -- cgit v1.2.1 From 32158be720b85a3ae0e0eeebe1277c36f86dca38 Mon Sep 17 00:00:00 2001 From: Vladislav Vaintroub Date: Fri, 21 Oct 2022 19:50:07 +0200 Subject: MDEV-29811 server advertises ssl even if it's unusable. Abort startup, if SSL setup fails. Also, for the server always check that certificate matches private key (even if ssl_cert is not set, OpenSSL will try to use default one) --- mysql-test/main/bad_startup_options.result | 1 + mysql-test/main/bad_startup_options.test | 19 +++++++++++++++++++ sql/mysqld.cc | 7 +++---- vio/viosslfactories.c | 11 ++++++----- 4 files changed, 29 insertions(+), 9 deletions(-) create mode 100644 mysql-test/main/bad_startup_options.result create mode 100644 mysql-test/main/bad_startup_options.test diff --git a/mysql-test/main/bad_startup_options.result b/mysql-test/main/bad_startup_options.result new file mode 100644 index 00000000000..72c05cf5235 --- /dev/null +++ b/mysql-test/main/bad_startup_options.result @@ -0,0 +1 @@ +FOUND 1 /\[ERROR\] SSL error: Unable to get certificate/ in errorlog.err diff --git a/mysql-test/main/bad_startup_options.test b/mysql-test/main/bad_startup_options.test new file mode 100644 index 00000000000..bd0b6283854 --- /dev/null +++ b/mysql-test/main/bad_startup_options.test @@ -0,0 +1,19 @@ +--source include/not_embedded.inc +--source include/have_ssl_communication.inc + +--source include/shutdown_mysqld.inc + +# Try to start the server, with bad values for some options. 
+# Make sure, the starts fails, and expected message is in the error log + +--let errorlog=$MYSQL_TMP_DIR/errorlog.err +--let SEARCH_FILE=$errorlog + +# Bad ssl-cert +--error 1 +--exec $MYSQLD --defaults-group-suffix=.1 --defaults-file=$MYSQLTEST_VARDIR/my.cnf --ssl-cert=bad --log-error=$errorlog +--let SEARCH_PATTERN=\[ERROR\] SSL error: Unable to get certificate +--source include/search_pattern_in_file.inc +--remove_file $SEARCH_FILE + +--source include/start_mysqld.inc diff --git a/sql/mysqld.cc b/sql/mysqld.cc index 8c70a0d3145..5d58d42faf9 100644 --- a/sql/mysqld.cc +++ b/sql/mysqld.cc @@ -5037,10 +5037,9 @@ static void init_ssl() DBUG_PRINT("info",("ssl_acceptor_fd: %p", ssl_acceptor_fd)); if (!ssl_acceptor_fd) { - sql_print_warning("Failed to setup SSL"); - sql_print_warning("SSL error: %s", sslGetErrString(error)); - opt_use_ssl = 0; - have_ssl= SHOW_OPTION_DISABLED; + sql_print_error("Failed to setup SSL"); + sql_print_error("SSL error: %s", sslGetErrString(error)); + unireg_abort(1); } if (global_system_variables.log_warnings > 0) { diff --git a/vio/viosslfactories.c b/vio/viosslfactories.c index fe980322736..9b15e70294f 100644 --- a/vio/viosslfactories.c +++ b/vio/viosslfactories.c @@ -97,7 +97,7 @@ sslGetErrString(enum enum_ssl_init_error e) static int vio_set_cert_stuff(SSL_CTX *ctx, const char *cert_file, const char *key_file, - enum enum_ssl_init_error* error) + my_bool is_client, enum enum_ssl_init_error* error) { DBUG_ENTER("vio_set_cert_stuff"); DBUG_PRINT("enter", ("ctx: %p cert_file: %s key_file: %s", @@ -134,10 +134,10 @@ vio_set_cert_stuff(SSL_CTX *ctx, const char *cert_file, const char *key_file, } /* - If we are using DSA, we can copy the parameters from the private key - Now we know that a key and cert have been set against the SSL context + If certificate is used check if private key matches. + Note, that server side has to use certificate. 
*/ - if (cert_file && !SSL_CTX_check_private_key(ctx)) + if ((cert_file != NULL || !is_client) && !SSL_CTX_check_private_key(ctx)) { *error= SSL_INITERR_NOMATCH; DBUG_PRINT("error", ("%s",sslGetErrString(*error))); @@ -288,7 +288,8 @@ new_VioSSLFd(const char *key_file, const char *cert_file, #endif } - if (vio_set_cert_stuff(ssl_fd->ssl_context, cert_file, key_file, error)) + if (vio_set_cert_stuff(ssl_fd->ssl_context, cert_file, key_file, + is_client_method, error)) { DBUG_PRINT("error", ("vio_set_cert_stuff failed")); goto err2; -- cgit v1.2.1 From 1ff476b415cacc616f68af542b75793d064367ae Mon Sep 17 00:00:00 2001 From: Lawrin Novitsky Date: Mon, 12 Sep 2022 14:39:12 +0200 Subject: MDEV-29490 Renaming internally used client API to avoid name conflicts with C/C. The patch introduces mariadb_capi_rename.h which is included into mysql.h. The hew header contains macro definitions for the names being renamed. In versions 10.6+(i.e. where sql service exists) the renaming condition in the mariadb_capi_rename.h should be added with && !defined(MYSQL_DYNAMIC_PLUGIN) and look like The patch also contains removal of mysql.h from the api check. Disabling false_duper-6543 test for embedded. ha_federated.so uses C API. C API functions are being renamed in the server, but not renamed in embedded, since embedded server library should have proper C API, as expected by programs using it. Thus the same ha_federated.so cannot work both for server and embedded server library. As all federated tests are already disabled for embedded, federated isn't supposed to work for embedded anyway, and thus the test is being disabled. 
--- cmake/abi_check.cmake | 1 - include/mariadb_capi_rename.h | 57 ++ include/mysql.h | 1 + include/mysql.h.pp | 772 ----------------------- mysql-test/suite/plugins/t/false_dupes-6543.test | 1 + storage/federatedx/CMakeLists.txt | 3 +- storage/federatedx/federatedx_io_mysql.cc | 1 + storage/federatedx/ha_federatedx.cc | 1 + storage/sphinx/CMakeLists.txt | 2 +- 9 files changed, 64 insertions(+), 775 deletions(-) create mode 100644 include/mariadb_capi_rename.h delete mode 100644 include/mysql.h.pp diff --git a/cmake/abi_check.cmake b/cmake/abi_check.cmake index 805322229c4..1d897e5a3a7 100644 --- a/cmake/abi_check.cmake +++ b/cmake/abi_check.cmake @@ -38,7 +38,6 @@ IF(CMAKE_C_COMPILER_ID MATCHES "GNU|Clang" AND RUN_ABI_CHECK) SET(API_PREPROCESSOR_HEADER ${CMAKE_SOURCE_DIR}/include/mysql/plugin_audit.h ${CMAKE_SOURCE_DIR}/include/mysql/plugin_ftparser.h - ${CMAKE_SOURCE_DIR}/include/mysql.h ${CMAKE_SOURCE_DIR}/include/mysql/psi/psi_abi_v1.h ${CMAKE_SOURCE_DIR}/include/mysql/psi/psi_abi_v2.h ${CMAKE_SOURCE_DIR}/include/mysql/client_plugin.h diff --git a/include/mariadb_capi_rename.h b/include/mariadb_capi_rename.h new file mode 100644 index 00000000000..616a9142fe6 --- /dev/null +++ b/include/mariadb_capi_rename.h @@ -0,0 +1,57 @@ +/* Copyright (c) 2022, MariaDB + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; version 2 of the License. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. 
+ + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1335 USA */ + +/* Renaming C API symbols inside server + * client.c defines a number of functions from the C API, that are used in replication, in number of storage engine plugins, mariadb-backup. + * That can cause a problem if a plugin loads libmariadb/libmysql or a library, that has dependency on them. The known case is ODBC driver. + * Thus the header re-names those functions for internal use. + */ + +#ifndef MARIADB_CAPI_RENAME_INCLUDED +#define MARIADB_CAPI_RENAME_INCLUDED + +#if !defined(EMBEDDED_LIBRARY) + +#define MARIADB_ADD_PREFIX(_SYMBOL) server_##_SYMBOL +#define mysql_real_connect MARIADB_ADD_PREFIX(mysql_real_connect) +#define mysql_init MARIADB_ADD_PREFIX(mysql_init) +#define mysql_close MARIADB_ADD_PREFIX(mysql_close) +#define mysql_options MARIADB_ADD_PREFIX(mysql_options) +#define mysql_load_plugin MARIADB_ADD_PREFIX(mysql_load_plugin) +#define mysql_load_plugin_v MARIADB_ADD_PREFIX(mysql_load_plugin_v) +#define mysql_client_find_plugin MARIADB_ADD_PREFIX(mysql_client_find_plugin) +#define mysql_real_query MARIADB_ADD_PREFIX(mysql_real_query) +#define mysql_send_query MARIADB_ADD_PREFIX(mysql_send_query) +#define mysql_free_result MARIADB_ADD_PREFIX(mysql_free_result) +#define mysql_get_socket MARIADB_ADD_PREFIX(mysql_get_socket) +#define mysql_set_character_set MARIADB_ADD_PREFIX(mysql_set_character_set) +#define mysql_get_server_version MARIADB_ADD_PREFIX(mysql_get_server_version) +#define mysql_error MARIADB_ADD_PREFIX(mysql_error) +#define mysql_errno MARIADB_ADD_PREFIX(mysql_errno) +#define mysql_num_fields MARIADB_ADD_PREFIX(mysql_num_fields) +#define mysql_num_rows MARIADB_ADD_PREFIX(mysql_num_rows) +#define mysql_options4 MARIADB_ADD_PREFIX(mysql_options4) +#define mysql_fetch_lengths MARIADB_ADD_PREFIX(mysql_fetch_lengths) +#define 
mysql_fetch_row MARIADB_ADD_PREFIX(mysql_fetch_row) +#define mysql_affected_rows MARIADB_ADD_PREFIX(mysql_affected_rows) +#define mysql_store_result MARIADB_ADD_PREFIX(mysql_store_result) +#define mysql_select_db MARIADB_ADD_PREFIX(mysql_select_db) +#define mysql_get_ssl_cipher MARIADB_ADD_PREFIX(mysql_get_ssl_cipher) +#define mysql_ssl_set MARIADB_ADD_PREFIX(mysql_ssl_set) +#define mysql_client_register_plugin MARIADB_ADD_PREFIX(mysql_client_register_plugin) + +#endif // !EMBEDDED_LIBRARY && !MYSQL_DYNAMIC_PLUGIN + +#endif // !MARIADB_CAPI_RENAME_INCLUDED diff --git a/include/mysql.h b/include/mysql.h index ec49ca0482a..114d763e58c 100644 --- a/include/mysql.h +++ b/include/mysql.h @@ -73,6 +73,7 @@ typedef int my_socket; #endif /* my_socket_defined */ #endif /* MY_GLOBAL_INCLUDED */ +#include "mariadb_capi_rename.h" #include "mysql_version.h" #include "mysql_com.h" #include "mysql_time.h" diff --git a/include/mysql.h.pp b/include/mysql.h.pp deleted file mode 100644 index 1e4479e8f2b..00000000000 --- a/include/mysql.h.pp +++ /dev/null @@ -1,772 +0,0 @@ -typedef char my_bool; -typedef int my_socket; -enum enum_server_command -{ - COM_SLEEP, COM_QUIT, COM_INIT_DB, COM_QUERY, COM_FIELD_LIST, - COM_CREATE_DB, COM_DROP_DB, COM_REFRESH, COM_SHUTDOWN, COM_STATISTICS, - COM_PROCESS_INFO, COM_CONNECT, COM_PROCESS_KILL, COM_DEBUG, COM_PING, - COM_TIME, COM_DELAYED_INSERT, COM_CHANGE_USER, COM_BINLOG_DUMP, - COM_TABLE_DUMP, COM_CONNECT_OUT, COM_REGISTER_SLAVE, - COM_STMT_PREPARE, COM_STMT_EXECUTE, COM_STMT_SEND_LONG_DATA, COM_STMT_CLOSE, - COM_STMT_RESET, COM_SET_OPTION, COM_STMT_FETCH, COM_DAEMON, - COM_UNIMPLEMENTED, - COM_RESET_CONNECTION, - COM_MDB_GAP_BEG, - COM_MDB_GAP_END=249, - COM_STMT_BULK_EXECUTE=250, - COM_SLAVE_WORKER=251, - COM_SLAVE_IO=252, - COM_SLAVE_SQL=253, - COM_MULTI=254, - COM_END=255 -}; -enum enum_indicator_type -{ - STMT_INDICATOR_NONE= 0, - STMT_INDICATOR_NULL, - STMT_INDICATOR_DEFAULT, - STMT_INDICATOR_IGNORE -}; -struct st_vio; -typedef struct 
st_vio Vio; -typedef struct st_net { - Vio *vio; - unsigned char *buff,*buff_end,*write_pos,*read_pos; - my_socket fd; - unsigned long remain_in_buf,length, buf_length, where_b; - unsigned long max_packet,max_packet_size; - unsigned int pkt_nr,compress_pkt_nr; - unsigned int write_timeout, read_timeout, retry_count; - int fcntl; - unsigned int *return_status; - unsigned char reading_or_writing; - char save_char; - char net_skip_rest_factor; - my_bool thread_specific_malloc; - unsigned char compress; - my_bool unused3; - void *thd; - unsigned int last_errno; - unsigned char error; - my_bool unused4; - my_bool unused5; - char last_error[512]; - char sqlstate[5 +1]; - void *extension; -} NET; -enum enum_field_types { MYSQL_TYPE_DECIMAL, MYSQL_TYPE_TINY, - MYSQL_TYPE_SHORT, MYSQL_TYPE_LONG, - MYSQL_TYPE_FLOAT, MYSQL_TYPE_DOUBLE, - MYSQL_TYPE_NULL, MYSQL_TYPE_TIMESTAMP, - MYSQL_TYPE_LONGLONG,MYSQL_TYPE_INT24, - MYSQL_TYPE_DATE, MYSQL_TYPE_TIME, - MYSQL_TYPE_DATETIME, MYSQL_TYPE_YEAR, - MYSQL_TYPE_NEWDATE, MYSQL_TYPE_VARCHAR, - MYSQL_TYPE_BIT, - MYSQL_TYPE_TIMESTAMP2, - MYSQL_TYPE_DATETIME2, - MYSQL_TYPE_TIME2, - MYSQL_TYPE_BLOB_COMPRESSED= 140, - MYSQL_TYPE_VARCHAR_COMPRESSED= 141, - MYSQL_TYPE_NEWDECIMAL=246, - MYSQL_TYPE_ENUM=247, - MYSQL_TYPE_SET=248, - MYSQL_TYPE_TINY_BLOB=249, - MYSQL_TYPE_MEDIUM_BLOB=250, - MYSQL_TYPE_LONG_BLOB=251, - MYSQL_TYPE_BLOB=252, - MYSQL_TYPE_VAR_STRING=253, - MYSQL_TYPE_STRING=254, - MYSQL_TYPE_GEOMETRY=255 -}; -enum mysql_enum_shutdown_level { - SHUTDOWN_DEFAULT = 0, - SHUTDOWN_WAIT_CONNECTIONS= (unsigned char)(1 << 0), - SHUTDOWN_WAIT_TRANSACTIONS= (unsigned char)(1 << 1), - SHUTDOWN_WAIT_UPDATES= (unsigned char)(1 << 3), - SHUTDOWN_WAIT_ALL_BUFFERS= ((unsigned char)(1 << 3) << 1), - SHUTDOWN_WAIT_CRITICAL_BUFFERS= ((unsigned char)(1 << 3) << 1) + 1 -}; -enum enum_cursor_type -{ - CURSOR_TYPE_NO_CURSOR= 0, - CURSOR_TYPE_READ_ONLY= 1, - CURSOR_TYPE_FOR_UPDATE= 2, - CURSOR_TYPE_SCROLLABLE= 4 -}; -enum enum_mysql_set_option -{ - 
MYSQL_OPTION_MULTI_STATEMENTS_ON, - MYSQL_OPTION_MULTI_STATEMENTS_OFF -}; -enum enum_session_state_type -{ - SESSION_TRACK_SYSTEM_VARIABLES, - SESSION_TRACK_SCHEMA, - SESSION_TRACK_STATE_CHANGE, - SESSION_TRACK_GTIDS, - SESSION_TRACK_TRANSACTION_CHARACTERISTICS, - SESSION_TRACK_TRANSACTION_STATE, - SESSION_TRACK_always_at_the_end -}; -my_bool my_net_init(NET *net, Vio* vio, void *thd, unsigned int my_flags); -void my_net_local_init(NET *net); -void net_end(NET *net); -void net_clear(NET *net, my_bool clear_buffer); -my_bool net_realloc(NET *net, size_t length); -my_bool net_flush(NET *net); -my_bool my_net_write(NET *net,const unsigned char *packet, size_t len); -my_bool net_write_command(NET *net,unsigned char command, - const unsigned char *header, size_t head_len, - const unsigned char *packet, size_t len); -int net_real_write(NET *net,const unsigned char *packet, size_t len); -unsigned long my_net_read_packet(NET *net, my_bool read_from_server); -unsigned long my_net_read_packet_reallen(NET *net, my_bool read_from_server, - unsigned long* reallen); -struct sockaddr; -int my_connect(my_socket s, const struct sockaddr *name, unsigned int namelen, - unsigned int timeout); -struct my_rnd_struct; -enum Item_result -{ - STRING_RESULT=0, REAL_RESULT, INT_RESULT, ROW_RESULT, DECIMAL_RESULT, - TIME_RESULT -}; -typedef struct st_udf_args -{ - unsigned int arg_count; - enum Item_result *arg_type; - char **args; - unsigned long *lengths; - char *maybe_null; - const char **attributes; - unsigned long *attribute_lengths; - void *extension; -} UDF_ARGS; -typedef struct st_udf_init -{ - my_bool maybe_null; - unsigned int decimals; - unsigned long max_length; - char *ptr; - my_bool const_item; - void *extension; -} UDF_INIT; -void create_random_string(char *to, unsigned int length, - struct my_rnd_struct *rand_st); -void hash_password(unsigned long *to, const char *password, unsigned int password_len); -void make_scrambled_password_323(char *to, const char *password); -void 
scramble_323(char *to, const char *message, const char *password); -my_bool check_scramble_323(const unsigned char *reply, const char *message, - unsigned long *salt); -void get_salt_from_password_323(unsigned long *res, const char *password); -void make_scrambled_password(char *to, const char *password); -void scramble(char *to, const char *message, const char *password); -my_bool check_scramble(const unsigned char *reply, const char *message, - const unsigned char *hash_stage2); -void get_salt_from_password(unsigned char *res, const char *password); -char *octet2hex(char *to, const char *str, size_t len); -char *get_tty_password(const char *opt_message); -void get_tty_password_buff(const char *opt_message, char *to, size_t length); -const char *mysql_errno_to_sqlstate(unsigned int mysql_errno); -my_bool my_thread_init(void); -void my_thread_end(void); -typedef long my_time_t; -enum enum_mysql_timestamp_type -{ - MYSQL_TIMESTAMP_NONE= -2, MYSQL_TIMESTAMP_ERROR= -1, - MYSQL_TIMESTAMP_DATE= 0, MYSQL_TIMESTAMP_DATETIME= 1, MYSQL_TIMESTAMP_TIME= 2 -}; -typedef struct st_mysql_time -{ - unsigned int year, month, day, hour, minute, second; - unsigned long second_part; - my_bool neg; - enum enum_mysql_timestamp_type time_type; -} MYSQL_TIME; -typedef struct st_list { - struct st_list *prev,*next; - void *data; -} LIST; -typedef int (*list_walk_action)(void *,void *); -extern LIST *list_add(LIST *root,LIST *element); -extern LIST *list_delete(LIST *root,LIST *element); -extern LIST *list_cons(void *data,LIST *root); -extern LIST *list_reverse(LIST *root); -extern void list_free(LIST *root,unsigned int free_data); -extern unsigned int list_length(LIST *); -extern int list_walk(LIST *,list_walk_action action,unsigned char * argument); -extern unsigned int mariadb_deinitialize_ssl; -extern unsigned int mysql_port; -extern char *mysql_unix_port; -typedef struct st_mysql_field { - char *name; - char *org_name; - char *table; - char *org_table; - char *db; - char *catalog; - 
char *def; - unsigned long length; - unsigned long max_length; - unsigned int name_length; - unsigned int org_name_length; - unsigned int table_length; - unsigned int org_table_length; - unsigned int db_length; - unsigned int catalog_length; - unsigned int def_length; - unsigned int flags; - unsigned int decimals; - unsigned int charsetnr; - enum enum_field_types type; - void *extension; -} MYSQL_FIELD; -typedef char **MYSQL_ROW; -typedef unsigned int MYSQL_FIELD_OFFSET; -typedef unsigned long long my_ulonglong; -typedef struct st_used_mem -{ - struct st_used_mem *next; - size_t left; - size_t size; -} USED_MEM; -typedef struct st_mem_root -{ - USED_MEM *free; - USED_MEM *used; - USED_MEM *pre_alloc; - size_t min_malloc; - size_t block_size; - size_t total_alloc; - unsigned int block_num; - unsigned int first_block_usage; - void (*error_handler)(void); - const char *name; -} MEM_ROOT; -typedef struct st_typelib { - unsigned int count; - const char *name; - const char **type_names; - unsigned int *type_lengths; -} TYPELIB; -extern my_ulonglong find_typeset(char *x, TYPELIB *typelib,int *error_position); -extern int find_type_with_warning(const char *x, TYPELIB *typelib, - const char *option); -extern int find_type(const char *x, const TYPELIB *typelib, unsigned int flags); -extern void make_type(char *to,unsigned int nr,TYPELIB *typelib); -extern const char *get_type(TYPELIB *typelib,unsigned int nr); -extern TYPELIB *copy_typelib(MEM_ROOT *root, TYPELIB *from); -extern TYPELIB sql_protocol_typelib; -my_ulonglong find_set_from_flags(const TYPELIB *lib, unsigned int default_name, - my_ulonglong cur_set, my_ulonglong default_set, - const char *str, unsigned int length, - char **err_pos, unsigned int *err_len); -typedef struct st_mysql_rows { - struct st_mysql_rows *next; - MYSQL_ROW data; - unsigned long length; -} MYSQL_ROWS; -typedef MYSQL_ROWS *MYSQL_ROW_OFFSET; -typedef struct embedded_query_result EMBEDDED_QUERY_RESULT; -typedef struct st_mysql_data { - 
MYSQL_ROWS *data; - struct embedded_query_result *embedded_info; - MEM_ROOT alloc; - my_ulonglong rows; - unsigned int fields; - void *extension; -} MYSQL_DATA; -enum mysql_option -{ - MYSQL_OPT_CONNECT_TIMEOUT, MYSQL_OPT_COMPRESS, MYSQL_OPT_NAMED_PIPE, - MYSQL_INIT_COMMAND, MYSQL_READ_DEFAULT_FILE, MYSQL_READ_DEFAULT_GROUP, - MYSQL_SET_CHARSET_DIR, MYSQL_SET_CHARSET_NAME, MYSQL_OPT_LOCAL_INFILE, - MYSQL_OPT_PROTOCOL, MYSQL_SHARED_MEMORY_BASE_NAME, MYSQL_OPT_READ_TIMEOUT, - MYSQL_OPT_WRITE_TIMEOUT, MYSQL_OPT_USE_RESULT, - MYSQL_OPT_USE_REMOTE_CONNECTION, MYSQL_OPT_USE_EMBEDDED_CONNECTION, - MYSQL_OPT_GUESS_CONNECTION, MYSQL_SET_CLIENT_IP, MYSQL_SECURE_AUTH, - MYSQL_REPORT_DATA_TRUNCATION, MYSQL_OPT_RECONNECT, - MYSQL_OPT_SSL_VERIFY_SERVER_CERT, MYSQL_PLUGIN_DIR, MYSQL_DEFAULT_AUTH, - MYSQL_OPT_BIND, - MYSQL_OPT_SSL_KEY, MYSQL_OPT_SSL_CERT, - MYSQL_OPT_SSL_CA, MYSQL_OPT_SSL_CAPATH, MYSQL_OPT_SSL_CIPHER, - MYSQL_OPT_SSL_CRL, MYSQL_OPT_SSL_CRLPATH, - MYSQL_OPT_CONNECT_ATTR_RESET, MYSQL_OPT_CONNECT_ATTR_ADD, - MYSQL_OPT_CONNECT_ATTR_DELETE, - MYSQL_SERVER_PUBLIC_KEY, - MYSQL_ENABLE_CLEARTEXT_PLUGIN, - MYSQL_OPT_CAN_HANDLE_EXPIRED_PASSWORDS, - MYSQL_PROGRESS_CALLBACK=5999, - MYSQL_OPT_NONBLOCK, - MYSQL_OPT_USE_THREAD_SPECIFIC_MEMORY -}; -struct st_mysql_options_extention; -struct st_mysql_options { - unsigned int connect_timeout, read_timeout, write_timeout; - unsigned int port, protocol; - unsigned long client_flag; - char *host,*user,*password,*unix_socket,*db; - struct st_dynamic_array *init_commands; - char *my_cnf_file,*my_cnf_group, *charset_dir, *charset_name; - char *ssl_key; - char *ssl_cert; - char *ssl_ca; - char *ssl_capath; - char *ssl_cipher; - char *shared_memory_base_name; - unsigned long max_allowed_packet; - my_bool use_ssl; - my_bool compress,named_pipe; - my_bool use_thread_specific_memory; - my_bool unused2; - my_bool unused3; - my_bool unused4; - enum mysql_option methods_to_use; - char *client_ip; - my_bool secure_auth; - my_bool 
report_data_truncation; - int (*local_infile_init)(void **, const char *, void *); - int (*local_infile_read)(void *, char *, unsigned int); - void (*local_infile_end)(void *); - int (*local_infile_error)(void *, char *, unsigned int); - void *local_infile_userdata; - struct st_mysql_options_extention *extension; -}; -enum mysql_status -{ - MYSQL_STATUS_READY, MYSQL_STATUS_GET_RESULT, MYSQL_STATUS_USE_RESULT, - MYSQL_STATUS_STATEMENT_GET_RESULT -}; -enum mysql_protocol_type -{ - MYSQL_PROTOCOL_DEFAULT, MYSQL_PROTOCOL_TCP, MYSQL_PROTOCOL_SOCKET, - MYSQL_PROTOCOL_PIPE, MYSQL_PROTOCOL_MEMORY -}; -typedef struct character_set -{ - unsigned int number; - unsigned int state; - const char *csname; - const char *name; - const char *comment; - const char *dir; - unsigned int mbminlen; - unsigned int mbmaxlen; -} MY_CHARSET_INFO; -struct st_mysql_methods; -struct st_mysql_stmt; -typedef struct st_mysql -{ - NET net; - unsigned char *connector_fd; - char *host,*user,*passwd,*unix_socket,*server_version,*host_info; - char *info, *db; - const struct charset_info_st *charset; - MYSQL_FIELD *fields; - MEM_ROOT field_alloc; - my_ulonglong affected_rows; - my_ulonglong insert_id; - my_ulonglong extra_info; - unsigned long thread_id; - unsigned long packet_length; - unsigned int port; - unsigned long client_flag,server_capabilities; - unsigned int protocol_version; - unsigned int field_count; - unsigned int server_status; - unsigned int server_language; - unsigned int warning_count; - struct st_mysql_options options; - enum mysql_status status; - my_bool free_me; - my_bool reconnect; - char scramble[20 +1]; - my_bool auto_local_infile; - void *unused2, *unused3, *unused4, *unused5; - LIST *stmts; - const struct st_mysql_methods *methods; - void *thd; - my_bool *unbuffered_fetch_owner; - char *info_buffer; - void *extension; -} MYSQL; -typedef struct st_mysql_res { - my_ulonglong row_count; - MYSQL_FIELD *fields; - MYSQL_DATA *data; - MYSQL_ROWS *data_cursor; - unsigned long 
*lengths; - MYSQL *handle; - const struct st_mysql_methods *methods; - MYSQL_ROW row; - MYSQL_ROW current_row; - MEM_ROOT field_alloc; - unsigned int field_count, current_field; - my_bool eof; - my_bool unbuffered_fetch_cancelled; - void *extension; -} MYSQL_RES; -typedef struct st_mysql_parameters -{ - unsigned long *p_max_allowed_packet; - unsigned long *p_net_buffer_length; - void *extension; -} MYSQL_PARAMETERS; -int mysql_server_init(int argc, char **argv, char **groups); -void mysql_server_end(void); -MYSQL_PARAMETERS * mysql_get_parameters(void); -my_bool mysql_thread_init(void); -void mysql_thread_end(void); -my_ulonglong mysql_num_rows(MYSQL_RES *res); -unsigned int mysql_num_fields(MYSQL_RES *res); -my_bool mysql_eof(MYSQL_RES *res); -MYSQL_FIELD * mysql_fetch_field_direct(MYSQL_RES *res, - unsigned int fieldnr); -MYSQL_FIELD * mysql_fetch_fields(MYSQL_RES *res); -MYSQL_ROW_OFFSET mysql_row_tell(MYSQL_RES *res); -MYSQL_FIELD_OFFSET mysql_field_tell(MYSQL_RES *res); -unsigned int mysql_field_count(MYSQL *mysql); -my_ulonglong mysql_affected_rows(MYSQL *mysql); -my_ulonglong mysql_insert_id(MYSQL *mysql); -unsigned int mysql_errno(MYSQL *mysql); -const char * mysql_error(MYSQL *mysql); -const char * mysql_sqlstate(MYSQL *mysql); -unsigned int mysql_warning_count(MYSQL *mysql); -const char * mysql_info(MYSQL *mysql); -unsigned long mysql_thread_id(MYSQL *mysql); -const char * mysql_character_set_name(MYSQL *mysql); -int mysql_set_character_set(MYSQL *mysql, const char *csname); -int mysql_set_character_set_start(int *ret, MYSQL *mysql, - const char *csname); -int mysql_set_character_set_cont(int *ret, MYSQL *mysql, - int status); -MYSQL * mysql_init(MYSQL *mysql); -my_bool mysql_ssl_set(MYSQL *mysql, const char *key, - const char *cert, const char *ca, - const char *capath, const char *cipher); -const char * mysql_get_ssl_cipher(MYSQL *mysql); -my_bool mysql_change_user(MYSQL *mysql, const char *user, - const char *passwd, const char *db); -int 
mysql_change_user_start(my_bool *ret, MYSQL *mysql, - const char *user, - const char *passwd, - const char *db); -int mysql_change_user_cont(my_bool *ret, MYSQL *mysql, - int status); -MYSQL * mysql_real_connect(MYSQL *mysql, const char *host, - const char *user, - const char *passwd, - const char *db, - unsigned int port, - const char *unix_socket, - unsigned long clientflag); -int mysql_real_connect_start(MYSQL **ret, MYSQL *mysql, - const char *host, - const char *user, - const char *passwd, - const char *db, - unsigned int port, - const char *unix_socket, - unsigned long clientflag); -int mysql_real_connect_cont(MYSQL **ret, MYSQL *mysql, - int status); -int mysql_select_db(MYSQL *mysql, const char *db); -int mysql_select_db_start(int *ret, MYSQL *mysql, - const char *db); -int mysql_select_db_cont(int *ret, MYSQL *mysql, - int status); -int mysql_query(MYSQL *mysql, const char *q); -int mysql_query_start(int *ret, MYSQL *mysql, - const char *q); -int mysql_query_cont(int *ret, MYSQL *mysql, - int status); -int mysql_send_query(MYSQL *mysql, const char *q, - unsigned long length); -int mysql_send_query_start(int *ret, MYSQL *mysql, - const char *q, - unsigned long length); -int mysql_send_query_cont(int *ret, MYSQL *mysql, - int status); -int mysql_real_query(MYSQL *mysql, const char *q, - unsigned long length); -int mysql_real_query_start(int *ret, MYSQL *mysql, - const char *q, - unsigned long length); -int mysql_real_query_cont(int *ret, MYSQL *mysql, - int status); -MYSQL_RES * mysql_store_result(MYSQL *mysql); -int mysql_store_result_start(MYSQL_RES **ret, MYSQL *mysql); -int mysql_store_result_cont(MYSQL_RES **ret, MYSQL *mysql, - int status); -MYSQL_RES * mysql_use_result(MYSQL *mysql); -void mysql_get_character_set_info(MYSQL *mysql, - MY_CHARSET_INFO *charset); -void -mysql_set_local_infile_handler(MYSQL *mysql, - int (*local_infile_init)(void **, const char *, - void *), - int (*local_infile_read)(void *, char *, - unsigned int), - void 
(*local_infile_end)(void *), - int (*local_infile_error)(void *, char*, - unsigned int), - void *); -void -mysql_set_local_infile_default(MYSQL *mysql); -int mysql_shutdown(MYSQL *mysql, - enum mysql_enum_shutdown_level - shutdown_level); -int mysql_shutdown_start(int *ret, MYSQL *mysql, - enum mysql_enum_shutdown_level - shutdown_level); -int mysql_shutdown_cont(int *ret, MYSQL *mysql, - int status); -int mysql_dump_debug_info(MYSQL *mysql); -int mysql_dump_debug_info_start(int *ret, MYSQL *mysql); -int mysql_dump_debug_info_cont(int *ret, MYSQL *mysql, - int status); -int mysql_refresh(MYSQL *mysql, - unsigned int refresh_options); -int mysql_refresh_start(int *ret, MYSQL *mysql, - unsigned int refresh_options); -int mysql_refresh_cont(int *ret, MYSQL *mysql, int status); -int mysql_kill(MYSQL *mysql,unsigned long pid); -int mysql_kill_start(int *ret, MYSQL *mysql, - unsigned long pid); -int mysql_kill_cont(int *ret, MYSQL *mysql, int status); -int mysql_set_server_option(MYSQL *mysql, - enum enum_mysql_set_option - option); -int mysql_set_server_option_start(int *ret, MYSQL *mysql, - enum enum_mysql_set_option - option); -int mysql_set_server_option_cont(int *ret, MYSQL *mysql, - int status); -int mysql_ping(MYSQL *mysql); -int mysql_ping_start(int *ret, MYSQL *mysql); -int mysql_ping_cont(int *ret, MYSQL *mysql, int status); -const char * mysql_stat(MYSQL *mysql); -int mysql_stat_start(const char **ret, MYSQL *mysql); -int mysql_stat_cont(const char **ret, MYSQL *mysql, - int status); -const char * mysql_get_server_info(MYSQL *mysql); -const char * mysql_get_server_name(MYSQL *mysql); -const char * mysql_get_client_info(void); -unsigned long mysql_get_client_version(void); -const char * mysql_get_host_info(MYSQL *mysql); -unsigned long mysql_get_server_version(MYSQL *mysql); -unsigned int mysql_get_proto_info(MYSQL *mysql); -MYSQL_RES * mysql_list_dbs(MYSQL *mysql,const char *wild); -int mysql_list_dbs_start(MYSQL_RES **ret, MYSQL *mysql, - const char *wild); 
-int mysql_list_dbs_cont(MYSQL_RES **ret, MYSQL *mysql, - int status); -MYSQL_RES * mysql_list_tables(MYSQL *mysql,const char *wild); -int mysql_list_tables_start(MYSQL_RES **ret, MYSQL *mysql, - const char *wild); -int mysql_list_tables_cont(MYSQL_RES **ret, MYSQL *mysql, - int status); -MYSQL_RES * mysql_list_processes(MYSQL *mysql); -int mysql_list_processes_start(MYSQL_RES **ret, - MYSQL *mysql); -int mysql_list_processes_cont(MYSQL_RES **ret, MYSQL *mysql, - int status); -int mysql_options(MYSQL *mysql,enum mysql_option option, - const void *arg); -int mysql_options4(MYSQL *mysql,enum mysql_option option, - const void *arg1, const void *arg2); -void mysql_free_result(MYSQL_RES *result); -int mysql_free_result_start(MYSQL_RES *result); -int mysql_free_result_cont(MYSQL_RES *result, int status); -void mysql_data_seek(MYSQL_RES *result, - my_ulonglong offset); -MYSQL_ROW_OFFSET mysql_row_seek(MYSQL_RES *result, - MYSQL_ROW_OFFSET offset); -MYSQL_FIELD_OFFSET mysql_field_seek(MYSQL_RES *result, - MYSQL_FIELD_OFFSET offset); -MYSQL_ROW mysql_fetch_row(MYSQL_RES *result); -int mysql_fetch_row_start(MYSQL_ROW *ret, - MYSQL_RES *result); -int mysql_fetch_row_cont(MYSQL_ROW *ret, MYSQL_RES *result, - int status); -unsigned long * mysql_fetch_lengths(MYSQL_RES *result); -MYSQL_FIELD * mysql_fetch_field(MYSQL_RES *result); -MYSQL_RES * mysql_list_fields(MYSQL *mysql, const char *table, - const char *wild); -int mysql_list_fields_start(MYSQL_RES **ret, MYSQL *mysql, - const char *table, - const char *wild); -int mysql_list_fields_cont(MYSQL_RES **ret, MYSQL *mysql, - int status); -unsigned long mysql_escape_string(char *to,const char *from, - unsigned long from_length); -unsigned long mysql_hex_string(char *to,const char *from, - unsigned long from_length); -unsigned long mysql_real_escape_string(MYSQL *mysql, - char *to,const char *from, - unsigned long length); -void mysql_debug(const char *debug); -void myodbc_remove_escape(MYSQL *mysql,char *name); -unsigned int 
mysql_thread_safe(void); -my_bool mysql_embedded(void); -my_bool mariadb_connection(MYSQL *mysql); -my_bool mysql_read_query_result(MYSQL *mysql); -int mysql_read_query_result_start(my_bool *ret, - MYSQL *mysql); -int mysql_read_query_result_cont(my_bool *ret, - MYSQL *mysql, int status); -enum enum_mysql_stmt_state -{ - MYSQL_STMT_INIT_DONE= 1, MYSQL_STMT_PREPARE_DONE, MYSQL_STMT_EXECUTE_DONE, - MYSQL_STMT_FETCH_DONE -}; -typedef struct st_mysql_bind -{ - unsigned long *length; - my_bool *is_null; - void *buffer; - my_bool *error; - unsigned char *row_ptr; - void (*store_param_func)(NET *net, struct st_mysql_bind *param); - void (*fetch_result)(struct st_mysql_bind *, MYSQL_FIELD *, - unsigned char **row); - void (*skip_result)(struct st_mysql_bind *, MYSQL_FIELD *, - unsigned char **row); - unsigned long buffer_length; - unsigned long offset; - unsigned long length_value; - unsigned int param_number; - unsigned int pack_length; - enum enum_field_types buffer_type; - my_bool error_value; - my_bool is_unsigned; - my_bool long_data_used; - my_bool is_null_value; - void *extension; -} MYSQL_BIND; -struct st_mysql_stmt_extension; -typedef struct st_mysql_stmt -{ - MEM_ROOT mem_root; - LIST list; - MYSQL *mysql; - MYSQL_BIND *params; - MYSQL_BIND *bind; - MYSQL_FIELD *fields; - MYSQL_DATA result; - MYSQL_ROWS *data_cursor; - int (*read_row_func)(struct st_mysql_stmt *stmt, - unsigned char **row); - my_ulonglong affected_rows; - my_ulonglong insert_id; - unsigned long stmt_id; - unsigned long flags; - unsigned long prefetch_rows; - unsigned int server_status; - unsigned int last_errno; - unsigned int param_count; - unsigned int field_count; - enum enum_mysql_stmt_state state; - char last_error[512]; - char sqlstate[5 +1]; - my_bool send_types_to_server; - my_bool bind_param_done; - unsigned char bind_result_done; - my_bool unbuffered_fetch_cancelled; - my_bool update_max_length; - struct st_mysql_stmt_extension *extension; -} MYSQL_STMT; -enum enum_stmt_attr_type -{ - 
STMT_ATTR_UPDATE_MAX_LENGTH, - STMT_ATTR_CURSOR_TYPE, - STMT_ATTR_PREFETCH_ROWS -}; -MYSQL_STMT * mysql_stmt_init(MYSQL *mysql); -int mysql_stmt_prepare(MYSQL_STMT *stmt, const char *query, - unsigned long length); -int mysql_stmt_prepare_start(int *ret, MYSQL_STMT *stmt, - const char *query, unsigned long length); -int mysql_stmt_prepare_cont(int *ret, MYSQL_STMT *stmt, int status); -int mysql_stmt_execute(MYSQL_STMT *stmt); -int mysql_stmt_execute_start(int *ret, MYSQL_STMT *stmt); -int mysql_stmt_execute_cont(int *ret, MYSQL_STMT *stmt, int status); -int mysql_stmt_fetch(MYSQL_STMT *stmt); -int mysql_stmt_fetch_start(int *ret, MYSQL_STMT *stmt); -int mysql_stmt_fetch_cont(int *ret, MYSQL_STMT *stmt, int status); -int mysql_stmt_fetch_column(MYSQL_STMT *stmt, MYSQL_BIND *bind_arg, - unsigned int column, - unsigned long offset); -int mysql_stmt_store_result(MYSQL_STMT *stmt); -int mysql_stmt_store_result_start(int *ret, MYSQL_STMT *stmt); -int mysql_stmt_store_result_cont(int *ret, MYSQL_STMT *stmt, - int status); -unsigned long mysql_stmt_param_count(MYSQL_STMT * stmt); -my_bool mysql_stmt_attr_set(MYSQL_STMT *stmt, - enum enum_stmt_attr_type attr_type, - const void *attr); -my_bool mysql_stmt_attr_get(MYSQL_STMT *stmt, - enum enum_stmt_attr_type attr_type, - void *attr); -my_bool mysql_stmt_bind_param(MYSQL_STMT * stmt, MYSQL_BIND * bnd); -my_bool mysql_stmt_bind_result(MYSQL_STMT * stmt, MYSQL_BIND * bnd); -my_bool mysql_stmt_close(MYSQL_STMT * stmt); -int mysql_stmt_close_start(my_bool *ret, MYSQL_STMT *stmt); -int mysql_stmt_close_cont(my_bool *ret, MYSQL_STMT * stmt, int status); -my_bool mysql_stmt_reset(MYSQL_STMT * stmt); -int mysql_stmt_reset_start(my_bool *ret, MYSQL_STMT * stmt); -int mysql_stmt_reset_cont(my_bool *ret, MYSQL_STMT *stmt, int status); -my_bool mysql_stmt_free_result(MYSQL_STMT *stmt); -int mysql_stmt_free_result_start(my_bool *ret, MYSQL_STMT *stmt); -int mysql_stmt_free_result_cont(my_bool *ret, MYSQL_STMT *stmt, - int status); 
-my_bool mysql_stmt_send_long_data(MYSQL_STMT *stmt, - unsigned int param_number, - const char *data, - unsigned long length); -int mysql_stmt_send_long_data_start(my_bool *ret, MYSQL_STMT *stmt, - unsigned int param_number, - const char *data, - unsigned long len); -int mysql_stmt_send_long_data_cont(my_bool *ret, MYSQL_STMT *stmt, - int status); -MYSQL_RES * mysql_stmt_result_metadata(MYSQL_STMT *stmt); -MYSQL_RES * mysql_stmt_param_metadata(MYSQL_STMT *stmt); -unsigned int mysql_stmt_errno(MYSQL_STMT * stmt); -const char * mysql_stmt_error(MYSQL_STMT * stmt); -const char * mysql_stmt_sqlstate(MYSQL_STMT * stmt); -MYSQL_ROW_OFFSET mysql_stmt_row_seek(MYSQL_STMT *stmt, - MYSQL_ROW_OFFSET offset); -MYSQL_ROW_OFFSET mysql_stmt_row_tell(MYSQL_STMT *stmt); -void mysql_stmt_data_seek(MYSQL_STMT *stmt, my_ulonglong offset); -my_ulonglong mysql_stmt_num_rows(MYSQL_STMT *stmt); -my_ulonglong mysql_stmt_affected_rows(MYSQL_STMT *stmt); -my_ulonglong mysql_stmt_insert_id(MYSQL_STMT *stmt); -unsigned int mysql_stmt_field_count(MYSQL_STMT *stmt); -my_bool mysql_commit(MYSQL * mysql); -int mysql_commit_start(my_bool *ret, MYSQL * mysql); -int mysql_commit_cont(my_bool *ret, MYSQL * mysql, int status); -my_bool mysql_rollback(MYSQL * mysql); -int mysql_rollback_start(my_bool *ret, MYSQL * mysql); -int mysql_rollback_cont(my_bool *ret, MYSQL * mysql, int status); -my_bool mysql_autocommit(MYSQL * mysql, my_bool auto_mode); -int mysql_autocommit_start(my_bool *ret, MYSQL * mysql, - my_bool auto_mode); -int mysql_autocommit_cont(my_bool *ret, MYSQL * mysql, int status); -my_bool mysql_more_results(MYSQL *mysql); -int mysql_next_result(MYSQL *mysql); -int mysql_next_result_start(int *ret, MYSQL *mysql); -int mysql_next_result_cont(int *ret, MYSQL *mysql, int status); -int mysql_stmt_next_result(MYSQL_STMT *stmt); -int mysql_stmt_next_result_start(int *ret, MYSQL_STMT *stmt); -int mysql_stmt_next_result_cont(int *ret, MYSQL_STMT *stmt, int status); -void mysql_close_slow_part(MYSQL 
*mysql); -void mysql_close(MYSQL *sock); -int mysql_close_start(MYSQL *sock); -int mysql_close_cont(MYSQL *sock, int status); -my_socket mysql_get_socket(const MYSQL *mysql); -unsigned int mysql_get_timeout_value(const MYSQL *mysql); -unsigned int mysql_get_timeout_value_ms(const MYSQL *mysql); -unsigned long mysql_net_read_packet(MYSQL *mysql); -unsigned long mysql_net_field_length(unsigned char **packet); diff --git a/mysql-test/suite/plugins/t/false_dupes-6543.test b/mysql-test/suite/plugins/t/false_dupes-6543.test index ebdbe00e47c..ca278685967 100644 --- a/mysql-test/suite/plugins/t/false_dupes-6543.test +++ b/mysql-test/suite/plugins/t/false_dupes-6543.test @@ -1,3 +1,4 @@ +source include/not_embedded.inc; # # MDEV-6543 Crash if enable 'federatedx' when 'federated' plugin already enabled, and vice-versa # diff --git a/storage/federatedx/CMakeLists.txt b/storage/federatedx/CMakeLists.txt index 67b6c1c96bb..9a79d1b3fec 100644 --- a/storage/federatedx/CMakeLists.txt +++ b/storage/federatedx/CMakeLists.txt @@ -1,4 +1,5 @@ SET(FEDERATEDX_PLUGIN_STATIC "federatedx") SET(FEDERATEDX_PLUGIN_DYNAMIC "ha_federatedx") SET(FEDERATEDX_SOURCES ha_federatedx.cc federatedx_txn.cc federatedx_io.cc federatedx_io_null.cc federatedx_io_mysql.cc) -MYSQL_ADD_PLUGIN(federatedx ${FEDERATEDX_SOURCES} STORAGE_ENGINE) +MYSQL_ADD_PLUGIN(federatedx ${FEDERATEDX_SOURCES} STORAGE_ENGINE + RECOMPILE_FOR_EMBEDDED) diff --git a/storage/federatedx/federatedx_io_mysql.cc b/storage/federatedx/federatedx_io_mysql.cc index f33cf45a241..cf620d59986 100644 --- a/storage/federatedx/federatedx_io_mysql.cc +++ b/storage/federatedx/federatedx_io_mysql.cc @@ -31,6 +31,7 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
#include #include "sql_priv.h" #include +#include #include "ha_federatedx.h" diff --git a/storage/federatedx/ha_federatedx.cc b/storage/federatedx/ha_federatedx.cc index 21c37907ee0..ad1d9e47ca3 100644 --- a/storage/federatedx/ha_federatedx.cc +++ b/storage/federatedx/ha_federatedx.cc @@ -314,6 +314,7 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #define MYSQL_SERVER 1 #include #include +#include #include "ha_federatedx.h" #include "sql_servers.h" #include "sql_analyse.h" // append_escaped() diff --git a/storage/sphinx/CMakeLists.txt b/storage/sphinx/CMakeLists.txt index 7cae7982e05..185ffdaaf91 100644 --- a/storage/sphinx/CMakeLists.txt +++ b/storage/sphinx/CMakeLists.txt @@ -19,4 +19,4 @@ IF(MSVC) ENDIF(MSVC) SET(SPHINX_SOURCES ha_sphinx.cc snippets_udf.cc) -MYSQL_ADD_PLUGIN(sphinx ${SPHINX_SOURCES} STORAGE_ENGINE) +MYSQL_ADD_PLUGIN(sphinx ${SPHINX_SOURCES} STORAGE_ENGINE RECOMPILE_FOR_EMBEDDED) -- cgit v1.2.1 From f1bbc1cd19d0d81fee5433efcb570a8845172241 Mon Sep 17 00:00:00 2001 From: Alexander Barkov Date: Tue, 25 Oct 2022 11:53:39 +0400 Subject: MDEV-28545 MyISAM reorganize partition corrupt older table format The ALTER related code cannot do at the same time both: - modify partitions - change column data types Explicit changing of a column data type together with a partition change is prohibited by the parter, so this is not allowed and returns a syntax error: ALTER TABLE t MODIFY ts BIGINT, DROP PARTITION p1; This fix additionally disables implicit data type upgrade (e.g. from "MariaDB 5.3 TIME" to "MySQL 5.6 TIME", or the other way around according to the current mysql56_temporal_format) in case of an ALTER modifying partitions, e.g.: ALTER TABLE t DROP PARTITION p1; In such commands now only the partition change happens, while the data types stay unchanged. One can additionally run: ALTER TABLE t FORCE; either before or after the ALTER modifying partitions to upgrade data types according to mysql56_temporal_format. 
--- mysql-test/main/partition_alter.result | 23 +++++++++++++++++++++++ mysql-test/main/partition_alter.test | 26 ++++++++++++++++++++++++++ sql/field.cc | 2 -- sql/field.h | 7 +++++++ sql/sql_insert.cc | 10 ++++++++++ sql/sql_table.cc | 12 ++++++++++++ 6 files changed, 78 insertions(+), 2 deletions(-) diff --git a/mysql-test/main/partition_alter.result b/mysql-test/main/partition_alter.result index 2b0a09d2653..37e975deb0e 100644 --- a/mysql-test/main/partition_alter.result +++ b/mysql-test/main/partition_alter.result @@ -212,3 +212,26 @@ test.t check status OK delete from t order by b limit 1; drop table t; # End of 10.3 tests +# +# Start of 10.4 tests +# +# +# MDEV-28545 MyISAM reorganize partition corrupt older table format +# +SET GLOBAL mysql56_temporal_format=OFF; +CREATE TABLE t (ts timestamp, KEY (ts)) ENGINE=MyISAM +PARTITION BY RANGE (unix_timestamp(ts)) ( +PARTITION p1 VALUES LESS THAN (1645398000), +PARTITION pn VALUES LESS THAN MAXVALUE +); +SET GLOBAL mysql56_temporal_format=ON; +FLUSH TABLES; +ALTER TABLE t DROP PARTITION p1; +CHECK TABLE t; +Table Op Msg_type Msg_text +test.t check status OK +DROP TABLE t; +SET GLOBAL mysql56_temporal_format=DEFAULT; +# +# End of 10.4 tests +# diff --git a/mysql-test/main/partition_alter.test b/mysql-test/main/partition_alter.test index 7a80779e386..23ad5ece0e4 100644 --- a/mysql-test/main/partition_alter.test +++ b/mysql-test/main/partition_alter.test @@ -197,3 +197,29 @@ delete from t order by b limit 1; drop table t; --echo # End of 10.3 tests + +--echo # +--echo # Start of 10.4 tests +--echo # + +--echo # +--echo # MDEV-28545 MyISAM reorganize partition corrupt older table format +--echo # + +SET GLOBAL mysql56_temporal_format=OFF; +CREATE TABLE t (ts timestamp, KEY (ts)) ENGINE=MyISAM +PARTITION BY RANGE (unix_timestamp(ts)) ( + PARTITION p1 VALUES LESS THAN (1645398000), + PARTITION pn VALUES LESS THAN MAXVALUE +); + +SET GLOBAL mysql56_temporal_format=ON; +FLUSH TABLES; +ALTER TABLE t DROP PARTITION p1; 
+CHECK TABLE t; +DROP TABLE t; +SET GLOBAL mysql56_temporal_format=DEFAULT; + +--echo # +--echo # End of 10.4 tests +--echo # diff --git a/sql/field.cc b/sql/field.cc index 853b0c62f14..bf27780e776 100644 --- a/sql/field.cc +++ b/sql/field.cc @@ -11020,8 +11020,6 @@ Column_definition::Column_definition(THD *thd, Field *old_field, type_handler()->Column_definition_reuse_fix_attributes(thd, this, old_field); - type_handler()->Column_definition_implicit_upgrade(this); - /* Copy the default (constant/function) from the column object orig_field, if supplied. We do this if all these conditions are met: diff --git a/sql/field.h b/sql/field.h index 7534a506edc..9d40caf0932 100644 --- a/sql/field.h +++ b/sql/field.h @@ -5203,6 +5203,13 @@ public: bool vers_check_timestamp(const Lex_table_name &table_name) const; bool vers_check_bigint(const Lex_table_name &table_name) const; + + static void upgrade_data_types(List &list) + { + List_iterator it(list); + while (Create_field *f= it++) + f->type_handler()->Column_definition_implicit_upgrade(f); + } }; diff --git a/sql/sql_insert.cc b/sql/sql_insert.cc index aeb39871025..76fd6385041 100644 --- a/sql/sql_insert.cc +++ b/sql/sql_insert.cc @@ -4346,6 +4346,16 @@ TABLE *select_create::create_table_from_items(THD *thd, List *items, alter_info->create_list.push_back(cr_field, thd->mem_root); } + /* + Item*::type_handler() always returns pointers to + type_handler_{time2|datetime2|timestamp2} no matter what + the current mysql56_temporal_format says. + Let's convert them according to mysql56_temporal_format. + QQ: This perhaps should eventually be fixed to have Item*::type_handler() + respect mysql56_temporal_format, and remove the upgrade from here. 
+ */ + Create_field::upgrade_data_types(alter_info->create_list); + if (create_info->check_fields(thd, alter_info, create_table->table_name, create_table->db, diff --git a/sql/sql_table.cc b/sql/sql_table.cc index 6aae927800e..efa5c06dd2a 100644 --- a/sql/sql_table.cc +++ b/sql/sql_table.cc @@ -10068,6 +10068,18 @@ do_continue:; set_table_default_charset(thd, create_info, alter_ctx.db); + /* + The ALTER related code cannot alter partitions and change column data types + at the same time. So in case of partition change statements like: + ALTER TABLE t1 DROP PARTITION p1; + we skip implicit data type upgrade (such as "MariaDB 5.3 TIME" to + "MySQL 5.6 TIME" or vice versa according to mysql56_temporal_format). + Note, one can run a separate "ALTER TABLE t1 FORCE;" statement + before or after the partition change ALTER statement to upgrade data types. + */ + if (IF_PARTITIONING(!fast_alter_partition, 1)) + Create_field::upgrade_data_types(alter_info->create_list); + if (create_info->check_fields(thd, alter_info, table_list->table_name, table_list->db) || create_info->fix_period_fields(thd, alter_info)) -- cgit v1.2.1 From 72e79eaaf3e4619bbaf900f6710ffb6a00ff95bf Mon Sep 17 00:00:00 2001 From: Sergei Golubchik Date: Tue, 25 Oct 2022 20:24:11 +0200 Subject: cleanup: put casts in a separate statement remove useless if() --- sql/item_geofunc.cc | 38 ++++++++++++++++++-------------------- 1 file changed, 18 insertions(+), 20 deletions(-) diff --git a/sql/item_geofunc.cc b/sql/item_geofunc.cc index f2cc2e61b96..b40105aaf36 100644 --- a/sql/item_geofunc.cc +++ b/sql/item_geofunc.cc @@ -2601,51 +2601,53 @@ double Item_func_sphere_distance::spherical_distance_points(Geometry *g1, switch (g2->get_class_info()->m_type_id) { case Geometry::wkb_point: - // Optimization for point-point case + { + Gis_point *g2p= static_cast(g2); + // Optimization for point-point case if (g1->get_class_info()->m_type_id == Geometry::wkb_point) { - res= static_cast(g2)->calculate_haversine(g1, r, 
&error); + res= g2p->calculate_haversine(g1, r, &error); } else { // Optimization for single point in Multipoint if (g1->get_data_size() == len) { - res= static_cast(g2)->calculate_haversine(g1, r, &error); + res= g2p->calculate_haversine(g1, r, &error); } else { // There are multipoints in g1 // g1 is MultiPoint and calculate MP.sphericaldistance from g2 Point if (g1->get_data_size() != GET_SIZE_ERROR) - static_cast(g2)->spherical_distance_multipoints( - (Gis_multi_point *)g1, r, &res, &error); + g2p->spherical_distance_multipoints(g1, r, &res, &error); } } break; + } case Geometry::wkb_multipoint: // Optimization for point-point case if (g1->get_class_info()->m_type_id == Geometry::wkb_point) { + Gis_point *g1p= static_cast(g1); // Optimization for single point in Multipoint g2 if (g2->get_data_size() == len) { - res= static_cast(g1)->calculate_haversine(g2, r, &error); + res= g1p->calculate_haversine(g2, r, &error); } else { if (g2->get_data_size() != GET_SIZE_ERROR) // g1 is a point (casted to multi_point) and g2 multipoint - static_cast(g1)->spherical_distance_multipoints( - (Gis_multi_point *)g2, r, &res, &error); + g1p->spherical_distance_multipoints(g2, r, &res, &error); } } else { + Gis_multi_point *g1mp= static_cast(g1); // Multipoints in g1 and g2 - no optimization - static_cast(g1)->spherical_distance_multipoints( - (Gis_multi_point *)g2, r, &res, &error); + g1mp->spherical_distance_multipoints(g2, r, &res, &error); } break; @@ -2654,16 +2656,12 @@ double Item_func_sphere_distance::spherical_distance_points(Geometry *g1, break; } - if (res < 0) - goto handle_error; - - handle_error: - if (error > 0) - my_error(ER_STD_OUT_OF_RANGE_ERROR, MYF(0), - "Longitude should be [-180,180]", "ST_Distance_Sphere"); - else if(error < 0) - my_error(ER_STD_OUT_OF_RANGE_ERROR, MYF(0), - "Latitude should be [-90,90]", "ST_Distance_Sphere"); + if (error > 0) + my_error(ER_STD_OUT_OF_RANGE_ERROR, MYF(0), + "Longitude should be [-180,180]", "ST_Distance_Sphere"); + else 
if(error < 0) + my_error(ER_STD_OUT_OF_RANGE_ERROR, MYF(0), + "Latitude should be [-90,90]", "ST_Distance_Sphere"); return res; } -- cgit v1.2.1 From 58cd0bd59ef011be54f162237f2ff017c3148e7b Mon Sep 17 00:00:00 2001 From: Igor Babaev Date: Mon, 17 Oct 2022 16:44:10 -0700 Subject: MDEV-28846 Poor performance when rowid filter contains no elements When a range rowid filter was used with an index ref access the cost of accessing the index entries for the records rejected by the filter was not taken into account. For a ref access by an index with big average number of records per key this led to poor execution plans if selectivity of the used filter was high. The patch resolves this problem. It also introduces a minor optimization that skips look-ups into a filter that turns out to be empty. With this patch the output of ANALYZE stmt reports the number of look-ups into used rowid filters. The patch also back-ports from 10.5 the code that properly sets the field TABLE::file::table for opened temporary tables. The test cases that were supposed to use rowid filters have been adjusted in order to use similar execution plans after this fix. 
Approved by Oleksandr Byelkin --- mysql-test/include/rowid_filter_debug_kill.inc | 9 +- mysql-test/main/join_cache.result | 24 +- mysql-test/main/join_nested_jcl6.result | 2 +- mysql-test/main/rowid_filter.result | 494 +++++++++++- mysql-test/main/rowid_filter.test | 210 ++++- mysql-test/main/rowid_filter_innodb.result | 968 +++++++++++++++++++++-- mysql-test/main/rowid_filter_innodb.test | 98 ++- mysql-test/main/rowid_filter_innodb_debug.result | 20 +- mysql-test/main/rowid_filter_innodb_debug.test | 8 + mysql-test/main/rowid_filter_myisam_debug.result | 10 +- mysql-test/main/select.result | 14 +- mysql-test/main/select_jcl6.result | 14 +- mysql-test/main/select_pkeycache.result | 14 +- mysql-test/main/subselect2.result | 2 +- sql/handler.h | 6 + sql/item_func.cc | 2 +- sql/rowid_filter.h | 11 + sql/sql_analyze_stmt.h | 5 +- sql/sql_explain.cc | 1 + sql/sql_insert.cc | 2 +- sql/sql_select.cc | 53 +- 21 files changed, 1797 insertions(+), 170 deletions(-) diff --git a/mysql-test/include/rowid_filter_debug_kill.inc b/mysql-test/include/rowid_filter_debug_kill.inc index 6a8c5d3f70d..c701d206297 100644 --- a/mysql-test/include/rowid_filter_debug_kill.inc +++ b/mysql-test/include/rowid_filter_debug_kill.inc @@ -9,9 +9,6 @@ create table t0(a int); insert into t0 values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9); -create table t1(a int); -insert into t1 select A.a + B.a* 10 + C.a * 100 from t0 A, t0 B, t0 C; - # 100 rows create table t2(a int); insert into t2 select A.a + B.a* 10 from t0 A, t0 B; @@ -30,10 +27,10 @@ where table_schema=database() and table_name='t3'; insert into t3 select A.a, - A.a, + B.a, 'filler-data-filler-data' from - t0 A, t1 B; + t2 A, t2 B; analyze table t2,t3; @@ -63,6 +60,6 @@ disconnect con1; reap; set debug_sync='RESET'; -drop table t0,t1,t2,t3; +drop table t0,t2,t3; --source include/wait_until_count_sessions.inc diff --git a/mysql-test/main/join_cache.result b/mysql-test/main/join_cache.result index 1837576e719..f337ab6509b 100644 --- 
a/mysql-test/main/join_cache.result +++ b/mysql-test/main/join_cache.result @@ -853,7 +853,7 @@ LENGTH(Language) < LENGTH(City.Name) - 2; id select_type table type possible_keys key key_len ref rows Extra 1 SIMPLE City ALL Country NULL NULL NULL 4079 Using where 1 SIMPLE Country hash_ALL PRIMARY #hash#PRIMARY 3 world.City.Country 239 Using where; Using join buffer (flat, BNLH join) -1 SIMPLE CountryLanguage hash_ALL|filter PRIMARY,Percentage #hash#PRIMARY|Percentage 3|4 world.City.Country 984 (19%) Using where; Using join buffer (flat, BNLH join); Using rowid filter +1 SIMPLE CountryLanguage hash_ALL PRIMARY,Percentage #hash#PRIMARY 3 world.City.Country 984 Using where; Using join buffer (flat, BNLH join) SELECT City.Name, Country.Name, CountryLanguage.Language FROM City,Country,CountryLanguage WHERE City.Country=Country.Code AND @@ -1053,7 +1053,7 @@ LENGTH(Language) < LENGTH(City.Name) - 2; id select_type table type possible_keys key key_len ref rows Extra 1 SIMPLE City ALL Country NULL NULL NULL 4079 Using where 1 SIMPLE Country hash_ALL PRIMARY #hash#PRIMARY 3 world.City.Country 239 Using where; Using join buffer (flat, BNLH join) -1 SIMPLE CountryLanguage hash_ALL|filter PRIMARY,Percentage #hash#PRIMARY|Percentage 3|4 world.City.Country 984 (19%) Using where; Using join buffer (incremental, BNLH join); Using rowid filter +1 SIMPLE CountryLanguage hash_ALL PRIMARY,Percentage #hash#PRIMARY 3 world.City.Country 984 Using where; Using join buffer (incremental, BNLH join) SELECT City.Name, Country.Name, CountryLanguage.Language FROM City,Country,CountryLanguage WHERE City.Country=Country.Code AND @@ -1312,7 +1312,7 @@ LENGTH(Language) < LENGTH(City.Name) - 2; id select_type table type possible_keys key key_len ref rows Extra 1 SIMPLE City ALL Country NULL NULL NULL 4079 Using where 1 SIMPLE Country eq_ref PRIMARY PRIMARY 3 world.City.Country 1 Using where; Using join buffer (flat, BKA join); Key-ordered Rowid-ordered scan -1 SIMPLE CountryLanguage ref|filter 
PRIMARY,Percentage PRIMARY|Percentage 3|4 world.City.Country 4 (19%) Using index condition(BKA); Using where; Using join buffer (flat, BKA join); Key-ordered Rowid-ordered scan; Using rowid filter +1 SIMPLE CountryLanguage ref PRIMARY,Percentage PRIMARY 3 world.City.Country 4 Using index condition(BKA); Using where; Using join buffer (flat, BKA join); Key-ordered Rowid-ordered scan SELECT City.Name, Country.Name, CountryLanguage.Language FROM City,Country,CountryLanguage WHERE City.Country=Country.Code AND @@ -1509,7 +1509,7 @@ LENGTH(Language) < LENGTH(City.Name) - 2; id select_type table type possible_keys key key_len ref rows Extra 1 SIMPLE City ALL Country NULL NULL NULL 4079 Using where 1 SIMPLE Country eq_ref PRIMARY PRIMARY 3 world.City.Country 1 Using where; Using join buffer (flat, BKA join); Key-ordered Rowid-ordered scan -1 SIMPLE CountryLanguage ref|filter PRIMARY,Percentage PRIMARY|Percentage 3|4 world.City.Country 4 (19%) Using index condition(BKA); Using where; Using join buffer (incremental, BKA join); Key-ordered Rowid-ordered scan; Using rowid filter +1 SIMPLE CountryLanguage ref PRIMARY,Percentage PRIMARY 3 world.City.Country 4 Using index condition(BKA); Using where; Using join buffer (incremental, BKA join); Key-ordered Rowid-ordered scan SELECT City.Name, Country.Name, CountryLanguage.Language FROM City,Country,CountryLanguage WHERE City.Country=Country.Code AND @@ -1706,7 +1706,7 @@ LENGTH(Language) < LENGTH(City.Name) - 2; id select_type table type possible_keys key key_len ref rows Extra 1 SIMPLE City ALL Country NULL NULL NULL 4079 Using where 1 SIMPLE Country eq_ref PRIMARY PRIMARY 3 world.City.Country 1 Using where; Using join buffer (flat, BKAH join); Key-ordered Rowid-ordered scan -1 SIMPLE CountryLanguage ref|filter PRIMARY,Percentage PRIMARY|Percentage 3|4 world.City.Country 4 (19%) Using index condition(BKA); Using where; Using join buffer (flat, BKAH join); Key-ordered Rowid-ordered scan; Using rowid filter +1 SIMPLE 
CountryLanguage ref PRIMARY,Percentage PRIMARY 3 world.City.Country 4 Using index condition(BKA); Using where; Using join buffer (flat, BKAH join); Key-ordered Rowid-ordered scan SELECT City.Name, Country.Name, CountryLanguage.Language FROM City,Country,CountryLanguage WHERE City.Country=Country.Code AND @@ -1903,7 +1903,7 @@ LENGTH(Language) < LENGTH(City.Name) - 2; id select_type table type possible_keys key key_len ref rows Extra 1 SIMPLE City ALL Country NULL NULL NULL 4079 Using where 1 SIMPLE Country eq_ref PRIMARY PRIMARY 3 world.City.Country 1 Using where; Using join buffer (flat, BKAH join); Key-ordered Rowid-ordered scan -1 SIMPLE CountryLanguage ref|filter PRIMARY,Percentage PRIMARY|Percentage 3|4 world.City.Country 4 (19%) Using index condition(BKA); Using where; Using join buffer (incremental, BKAH join); Key-ordered Rowid-ordered scan; Using rowid filter +1 SIMPLE CountryLanguage ref PRIMARY,Percentage PRIMARY 3 world.City.Country 4 Using index condition(BKA); Using where; Using join buffer (incremental, BKAH join); Key-ordered Rowid-ordered scan SELECT City.Name, Country.Name, CountryLanguage.Language FROM City,Country,CountryLanguage WHERE City.Country=Country.Code AND @@ -2104,7 +2104,7 @@ LENGTH(Language) < LENGTH(City.Name) - 2; id select_type table type possible_keys key key_len ref rows Extra 1 SIMPLE City ALL Country NULL NULL NULL 4079 Using where 1 SIMPLE Country hash_ALL PRIMARY #hash#PRIMARY 3 world.City.Country 239 Using where; Using join buffer (flat, BNLH join) -1 SIMPLE CountryLanguage hash_ALL|filter PRIMARY,Percentage #hash#PRIMARY|Percentage 3|4 world.City.Country 984 (19%) Using where; Using join buffer (flat, BNLH join); Using rowid filter +1 SIMPLE CountryLanguage hash_ALL PRIMARY,Percentage #hash#PRIMARY 3 world.City.Country 984 Using where; Using join buffer (flat, BNLH join) SELECT City.Name, Country.Name, CountryLanguage.Language FROM City,Country,CountryLanguage WHERE City.Country=Country.Code AND @@ -2208,7 +2208,7 @@ 
LENGTH(Language) < LENGTH(City.Name) - 2; id select_type table type possible_keys key key_len ref rows Extra 1 SIMPLE City ALL Country NULL NULL NULL 4079 Using where 1 SIMPLE Country hash_ALL PRIMARY #hash#PRIMARY 3 world.City.Country 239 Using where; Using join buffer (flat, BNLH join) -1 SIMPLE CountryLanguage hash_ALL|filter PRIMARY,Percentage #hash#PRIMARY|Percentage 3|4 world.City.Country 984 (19%) Using where; Using join buffer (incremental, BNLH join); Using rowid filter +1 SIMPLE CountryLanguage hash_ALL PRIMARY,Percentage #hash#PRIMARY 3 world.City.Country 984 Using where; Using join buffer (incremental, BNLH join) SELECT City.Name, Country.Name, CountryLanguage.Language FROM City,Country,CountryLanguage WHERE City.Country=Country.Code AND @@ -2312,7 +2312,7 @@ LENGTH(Language) < LENGTH(City.Name) - 2; id select_type table type possible_keys key key_len ref rows Extra 1 SIMPLE City ALL Country NULL NULL NULL 4079 Using where 1 SIMPLE Country eq_ref PRIMARY PRIMARY 3 world.City.Country 1 Using where; Using join buffer (flat, BKA join); Key-ordered Rowid-ordered scan -1 SIMPLE CountryLanguage ref|filter PRIMARY,Percentage PRIMARY|Percentage 3|4 world.City.Country 4 (19%) Using index condition(BKA); Using where; Using join buffer (flat, BKA join); Key-ordered Rowid-ordered scan; Using rowid filter +1 SIMPLE CountryLanguage ref PRIMARY,Percentage PRIMARY 3 world.City.Country 4 Using index condition(BKA); Using where; Using join buffer (flat, BKA join); Key-ordered Rowid-ordered scan SELECT City.Name, Country.Name, CountryLanguage.Language FROM City,Country,CountryLanguage WHERE City.Country=Country.Code AND @@ -2416,7 +2416,7 @@ LENGTH(Language) < LENGTH(City.Name) - 2; id select_type table type possible_keys key key_len ref rows Extra 1 SIMPLE City ALL Country NULL NULL NULL 4079 Using where 1 SIMPLE Country eq_ref PRIMARY PRIMARY 3 world.City.Country 1 Using where; Using join buffer (flat, BKA join); Key-ordered Rowid-ordered scan -1 SIMPLE CountryLanguage 
ref|filter PRIMARY,Percentage PRIMARY|Percentage 3|4 world.City.Country 4 (19%) Using index condition(BKA); Using where; Using join buffer (incremental, BKA join); Key-ordered Rowid-ordered scan; Using rowid filter +1 SIMPLE CountryLanguage ref PRIMARY,Percentage PRIMARY 3 world.City.Country 4 Using index condition(BKA); Using where; Using join buffer (incremental, BKA join); Key-ordered Rowid-ordered scan SELECT City.Name, Country.Name, CountryLanguage.Language FROM City,Country,CountryLanguage WHERE City.Country=Country.Code AND @@ -2520,7 +2520,7 @@ LENGTH(Language) < LENGTH(City.Name) - 2; id select_type table type possible_keys key key_len ref rows Extra 1 SIMPLE City ALL Country NULL NULL NULL 4079 Using where 1 SIMPLE Country eq_ref PRIMARY PRIMARY 3 world.City.Country 1 Using where; Using join buffer (flat, BKAH join); Key-ordered Rowid-ordered scan -1 SIMPLE CountryLanguage ref|filter PRIMARY,Percentage PRIMARY|Percentage 3|4 world.City.Country 4 (19%) Using index condition(BKA); Using where; Using join buffer (flat, BKAH join); Key-ordered Rowid-ordered scan; Using rowid filter +1 SIMPLE CountryLanguage ref PRIMARY,Percentage PRIMARY 3 world.City.Country 4 Using index condition(BKA); Using where; Using join buffer (flat, BKAH join); Key-ordered Rowid-ordered scan SELECT City.Name, Country.Name, CountryLanguage.Language FROM City,Country,CountryLanguage WHERE City.Country=Country.Code AND @@ -2624,7 +2624,7 @@ LENGTH(Language) < LENGTH(City.Name) - 2; id select_type table type possible_keys key key_len ref rows Extra 1 SIMPLE City ALL Country NULL NULL NULL 4079 Using where 1 SIMPLE Country eq_ref PRIMARY PRIMARY 3 world.City.Country 1 Using where; Using join buffer (flat, BKAH join); Key-ordered Rowid-ordered scan -1 SIMPLE CountryLanguage ref|filter PRIMARY,Percentage PRIMARY|Percentage 3|4 world.City.Country 4 (19%) Using index condition(BKA); Using where; Using join buffer (incremental, BKAH join); Key-ordered Rowid-ordered scan; Using rowid filter +1 
SIMPLE CountryLanguage ref PRIMARY,Percentage PRIMARY 3 world.City.Country 4 Using index condition(BKA); Using where; Using join buffer (incremental, BKAH join); Key-ordered Rowid-ordered scan SELECT City.Name, Country.Name, CountryLanguage.Language FROM City,Country,CountryLanguage WHERE City.Country=Country.Code AND diff --git a/mysql-test/main/join_nested_jcl6.result b/mysql-test/main/join_nested_jcl6.result index 3293f20aa17..26fa772dfd1 100644 --- a/mysql-test/main/join_nested_jcl6.result +++ b/mysql-test/main/join_nested_jcl6.result @@ -2033,7 +2033,7 @@ ON t6.b >= 2 AND t5.b=t7.b AND (t8.a > 0 OR t8.c IS NULL) AND t6.a>0 AND t7.a>0; id select_type table type possible_keys key key_len ref rows Extra 1 SIMPLE t5 ALL NULL NULL NULL NULL 3 -1 SIMPLE t7 ref|filter PRIMARY,b_i b_i|PRIMARY 5|4 test.t5.b 2 (29%) Using where; Using join buffer (flat, BKA join); Key-ordered Rowid-ordered scan; Using rowid filter +1 SIMPLE t7 ref PRIMARY,b_i b_i 5 test.t5.b 2 Using where; Using join buffer (flat, BKA join); Key-ordered Rowid-ordered scan 1 SIMPLE t6 range PRIMARY,b_i PRIMARY 4 NULL 3 Using where; Rowid-ordered scan; Using join buffer (incremental, BNL join) 1 SIMPLE t8 ref b_i b_i 5 test.t5.b 2 Using where; Using join buffer (incremental, BKA join); Key-ordered Rowid-ordered scan SELECT t5.a,t5.b,t6.a,t6.b,t7.a,t7.b,t8.a,t8.b diff --git a/mysql-test/main/rowid_filter.result b/mysql-test/main/rowid_filter.result index 2a014b04a90..0a7497f1055 100644 --- a/mysql-test/main/rowid_filter.result +++ b/mysql-test/main/rowid_filter.result @@ -128,6 +128,7 @@ ANALYZE "rows": 702, "selectivity_pct": 11.69, "r_rows": 605, + "r_lookups": 510, "r_selectivity_pct": 11.765, "r_buffer_size": "REPLACED", "r_filling_time_ms": "REPLACED" @@ -435,6 +436,7 @@ ANALYZE "rows": 69, "selectivity_pct": 4.6, "r_rows": 71, + "r_lookups": 96, "r_selectivity_pct": 10.417, "r_buffer_size": "REPLACED", "r_filling_time_ms": "REPLACED" @@ -686,6 +688,7 @@ ANALYZE "rows": 702, "selectivity_pct": 11.69, 
"r_rows": 605, + "r_lookups": 510, "r_selectivity_pct": 11.765, "r_buffer_size": "REPLACED", "r_filling_time_ms": "REPLACED" @@ -715,6 +718,7 @@ ANALYZE "rows": 139, "selectivity_pct": 9.2667, "r_rows": 144, + "r_lookups": 59, "r_selectivity_pct": 25.424, "r_buffer_size": "REPLACED", "r_filling_time_ms": "REPLACED" @@ -887,7 +891,7 @@ WHERE l_shipdate BETWEEN '1997-01-01' AND '1997-06-30' AND o_totalprice between 200000 and 230000; id select_type table type possible_keys key key_len ref rows Extra 1 SIMPLE orders range PRIMARY,i_o_totalprice i_o_totalprice 9 NULL 69 Using index condition -1 SIMPLE lineitem ref|filter PRIMARY,i_l_shipdate,i_l_orderkey,i_l_orderkey_quantity PRIMARY|i_l_shipdate 4|4 dbt3_s001.orders.o_orderkey 4 (8%) Using where; Using rowid filter +1 SIMPLE lineitem ref|filter PRIMARY,i_l_shipdate,i_l_orderkey,i_l_orderkey_quantity i_l_orderkey|i_l_shipdate 4|4 dbt3_s001.orders.o_orderkey 4 (8%) Using where; Using rowid filter set statement optimizer_switch='rowid_filter=on' for EXPLAIN FORMAT=JSON SELECT o_orderkey, l_linenumber, l_shipdate, o_totalprice FROM orders JOIN lineitem ON o_orderkey=l_orderkey WHERE l_shipdate BETWEEN '1997-01-01' AND '1997-06-30' AND @@ -916,7 +920,7 @@ EXPLAIN "i_l_orderkey", "i_l_orderkey_quantity" ], - "key": "PRIMARY", + "key": "i_l_orderkey", "key_length": "4", "used_key_parts": ["l_orderkey"], "ref": ["dbt3_s001.orders.o_orderkey"], @@ -940,7 +944,7 @@ WHERE l_shipdate BETWEEN '1997-01-01' AND '1997-06-30' AND o_totalprice between 200000 and 230000; id select_type table type possible_keys key key_len ref rows r_rows filtered r_filtered Extra 1 SIMPLE orders range PRIMARY,i_o_totalprice i_o_totalprice 9 NULL 69 71.00 100.00 100.00 Using index condition -1 SIMPLE lineitem ref|filter PRIMARY,i_l_shipdate,i_l_orderkey,i_l_orderkey_quantity PRIMARY|i_l_shipdate 4|4 dbt3_s001.orders.o_orderkey 4 (8%) 0.52 (7%) 8.48 100.00 Using where; Using rowid filter +1 SIMPLE lineitem ref|filter 
PRIMARY,i_l_shipdate,i_l_orderkey,i_l_orderkey_quantity i_l_orderkey|i_l_shipdate 4|4 dbt3_s001.orders.o_orderkey 4 (8%) 0.52 (7%) 8.48 100.00 Using where; Using rowid filter set statement optimizer_switch='rowid_filter=on' for ANALYZE FORMAT=JSON SELECT o_orderkey, l_linenumber, l_shipdate, o_totalprice FROM orders JOIN lineitem ON o_orderkey=l_orderkey WHERE l_shipdate BETWEEN '1997-01-01' AND '1997-06-30' AND @@ -975,7 +979,7 @@ ANALYZE "i_l_orderkey", "i_l_orderkey_quantity" ], - "key": "PRIMARY", + "key": "i_l_orderkey", "key_length": "4", "used_key_parts": ["l_orderkey"], "ref": ["dbt3_s001.orders.o_orderkey"], @@ -987,6 +991,7 @@ ANALYZE "rows": 509, "selectivity_pct": 8.4763, "r_rows": 510, + "r_lookups": 476, "r_selectivity_pct": 7.7731, "r_buffer_size": "REPLACED", "r_filling_time_ms": "REPLACED" @@ -2019,7 +2024,7 @@ EXPLAIN EXTENDED SELECT * FROM t1 HAVING (7, 9) IN (SELECT t2.i1, t2.i2 FROM t2 WHERE t2.i1 = 3); id select_type table type possible_keys key key_len ref rows filtered Extra 1 PRIMARY NULL NULL NULL NULL NULL NULL NULL NULL Impossible HAVING -2 SUBQUERY NULL NULL NULL NULL NULL NULL NULL NULL no matching row in const table +2 SUBQUERY t2 ref i1,i2 i1 5 const 1 100.00 Using index condition; Using where Warnings: Note 1003 /* select#1 */ select `test`.`t1`.`pk` AS `pk` from `test`.`t1` having 0 DROP TABLE t1,t2; @@ -2028,7 +2033,7 @@ DROP TABLE t1,t2; # that uses in expensive subquery # CREATE TABLE t1 ( -pk1 INT PRIMARY KEY, a1 INT, b1 VARCHAR(1), KEY(b1) +pk1 INT PRIMARY KEY, a1 INT, b1 VARCHAR(1), KEY(a1), KEY(b1) ) ENGINE=MyISAM; INSERT INTO t1 VALUES (10,0,'z'),(11,3,'j'),(12,8,'f'),(13,8,'p'),(14,6,'w'),(15,0,'c'),(16,1,'j'), @@ -2047,21 +2052,31 @@ INSERT INTO t1 VALUES (101,0,'u'),(102,7,'r'),(103,2,'x'),(104,8,'e'),(105,8,'i'),(106,5,'q'), (107,8,'z'),(108,3,'k'),(109,65,NULL); CREATE TABLE t2 (pk2 INT PRIMARY KEY, a2 INT, b2 VARCHAR(1)) ENGINE=MyISAM; -INSERT INTO t2 VALUES (1,1,'x'); +INSERT INTO t2 VALUES (1,1,'i'); INSERT INTO t2 
SELECT * FROM t1; -SELECT * FROM t1 INNER JOIN t2 ON ( pk1 <> pk2 AND pk1 = a2 ) +INSERT INTO t1 SELECT pk1+200, a1, b1 FROM t1; +INSERT INTO t1 SELECT pk1+400, a1, b1 FROM t1; +ANALYZE TABLE t1,t2 PERSISTENT FOR ALL; +Table Op Msg_type Msg_text +test.t1 analyze status Engine-independent statistics collected +test.t1 analyze status OK +test.t2 analyze status Engine-independent statistics collected +test.t2 analyze status OK +SELECT * FROM t1 INNER JOIN t2 ON ( pk1+1 = pk2+2 AND a1 = a2 ) WHERE b1 <= ( SELECT MAX(b2) FROM t2 WHERE pk2 <= 1 ); pk1 a1 b1 pk2 a2 b2 -65 2 a 109 65 NULL -EXPLAIN EXTENDED SELECT * FROM t1 INNER JOIN t2 ON ( pk1 <> pk2 AND pk1 = a2 ) +17 1 f 16 1 j +37 3 g 36 3 a +105 8 i 104 8 e +EXPLAIN EXTENDED SELECT * FROM t1 INNER JOIN t2 ON ( pk1+1 = pk2+2 AND a1 = a2 ) WHERE b1 <= ( SELECT MAX(b2) FROM t2 WHERE pk2 <= 1 ); id select_type table type possible_keys key key_len ref rows filtered Extra 1 PRIMARY t2 ALL NULL NULL NULL NULL 101 100.00 Using where -1 PRIMARY t1 eq_ref|filter PRIMARY,b1 PRIMARY|b1 4|4 test.t2.a2 1 (87%) 87.00 Using where; Using rowid filter +1 PRIMARY t1 ref|filter a1,b1 a1|b1 5|4 test.t2.a2 36 (29%) 28.75 Using where; Using rowid filter 2 SUBQUERY t2 range PRIMARY PRIMARY 4 NULL 1 100.00 Using index condition Warnings: -Note 1003 /* select#1 */ select `test`.`t1`.`pk1` AS `pk1`,`test`.`t1`.`a1` AS `a1`,`test`.`t1`.`b1` AS `b1`,`test`.`t2`.`pk2` AS `pk2`,`test`.`t2`.`a2` AS `a2`,`test`.`t2`.`b2` AS `b2` from `test`.`t1` join `test`.`t2` where `test`.`t1`.`pk1` = `test`.`t2`.`a2` and `test`.`t1`.`b1` <= (/* select#2 */ select max(`test`.`t2`.`b2`) from `test`.`t2` where `test`.`t2`.`pk2` <= 1) and `test`.`t2`.`a2` <> `test`.`t2`.`pk2` -EXPLAIN FORMAT=JSON SELECT * FROM t1 INNER JOIN t2 ON ( pk1 <> pk2 AND pk1 = a2 ) +Note 1003 /* select#1 */ select `test`.`t1`.`pk1` AS `pk1`,`test`.`t1`.`a1` AS `a1`,`test`.`t1`.`b1` AS `b1`,`test`.`t2`.`pk2` AS `pk2`,`test`.`t2`.`a2` AS `a2`,`test`.`t2`.`b2` AS `b2` from `test`.`t1` join 
`test`.`t2` where `test`.`t1`.`a1` = `test`.`t2`.`a2` and `test`.`t1`.`b1` <= (/* select#2 */ select max(`test`.`t2`.`b2`) from `test`.`t2` where `test`.`t2`.`pk2` <= 1) and `test`.`t1`.`pk1` + 1 = `test`.`t2`.`pk2` + 2 +EXPLAIN FORMAT=JSON SELECT * FROM t1 INNER JOIN t2 ON ( pk1+1 = pk2+2 AND a1 = a2 ) WHERE b1 <= ( SELECT MAX(b2) FROM t2 WHERE pk2 <= 1 ); EXPLAIN { @@ -2072,27 +2087,27 @@ EXPLAIN "access_type": "ALL", "rows": 101, "filtered": 100, - "attached_condition": "t2.a2 <> t2.pk2 and t2.a2 is not null" + "attached_condition": "t2.a2 is not null" }, "table": { "table_name": "t1", - "access_type": "eq_ref", - "possible_keys": ["PRIMARY", "b1"], - "key": "PRIMARY", - "key_length": "4", - "used_key_parts": ["pk1"], + "access_type": "ref", + "possible_keys": ["a1", "b1"], + "key": "a1", + "key_length": "5", + "used_key_parts": ["a1"], "ref": ["test.t2.a2"], "rowid_filter": { "range": { "key": "b1", "used_key_parts": ["b1"] }, - "rows": 87, - "selectivity_pct": 87 + "rows": 115, + "selectivity_pct": 28.75 }, - "rows": 1, - "filtered": 87, - "attached_condition": "t1.b1 <= (subquery#2)" + "rows": 36, + "filtered": 28.75, + "attached_condition": "t1.b1 <= (subquery#2) and t1.pk1 + 1 = t2.pk2 + 2" }, "subqueries": [ { @@ -2159,13 +2174,442 @@ set @save_optimizer_switch= @@optimizer_switch; SET @@optimizer_switch="index_merge_sort_union=OFF"; CREATE TABLE t1 (a INT, b INT, INDEX(a), INDEX(b)); INSERT INTO t1 VALUES (0,0),(1,0),(-1,1), (-2,1), (-2,3), (-3,4), (-2,4); +INSERT INTO t1 SELECT * FROM t1; +INSERT INTO t1 SELECT * FROM t1; +INSERT INTO t1 SELECT * FROM t1; +INSERT INTO t1 SELECT * FROM t1; +INSERT INTO t1 SELECT * FROM t1; +INSERT INTO t1 SELECT * FROM t1; +ANALYZE table t1 PERSISTENT FOR ALL; +Table Op Msg_type Msg_text +test.t1 analyze status Engine-independent statistics collected +test.t1 analyze status OK explain SELECT * FROM t1 WHERE a > 0 AND b=0; id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t1 ref|filter a,b b|a 
5|5 const 2 (14%) Using where; Using rowid filter +1 SIMPLE t1 ref|filter a,b b|a 5|5 const 151 (17%) Using where; Using rowid filter SELECT * FROM t1 WHERE a > 0 AND b=0; a b 1 0 +1 0 +1 0 +1 0 +1 0 +1 0 +1 0 +1 0 +1 0 +1 0 +1 0 +1 0 +1 0 +1 0 +1 0 +1 0 +1 0 +1 0 +1 0 +1 0 +1 0 +1 0 +1 0 +1 0 +1 0 +1 0 +1 0 +1 0 +1 0 +1 0 +1 0 +1 0 +1 0 +1 0 +1 0 +1 0 +1 0 +1 0 +1 0 +1 0 +1 0 +1 0 +1 0 +1 0 +1 0 +1 0 +1 0 +1 0 +1 0 +1 0 +1 0 +1 0 +1 0 +1 0 +1 0 +1 0 +1 0 +1 0 +1 0 +1 0 +1 0 +1 0 +1 0 +1 0 drop table t1; SET @@optimizer_switch=@save_optimizer_switch; +# +# MDEV-28846: Poor performance when rowid filter contains no elements +# +create table t1 ( +pk int primary key auto_increment, +nm varchar(32), +fl1 tinyint default 0, +fl2 tinyint default 0, +index idx1(nm, fl1), +index idx2(fl2) +) engine=myisam; +create table name ( +pk int primary key auto_increment, +nm bigint +) engine=myisam; +create table flag2 ( +pk int primary key auto_increment, +fl2 tinyint +) engine=myisam; +insert into name(nm) select seq from seq_1_to_1000 order by rand(17); +insert into flag2(fl2) select seq mod 2 from seq_1_to_1000 order by rand(19); +insert into t1(nm,fl2) +select nm, fl2 from name, flag2 where name.pk = flag2.pk; +analyze table t1 persistent for all; +Table Op Msg_type Msg_text +test.t1 analyze status Engine-independent statistics collected +test.t1 analyze status Table is already up to date +select '500%' as a; +a +500% +set optimizer_switch='rowid_filter=on'; +explain +select * from t1 where nm like '500%' AND fl2 = 0; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 range idx1,idx2 idx1 35 NULL 1 Using index condition; Using where +analyze format=json +select * from t1 where nm like '500%' AND fl2 = 0; +ANALYZE +{ + "query_block": { + "select_id": 1, + "r_loops": 1, + "r_total_time_ms": "REPLACED", + "table": { + "table_name": "t1", + "access_type": "range", + "possible_keys": ["idx1", "idx2"], + "key": "idx1", + "key_length": "35", + 
"used_key_parts": ["nm"], + "r_loops": 1, + "rows": 1, + "r_rows": 1, + "r_total_time_ms": "REPLACED", + "filtered": 49.2, + "r_filtered": 100, + "index_condition": "t1.nm like '500%'", + "attached_condition": "t1.fl2 = 0" + } + } +} +select * from t1 where nm like '500%' AND fl2 = 0; +pk nm fl1 fl2 +517 500 0 0 +truncate table name; +truncate table flag2; +truncate table t1; +insert into name(nm) select seq from seq_1_to_1000 order by rand(17); +insert into flag2(fl2) select seq mod 2 from seq_1_to_1000 order by rand(19); +insert into t1(nm,fl2) +select nm, fl2 from name, flag2 where name.pk = flag2.pk; +analyze table t1 persistent for all; +Table Op Msg_type Msg_text +test.t1 analyze status Engine-independent statistics collected +test.t1 analyze status Table is already up to date +set optimizer_switch='rowid_filter=off'; +explain +select * from t1 where nm like '500%' AND fl2 = 0; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 range idx1,idx2 idx1 35 NULL 1 Using index condition; Using where +analyze format=json +select * from t1 where nm like '500%' AND fl2 = 0; +ANALYZE +{ + "query_block": { + "select_id": 1, + "r_loops": 1, + "r_total_time_ms": "REPLACED", + "table": { + "table_name": "t1", + "access_type": "range", + "possible_keys": ["idx1", "idx2"], + "key": "idx1", + "key_length": "35", + "used_key_parts": ["nm"], + "r_loops": 1, + "rows": 1, + "r_rows": 1, + "r_total_time_ms": "REPLACED", + "filtered": 49.2, + "r_filtered": 100, + "index_condition": "t1.nm like '500%'", + "attached_condition": "t1.fl2 = 0" + } + } +} +select * from t1 where nm like '500%' AND fl2 = 0; +pk nm fl1 fl2 +517 500 0 0 +truncate table name; +truncate table flag2; +truncate table t1; +insert into name(nm) select seq from seq_1_to_1000 order by rand(17); +insert into flag2(fl2) select seq mod 10 from seq_1_to_1000 order by rand(19); +insert into t1(nm,fl2) +select nm, fl2 from name, flag2 where name.pk = flag2.pk; +analyze table t1 persistent for 
all; +Table Op Msg_type Msg_text +test.t1 analyze status Engine-independent statistics collected +test.t1 analyze status Table is already up to date +select '607%' as a; +a +607% +set optimizer_switch='rowid_filter=on'; +explain +select * from t1 where nm like '607%' AND fl2 = 0; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 range idx1,idx2 idx1 35 NULL 1 Using index condition; Using where +select * from t1 where nm like '607%' AND fl2 = 0; +pk nm fl1 fl2 +721 607 0 0 +truncate table name; +truncate table flag2; +truncate table t1; +insert into name(nm) select seq from seq_1_to_10000 order by rand(17); +insert into flag2(fl2) select seq mod 100 from seq_1_to_10000 order by rand(19); +insert into t1(nm,fl2) +select nm, fl2 from name, flag2 where name.pk = flag2.pk; +analyze table t1 persistent for all; +Table Op Msg_type Msg_text +test.t1 analyze status Engine-independent statistics collected +test.t1 analyze status Table is already up to date +select '75%' as a; +a +75% +set optimizer_switch='rowid_filter=on'; +explain +select * from t1 where nm like '75%' AND fl2 = 0; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 ref|filter idx1,idx2 idx2|idx1 2|35 const 55 (1%) Using where; Using rowid filter +analyze format=json +select * from t1 where nm like '75%' AND fl2 = 0; +ANALYZE +{ + "query_block": { + "select_id": 1, + "r_loops": 1, + "r_total_time_ms": "REPLACED", + "table": { + "table_name": "t1", + "access_type": "ref", + "possible_keys": ["idx1", "idx2"], + "key": "idx2", + "key_length": "2", + "used_key_parts": ["fl2"], + "ref": ["const"], + "rowid_filter": { + "range": { + "key": "idx1", + "used_key_parts": ["nm"] + }, + "rows": 115, + "selectivity_pct": 1.15, + "r_rows": 111, + "r_lookups": 100, + "r_selectivity_pct": 2, + "r_buffer_size": "REPLACED", + "r_filling_time_ms": "REPLACED" + }, + "r_loops": 1, + "rows": 55, + "r_rows": 2, + "r_total_time_ms": "REPLACED", + "filtered": 1.15, + 
"r_filtered": 100, + "attached_condition": "t1.nm like '75%'" + } + } +} +select * from t1 where nm like '75%' AND fl2 = 0; +pk nm fl1 fl2 +4543 7503 0 0 +7373 7518 0 0 +drop table name, flag2; +drop table t1; +create table t1 ( +pk int primary key auto_increment, +nm varchar(32), +fl1 tinyint default 0, +fl2 int default 0, +index idx1(nm, fl1), +index idx2(fl2) +) engine=myisam; +create table name ( +pk int primary key auto_increment, +nm bigint +) engine=myisam; +create table flag2 ( +pk int primary key auto_increment, +fl2 int +) engine=myisam; +insert into name(nm) select seq from seq_1_to_10000 order by rand(17); +insert into flag2(fl2) select seq mod 10 from seq_1_to_10000 order by rand(19); +insert into t1(nm,fl2) +select nm, fl2 from name, flag2 where name.pk = flag2.pk; +analyze table t1 persistent for all; +Table Op Msg_type Msg_text +test.t1 analyze status Engine-independent statistics collected +test.t1 analyze status Table is already up to date +select * from t1 +where +( +nm like '3400%' or nm like '3402%' or nm like '3403%' or +nm like '3404%' or nm like '3405%' or nm like '3406%' or nm like '3407%' or +nm like '3409%' or +nm like '3411%' or nm like '3412%' or nm like '3413%' or +nm like '3414%' or nm like '3415%' or nm like '3416%' or nm like '3417%' or +nm like '3418%' or nm like '3419%' or +nm like '3421%' or nm like '3422%' or nm like '3423%' or +nm like '3424%' or nm like '3425%' or nm like '3426%' or nm like '3427%' or +nm like '3428%' or nm like '3429%' or +nm like '3430%' or nm like '3431%' or nm like '3432%' or nm like '3433%' or +nm like '3434%' or nm like '3435%' or nm like '3436%' or nm like '3437%' or +nm like '3439%' or +nm like '3440%' or nm like '3441%' or nm like '3442%' or nm like '3443%' or +nm like '3444%' or nm like '3445%' or nm like '3446%' or nm like '3447%' or +nm like '3448%' +) and fl2 = 0; +pk nm fl1 fl2 +analyze format=json select * from t1 +where +( +nm like '3400%' or nm like '3402%' or nm like '3403%' or +nm like 
'3404%' or nm like '3405%' or nm like '3406%' or nm like '3407%' or +nm like '3409%' or +nm like '3411%' or nm like '3412%' or nm like '3413%' or +nm like '3414%' or nm like '3415%' or nm like '3416%' or nm like '3417%' or +nm like '3418%' or nm like '3419%' or +nm like '3421%' or nm like '3422%' or nm like '3423%' or +nm like '3424%' or nm like '3425%' or nm like '3426%' or nm like '3427%' or +nm like '3428%' or nm like '3429%' or +nm like '3430%' or nm like '3431%' or nm like '3432%' or nm like '3433%' or +nm like '3434%' or nm like '3435%' or nm like '3436%' or nm like '3437%' or +nm like '3439%' or +nm like '3440%' or nm like '3441%' or nm like '3442%' or nm like '3443%' or +nm like '3444%' or nm like '3445%' or nm like '3446%' or nm like '3447%' or +nm like '3448%' +) and fl2 = 0; +ANALYZE +{ + "query_block": { + "select_id": 1, + "r_loops": 1, + "r_total_time_ms": "REPLACED", + "table": { + "table_name": "t1", + "access_type": "ref", + "possible_keys": ["idx1", "idx2"], + "key": "idx2", + "key_length": "5", + "used_key_parts": ["fl2"], + "ref": ["const"], + "rowid_filter": { + "range": { + "key": "idx1", + "used_key_parts": ["nm"] + }, + "rows": 44, + "selectivity_pct": 0.44, + "r_rows": 44, + "r_lookups": 1000, + "r_selectivity_pct": 0, + "r_buffer_size": "REPLACED", + "r_filling_time_ms": "REPLACED" + }, + "r_loops": 1, + "rows": 921, + "r_rows": 0, + "r_total_time_ms": "REPLACED", + "filtered": 0.44, + "r_filtered": 100, + "attached_condition": "t1.nm like '3400%' or t1.nm like '3402%' or t1.nm like '3403%' or t1.nm like '3404%' or t1.nm like '3405%' or t1.nm like '3406%' or t1.nm like '3407%' or t1.nm like '3409%' or t1.nm like '3411%' or t1.nm like '3412%' or t1.nm like '3413%' or t1.nm like '3414%' or t1.nm like '3415%' or t1.nm like '3416%' or t1.nm like '3417%' or t1.nm like '3418%' or t1.nm like '3419%' or t1.nm like '3421%' or t1.nm like '3422%' or t1.nm like '3423%' or t1.nm like '3424%' or t1.nm like '3425%' or t1.nm like '3426%' or t1.nm like 
'3427%' or t1.nm like '3428%' or t1.nm like '3429%' or t1.nm like '3430%' or t1.nm like '3431%' or t1.nm like '3432%' or t1.nm like '3433%' or t1.nm like '3434%' or t1.nm like '3435%' or t1.nm like '3436%' or t1.nm like '3437%' or t1.nm like '3439%' or t1.nm like '3440%' or t1.nm like '3441%' or t1.nm like '3442%' or t1.nm like '3443%' or t1.nm like '3444%' or t1.nm like '3445%' or t1.nm like '3446%' or t1.nm like '3447%' or t1.nm like '3448%'" + } + } +} +create table t0 select * from t1 where nm like '34%'; +delete from t1 using t1,t0 where t1.nm=t0.nm; +analyze format=json select * from t1 +where +( +nm like '3400%' or nm like '3402%' or nm like '3403%' or +nm like '3404%' or nm like '3405%' or nm like '3406%' or nm like '3407%' or +nm like '3409%' or +nm like '3411%' or nm like '3412%' or nm like '3413%' or +nm like '3414%' or nm like '3415%' or nm like '3416%' or nm like '3417%' or +nm like '3418%' or nm like '3419%' or +nm like '3421%' or nm like '3422%' or nm like '3423%' or +nm like '3424%' or nm like '3425%' or nm like '3426%' or nm like '3427%' or +nm like '3428%' or nm like '3429%' or +nm like '3430%' or nm like '3431%' or nm like '3432%' or nm like '3433%' or +nm like '3434%' or nm like '3435%' or nm like '3436%' or nm like '3437%' or +nm like '3439%' or +nm like '3440%' or nm like '3441%' or nm like '3442%' or nm like '3443%' or +nm like '3444%' or nm like '3445%' or nm like '3446%' or nm like '3447%' or +nm like '3448%' +) and fl2 = 0; +ANALYZE +{ + "query_block": { + "select_id": 1, + "r_loops": 1, + "r_total_time_ms": "REPLACED", + "table": { + "table_name": "t1", + "access_type": "ref", + "possible_keys": ["idx1", "idx2"], + "key": "idx2", + "key_length": "5", + "used_key_parts": ["fl2"], + "ref": ["const"], + "rowid_filter": { + "range": { + "key": "idx1", + "used_key_parts": ["nm"] + }, + "rows": 44, + "selectivity_pct": 0.44, + "r_rows": 0, + "r_lookups": 0, + "r_selectivity_pct": 0, + "r_buffer_size": "REPLACED", + "r_filling_time_ms": 
"REPLACED" + }, + "r_loops": 1, + "rows": 911, + "r_rows": 0, + "filtered": 0.44, + "r_filtered": 100, + "attached_condition": "t1.nm like '3400%' or t1.nm like '3402%' or t1.nm like '3403%' or t1.nm like '3404%' or t1.nm like '3405%' or t1.nm like '3406%' or t1.nm like '3407%' or t1.nm like '3409%' or t1.nm like '3411%' or t1.nm like '3412%' or t1.nm like '3413%' or t1.nm like '3414%' or t1.nm like '3415%' or t1.nm like '3416%' or t1.nm like '3417%' or t1.nm like '3418%' or t1.nm like '3419%' or t1.nm like '3421%' or t1.nm like '3422%' or t1.nm like '3423%' or t1.nm like '3424%' or t1.nm like '3425%' or t1.nm like '3426%' or t1.nm like '3427%' or t1.nm like '3428%' or t1.nm like '3429%' or t1.nm like '3430%' or t1.nm like '3431%' or t1.nm like '3432%' or t1.nm like '3433%' or t1.nm like '3434%' or t1.nm like '3435%' or t1.nm like '3436%' or t1.nm like '3437%' or t1.nm like '3439%' or t1.nm like '3440%' or t1.nm like '3441%' or t1.nm like '3442%' or t1.nm like '3443%' or t1.nm like '3444%' or t1.nm like '3445%' or t1.nm like '3446%' or t1.nm like '3447%' or t1.nm like '3448%'" + } + } +} +drop table t0; +set optimizer_switch='rowid_filter=default'; +drop table name, flag2; +drop table t1; set @@use_stat_tables=@save_use_stat_tables; diff --git a/mysql-test/main/rowid_filter.test b/mysql-test/main/rowid_filter.test index a68c32cf0de..1dd99097556 100644 --- a/mysql-test/main/rowid_filter.test +++ b/mysql-test/main/rowid_filter.test @@ -320,7 +320,7 @@ DROP TABLE t1,t2; --echo # CREATE TABLE t1 ( - pk1 INT PRIMARY KEY, a1 INT, b1 VARCHAR(1), KEY(b1) + pk1 INT PRIMARY KEY, a1 INT, b1 VARCHAR(1), KEY(a1), KEY(b1) ) ENGINE=MyISAM; INSERT INTO t1 VALUES (10,0,'z'),(11,3,'j'),(12,8,'f'),(13,8,'p'),(14,6,'w'),(15,0,'c'),(16,1,'j'), @@ -340,11 +340,16 @@ INSERT INTO t1 VALUES (107,8,'z'),(108,3,'k'),(109,65,NULL); CREATE TABLE t2 (pk2 INT PRIMARY KEY, a2 INT, b2 VARCHAR(1)) ENGINE=MyISAM; -INSERT INTO t2 VALUES (1,1,'x'); +INSERT INTO t2 VALUES (1,1,'i'); INSERT INTO t2 
SELECT * FROM t1; +INSERT INTO t1 SELECT pk1+200, a1, b1 FROM t1; +INSERT INTO t1 SELECT pk1+400, a1, b1 FROM t1; + +ANALYZE TABLE t1,t2 PERSISTENT FOR ALL; + let $q= -SELECT * FROM t1 INNER JOIN t2 ON ( pk1 <> pk2 AND pk1 = a2 ) +SELECT * FROM t1 INNER JOIN t2 ON ( pk1+1 = pk2+2 AND a1 = a2 ) WHERE b1 <= ( SELECT MAX(b2) FROM t2 WHERE pk2 <= 1 ); eval $q; @@ -399,6 +404,15 @@ set @save_optimizer_switch= @@optimizer_switch; SET @@optimizer_switch="index_merge_sort_union=OFF"; CREATE TABLE t1 (a INT, b INT, INDEX(a), INDEX(b)); INSERT INTO t1 VALUES (0,0),(1,0),(-1,1), (-2,1), (-2,3), (-3,4), (-2,4); +INSERT INTO t1 SELECT * FROM t1; +INSERT INTO t1 SELECT * FROM t1; +INSERT INTO t1 SELECT * FROM t1; +INSERT INTO t1 SELECT * FROM t1; +INSERT INTO t1 SELECT * FROM t1; +INSERT INTO t1 SELECT * FROM t1; + +ANALYZE table t1 PERSISTENT FOR ALL; + explain SELECT * FROM t1 WHERE a > 0 AND b=0; SELECT * FROM t1 WHERE a > 0 AND b=0; @@ -406,4 +420,194 @@ drop table t1; SET @@optimizer_switch=@save_optimizer_switch; +--echo # +--echo # MDEV-28846: Poor performance when rowid filter contains no elements +--echo # + +--source include/have_sequence.inc + +create table t1 ( + pk int primary key auto_increment, + nm varchar(32), + fl1 tinyint default 0, + fl2 tinyint default 0, + index idx1(nm, fl1), + index idx2(fl2) +) engine=myisam; + +create table name ( + pk int primary key auto_increment, + nm bigint +) engine=myisam; + +create table flag2 ( + pk int primary key auto_increment, + fl2 tinyint +) engine=myisam; + +insert into name(nm) select seq from seq_1_to_1000 order by rand(17); +insert into flag2(fl2) select seq mod 2 from seq_1_to_1000 order by rand(19); + +insert into t1(nm,fl2) + select nm, fl2 from name, flag2 where name.pk = flag2.pk; + +analyze table t1 persistent for all; + +let $a= +`select concat((select nm from t1 where fl2=0 order by RAND(13) limit 1),'%')`; +eval select '$a' as a; + +set optimizer_switch='rowid_filter=on'; +eval +explain +select * from t1 
where nm like '$a' AND fl2 = 0; +--source include/analyze-format.inc +eval +analyze format=json +select * from t1 where nm like '$a' AND fl2 = 0; +eval +select * from t1 where nm like '$a' AND fl2 = 0; + +truncate table name; +truncate table flag2; +truncate table t1; + +insert into name(nm) select seq from seq_1_to_1000 order by rand(17); +insert into flag2(fl2) select seq mod 2 from seq_1_to_1000 order by rand(19); + +insert into t1(nm,fl2) + select nm, fl2 from name, flag2 where name.pk = flag2.pk; + +analyze table t1 persistent for all; + +set optimizer_switch='rowid_filter=off'; +eval +explain +select * from t1 where nm like '$a' AND fl2 = 0; +--source include/analyze-format.inc +eval +analyze format=json +select * from t1 where nm like '$a' AND fl2 = 0; +eval +select * from t1 where nm like '$a' AND fl2 = 0; + +truncate table name; +truncate table flag2; +truncate table t1; + +insert into name(nm) select seq from seq_1_to_1000 order by rand(17); +insert into flag2(fl2) select seq mod 10 from seq_1_to_1000 order by rand(19); + +insert into t1(nm,fl2) + select nm, fl2 from name, flag2 where name.pk = flag2.pk; + +analyze table t1 persistent for all; + +let $a= +`select concat((select nm from t1 where fl2=0 order by RAND(13) limit 1),'%')`; +eval select '$a' as a; + +set optimizer_switch='rowid_filter=on'; +eval +explain +select * from t1 where nm like '$a' AND fl2 = 0; +eval +select * from t1 where nm like '$a' AND fl2 = 0; + +truncate table name; +truncate table flag2; +truncate table t1; + +insert into name(nm) select seq from seq_1_to_10000 order by rand(17); +insert into flag2(fl2) select seq mod 100 from seq_1_to_10000 order by rand(19); + +insert into t1(nm,fl2) + select nm, fl2 from name, flag2 where name.pk = flag2.pk; + +analyze table t1 persistent for all; + +let $a= +`select concat(left((select nm from t1 where fl2=0 order by RAND(13) limit 1),2),'%')`; +eval select '$a' as a; + +set optimizer_switch='rowid_filter=on'; +eval +explain +select * from 
t1 where nm like '$a' AND fl2 = 0; +--source include/analyze-format.inc +eval +analyze format=json +select * from t1 where nm like '$a' AND fl2 = 0; +eval +select * from t1 where nm like '$a' AND fl2 = 0; + +drop table name, flag2; +drop table t1; + +# This test shows that if the container is empty there are no lookups into it + +create table t1 ( + pk int primary key auto_increment, + nm varchar(32), + fl1 tinyint default 0, + fl2 int default 0, + index idx1(nm, fl1), + index idx2(fl2) +) engine=myisam; + +create table name ( + pk int primary key auto_increment, + nm bigint +) engine=myisam; + +create table flag2 ( + pk int primary key auto_increment, + fl2 int +) engine=myisam; + +insert into name(nm) select seq from seq_1_to_10000 order by rand(17); +insert into flag2(fl2) select seq mod 10 from seq_1_to_10000 order by rand(19); + +insert into t1(nm,fl2) + select nm, fl2 from name, flag2 where name.pk = flag2.pk; + +analyze table t1 persistent for all; + +let $q= +select * from t1 +where +( + nm like '3400%' or nm like '3402%' or nm like '3403%' or + nm like '3404%' or nm like '3405%' or nm like '3406%' or nm like '3407%' or + nm like '3409%' or + nm like '3411%' or nm like '3412%' or nm like '3413%' or + nm like '3414%' or nm like '3415%' or nm like '3416%' or nm like '3417%' or + nm like '3418%' or nm like '3419%' or + nm like '3421%' or nm like '3422%' or nm like '3423%' or + nm like '3424%' or nm like '3425%' or nm like '3426%' or nm like '3427%' or + nm like '3428%' or nm like '3429%' or + nm like '3430%' or nm like '3431%' or nm like '3432%' or nm like '3433%' or + nm like '3434%' or nm like '3435%' or nm like '3436%' or nm like '3437%' or + nm like '3439%' or + nm like '3440%' or nm like '3441%' or nm like '3442%' or nm like '3443%' or + nm like '3444%' or nm like '3445%' or nm like '3446%' or nm like '3447%' or + nm like '3448%' +) and fl2 = 0; + +eval $q; +--source include/analyze-format.inc +eval analyze format=json $q; + +create table t0 select * from 
t1 where nm like '34%'; +delete from t1 using t1,t0 where t1.nm=t0.nm; +--source include/analyze-format.inc +eval analyze format=json $q; + +drop table t0; + +set optimizer_switch='rowid_filter=default'; + +drop table name, flag2; +drop table t1; + set @@use_stat_tables=@save_use_stat_tables; diff --git a/mysql-test/main/rowid_filter_innodb.result b/mysql-test/main/rowid_filter_innodb.result index b6be3d32aa6..d7f1fe4a0d3 100644 --- a/mysql-test/main/rowid_filter_innodb.result +++ b/mysql-test/main/rowid_filter_innodb.result @@ -129,6 +129,7 @@ ANALYZE "rows": 605, "selectivity_pct": 10.075, "r_rows": 605, + "r_lookups": 510, "r_selectivity_pct": 11.765, "r_buffer_size": "REPLACED", "r_filling_time_ms": "REPLACED" @@ -1948,7 +1949,7 @@ EXPLAIN EXTENDED SELECT * FROM t1 HAVING (7, 9) IN (SELECT t2.i1, t2.i2 FROM t2 WHERE t2.i1 = 3); id select_type table type possible_keys key key_len ref rows filtered Extra 1 PRIMARY NULL NULL NULL NULL NULL NULL NULL NULL Impossible HAVING -2 SUBQUERY NULL NULL NULL NULL NULL NULL NULL NULL no matching row in const table +2 SUBQUERY t2 ref i1,i2 i1 5 const 1 100.00 Using index condition; Using where Warnings: Note 1003 /* select#1 */ select `test`.`t1`.`pk` AS `pk` from `test`.`t1` having 0 DROP TABLE t1,t2; @@ -1957,7 +1958,7 @@ DROP TABLE t1,t2; # that uses in expensive subquery # CREATE TABLE t1 ( -pk1 INT PRIMARY KEY, a1 INT, b1 VARCHAR(1), KEY(b1) +pk1 INT PRIMARY KEY, a1 INT, b1 VARCHAR(1), KEY(a1), KEY(b1) ) ENGINE=MyISAM; INSERT INTO t1 VALUES (10,0,'z'),(11,3,'j'),(12,8,'f'),(13,8,'p'),(14,6,'w'),(15,0,'c'),(16,1,'j'), @@ -1976,21 +1977,31 @@ INSERT INTO t1 VALUES (101,0,'u'),(102,7,'r'),(103,2,'x'),(104,8,'e'),(105,8,'i'),(106,5,'q'), (107,8,'z'),(108,3,'k'),(109,65,NULL); CREATE TABLE t2 (pk2 INT PRIMARY KEY, a2 INT, b2 VARCHAR(1)) ENGINE=MyISAM; -INSERT INTO t2 VALUES (1,1,'x'); +INSERT INTO t2 VALUES (1,1,'i'); INSERT INTO t2 SELECT * FROM t1; -SELECT * FROM t1 INNER JOIN t2 ON ( pk1 <> pk2 AND pk1 = a2 ) +INSERT INTO 
t1 SELECT pk1+200, a1, b1 FROM t1; +INSERT INTO t1 SELECT pk1+400, a1, b1 FROM t1; +ANALYZE TABLE t1,t2 PERSISTENT FOR ALL; +Table Op Msg_type Msg_text +test.t1 analyze status Engine-independent statistics collected +test.t1 analyze status OK +test.t2 analyze status Engine-independent statistics collected +test.t2 analyze status OK +SELECT * FROM t1 INNER JOIN t2 ON ( pk1+1 = pk2+2 AND a1 = a2 ) WHERE b1 <= ( SELECT MAX(b2) FROM t2 WHERE pk2 <= 1 ); pk1 a1 b1 pk2 a2 b2 -65 2 a 109 65 NULL -EXPLAIN EXTENDED SELECT * FROM t1 INNER JOIN t2 ON ( pk1 <> pk2 AND pk1 = a2 ) +17 1 f 16 1 j +37 3 g 36 3 a +105 8 i 104 8 e +EXPLAIN EXTENDED SELECT * FROM t1 INNER JOIN t2 ON ( pk1+1 = pk2+2 AND a1 = a2 ) WHERE b1 <= ( SELECT MAX(b2) FROM t2 WHERE pk2 <= 1 ); id select_type table type possible_keys key key_len ref rows filtered Extra 1 PRIMARY t2 ALL NULL NULL NULL NULL 101 100.00 Using where -1 PRIMARY t1 eq_ref|filter PRIMARY,b1 PRIMARY|b1 4|4 test.t2.a2 1 (87%) 87.00 Using where; Using rowid filter +1 PRIMARY t1 ref|filter a1,b1 a1|b1 5|4 test.t2.a2 36 (29%) 28.75 Using where; Using rowid filter 2 SUBQUERY t2 range PRIMARY PRIMARY 4 NULL 1 100.00 Using index condition Warnings: -Note 1003 /* select#1 */ select `test`.`t1`.`pk1` AS `pk1`,`test`.`t1`.`a1` AS `a1`,`test`.`t1`.`b1` AS `b1`,`test`.`t2`.`pk2` AS `pk2`,`test`.`t2`.`a2` AS `a2`,`test`.`t2`.`b2` AS `b2` from `test`.`t1` join `test`.`t2` where `test`.`t1`.`pk1` = `test`.`t2`.`a2` and `test`.`t1`.`b1` <= (/* select#2 */ select max(`test`.`t2`.`b2`) from `test`.`t2` where `test`.`t2`.`pk2` <= 1) and `test`.`t2`.`a2` <> `test`.`t2`.`pk2` -EXPLAIN FORMAT=JSON SELECT * FROM t1 INNER JOIN t2 ON ( pk1 <> pk2 AND pk1 = a2 ) +Note 1003 /* select#1 */ select `test`.`t1`.`pk1` AS `pk1`,`test`.`t1`.`a1` AS `a1`,`test`.`t1`.`b1` AS `b1`,`test`.`t2`.`pk2` AS `pk2`,`test`.`t2`.`a2` AS `a2`,`test`.`t2`.`b2` AS `b2` from `test`.`t1` join `test`.`t2` where `test`.`t1`.`a1` = `test`.`t2`.`a2` and `test`.`t1`.`b1` <= (/* select#2 */ 
select max(`test`.`t2`.`b2`) from `test`.`t2` where `test`.`t2`.`pk2` <= 1) and `test`.`t1`.`pk1` + 1 = `test`.`t2`.`pk2` + 2 +EXPLAIN FORMAT=JSON SELECT * FROM t1 INNER JOIN t2 ON ( pk1+1 = pk2+2 AND a1 = a2 ) WHERE b1 <= ( SELECT MAX(b2) FROM t2 WHERE pk2 <= 1 ); EXPLAIN { @@ -2001,27 +2012,27 @@ EXPLAIN "access_type": "ALL", "rows": 101, "filtered": 100, - "attached_condition": "t2.a2 <> t2.pk2 and t2.a2 is not null" + "attached_condition": "t2.a2 is not null" }, "table": { "table_name": "t1", - "access_type": "eq_ref", - "possible_keys": ["PRIMARY", "b1"], - "key": "PRIMARY", - "key_length": "4", - "used_key_parts": ["pk1"], + "access_type": "ref", + "possible_keys": ["a1", "b1"], + "key": "a1", + "key_length": "5", + "used_key_parts": ["a1"], "ref": ["test.t2.a2"], "rowid_filter": { "range": { "key": "b1", "used_key_parts": ["b1"] }, - "rows": 87, - "selectivity_pct": 87 + "rows": 115, + "selectivity_pct": 28.75 }, - "rows": 1, - "filtered": 87, - "attached_condition": "t1.b1 <= (subquery#2)" + "rows": 36, + "filtered": 28.75, + "attached_condition": "t1.b1 <= (subquery#2) and t1.pk1 + 1 = t2.pk2 + 2" }, "subqueries": [ { @@ -2088,15 +2099,444 @@ set @save_optimizer_switch= @@optimizer_switch; SET @@optimizer_switch="index_merge_sort_union=OFF"; CREATE TABLE t1 (a INT, b INT, INDEX(a), INDEX(b)); INSERT INTO t1 VALUES (0,0),(1,0),(-1,1), (-2,1), (-2,3), (-3,4), (-2,4); +INSERT INTO t1 SELECT * FROM t1; +INSERT INTO t1 SELECT * FROM t1; +INSERT INTO t1 SELECT * FROM t1; +INSERT INTO t1 SELECT * FROM t1; +INSERT INTO t1 SELECT * FROM t1; +INSERT INTO t1 SELECT * FROM t1; +ANALYZE table t1 PERSISTENT FOR ALL; +Table Op Msg_type Msg_text +test.t1 analyze status Engine-independent statistics collected +test.t1 analyze status OK explain SELECT * FROM t1 WHERE a > 0 AND b=0; id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t1 ref|filter a,b b|a 5|5 const 2 (14%) Using where; Using rowid filter +1 SIMPLE t1 ref|filter a,b b|a 5|5 const 128 
(14%) Using where; Using rowid filter SELECT * FROM t1 WHERE a > 0 AND b=0; a b 1 0 +1 0 +1 0 +1 0 +1 0 +1 0 +1 0 +1 0 +1 0 +1 0 +1 0 +1 0 +1 0 +1 0 +1 0 +1 0 +1 0 +1 0 +1 0 +1 0 +1 0 +1 0 +1 0 +1 0 +1 0 +1 0 +1 0 +1 0 +1 0 +1 0 +1 0 +1 0 +1 0 +1 0 +1 0 +1 0 +1 0 +1 0 +1 0 +1 0 +1 0 +1 0 +1 0 +1 0 +1 0 +1 0 +1 0 +1 0 +1 0 +1 0 +1 0 +1 0 +1 0 +1 0 +1 0 +1 0 +1 0 +1 0 +1 0 +1 0 +1 0 +1 0 +1 0 +1 0 drop table t1; SET @@optimizer_switch=@save_optimizer_switch; +# +# MDEV-28846: Poor performance when rowid filter contains no elements +# +create table t1 ( +pk int primary key auto_increment, +nm varchar(32), +fl1 tinyint default 0, +fl2 tinyint default 0, +index idx1(nm, fl1), +index idx2(fl2) +) engine=myisam; +create table name ( +pk int primary key auto_increment, +nm bigint +) engine=myisam; +create table flag2 ( +pk int primary key auto_increment, +fl2 tinyint +) engine=myisam; +insert into name(nm) select seq from seq_1_to_1000 order by rand(17); +insert into flag2(fl2) select seq mod 2 from seq_1_to_1000 order by rand(19); +insert into t1(nm,fl2) +select nm, fl2 from name, flag2 where name.pk = flag2.pk; +analyze table t1 persistent for all; +Table Op Msg_type Msg_text +test.t1 analyze status Engine-independent statistics collected +test.t1 analyze status Table is already up to date +select '500%' as a; +a +500% +set optimizer_switch='rowid_filter=on'; +explain +select * from t1 where nm like '500%' AND fl2 = 0; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 range idx1,idx2 idx1 35 NULL 1 Using index condition; Using where +analyze format=json +select * from t1 where nm like '500%' AND fl2 = 0; +ANALYZE +{ + "query_block": { + "select_id": 1, + "r_loops": 1, + "r_total_time_ms": "REPLACED", + "table": { + "table_name": "t1", + "access_type": "range", + "possible_keys": ["idx1", "idx2"], + "key": "idx1", + "key_length": "35", + "used_key_parts": ["nm"], + "r_loops": 1, + "rows": 1, + "r_rows": 1, + "r_total_time_ms": "REPLACED", + 
"filtered": 49.2, + "r_filtered": 100, + "index_condition": "t1.nm like '500%'", + "attached_condition": "t1.fl2 = 0" + } + } +} +select * from t1 where nm like '500%' AND fl2 = 0; +pk nm fl1 fl2 +517 500 0 0 +truncate table name; +truncate table flag2; +truncate table t1; +insert into name(nm) select seq from seq_1_to_1000 order by rand(17); +insert into flag2(fl2) select seq mod 2 from seq_1_to_1000 order by rand(19); +insert into t1(nm,fl2) +select nm, fl2 from name, flag2 where name.pk = flag2.pk; +analyze table t1 persistent for all; +Table Op Msg_type Msg_text +test.t1 analyze status Engine-independent statistics collected +test.t1 analyze status Table is already up to date +set optimizer_switch='rowid_filter=off'; +explain +select * from t1 where nm like '500%' AND fl2 = 0; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 range idx1,idx2 idx1 35 NULL 1 Using index condition; Using where +analyze format=json +select * from t1 where nm like '500%' AND fl2 = 0; +ANALYZE +{ + "query_block": { + "select_id": 1, + "r_loops": 1, + "r_total_time_ms": "REPLACED", + "table": { + "table_name": "t1", + "access_type": "range", + "possible_keys": ["idx1", "idx2"], + "key": "idx1", + "key_length": "35", + "used_key_parts": ["nm"], + "r_loops": 1, + "rows": 1, + "r_rows": 1, + "r_total_time_ms": "REPLACED", + "filtered": 49.2, + "r_filtered": 100, + "index_condition": "t1.nm like '500%'", + "attached_condition": "t1.fl2 = 0" + } + } +} +select * from t1 where nm like '500%' AND fl2 = 0; +pk nm fl1 fl2 +517 500 0 0 +truncate table name; +truncate table flag2; +truncate table t1; +insert into name(nm) select seq from seq_1_to_1000 order by rand(17); +insert into flag2(fl2) select seq mod 10 from seq_1_to_1000 order by rand(19); +insert into t1(nm,fl2) +select nm, fl2 from name, flag2 where name.pk = flag2.pk; +analyze table t1 persistent for all; +Table Op Msg_type Msg_text +test.t1 analyze status Engine-independent statistics collected 
+test.t1 analyze status Table is already up to date +select '607%' as a; +a +607% +set optimizer_switch='rowid_filter=on'; +explain +select * from t1 where nm like '607%' AND fl2 = 0; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 range idx1,idx2 idx1 35 NULL 1 Using index condition; Using where +select * from t1 where nm like '607%' AND fl2 = 0; +pk nm fl1 fl2 +721 607 0 0 +truncate table name; +truncate table flag2; +truncate table t1; +insert into name(nm) select seq from seq_1_to_10000 order by rand(17); +insert into flag2(fl2) select seq mod 100 from seq_1_to_10000 order by rand(19); +insert into t1(nm,fl2) +select nm, fl2 from name, flag2 where name.pk = flag2.pk; +analyze table t1 persistent for all; +Table Op Msg_type Msg_text +test.t1 analyze status Engine-independent statistics collected +test.t1 analyze status Table is already up to date +select '75%' as a; +a +75% +set optimizer_switch='rowid_filter=on'; +explain +select * from t1 where nm like '75%' AND fl2 = 0; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 ref|filter idx1,idx2 idx2|idx1 2|35 const 55 (1%) Using where; Using rowid filter +analyze format=json +select * from t1 where nm like '75%' AND fl2 = 0; +ANALYZE +{ + "query_block": { + "select_id": 1, + "r_loops": 1, + "r_total_time_ms": "REPLACED", + "table": { + "table_name": "t1", + "access_type": "ref", + "possible_keys": ["idx1", "idx2"], + "key": "idx2", + "key_length": "2", + "used_key_parts": ["fl2"], + "ref": ["const"], + "rowid_filter": { + "range": { + "key": "idx1", + "used_key_parts": ["nm"] + }, + "rows": 115, + "selectivity_pct": 1.15, + "r_rows": 111, + "r_lookups": 100, + "r_selectivity_pct": 2, + "r_buffer_size": "REPLACED", + "r_filling_time_ms": "REPLACED" + }, + "r_loops": 1, + "rows": 55, + "r_rows": 2, + "r_total_time_ms": "REPLACED", + "filtered": 1.15, + "r_filtered": 100, + "attached_condition": "t1.nm like '75%'" + } + } +} +select * from t1 where nm 
like '75%' AND fl2 = 0; +pk nm fl1 fl2 +4543 7503 0 0 +7373 7518 0 0 +drop table name, flag2; +drop table t1; +create table t1 ( +pk int primary key auto_increment, +nm varchar(32), +fl1 tinyint default 0, +fl2 int default 0, +index idx1(nm, fl1), +index idx2(fl2) +) engine=myisam; +create table name ( +pk int primary key auto_increment, +nm bigint +) engine=myisam; +create table flag2 ( +pk int primary key auto_increment, +fl2 int +) engine=myisam; +insert into name(nm) select seq from seq_1_to_10000 order by rand(17); +insert into flag2(fl2) select seq mod 10 from seq_1_to_10000 order by rand(19); +insert into t1(nm,fl2) +select nm, fl2 from name, flag2 where name.pk = flag2.pk; +analyze table t1 persistent for all; +Table Op Msg_type Msg_text +test.t1 analyze status Engine-independent statistics collected +test.t1 analyze status Table is already up to date +select * from t1 +where +( +nm like '3400%' or nm like '3402%' or nm like '3403%' or +nm like '3404%' or nm like '3405%' or nm like '3406%' or nm like '3407%' or +nm like '3409%' or +nm like '3411%' or nm like '3412%' or nm like '3413%' or +nm like '3414%' or nm like '3415%' or nm like '3416%' or nm like '3417%' or +nm like '3418%' or nm like '3419%' or +nm like '3421%' or nm like '3422%' or nm like '3423%' or +nm like '3424%' or nm like '3425%' or nm like '3426%' or nm like '3427%' or +nm like '3428%' or nm like '3429%' or +nm like '3430%' or nm like '3431%' or nm like '3432%' or nm like '3433%' or +nm like '3434%' or nm like '3435%' or nm like '3436%' or nm like '3437%' or +nm like '3439%' or +nm like '3440%' or nm like '3441%' or nm like '3442%' or nm like '3443%' or +nm like '3444%' or nm like '3445%' or nm like '3446%' or nm like '3447%' or +nm like '3448%' +) and fl2 = 0; +pk nm fl1 fl2 +analyze format=json select * from t1 +where +( +nm like '3400%' or nm like '3402%' or nm like '3403%' or +nm like '3404%' or nm like '3405%' or nm like '3406%' or nm like '3407%' or +nm like '3409%' or +nm like '3411%' 
or nm like '3412%' or nm like '3413%' or +nm like '3414%' or nm like '3415%' or nm like '3416%' or nm like '3417%' or +nm like '3418%' or nm like '3419%' or +nm like '3421%' or nm like '3422%' or nm like '3423%' or +nm like '3424%' or nm like '3425%' or nm like '3426%' or nm like '3427%' or +nm like '3428%' or nm like '3429%' or +nm like '3430%' or nm like '3431%' or nm like '3432%' or nm like '3433%' or +nm like '3434%' or nm like '3435%' or nm like '3436%' or nm like '3437%' or +nm like '3439%' or +nm like '3440%' or nm like '3441%' or nm like '3442%' or nm like '3443%' or +nm like '3444%' or nm like '3445%' or nm like '3446%' or nm like '3447%' or +nm like '3448%' +) and fl2 = 0; +ANALYZE +{ + "query_block": { + "select_id": 1, + "r_loops": 1, + "r_total_time_ms": "REPLACED", + "table": { + "table_name": "t1", + "access_type": "ref", + "possible_keys": ["idx1", "idx2"], + "key": "idx2", + "key_length": "5", + "used_key_parts": ["fl2"], + "ref": ["const"], + "rowid_filter": { + "range": { + "key": "idx1", + "used_key_parts": ["nm"] + }, + "rows": 44, + "selectivity_pct": 0.44, + "r_rows": 44, + "r_lookups": 1000, + "r_selectivity_pct": 0, + "r_buffer_size": "REPLACED", + "r_filling_time_ms": "REPLACED" + }, + "r_loops": 1, + "rows": 921, + "r_rows": 0, + "r_total_time_ms": "REPLACED", + "filtered": 0.44, + "r_filtered": 100, + "attached_condition": "t1.nm like '3400%' or t1.nm like '3402%' or t1.nm like '3403%' or t1.nm like '3404%' or t1.nm like '3405%' or t1.nm like '3406%' or t1.nm like '3407%' or t1.nm like '3409%' or t1.nm like '3411%' or t1.nm like '3412%' or t1.nm like '3413%' or t1.nm like '3414%' or t1.nm like '3415%' or t1.nm like '3416%' or t1.nm like '3417%' or t1.nm like '3418%' or t1.nm like '3419%' or t1.nm like '3421%' or t1.nm like '3422%' or t1.nm like '3423%' or t1.nm like '3424%' or t1.nm like '3425%' or t1.nm like '3426%' or t1.nm like '3427%' or t1.nm like '3428%' or t1.nm like '3429%' or t1.nm like '3430%' or t1.nm like '3431%' or t1.nm 
like '3432%' or t1.nm like '3433%' or t1.nm like '3434%' or t1.nm like '3435%' or t1.nm like '3436%' or t1.nm like '3437%' or t1.nm like '3439%' or t1.nm like '3440%' or t1.nm like '3441%' or t1.nm like '3442%' or t1.nm like '3443%' or t1.nm like '3444%' or t1.nm like '3445%' or t1.nm like '3446%' or t1.nm like '3447%' or t1.nm like '3448%'" + } + } +} +create table t0 select * from t1 where nm like '34%'; +delete from t1 using t1,t0 where t1.nm=t0.nm; +analyze format=json select * from t1 +where +( +nm like '3400%' or nm like '3402%' or nm like '3403%' or +nm like '3404%' or nm like '3405%' or nm like '3406%' or nm like '3407%' or +nm like '3409%' or +nm like '3411%' or nm like '3412%' or nm like '3413%' or +nm like '3414%' or nm like '3415%' or nm like '3416%' or nm like '3417%' or +nm like '3418%' or nm like '3419%' or +nm like '3421%' or nm like '3422%' or nm like '3423%' or +nm like '3424%' or nm like '3425%' or nm like '3426%' or nm like '3427%' or +nm like '3428%' or nm like '3429%' or +nm like '3430%' or nm like '3431%' or nm like '3432%' or nm like '3433%' or +nm like '3434%' or nm like '3435%' or nm like '3436%' or nm like '3437%' or +nm like '3439%' or +nm like '3440%' or nm like '3441%' or nm like '3442%' or nm like '3443%' or +nm like '3444%' or nm like '3445%' or nm like '3446%' or nm like '3447%' or +nm like '3448%' +) and fl2 = 0; +ANALYZE +{ + "query_block": { + "select_id": 1, + "r_loops": 1, + "r_total_time_ms": "REPLACED", + "table": { + "table_name": "t1", + "access_type": "ref", + "possible_keys": ["idx1", "idx2"], + "key": "idx2", + "key_length": "5", + "used_key_parts": ["fl2"], + "ref": ["const"], + "rowid_filter": { + "range": { + "key": "idx1", + "used_key_parts": ["nm"] + }, + "rows": 44, + "selectivity_pct": 0.44, + "r_rows": 0, + "r_lookups": 0, + "r_selectivity_pct": 0, + "r_buffer_size": "REPLACED", + "r_filling_time_ms": "REPLACED" + }, + "r_loops": 1, + "rows": 911, + "r_rows": 0, + "filtered": 0.44, + "r_filtered": 100, + 
"attached_condition": "t1.nm like '3400%' or t1.nm like '3402%' or t1.nm like '3403%' or t1.nm like '3404%' or t1.nm like '3405%' or t1.nm like '3406%' or t1.nm like '3407%' or t1.nm like '3409%' or t1.nm like '3411%' or t1.nm like '3412%' or t1.nm like '3413%' or t1.nm like '3414%' or t1.nm like '3415%' or t1.nm like '3416%' or t1.nm like '3417%' or t1.nm like '3418%' or t1.nm like '3419%' or t1.nm like '3421%' or t1.nm like '3422%' or t1.nm like '3423%' or t1.nm like '3424%' or t1.nm like '3425%' or t1.nm like '3426%' or t1.nm like '3427%' or t1.nm like '3428%' or t1.nm like '3429%' or t1.nm like '3430%' or t1.nm like '3431%' or t1.nm like '3432%' or t1.nm like '3433%' or t1.nm like '3434%' or t1.nm like '3435%' or t1.nm like '3436%' or t1.nm like '3437%' or t1.nm like '3439%' or t1.nm like '3440%' or t1.nm like '3441%' or t1.nm like '3442%' or t1.nm like '3443%' or t1.nm like '3444%' or t1.nm like '3445%' or t1.nm like '3446%' or t1.nm like '3447%' or t1.nm like '3448%'" + } + } +} +drop table t0; +set optimizer_switch='rowid_filter=default'; +drop table name, flag2; +drop table t1; set @@use_stat_tables=@save_use_stat_tables; # # MDEV-18755: possible RORI-plan and possible plan with range filter @@ -2121,6 +2561,11 @@ insert into t1 values (81,'a','a',20),(82,'a','a',0),(83,'a','a',0),(84,'a','a',null), (85,'a','a',-1),(86,'a','a',5),(87,'a','a',null),(88,'a','a',160), (89,null,null,null),(90,'a','a',14785),(91,'a','a',0),(92,'a','a',null); +insert into t1 select pk+100, f1, f2, a from t1; +analyze table t1; +Table Op Msg_type Msg_text +test.t1 analyze status Engine-independent statistics collected +test.t1 analyze status OK ( select * from t1 where (f1 is null and f2 is null) and (f2 between 'a' and 'z' or f1 in ('a'))) union @@ -2169,7 +2614,7 @@ EXPLAIN } }, "rows": 1, - "filtered": 1.5873, + "filtered": 3.1746, "attached_condition": "t1.f1 is null and t1.f2 is null and (t1.f2 between 'a' and 'z' or t1.f1 = 'a')" } } @@ -2196,7 +2641,7 @@ EXPLAIN } }, 
"rows": 1, - "filtered": 1.5873, + "filtered": 3.1746, "attached_condition": "t1.f1 is null and t1.f2 is null and (t1.f2 between 'a' and 'z' or t1.f1 = 'a')" } } @@ -2231,46 +2676,44 @@ drop table t1, t2; # create table t1 (a int, b int, key (b), key (a)) engine=innodb; insert into t1 -select (rand(1)*1000)/10, (rand(1001)*1000)/50 from seq_1_to_1000; +select (rand(1)*1000)/10, (rand(1001)*1000)/20 from seq_1_to_1000; analyze table t1; Table Op Msg_type Msg_text test.t1 analyze status Engine-independent statistics collected test.t1 analyze status OK set @save_optimizer_switch= @@optimizer_switch; set optimizer_switch='rowid_filter=off'; -select count(*) from t1 where a in (22,83,11) and b=2; +select count(*) from t1 where a between 21 and 30 and b=2; count(*) -6 -explain extended select count(*) from t1 where a in (22,83,11) and b=2; +5 +explain extended select count(*) from t1 where a between 21 and 30 and b=2; id select_type table type possible_keys key key_len ref rows filtered Extra -1 SIMPLE t1 ref b,a b 5 const 59 3.30 Using where +1 SIMPLE t1 ref b,a b 5 const 24 9.60 Using where Warnings: -Note 1003 select count(0) AS `count(*)` from `test`.`t1` where `test`.`t1`.`b` = 2 and `test`.`t1`.`a` in (22,83,11) -select * from t1 where a in (22,83,11) and b=2; +Note 1003 select count(0) AS `count(*)` from `test`.`t1` where `test`.`t1`.`b` = 2 and `test`.`t1`.`a` between 21 and 30 +select * from t1 where a between 21 and 30 and b=2; a b -11 2 -11 2 -83 2 -11 2 -83 2 +30 2 +21 2 22 2 +26 2 +25 2 set optimizer_switch='rowid_filter=on'; -select count(*) from t1 where a in (22,83,11) and b=2; +select count(*) from t1 where a between 21 and 30 and b=2; count(*) -6 -explain extended select count(*) from t1 where a in (22,83,11) and b=2; +5 +explain extended select count(*) from t1 where a between 21 and 30 and b=2; id select_type table type possible_keys key key_len ref rows filtered Extra -1 SIMPLE t1 ref|filter b,a b|a 5|5 const 59 (3%) 3.30 Using where; Using rowid 
filter +1 SIMPLE t1 ref|filter b,a b|a 5|5 const 24 (10%) 9.60 Using where; Using rowid filter Warnings: -Note 1003 select count(0) AS `count(*)` from `test`.`t1` where `test`.`t1`.`b` = 2 and `test`.`t1`.`a` in (22,83,11) -select * from t1 where a in (22,83,11) and b=2; +Note 1003 select count(0) AS `count(*)` from `test`.`t1` where `test`.`t1`.`b` = 2 and `test`.`t1`.`a` between 21 and 30 +select * from t1 where a between 21 and 30 and b=2; a b -11 2 -11 2 -83 2 -11 2 -83 2 +30 2 +21 2 22 2 +26 2 +25 2 drop table t1; set optimizer_switch=@save_optimizer_switch; SET SESSION STORAGE_ENGINE=DEFAULT; @@ -2425,7 +2868,7 @@ set global innodb_stats_persistent= @stats.save; # CREATE TABLE t1 ( id int(11) unsigned NOT NULL AUTO_INCREMENT, -domain varchar(255) NOT NULL, +domain varchar(32) NOT NULL, registrant_name varchar(255) DEFAULT NULL, registrant_organization varchar(255) DEFAULT NULL, registrant_street1 varchar(255) DEFAULT NULL, @@ -2516,21 +2959,216 @@ null, 'SUELZBURGSTRASSE 158A', null, null, null, null, 'KOELN', '50937', 'MAXIMILIAN V. 
KETELHODT', null, 'SUELZBURGSTRASSE 158A', null, null, null, null, 'KOELN', '50937', 'GERMANY', 'ICANN@EXPIRES-2009.WEBCARE24.COM', '492214307580', '', '2017-01-30 10:08:29'); +INSERT INTO t1 ( +domain, registrant_name, registrant_organization, registrant_street1, +registrant_street2, registrant_street3, registrant_street4, registrant_street5, +registrant_city, registrant_postal_code, registrant_country, registrant_email, +registrant_telephone, administrative_name, administrative_organization, +administrative_street1, administrative_street2, administrative_street3, +administrative_street4, administrative_street5, administrative_city, +administrative_postal_code, administrative_country, administrative_email, +administrative_telephone, technical_name, technical_organization, +technical_street1, technical_street2, technical_street3, technical_street4, +technical_street5, technical_city, technical_postal_code, technical_country, +technical_email, technical_telephone, json, timestamp) +VALUES +('www.mailhost.i-dev.fr', null, null, null, null, null, null, null, null, +null, null, null, null, null, null, null, null, null, null, null, null, null, +null, null, null, null, null, null, null, null, null, null, null, null, null, +null, null, '', '2016-12-22 09:18:28'); +INSERT INTO t1 ( +domain, registrant_name, registrant_organization, registrant_street1, +registrant_street2, registrant_street3, registrant_street4, registrant_street5, +registrant_city, registrant_postal_code, registrant_country, registrant_email, +registrant_telephone, administrative_name, administrative_organization, +administrative_street1, administrative_street2, administrative_street3, +administrative_street4, administrative_street5, administrative_city, +administrative_postal_code, administrative_country, administrative_email, +administrative_telephone, technical_name, technical_organization, +technical_street1, technical_street2, technical_street3, technical_street4, +technical_street5, technical_city, 
technical_postal_code, technical_country, +technical_email, technical_telephone, json, timestamp) +VALUES +('www.mailhost.i-dev.fr', null, null, null, null, null, null, null, null, +null, null, null, null, null, null, null, null, null, null, null, null, null, +null, null, null, null, null, null, null, null, null, null, null, null, null, +null, null, '', '2016-12-22 09:18:28'); +INSERT INTO t1 ( +domain, registrant_name, registrant_organization, registrant_street1, +registrant_street2, registrant_street3, registrant_street4, registrant_street5, +registrant_city, registrant_postal_code, registrant_country, registrant_email, +registrant_telephone, administrative_name, administrative_organization, +administrative_street1, administrative_street2, administrative_street3, +administrative_street4, administrative_street5, administrative_city, +administrative_postal_code, administrative_country, administrative_email, +administrative_telephone, technical_name, technical_organization, +technical_street1, technical_street2, technical_street3, technical_street4, +technical_street5, technical_city, technical_postal_code, technical_country, +technical_email, technical_telephone, json, timestamp) +VALUES +('www.mailhost.i-dev.fr', null, null, null, null, null, null, null, null, +null, null, null, null, null, null, null, null, null, null, null, null, null, +null, null, null, null, null, null, null, null, null, null, null, null, null, +null, null, '', '2016-12-22 09:18:28'); +INSERT INTO t1 ( +domain, registrant_name, registrant_organization, registrant_street1, +registrant_street2, registrant_street3, registrant_street4, registrant_street5, +registrant_city, registrant_postal_code, registrant_country, registrant_email, +registrant_telephone, administrative_name, administrative_organization, +administrative_street1, administrative_street2, administrative_street3, +administrative_street4, administrative_street5, administrative_city, +administrative_postal_code, administrative_country, 
administrative_email, +administrative_telephone, technical_name, technical_organization, +technical_street1, technical_street2, technical_street3, technical_street4, +technical_street5, technical_city, technical_postal_code, technical_country, +technical_email, technical_telephone, json, timestamp) +VALUES +('www.mailhost.i-dev.fr', null, null, null, null, null, null, null, null, +null, null, null, null, null, null, null, null, null, null, null, null, null, +null, null, null, null, null, null, null, null, null, null, null, null, null, +null, null, '', '2016-12-22 09:18:28'); +INSERT INTO t1 ( +domain, registrant_name, registrant_organization, registrant_street1, +registrant_street2, registrant_street3, registrant_street4, registrant_street5, +registrant_city, registrant_postal_code, registrant_country, registrant_email, +registrant_telephone, administrative_name, administrative_organization, +administrative_street1, administrative_street2, administrative_street3, +administrative_street4, administrative_street5, administrative_city, +administrative_postal_code, administrative_country, administrative_email, +administrative_telephone, technical_name, technical_organization, +technical_street1, technical_street2, technical_street3, technical_street4, +technical_street5, technical_city, technical_postal_code, technical_country, +technical_email, technical_telephone, json, timestamp) +VALUES +('www.mailhost.i-dev.fr', null, null, null, null, null, null, null, null, +null, null, null, null, null, null, null, null, null, null, null, null, null, +null, null, null, null, null, null, null, null, null, null, null, null, null, +null, null, '', '2016-12-22 09:18:28'); +INSERT INTO t1 ( +domain, registrant_name, registrant_organization, registrant_street1, +registrant_street2, registrant_street3, registrant_street4, registrant_street5, +registrant_city, registrant_postal_code, registrant_country, registrant_email, +registrant_telephone, administrative_name, 
administrative_organization, +administrative_street1, administrative_street2, administrative_street3, +administrative_street4, administrative_street5, administrative_city, +administrative_postal_code, administrative_country, administrative_email, +administrative_telephone, technical_name, technical_organization, +technical_street1, technical_street2, technical_street3, technical_street4, +technical_street5, technical_city, technical_postal_code, technical_country, +technical_email, technical_telephone, json, timestamp) +VALUES +('www.mailhost.i-dev.fr', null, null, null, null, null, null, null, null, +null, null, null, null, null, null, null, null, null, null, null, null, null, +null, null, null, null, null, null, null, null, null, null, null, null, null, +null, null, '', '2016-12-22 09:18:28'); +INSERT INTO t1 ( +domain, registrant_name, registrant_organization, registrant_street1, +registrant_street2, registrant_street3, registrant_street4, registrant_street5, +registrant_city, registrant_postal_code, registrant_country, registrant_email, +registrant_telephone, administrative_name, administrative_organization, +administrative_street1, administrative_street2, administrative_street3, +administrative_street4, administrative_street5, administrative_city, +administrative_postal_code, administrative_country, administrative_email, +administrative_telephone, technical_name, technical_organization, +technical_street1, technical_street2, technical_street3, technical_street4, +technical_street5, technical_city, technical_postal_code, technical_country, +technical_email, technical_telephone, json, timestamp) +VALUES +('www.mailhost.i-dev.fr', null, null, null, null, null, null, null, null, +null, null, null, null, null, null, null, null, null, null, null, null, null, +null, null, null, null, null, null, null, null, null, null, null, null, null, +null, null, '', '2016-12-22 09:18:28'); +INSERT INTO t1 ( +domain, registrant_name, registrant_organization, registrant_street1, 
+registrant_street2, registrant_street3, registrant_street4, registrant_street5, +registrant_city, registrant_postal_code, registrant_country, registrant_email, +registrant_telephone, administrative_name, administrative_organization, +administrative_street1, administrative_street2, administrative_street3, +administrative_street4, administrative_street5, administrative_city, +administrative_postal_code, administrative_country, administrative_email, +administrative_telephone, technical_name, technical_organization, +technical_street1, technical_street2, technical_street3, technical_street4, +technical_street5, technical_city, technical_postal_code, technical_country, +technical_email, technical_telephone, json, timestamp) +VALUES +('www.mailhost.i-dev.fr', null, null, null, null, null, null, null, null, +null, null, null, null, null, null, null, null, null, null, null, null, null, +null, null, null, null, null, null, null, null, null, null, null, null, null, +null, null, '', '2016-12-22 09:18:28'); +INSERT INTO t1 ( +domain, registrant_name, registrant_organization, registrant_street1, +registrant_street2, registrant_street3, registrant_street4, registrant_street5, +registrant_city, registrant_postal_code, registrant_country, registrant_email, +registrant_telephone, administrative_name, administrative_organization, +administrative_street1, administrative_street2, administrative_street3, +administrative_street4, administrative_street5, administrative_city, +administrative_postal_code, administrative_country, administrative_email, +administrative_telephone, technical_name, technical_organization, +technical_street1, technical_street2, technical_street3, technical_street4, +technical_street5, technical_city, technical_postal_code, technical_country, +technical_email, technical_telephone, json, timestamp) +SELECT +domain, registrant_name, registrant_organization, registrant_street1, +registrant_street2, registrant_street3, registrant_street4, registrant_street5, 
+registrant_city, registrant_postal_code, registrant_country, registrant_email, +registrant_telephone, administrative_name, administrative_organization, +administrative_street1, administrative_street2, administrative_street3, +administrative_street4, administrative_street5, administrative_city, +administrative_postal_code, administrative_country, administrative_email, +administrative_telephone, technical_name, technical_organization, +technical_street1, technical_street2, technical_street3, technical_street4, +technical_street5, technical_city, technical_postal_code, technical_country, +technical_email, technical_telephone, json, timestamp +FROM t1; +INSERT INTO t1 ( +domain, registrant_name, registrant_organization, registrant_street1, +registrant_street2, registrant_street3, registrant_street4, registrant_street5, +registrant_city, registrant_postal_code, registrant_country, registrant_email, +registrant_telephone, administrative_name, administrative_organization, +administrative_street1, administrative_street2, administrative_street3, +administrative_street4, administrative_street5, administrative_city, +administrative_postal_code, administrative_country, administrative_email, +administrative_telephone, technical_name, technical_organization, +technical_street1, technical_street2, technical_street3, technical_street4, +technical_street5, technical_city, technical_postal_code, technical_country, +technical_email, technical_telephone, json, timestamp) +SELECT +domain, registrant_name, registrant_organization, registrant_street1, +registrant_street2, registrant_street3, registrant_street4, registrant_street5, +registrant_city, registrant_postal_code, registrant_country, registrant_email, +registrant_telephone, administrative_name, administrative_organization, +administrative_street1, administrative_street2, administrative_street3, +administrative_street4, administrative_street5, administrative_city, +administrative_postal_code, administrative_country, 
administrative_email, +administrative_telephone, technical_name, technical_organization, +technical_street1, technical_street2, technical_street3, technical_street4, +technical_street5, technical_city, technical_postal_code, technical_country, +technical_email, technical_telephone, json, timestamp +FROM t1; +ANALYZE TABLE t1 PERSISTENT FOR ALL; +Table Op Msg_type Msg_text +test.t1 analyze status Engine-independent statistics collected +test.t1 analyze Warning Engine-independent statistics are not collected for column 'json' +test.t1 analyze status OK SET @save_optimizer_switch=@@optimizer_switch; SET optimizer_switch='mrr=on,mrr_sort_keys=on'; SELECT * FROM t1 WHERE 1 = 1 AND domain = 'www.mailhost.i-dev.fr' AND -timestamp >= DATE_ADD(CURRENT_TIMESTAMP, INTERVAL -1 MONTH) +timestamp >= DATE_ADD('2017-01-30 08:24:51', INTERVAL -1 MONTH) ORDER BY timestamp DESC; id domain registrant_name registrant_organization registrant_street1 registrant_street2 registrant_street3 registrant_street4 registrant_street5 registrant_city registrant_postal_code registrant_country registrant_email registrant_telephone administrative_name administrative_organization administrative_street1 administrative_street2 administrative_street3 administrative_street4 administrative_street5 administrative_city administrative_postal_code administrative_country administrative_email administrative_telephone technical_name technical_organization technical_street1 technical_street2 technical_street3 technical_street4 technical_street5 technical_city technical_postal_code technical_country technical_email technical_telephone json timestamp +80551 www.mailhost.i-dev.fr NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL 2017-01-30 10:00:56 +80579 www.mailhost.i-dev.fr NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL 
NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL 2017-01-30 10:00:56 +80594 www.mailhost.i-dev.fr NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL 2017-01-30 10:00:56 +80609 www.mailhost.i-dev.fr NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL 2017-01-30 10:00:56 EXPLAIN EXTENDED SELECT * FROM t1 WHERE 1 = 1 AND domain = 'www.mailhost.i-dev.fr' AND -timestamp >= DATE_ADD(CURRENT_TIMESTAMP, INTERVAL -1 MONTH) +timestamp >= DATE_ADD('2017-01-30 08:24:51', INTERVAL -1 MONTH) ORDER BY timestamp DESC; id select_type table type possible_keys key key_len ref rows filtered Extra -1 SIMPLE t1 ref|filter ixEventWhoisDomainDomain,ixEventWhoisDomainTimestamp ixEventWhoisDomainDomain|ixEventWhoisDomainTimestamp 767|4 const 2 (14%) 14.29 Using index condition; Using where; Using filesort; Using rowid filter +1 SIMPLE t1 ref|filter ixEventWhoisDomainDomain,ixEventWhoisDomainTimestamp ixEventWhoisDomainDomain|ixEventWhoisDomainTimestamp 98|4 const 40 (33%) 33.33 Using index condition; Using where; Using filesort; Using rowid filter Warnings: -Note 1003 select `test`.`t1`.`id` AS `id`,`test`.`t1`.`domain` AS `domain`,`test`.`t1`.`registrant_name` AS `registrant_name`,`test`.`t1`.`registrant_organization` AS `registrant_organization`,`test`.`t1`.`registrant_street1` AS `registrant_street1`,`test`.`t1`.`registrant_street2` AS `registrant_street2`,`test`.`t1`.`registrant_street3` AS `registrant_street3`,`test`.`t1`.`registrant_street4` AS `registrant_street4`,`test`.`t1`.`registrant_street5` AS `registrant_street5`,`test`.`t1`.`registrant_city` AS `registrant_city`,`test`.`t1`.`registrant_postal_code` AS `registrant_postal_code`,`test`.`t1`.`registrant_country` AS 
`registrant_country`,`test`.`t1`.`registrant_email` AS `registrant_email`,`test`.`t1`.`registrant_telephone` AS `registrant_telephone`,`test`.`t1`.`administrative_name` AS `administrative_name`,`test`.`t1`.`administrative_organization` AS `administrative_organization`,`test`.`t1`.`administrative_street1` AS `administrative_street1`,`test`.`t1`.`administrative_street2` AS `administrative_street2`,`test`.`t1`.`administrative_street3` AS `administrative_street3`,`test`.`t1`.`administrative_street4` AS `administrative_street4`,`test`.`t1`.`administrative_street5` AS `administrative_street5`,`test`.`t1`.`administrative_city` AS `administrative_city`,`test`.`t1`.`administrative_postal_code` AS `administrative_postal_code`,`test`.`t1`.`administrative_country` AS `administrative_country`,`test`.`t1`.`administrative_email` AS `administrative_email`,`test`.`t1`.`administrative_telephone` AS `administrative_telephone`,`test`.`t1`.`technical_name` AS `technical_name`,`test`.`t1`.`technical_organization` AS `technical_organization`,`test`.`t1`.`technical_street1` AS `technical_street1`,`test`.`t1`.`technical_street2` AS `technical_street2`,`test`.`t1`.`technical_street3` AS `technical_street3`,`test`.`t1`.`technical_street4` AS `technical_street4`,`test`.`t1`.`technical_street5` AS `technical_street5`,`test`.`t1`.`technical_city` AS `technical_city`,`test`.`t1`.`technical_postal_code` AS `technical_postal_code`,`test`.`t1`.`technical_country` AS `technical_country`,`test`.`t1`.`technical_email` AS `technical_email`,`test`.`t1`.`technical_telephone` AS `technical_telephone`,`test`.`t1`.`json` AS `json`,`test`.`t1`.`timestamp` AS `timestamp` from `test`.`t1` where `test`.`t1`.`domain` = 'www.mailhost.i-dev.fr' and `test`.`t1`.`timestamp` >= (current_timestamp() + interval -1 month) order by `test`.`t1`.`timestamp` desc +Note 1003 select `test`.`t1`.`id` AS `id`,`test`.`t1`.`domain` AS `domain`,`test`.`t1`.`registrant_name` AS 
`registrant_name`,`test`.`t1`.`registrant_organization` AS `registrant_organization`,`test`.`t1`.`registrant_street1` AS `registrant_street1`,`test`.`t1`.`registrant_street2` AS `registrant_street2`,`test`.`t1`.`registrant_street3` AS `registrant_street3`,`test`.`t1`.`registrant_street4` AS `registrant_street4`,`test`.`t1`.`registrant_street5` AS `registrant_street5`,`test`.`t1`.`registrant_city` AS `registrant_city`,`test`.`t1`.`registrant_postal_code` AS `registrant_postal_code`,`test`.`t1`.`registrant_country` AS `registrant_country`,`test`.`t1`.`registrant_email` AS `registrant_email`,`test`.`t1`.`registrant_telephone` AS `registrant_telephone`,`test`.`t1`.`administrative_name` AS `administrative_name`,`test`.`t1`.`administrative_organization` AS `administrative_organization`,`test`.`t1`.`administrative_street1` AS `administrative_street1`,`test`.`t1`.`administrative_street2` AS `administrative_street2`,`test`.`t1`.`administrative_street3` AS `administrative_street3`,`test`.`t1`.`administrative_street4` AS `administrative_street4`,`test`.`t1`.`administrative_street5` AS `administrative_street5`,`test`.`t1`.`administrative_city` AS `administrative_city`,`test`.`t1`.`administrative_postal_code` AS `administrative_postal_code`,`test`.`t1`.`administrative_country` AS `administrative_country`,`test`.`t1`.`administrative_email` AS `administrative_email`,`test`.`t1`.`administrative_telephone` AS `administrative_telephone`,`test`.`t1`.`technical_name` AS `technical_name`,`test`.`t1`.`technical_organization` AS `technical_organization`,`test`.`t1`.`technical_street1` AS `technical_street1`,`test`.`t1`.`technical_street2` AS `technical_street2`,`test`.`t1`.`technical_street3` AS `technical_street3`,`test`.`t1`.`technical_street4` AS `technical_street4`,`test`.`t1`.`technical_street5` AS `technical_street5`,`test`.`t1`.`technical_city` AS `technical_city`,`test`.`t1`.`technical_postal_code` AS `technical_postal_code`,`test`.`t1`.`technical_country` AS 
`technical_country`,`test`.`t1`.`technical_email` AS `technical_email`,`test`.`t1`.`technical_telephone` AS `technical_telephone`,`test`.`t1`.`json` AS `json`,`test`.`t1`.`timestamp` AS `timestamp` from `test`.`t1` where `test`.`t1`.`domain` = 'www.mailhost.i-dev.fr' and `test`.`t1`.`timestamp` >= ('2017-01-30 08:24:51' + interval -1 month) order by `test`.`t1`.`timestamp` desc SET optimizer_switch=@save_optimizer_switch; DROP TABLE t1; # @@ -2692,6 +3330,10 @@ insert into filt(id,aceid,clid,fh) values (6341490487802728361,6341490487802728360,1,1291319099896431785), (6341490487802728362,6341490487802728360,1,8948400944397203540), (6341490487802728363,6341490487802728361,1,6701841652906431497); +insert into filt select id+10000,aceid,clid,fh from filt; +insert into filt select id+20000,aceid,clid,fh from filt; +insert into filt select id+40000,aceid,clid,fh from filt; +insert into filt select id+80000,aceid,clid,fh from filt; analyze table filt, acei, acli; Table Op Msg_type Msg_text test.filt analyze status Engine-independent statistics collected @@ -2716,7 +3358,7 @@ fi.fh in (6311439873746261694,-397087483897438286, id select_type table type possible_keys key key_len ref rows filtered Extra 1 SIMPLE t index_merge PRIMARY,acli_rid,acli_tp acli_tp,acli_rid 2,767 NULL 2 100.00 Using intersect(acli_tp,acli_rid); Using where; Using index 1 SIMPLE a ref PRIMARY,acei_aclid acei_aclid 8 test.t.id 1 100.00 Using where -1 SIMPLE fi ref filt_aceid,filt_fh filt_aceid 8 test.a.id 1 17.14 Using where +1 SIMPLE fi ref filt_aceid,filt_fh filt_aceid 8 test.a.id 24 14.46 Using where Warnings: Note 1003 select `test`.`t`.`id` AS `id`,`test`.`fi`.`id` AS `id`,`test`.`fi`.`aceid` AS `aceid`,`test`.`fi`.`clid` AS `clid`,`test`.`fi`.`fh` AS `fh` from `test`.`acli` `t` join `test`.`acei` `a` join `test`.`filt` `fi` where `test`.`t`.`tp` = 121 and `test`.`a`.`atp` = 1 and `test`.`fi`.`aceid` = `test`.`a`.`id` and `test`.`a`.`aclid` = `test`.`t`.`id` and `test`.`t`.`rid` = 
'B5FCC8C7111E4E3CBC21AAF5012F59C2' and `test`.`fi`.`fh` in (6311439873746261694,-397087483897438286,8518228073041491534,-5420422472375069774) set statement optimizer_switch='rowid_filter=off' for select t.id, fi.* @@ -2731,6 +3373,36 @@ fi.fh in (6311439873746261694,-397087483897438286, id id aceid clid fh 3080602882609775594 3080602882609775600 3080602882609775598 1 6311439873746261694 3080602882609775594 3080602882609775601 3080602882609775598 1 6311439873746261694 +3080602882609775594 3080602882609785600 3080602882609775598 1 6311439873746261694 +3080602882609775594 3080602882609785601 3080602882609775598 1 6311439873746261694 +3080602882609775594 3080602882609795600 3080602882609775598 1 6311439873746261694 +3080602882609775594 3080602882609795601 3080602882609775598 1 6311439873746261694 +3080602882609775594 3080602882609805600 3080602882609775598 1 6311439873746261694 +3080602882609775594 3080602882609805601 3080602882609775598 1 6311439873746261694 +3080602882609775594 3080602882609815600 3080602882609775598 1 6311439873746261694 +3080602882609775594 3080602882609815601 3080602882609775598 1 6311439873746261694 +3080602882609775594 3080602882609825600 3080602882609775598 1 6311439873746261694 +3080602882609775594 3080602882609825601 3080602882609775598 1 6311439873746261694 +3080602882609775594 3080602882609835600 3080602882609775598 1 6311439873746261694 +3080602882609775594 3080602882609835601 3080602882609775598 1 6311439873746261694 +3080602882609775594 3080602882609845600 3080602882609775598 1 6311439873746261694 +3080602882609775594 3080602882609845601 3080602882609775598 1 6311439873746261694 +3080602882609775594 3080602882609855600 3080602882609775598 1 6311439873746261694 +3080602882609775594 3080602882609855601 3080602882609775598 1 6311439873746261694 +3080602882609775594 3080602882609865600 3080602882609775598 1 6311439873746261694 +3080602882609775594 3080602882609865601 3080602882609775598 1 6311439873746261694 +3080602882609775594 
3080602882609875600 3080602882609775598 1 6311439873746261694 +3080602882609775594 3080602882609875601 3080602882609775598 1 6311439873746261694 +3080602882609775594 3080602882609885600 3080602882609775598 1 6311439873746261694 +3080602882609775594 3080602882609885601 3080602882609775598 1 6311439873746261694 +3080602882609775594 3080602882609895600 3080602882609775598 1 6311439873746261694 +3080602882609775594 3080602882609895601 3080602882609775598 1 6311439873746261694 +3080602882609775594 3080602882609905600 3080602882609775598 1 6311439873746261694 +3080602882609775594 3080602882609905601 3080602882609775598 1 6311439873746261694 +3080602882609775594 3080602882609915600 3080602882609775598 1 6311439873746261694 +3080602882609775594 3080602882609915601 3080602882609775598 1 6311439873746261694 +3080602882609775594 3080602882609925600 3080602882609775598 1 6311439873746261694 +3080602882609775594 3080602882609925601 3080602882609775598 1 6311439873746261694 set statement optimizer_switch='rowid_filter=on' for explain extended select t.id, fi.* from (acli t inner join acei a on a.aclid = t.id) inner join filt fi on a.id = fi.aceid @@ -2743,7 +3415,7 @@ fi.fh in (6311439873746261694,-397087483897438286, id select_type table type possible_keys key key_len ref rows filtered Extra 1 SIMPLE t index_merge PRIMARY,acli_rid,acli_tp acli_tp,acli_rid 2,767 NULL 2 100.00 Using intersect(acli_tp,acli_rid); Using where; Using index 1 SIMPLE a ref PRIMARY,acei_aclid acei_aclid 8 test.t.id 1 100.00 Using where -1 SIMPLE fi ref|filter filt_aceid,filt_fh filt_aceid|filt_fh 8|8 test.a.id 1 (17%) 17.14 Using where; Using rowid filter +1 SIMPLE fi ref|filter filt_aceid,filt_fh filt_aceid|filt_fh 8|8 test.a.id 24 (14%) 14.46 Using where; Using rowid filter Warnings: Note 1003 select `test`.`t`.`id` AS `id`,`test`.`fi`.`id` AS `id`,`test`.`fi`.`aceid` AS `aceid`,`test`.`fi`.`clid` AS `clid`,`test`.`fi`.`fh` AS `fh` from `test`.`acli` `t` join `test`.`acei` `a` join `test`.`filt` `fi` 
where `test`.`t`.`tp` = 121 and `test`.`a`.`atp` = 1 and `test`.`fi`.`aceid` = `test`.`a`.`id` and `test`.`a`.`aclid` = `test`.`t`.`id` and `test`.`t`.`rid` = 'B5FCC8C7111E4E3CBC21AAF5012F59C2' and `test`.`fi`.`fh` in (6311439873746261694,-397087483897438286,8518228073041491534,-5420422472375069774) set statement optimizer_switch='rowid_filter=on' for select t.id, fi.* @@ -2758,6 +3430,36 @@ fi.fh in (6311439873746261694,-397087483897438286, id id aceid clid fh 3080602882609775594 3080602882609775600 3080602882609775598 1 6311439873746261694 3080602882609775594 3080602882609775601 3080602882609775598 1 6311439873746261694 +3080602882609775594 3080602882609785600 3080602882609775598 1 6311439873746261694 +3080602882609775594 3080602882609785601 3080602882609775598 1 6311439873746261694 +3080602882609775594 3080602882609795600 3080602882609775598 1 6311439873746261694 +3080602882609775594 3080602882609795601 3080602882609775598 1 6311439873746261694 +3080602882609775594 3080602882609805600 3080602882609775598 1 6311439873746261694 +3080602882609775594 3080602882609805601 3080602882609775598 1 6311439873746261694 +3080602882609775594 3080602882609815600 3080602882609775598 1 6311439873746261694 +3080602882609775594 3080602882609815601 3080602882609775598 1 6311439873746261694 +3080602882609775594 3080602882609825600 3080602882609775598 1 6311439873746261694 +3080602882609775594 3080602882609825601 3080602882609775598 1 6311439873746261694 +3080602882609775594 3080602882609835600 3080602882609775598 1 6311439873746261694 +3080602882609775594 3080602882609835601 3080602882609775598 1 6311439873746261694 +3080602882609775594 3080602882609845600 3080602882609775598 1 6311439873746261694 +3080602882609775594 3080602882609845601 3080602882609775598 1 6311439873746261694 +3080602882609775594 3080602882609855600 3080602882609775598 1 6311439873746261694 +3080602882609775594 3080602882609855601 3080602882609775598 1 6311439873746261694 +3080602882609775594 3080602882609865600 
3080602882609775598 1 6311439873746261694 +3080602882609775594 3080602882609865601 3080602882609775598 1 6311439873746261694 +3080602882609775594 3080602882609875600 3080602882609775598 1 6311439873746261694 +3080602882609775594 3080602882609875601 3080602882609775598 1 6311439873746261694 +3080602882609775594 3080602882609885600 3080602882609775598 1 6311439873746261694 +3080602882609775594 3080602882609885601 3080602882609775598 1 6311439873746261694 +3080602882609775594 3080602882609895600 3080602882609775598 1 6311439873746261694 +3080602882609775594 3080602882609895601 3080602882609775598 1 6311439873746261694 +3080602882609775594 3080602882609905600 3080602882609775598 1 6311439873746261694 +3080602882609775594 3080602882609905601 3080602882609775598 1 6311439873746261694 +3080602882609775594 3080602882609915600 3080602882609775598 1 6311439873746261694 +3080602882609775594 3080602882609915601 3080602882609775598 1 6311439873746261694 +3080602882609775594 3080602882609925600 3080602882609775598 1 6311439873746261694 +3080602882609775594 3080602882609925601 3080602882609775598 1 6311439873746261694 set optimizer_switch='mrr=on'; set join_cache_level=6; set statement optimizer_switch='rowid_filter=off' for explain extended select t.id, fi.* @@ -2772,7 +3474,7 @@ fi.fh in (6311439873746261694,-397087483897438286, id select_type table type possible_keys key key_len ref rows filtered Extra 1 SIMPLE t index_merge PRIMARY,acli_rid,acli_tp acli_tp,acli_rid 2,767 NULL 2 100.00 Using intersect(acli_tp,acli_rid); Using where; Using index 1 SIMPLE a ref PRIMARY,acei_aclid acei_aclid 8 test.t.id 1 100.00 Using where; Using join buffer (flat, BKA join); Rowid-ordered scan -1 SIMPLE fi ref filt_aceid,filt_fh filt_aceid 8 test.a.id 1 17.14 Using where; Using join buffer (incremental, BKA join); Rowid-ordered scan +1 SIMPLE fi ref filt_aceid,filt_fh filt_aceid 8 test.a.id 24 14.46 Using where; Using join buffer (incremental, BKA join); Rowid-ordered scan Warnings: Note 1003 
select `test`.`t`.`id` AS `id`,`test`.`fi`.`id` AS `id`,`test`.`fi`.`aceid` AS `aceid`,`test`.`fi`.`clid` AS `clid`,`test`.`fi`.`fh` AS `fh` from `test`.`acli` `t` join `test`.`acei` `a` join `test`.`filt` `fi` where `test`.`t`.`tp` = 121 and `test`.`a`.`atp` = 1 and `test`.`fi`.`aceid` = `test`.`a`.`id` and `test`.`a`.`aclid` = `test`.`t`.`id` and `test`.`t`.`rid` = 'B5FCC8C7111E4E3CBC21AAF5012F59C2' and `test`.`fi`.`fh` in (6311439873746261694,-397087483897438286,8518228073041491534,-5420422472375069774) set statement optimizer_switch='rowid_filter=off' for select t.id, fi.* @@ -2787,6 +3489,36 @@ fi.fh in (6311439873746261694,-397087483897438286, id id aceid clid fh 3080602882609775594 3080602882609775600 3080602882609775598 1 6311439873746261694 3080602882609775594 3080602882609775601 3080602882609775598 1 6311439873746261694 +3080602882609775594 3080602882609785600 3080602882609775598 1 6311439873746261694 +3080602882609775594 3080602882609785601 3080602882609775598 1 6311439873746261694 +3080602882609775594 3080602882609795600 3080602882609775598 1 6311439873746261694 +3080602882609775594 3080602882609795601 3080602882609775598 1 6311439873746261694 +3080602882609775594 3080602882609805600 3080602882609775598 1 6311439873746261694 +3080602882609775594 3080602882609805601 3080602882609775598 1 6311439873746261694 +3080602882609775594 3080602882609815600 3080602882609775598 1 6311439873746261694 +3080602882609775594 3080602882609815601 3080602882609775598 1 6311439873746261694 +3080602882609775594 3080602882609825600 3080602882609775598 1 6311439873746261694 +3080602882609775594 3080602882609825601 3080602882609775598 1 6311439873746261694 +3080602882609775594 3080602882609835600 3080602882609775598 1 6311439873746261694 +3080602882609775594 3080602882609835601 3080602882609775598 1 6311439873746261694 +3080602882609775594 3080602882609845600 3080602882609775598 1 6311439873746261694 +3080602882609775594 3080602882609845601 3080602882609775598 1 
6311439873746261694 +3080602882609775594 3080602882609855600 3080602882609775598 1 6311439873746261694 +3080602882609775594 3080602882609855601 3080602882609775598 1 6311439873746261694 +3080602882609775594 3080602882609865600 3080602882609775598 1 6311439873746261694 +3080602882609775594 3080602882609865601 3080602882609775598 1 6311439873746261694 +3080602882609775594 3080602882609875600 3080602882609775598 1 6311439873746261694 +3080602882609775594 3080602882609875601 3080602882609775598 1 6311439873746261694 +3080602882609775594 3080602882609885600 3080602882609775598 1 6311439873746261694 +3080602882609775594 3080602882609885601 3080602882609775598 1 6311439873746261694 +3080602882609775594 3080602882609895600 3080602882609775598 1 6311439873746261694 +3080602882609775594 3080602882609895601 3080602882609775598 1 6311439873746261694 +3080602882609775594 3080602882609905600 3080602882609775598 1 6311439873746261694 +3080602882609775594 3080602882609905601 3080602882609775598 1 6311439873746261694 +3080602882609775594 3080602882609915600 3080602882609775598 1 6311439873746261694 +3080602882609775594 3080602882609915601 3080602882609775598 1 6311439873746261694 +3080602882609775594 3080602882609925600 3080602882609775598 1 6311439873746261694 +3080602882609775594 3080602882609925601 3080602882609775598 1 6311439873746261694 set statement optimizer_switch='rowid_filter=on' for explain extended select t.id, fi.* from (acli t inner join acei a on a.aclid = t.id) inner join filt fi on a.id = fi.aceid @@ -2799,7 +3531,7 @@ fi.fh in (6311439873746261694,-397087483897438286, id select_type table type possible_keys key key_len ref rows filtered Extra 1 SIMPLE t index_merge PRIMARY,acli_rid,acli_tp acli_tp,acli_rid 2,767 NULL 2 100.00 Using intersect(acli_tp,acli_rid); Using where; Using index 1 SIMPLE a ref PRIMARY,acei_aclid acei_aclid 8 test.t.id 1 100.00 Using where; Using join buffer (flat, BKA join); Rowid-ordered scan -1 SIMPLE fi ref|filter filt_aceid,filt_fh 
filt_aceid|filt_fh 8|8 test.a.id 1 (17%) 17.14 Using where; Using join buffer (incremental, BKA join); Rowid-ordered scan; Using rowid filter +1 SIMPLE fi ref|filter filt_aceid,filt_fh filt_aceid|filt_fh 8|8 test.a.id 24 (14%) 14.46 Using where; Using join buffer (incremental, BKA join); Rowid-ordered scan; Using rowid filter Warnings: Note 1003 select `test`.`t`.`id` AS `id`,`test`.`fi`.`id` AS `id`,`test`.`fi`.`aceid` AS `aceid`,`test`.`fi`.`clid` AS `clid`,`test`.`fi`.`fh` AS `fh` from `test`.`acli` `t` join `test`.`acei` `a` join `test`.`filt` `fi` where `test`.`t`.`tp` = 121 and `test`.`a`.`atp` = 1 and `test`.`fi`.`aceid` = `test`.`a`.`id` and `test`.`a`.`aclid` = `test`.`t`.`id` and `test`.`t`.`rid` = 'B5FCC8C7111E4E3CBC21AAF5012F59C2' and `test`.`fi`.`fh` in (6311439873746261694,-397087483897438286,8518228073041491534,-5420422472375069774) set statement optimizer_switch='rowid_filter=on' for select t.id, fi.* @@ -2814,6 +3546,36 @@ fi.fh in (6311439873746261694,-397087483897438286, id id aceid clid fh 3080602882609775594 3080602882609775600 3080602882609775598 1 6311439873746261694 3080602882609775594 3080602882609775601 3080602882609775598 1 6311439873746261694 +3080602882609775594 3080602882609785600 3080602882609775598 1 6311439873746261694 +3080602882609775594 3080602882609785601 3080602882609775598 1 6311439873746261694 +3080602882609775594 3080602882609795600 3080602882609775598 1 6311439873746261694 +3080602882609775594 3080602882609795601 3080602882609775598 1 6311439873746261694 +3080602882609775594 3080602882609805600 3080602882609775598 1 6311439873746261694 +3080602882609775594 3080602882609805601 3080602882609775598 1 6311439873746261694 +3080602882609775594 3080602882609815600 3080602882609775598 1 6311439873746261694 +3080602882609775594 3080602882609815601 3080602882609775598 1 6311439873746261694 +3080602882609775594 3080602882609825600 3080602882609775598 1 6311439873746261694 +3080602882609775594 3080602882609825601 3080602882609775598 1 
6311439873746261694 +3080602882609775594 3080602882609835600 3080602882609775598 1 6311439873746261694 +3080602882609775594 3080602882609835601 3080602882609775598 1 6311439873746261694 +3080602882609775594 3080602882609845600 3080602882609775598 1 6311439873746261694 +3080602882609775594 3080602882609845601 3080602882609775598 1 6311439873746261694 +3080602882609775594 3080602882609855600 3080602882609775598 1 6311439873746261694 +3080602882609775594 3080602882609855601 3080602882609775598 1 6311439873746261694 +3080602882609775594 3080602882609865600 3080602882609775598 1 6311439873746261694 +3080602882609775594 3080602882609865601 3080602882609775598 1 6311439873746261694 +3080602882609775594 3080602882609875600 3080602882609775598 1 6311439873746261694 +3080602882609775594 3080602882609875601 3080602882609775598 1 6311439873746261694 +3080602882609775594 3080602882609885600 3080602882609775598 1 6311439873746261694 +3080602882609775594 3080602882609885601 3080602882609775598 1 6311439873746261694 +3080602882609775594 3080602882609895600 3080602882609775598 1 6311439873746261694 +3080602882609775594 3080602882609895601 3080602882609775598 1 6311439873746261694 +3080602882609775594 3080602882609905600 3080602882609775598 1 6311439873746261694 +3080602882609775594 3080602882609905601 3080602882609775598 1 6311439873746261694 +3080602882609775594 3080602882609915600 3080602882609775598 1 6311439873746261694 +3080602882609775594 3080602882609915601 3080602882609775598 1 6311439873746261694 +3080602882609775594 3080602882609925600 3080602882609775598 1 6311439873746261694 +3080602882609775594 3080602882609925601 3080602882609775598 1 6311439873746261694 set statement optimizer_switch='rowid_filter=on' for analyze format=json select t.id, fi.* from (acli t inner join acei a on a.aclid = t.id) inner join filt fi on a.id = fi.aceid @@ -2892,22 +3654,23 @@ ANALYZE "key": "filt_fh", "used_key_parts": ["fh"] }, - "rows": 6, - "selectivity_pct": 17.143, - "r_rows": 5, + 
"rows": 81, + "selectivity_pct": 14.464, + "r_rows": 80, + "r_lookups": 80, "r_selectivity_pct": 40, "r_buffer_size": "REPLACED", "r_filling_time_ms": "REPLACED" }, "r_loops": 1, - "rows": 1, - "r_rows": 2, + "rows": 24, + "r_rows": 32, "r_total_time_ms": "REPLACED", - "filtered": 17.143, + "filtered": 14.464, "r_filtered": 100 }, "buffer_type": "incremental", - "buffer_size": "603", + "buffer_size": "4Kb", "join_type": "BKA", "mrr_type": "Rowid-ordered scan", "attached_condition": "fi.fh in (6311439873746261694,-397087483897438286,8518228073041491534,-5420422472375069774)", @@ -2926,38 +3689,99 @@ CREATE TABLE t1 (pk int NOT NULL, c1 varchar(1)) engine=innodb; INSERT INTO t1 VALUES (1,NULL),(15,'o'),(16,'x'),(19,'t'),(35,'k'),(36,'h'),(42,'t'),(43,'h'), (53,'l'),(62,'a'),(71,NULL),(79,'u'),(128,'y'),(129,NULL),(133,NULL); +INSERT INTO t1 SELECT * FROM t1; +INSERT INTO t1 SELECT * FROM t1; CREATE TABLE t2 ( -i1 int, c1 varchar(1) NOT NULL, KEY c1 (c1), KEY i1 (i1) +i1 int, c1 varchar(1) NOT NULL, +filler1 char(255) default '0', filler2 char(255) default '0', +KEY c1 (c1), KEY i1 (i1) ) engine=innodb; -INSERT INTO t2 VALUES -(1,'1'),(NULL,'1'),(42,'t'),(NULL,'1'),(79,'u'),(NULL,'1'), -(NULL,'4'),(NULL,'4'),(NULL,'1'),(NULL,'u'),(2,'1'),(NULL,'w'); +INSERT INTO t2(i1,c1) VALUES +(NULL,'1'),(1,'1'),(2,'t'),(3,'1'),(4,'u'),(5,'1'), +(6,'4'),(7,'4'),(8,'1'),(1,'u'),(2,'1'),(NULL,'w'); +INSERT INTO t2 SELECT * FROM t2; INSERT INTO t2 SELECT * FROM t2; +INSERT INTO t2 SELECT * FROM t2; +INSERT INTO t2 SELECT * FROM t2; +ANALYZE TABLE t1,t2 PERSISTENT FOR ALL; +Table Op Msg_type Msg_text +test.t1 analyze status Engine-independent statistics collected +test.t1 analyze status OK +test.t2 analyze status Engine-independent statistics collected +test.t2 analyze status OK SELECT * FROM t1 WHERE t1.c1 NOT IN (SELECT t2.c1 FROM t2, t1 AS a1 -WHERE t2.i1 = t1.pk AND t2.i1 IS NOT NULL); +WHERE t2.i1 = t1.pk AND t2.i1 BETWEEN 3 AND 5); pk c1 +1 NULL +15 o +16 x +19 t +35 k +36 h +42 
t +43 h +53 l +62 a +71 NULL +79 u +128 y +129 NULL +133 NULL +1 NULL +15 o +16 x +19 t +35 k +36 h +42 t +43 h +53 l +62 a +71 NULL +79 u +128 y +129 NULL +133 NULL +1 NULL +15 o +16 x +19 t +35 k +36 h +42 t +43 h +53 l +62 a +71 NULL +79 u +128 y +129 NULL +133 NULL +1 NULL 15 o 16 x 19 t 35 k 36 h +42 t 43 h 53 l 62 a 71 NULL +79 u 128 y 129 NULL 133 NULL EXPLAIN EXTENDED SELECT * FROM t1 WHERE t1.c1 NOT IN (SELECT t2.c1 FROM t2, t1 AS a1 -WHERE t2.i1 = t1.pk AND t2.i1 IS NOT NULL); +WHERE t2.i1 = t1.pk AND t2.i1 BETWEEN 3 AND 5); id select_type table type possible_keys key key_len ref rows filtered Extra -1 PRIMARY t1 ALL NULL NULL NULL NULL 15 100.00 Using where -2 DEPENDENT SUBQUERY t2 ref|filter c1,i1 c1|i1 3|5 func 6 (33%) 33.33 Using where; Full scan on NULL key; Using rowid filter -2 DEPENDENT SUBQUERY a1 ALL NULL NULL NULL NULL 15 100.00 Using join buffer (flat, BNL join) +1 PRIMARY t1 ALL NULL NULL NULL NULL 60 100.00 Using where +2 DEPENDENT SUBQUERY t2 ref|filter c1,i1 c1|i1 3|5 func 38 (25%) 25.00 Using where; Full scan on NULL key; Using rowid filter +2 DEPENDENT SUBQUERY a1 ALL NULL NULL NULL NULL 60 100.00 Using join buffer (flat, BNL join) Warnings: Note 1276 Field or reference 'test.t1.pk' of SELECT #2 was resolved in SELECT #1 -Note 1003 /* select#1 */ select `test`.`t1`.`pk` AS `pk`,`test`.`t1`.`c1` AS `c1` from `test`.`t1` where !<`test`.`t1`.`c1`,`test`.`t1`.`pk`>((`test`.`t1`.`c1`,(/* select#2 */ select `test`.`t2`.`c1` from `test`.`t2` join `test`.`t1` `a1` where `test`.`t2`.`i1` = `test`.`t1`.`pk` and `test`.`t2`.`i1` is not null and trigcond((`test`.`t1`.`c1`) = `test`.`t2`.`c1`)))) +Note 1003 /* select#1 */ select `test`.`t1`.`pk` AS `pk`,`test`.`t1`.`c1` AS `c1` from `test`.`t1` where !<`test`.`t1`.`c1`,`test`.`t1`.`pk`>((`test`.`t1`.`c1`,(/* select#2 */ select `test`.`t2`.`c1` from `test`.`t2` join `test`.`t1` `a1` where `test`.`t2`.`i1` = `test`.`t1`.`pk` and `test`.`t2`.`i1` between 3 and 5 and trigcond((`test`.`t1`.`c1`) = 
`test`.`t2`.`c1`)))) DROP TABLE t1,t2; # End of 10.4 tests diff --git a/mysql-test/main/rowid_filter_innodb.test b/mysql-test/main/rowid_filter_innodb.test index dc8b1ddbca5..f4d0b241d11 100644 --- a/mysql-test/main/rowid_filter_innodb.test +++ b/mysql-test/main/rowid_filter_innodb.test @@ -32,6 +32,10 @@ insert into t1 values (85,'a','a',-1),(86,'a','a',5),(87,'a','a',null),(88,'a','a',160), (89,null,null,null),(90,'a','a',14785),(91,'a','a',0),(92,'a','a',null); +insert into t1 select pk+100, f1, f2, a from t1; + +analyze table t1; + let $q= ( select * from t1 where (f1 is null and f2 is null) and (f2 between 'a' and 'z' or f1 in ('a'))) @@ -73,13 +77,13 @@ drop table t1, t2; create table t1 (a int, b int, key (b), key (a)) engine=innodb; insert into t1 -select (rand(1)*1000)/10, (rand(1001)*1000)/50 from seq_1_to_1000; +select (rand(1)*1000)/10, (rand(1001)*1000)/20 from seq_1_to_1000; analyze table t1; let $q= -select count(*) from t1 where a in (22,83,11) and b=2; +select count(*) from t1 where a between 21 and 30 and b=2; let $q1= -select * from t1 where a in (22,83,11) and b=2; +select * from t1 where a between 21 and 30 and b=2; set @save_optimizer_switch= @@optimizer_switch; @@ -224,7 +228,7 @@ set global innodb_stats_persistent= @stats.save; CREATE TABLE t1 ( id int(11) unsigned NOT NULL AUTO_INCREMENT, - domain varchar(255) NOT NULL, + domain varchar(32) NOT NULL, registrant_name varchar(255) DEFAULT NULL, registrant_organization varchar(255) DEFAULT NULL, registrant_street1 varchar(255) DEFAULT NULL, @@ -317,6 +321,66 @@ technical_email, technical_telephone, json, timestamp) VALUES null, 'KOELN', '50937', 'GERMANY', 'ICANN@EXPIRES-2009.WEBCARE24.COM', '492214307580', '', '2017-01-30 10:08:29'); +let $sqi= +INSERT INTO t1 ( +domain, registrant_name, registrant_organization, registrant_street1, +registrant_street2, registrant_street3, registrant_street4, registrant_street5, +registrant_city, registrant_postal_code, registrant_country, registrant_email, 
+registrant_telephone, administrative_name, administrative_organization, +administrative_street1, administrative_street2, administrative_street3, +administrative_street4, administrative_street5, administrative_city, +administrative_postal_code, administrative_country, administrative_email, +administrative_telephone, technical_name, technical_organization, +technical_street1, technical_street2, technical_street3, technical_street4, +technical_street5, technical_city, technical_postal_code, technical_country, +technical_email, technical_telephone, json, timestamp) +VALUES +('www.mailhost.i-dev.fr', null, null, null, null, null, null, null, null, + null, null, null, null, null, null, null, null, null, null, null, null, null, + null, null, null, null, null, null, null, null, null, null, null, null, null, + null, null, '', '2016-12-22 09:18:28'); + +eval $sqi; +eval $sqi; +eval $sqi; +eval $sqi; +eval $sqi; +eval $sqi; +eval $sqi; +eval $sqi; + +let $qi= +INSERT INTO t1 ( +domain, registrant_name, registrant_organization, registrant_street1, +registrant_street2, registrant_street3, registrant_street4, registrant_street5, +registrant_city, registrant_postal_code, registrant_country, registrant_email, +registrant_telephone, administrative_name, administrative_organization, +administrative_street1, administrative_street2, administrative_street3, +administrative_street4, administrative_street5, administrative_city, +administrative_postal_code, administrative_country, administrative_email, +administrative_telephone, technical_name, technical_organization, +technical_street1, technical_street2, technical_street3, technical_street4, +technical_street5, technical_city, technical_postal_code, technical_country, +technical_email, technical_telephone, json, timestamp) +SELECT +domain, registrant_name, registrant_organization, registrant_street1, +registrant_street2, registrant_street3, registrant_street4, registrant_street5, +registrant_city, registrant_postal_code, 
registrant_country, registrant_email, +registrant_telephone, administrative_name, administrative_organization, +administrative_street1, administrative_street2, administrative_street3, +administrative_street4, administrative_street5, administrative_city, +administrative_postal_code, administrative_country, administrative_email, +administrative_telephone, technical_name, technical_organization, +technical_street1, technical_street2, technical_street3, technical_street4, +technical_street5, technical_city, technical_postal_code, technical_country, +technical_email, technical_telephone, json, timestamp +FROM t1; + +eval $qi; +eval $qi; + +ANALYZE TABLE t1 PERSISTENT FOR ALL; + SET @save_optimizer_switch=@@optimizer_switch; SET optimizer_switch='mrr=on,mrr_sort_keys=on'; @@ -324,7 +388,7 @@ SET optimizer_switch='mrr=on,mrr_sort_keys=on'; let $q= SELECT * FROM t1 WHERE 1 = 1 AND domain = 'www.mailhost.i-dev.fr' AND - timestamp >= DATE_ADD(CURRENT_TIMESTAMP, INTERVAL -1 MONTH) + timestamp >= DATE_ADD('2017-01-30 08:24:51', INTERVAL -1 MONTH) ORDER BY timestamp DESC; eval $q; @@ -497,6 +561,11 @@ insert into filt(id,aceid,clid,fh) values (6341490487802728362,6341490487802728360,1,8948400944397203540), (6341490487802728363,6341490487802728361,1,6701841652906431497); +insert into filt select id+10000,aceid,clid,fh from filt; +insert into filt select id+20000,aceid,clid,fh from filt; +insert into filt select id+40000,aceid,clid,fh from filt; +insert into filt select id+80000,aceid,clid,fh from filt; + analyze table filt, acei, acli; let $q= @@ -545,19 +614,28 @@ CREATE TABLE t1 (pk int NOT NULL, c1 varchar(1)) engine=innodb; INSERT INTO t1 VALUES (1,NULL),(15,'o'),(16,'x'),(19,'t'),(35,'k'),(36,'h'),(42,'t'),(43,'h'), (53,'l'),(62,'a'),(71,NULL),(79,'u'),(128,'y'),(129,NULL),(133,NULL); +INSERT INTO t1 SELECT * FROM t1; +INSERT INTO t1 SELECT * FROM t1; CREATE TABLE t2 ( -i1 int, c1 varchar(1) NOT NULL, KEY c1 (c1), KEY i1 (i1) +i1 int, c1 varchar(1) NOT NULL, +filler1 
char(255) default '0', filler2 char(255) default '0', +KEY c1 (c1), KEY i1 (i1) ) engine=innodb; -INSERT INTO t2 VALUES -(1,'1'),(NULL,'1'),(42,'t'),(NULL,'1'),(79,'u'),(NULL,'1'), -(NULL,'4'),(NULL,'4'),(NULL,'1'),(NULL,'u'),(2,'1'),(NULL,'w'); +INSERT INTO t2(i1,c1) VALUES +(NULL,'1'),(1,'1'),(2,'t'),(3,'1'),(4,'u'),(5,'1'), +(6,'4'),(7,'4'),(8,'1'),(1,'u'),(2,'1'),(NULL,'w'); +INSERT INTO t2 SELECT * FROM t2; +INSERT INTO t2 SELECT * FROM t2; INSERT INTO t2 SELECT * FROM t2; +INSERT INTO t2 SELECT * FROM t2; + +ANALYZE TABLE t1,t2 PERSISTENT FOR ALL; let $q= SELECT * FROM t1 WHERE t1.c1 NOT IN (SELECT t2.c1 FROM t2, t1 AS a1 - WHERE t2.i1 = t1.pk AND t2.i1 IS NOT NULL); + WHERE t2.i1 = t1.pk AND t2.i1 BETWEEN 3 AND 5); eval $q; eval EXPLAIN EXTENDED $q; diff --git a/mysql-test/main/rowid_filter_innodb_debug.result b/mysql-test/main/rowid_filter_innodb_debug.result index 6fd75294bdb..f989e00919b 100644 --- a/mysql-test/main/rowid_filter_innodb_debug.result +++ b/mysql-test/main/rowid_filter_innodb_debug.result @@ -4,8 +4,6 @@ set default_storage_engine=innodb; # create table t0(a int); insert into t0 values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9); -create table t1(a int); -insert into t1 select A.a + B.a* 10 + C.a * 100 from t0 A, t0 B, t0 C; create table t2(a int); insert into t2 select A.a + B.a* 10 from t0 A, t0 B; CREATE TABLE t3 ( @@ -22,10 +20,10 @@ InnoDB insert into t3 select A.a, -A.a, +B.a, 'filler-data-filler-data' from -t0 A, t1 B; +t2 A, t2 B; analyze table t2,t3; Table Op Msg_type Msg_text test.t2 analyze status Engine-independent statistics collected @@ -38,7 +36,7 @@ where t3.key1=t2.a and t3.key2 in (2,3); id select_type table type possible_keys key key_len ref rows Extra 1 SIMPLE t2 ALL NULL NULL NULL NULL 100 Using where -1 SIMPLE t3 ref|filter key1,key2 key1|key2 5|5 test.t2.a 1000 (20%) Using where; Using rowid filter +1 SIMPLE t3 ref|filter key1,key2 key1|key2 5|5 test.t2.a 100 (2%) Using where; Using rowid filter set 
debug_sync='handler_rowid_filter_check SIGNAL at_rowid_filter_check WAIT_FOR go'; select * from t2, t3 where @@ -52,7 +50,7 @@ connection default; disconnect con1; ERROR 70100: Query execution was interrupted set debug_sync='RESET'; -drop table t0,t1,t2,t3; +drop table t0,t2,t3; set default_storage_engine=default; set @save_optimizer_switch= @@optimizer_switch; set @save_use_stat_tables= @@use_stat_tables; @@ -66,6 +64,16 @@ set optimizer_switch='rowid_filter=on'; # CREATE TABLE t1 (a INT, b INT, INDEX(a), INDEX(b)) ENGINE=InnoDB; INSERT INTO t1 VALUES (0,0),(1,0),(-1,1), (-2,1), (-2,3), (-3,4), (-2,4); +INSERT INTO t1 SELECT * FROM t1; +INSERT INTO t1 SELECT * FROM t1; +INSERT INTO t1 SELECT * FROM t1; +INSERT INTO t1 SELECT * FROM t1; +INSERT INTO t1 SELECT * FROM t1; +INSERT INTO t1 SELECT * FROM t1; +ANALYZE TABLE t1 PERSISTENT FOR ALL; +Table Op Msg_type Msg_text +test.t1 analyze status Engine-independent statistics collected +test.t1 analyze status OK set debug_sync='handler_rowid_filter_check SIGNAL killme WAIT_FOR go'; SELECT * FROM t1 WHERE a > 0 AND b=0; connect con1, localhost, root,,; diff --git a/mysql-test/main/rowid_filter_innodb_debug.test b/mysql-test/main/rowid_filter_innodb_debug.test index 31fbd937304..74deaa8ccc9 100644 --- a/mysql-test/main/rowid_filter_innodb_debug.test +++ b/mysql-test/main/rowid_filter_innodb_debug.test @@ -24,6 +24,14 @@ set optimizer_switch='rowid_filter=on'; CREATE TABLE t1 (a INT, b INT, INDEX(a), INDEX(b)) ENGINE=InnoDB; INSERT INTO t1 VALUES (0,0),(1,0),(-1,1), (-2,1), (-2,3), (-3,4), (-2,4); +INSERT INTO t1 SELECT * FROM t1; +INSERT INTO t1 SELECT * FROM t1; +INSERT INTO t1 SELECT * FROM t1; +INSERT INTO t1 SELECT * FROM t1; +INSERT INTO t1 SELECT * FROM t1; +INSERT INTO t1 SELECT * FROM t1; + +ANALYZE TABLE t1 PERSISTENT FOR ALL; let $ID= `SELECT @id := CONNECTION_ID()`; diff --git a/mysql-test/main/rowid_filter_myisam_debug.result b/mysql-test/main/rowid_filter_myisam_debug.result index 16fcb2a416e..32a989f50da 
100644 --- a/mysql-test/main/rowid_filter_myisam_debug.result +++ b/mysql-test/main/rowid_filter_myisam_debug.result @@ -3,8 +3,6 @@ # create table t0(a int); insert into t0 values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9); -create table t1(a int); -insert into t1 select A.a + B.a* 10 + C.a * 100 from t0 A, t0 B, t0 C; create table t2(a int); insert into t2 select A.a + B.a* 10 from t0 A, t0 B; CREATE TABLE t3 ( @@ -21,10 +19,10 @@ MyISAM insert into t3 select A.a, -A.a, +B.a, 'filler-data-filler-data' from -t0 A, t1 B; +t2 A, t2 B; analyze table t2,t3; Table Op Msg_type Msg_text test.t2 analyze status Engine-independent statistics collected @@ -37,7 +35,7 @@ where t3.key1=t2.a and t3.key2 in (2,3); id select_type table type possible_keys key key_len ref rows Extra 1 SIMPLE t2 ALL NULL NULL NULL NULL 100 Using where -1 SIMPLE t3 ref|filter key1,key2 key1|key2 5|5 test.t2.a 1000 (18%) Using where; Using rowid filter +1 SIMPLE t3 ref|filter key1,key2 key1|key2 5|5 test.t2.a 100 (2%) Using where; Using rowid filter set debug_sync='handler_rowid_filter_check SIGNAL at_rowid_filter_check WAIT_FOR go'; select * from t2, t3 where @@ -51,4 +49,4 @@ connection default; disconnect con1; ERROR 70100: Query execution was interrupted set debug_sync='RESET'; -drop table t0,t1,t2,t3; +drop table t0,t2,t3; diff --git a/mysql-test/main/select.result b/mysql-test/main/select.result index 189775aa5aa..1562144b164 100644 --- a/mysql-test/main/select.result +++ b/mysql-test/main/select.result @@ -3474,13 +3474,13 @@ INSERT INTO t2 VALUES EXPLAIN SELECT a, c, d, f FROM t1,t2 WHERE a=c AND b BETWEEN 4 AND 6; id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t2 ALL c NULL NULL NULL 18 Using where -1 SIMPLE t1 eq_ref|filter PRIMARY,b PRIMARY|b 4|5 test.t2.c 1 (30%) Using where; Using rowid filter +1 SIMPLE t1 range PRIMARY,b b 5 NULL 3 Using index condition +1 SIMPLE t2 ref c c 5 test.t1.a 2 EXPLAIN SELECT a, c, d, f FROM t1,t2 WHERE a=c AND b BETWEEN 4 AND 6 AND a 
> 0; id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t2 ALL c NULL NULL NULL 18 Using where -1 SIMPLE t1 eq_ref|filter PRIMARY,b PRIMARY|b 4|5 test.t2.c 1 (30%) Using where; Using rowid filter +1 SIMPLE t1 range PRIMARY,b b 5 NULL 3 Using index condition; Using where +1 SIMPLE t2 ref c c 5 test.t1.a 2 DROP TABLE t1, t2; create table t1 ( a int unsigned not null auto_increment primary key, @@ -3616,7 +3616,7 @@ t3.a=t2.a AND t3.c IN ('bb','ee'); id select_type table type possible_keys key key_len ref rows Extra 1 SIMPLE t1 const PRIMARY PRIMARY 4 const 1 1 SIMPLE t2 range si si 5 NULL 4 Using index condition; Using where -1 SIMPLE t3 eq_ref|filter PRIMARY,ci PRIMARY|ci 4|5 test.t2.a 1 (30%) Using where; Using rowid filter +1 SIMPLE t3 eq_ref PRIMARY,ci PRIMARY 4 test.t2.a 1 Using where EXPLAIN SELECT t3.a FROM t1,t2,t3 WHERE t1.id = 8 AND t2.i BETWEEN t1.b AND t1.e AND @@ -3624,7 +3624,7 @@ t3.a=t2.a AND t3.c IN ('bb','ee') ; id select_type table type possible_keys key key_len ref rows Extra 1 SIMPLE t1 const PRIMARY PRIMARY 4 const 1 1 SIMPLE t2 range si,ai si 5 NULL 4 Using index condition; Using where -1 SIMPLE t3 eq_ref|filter PRIMARY,ci PRIMARY|ci 4|5 test.t2.a 1 (30%) Using where; Using rowid filter +1 SIMPLE t3 eq_ref PRIMARY,ci PRIMARY 4 test.t2.a 1 Using where EXPLAIN SELECT t3.a FROM t1,t2 FORCE INDEX (si),t3 WHERE t1.id = 8 AND (t2.i=t1.b OR t2.i=t1.e) AND t3.a=t2.a AND @@ -3744,7 +3744,7 @@ EXPLAIN SELECT * FROM t1 WHERE ID_better=1 AND ID1_with_null IS NULL AND (ID2_with_null=1 OR ID2_with_null=2); id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t1 ref|filter idx1,idx2 idx1|idx2 5|4 const 2 (1%) Using index condition; Using where; Using rowid filter +1 SIMPLE t1 ref idx1,idx2 idx2 4 const 2 Using where DROP TABLE t1; CREATE TABLE t1 (a INT, ts TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP, KEY ts(ts)); INSERT INTO t1 VALUES (30,"2006-01-03 23:00:00"), (31,"2006-01-03 
23:00:00"); diff --git a/mysql-test/main/select_jcl6.result b/mysql-test/main/select_jcl6.result index 37277f07ff4..e144477b66e 100644 --- a/mysql-test/main/select_jcl6.result +++ b/mysql-test/main/select_jcl6.result @@ -3485,13 +3485,13 @@ INSERT INTO t2 VALUES EXPLAIN SELECT a, c, d, f FROM t1,t2 WHERE a=c AND b BETWEEN 4 AND 6; id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t2 ALL c NULL NULL NULL 18 Using where -1 SIMPLE t1 eq_ref|filter PRIMARY,b PRIMARY|b 4|5 test.t2.c 1 (30%) Using where; Using join buffer (flat, BKA join); Key-ordered Rowid-ordered scan; Using rowid filter +1 SIMPLE t1 range PRIMARY,b b 5 NULL 3 Using index condition; Rowid-ordered scan +1 SIMPLE t2 ref c c 5 test.t1.a 2 Using join buffer (flat, BKA join); Key-ordered Rowid-ordered scan EXPLAIN SELECT a, c, d, f FROM t1,t2 WHERE a=c AND b BETWEEN 4 AND 6 AND a > 0; id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t2 ALL c NULL NULL NULL 18 Using where -1 SIMPLE t1 eq_ref|filter PRIMARY,b PRIMARY|b 4|5 test.t2.c 1 (30%) Using where; Using join buffer (flat, BKA join); Key-ordered Rowid-ordered scan; Using rowid filter +1 SIMPLE t1 range PRIMARY,b b 5 NULL 3 Using index condition; Using where; Rowid-ordered scan +1 SIMPLE t2 ref c c 5 test.t1.a 2 Using join buffer (flat, BKA join); Key-ordered Rowid-ordered scan DROP TABLE t1, t2; create table t1 ( a int unsigned not null auto_increment primary key, @@ -3627,7 +3627,7 @@ t3.a=t2.a AND t3.c IN ('bb','ee'); id select_type table type possible_keys key key_len ref rows Extra 1 SIMPLE t1 const PRIMARY PRIMARY 4 const 1 1 SIMPLE t2 range si si 5 NULL 4 Using index condition; Using where; Rowid-ordered scan -1 SIMPLE t3 eq_ref|filter PRIMARY,ci PRIMARY|ci 4|5 test.t2.a 1 (30%) Using where; Using join buffer (flat, BKA join); Key-ordered Rowid-ordered scan; Using rowid filter +1 SIMPLE t3 eq_ref PRIMARY,ci PRIMARY 4 test.t2.a 1 Using where; Using join buffer (flat, BKA join); Key-ordered 
Rowid-ordered scan EXPLAIN SELECT t3.a FROM t1,t2,t3 WHERE t1.id = 8 AND t2.i BETWEEN t1.b AND t1.e AND @@ -3635,7 +3635,7 @@ t3.a=t2.a AND t3.c IN ('bb','ee') ; id select_type table type possible_keys key key_len ref rows Extra 1 SIMPLE t1 const PRIMARY PRIMARY 4 const 1 1 SIMPLE t2 range si,ai si 5 NULL 4 Using index condition; Using where; Rowid-ordered scan -1 SIMPLE t3 eq_ref|filter PRIMARY,ci PRIMARY|ci 4|5 test.t2.a 1 (30%) Using where; Using join buffer (flat, BKA join); Key-ordered Rowid-ordered scan; Using rowid filter +1 SIMPLE t3 eq_ref PRIMARY,ci PRIMARY 4 test.t2.a 1 Using where; Using join buffer (flat, BKA join); Key-ordered Rowid-ordered scan EXPLAIN SELECT t3.a FROM t1,t2 FORCE INDEX (si),t3 WHERE t1.id = 8 AND (t2.i=t1.b OR t2.i=t1.e) AND t3.a=t2.a AND @@ -3755,7 +3755,7 @@ EXPLAIN SELECT * FROM t1 WHERE ID_better=1 AND ID1_with_null IS NULL AND (ID2_with_null=1 OR ID2_with_null=2); id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t1 ref|filter idx1,idx2 idx1|idx2 5|4 const 2 (1%) Using index condition; Using where; Using rowid filter +1 SIMPLE t1 ref idx1,idx2 idx2 4 const 2 Using where DROP TABLE t1; CREATE TABLE t1 (a INT, ts TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP, KEY ts(ts)); INSERT INTO t1 VALUES (30,"2006-01-03 23:00:00"), (31,"2006-01-03 23:00:00"); diff --git a/mysql-test/main/select_pkeycache.result b/mysql-test/main/select_pkeycache.result index 189775aa5aa..1562144b164 100644 --- a/mysql-test/main/select_pkeycache.result +++ b/mysql-test/main/select_pkeycache.result @@ -3474,13 +3474,13 @@ INSERT INTO t2 VALUES EXPLAIN SELECT a, c, d, f FROM t1,t2 WHERE a=c AND b BETWEEN 4 AND 6; id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t2 ALL c NULL NULL NULL 18 Using where -1 SIMPLE t1 eq_ref|filter PRIMARY,b PRIMARY|b 4|5 test.t2.c 1 (30%) Using where; Using rowid filter +1 SIMPLE t1 range PRIMARY,b b 5 NULL 3 Using index condition +1 SIMPLE t2 ref c c 
5 test.t1.a 2 EXPLAIN SELECT a, c, d, f FROM t1,t2 WHERE a=c AND b BETWEEN 4 AND 6 AND a > 0; id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t2 ALL c NULL NULL NULL 18 Using where -1 SIMPLE t1 eq_ref|filter PRIMARY,b PRIMARY|b 4|5 test.t2.c 1 (30%) Using where; Using rowid filter +1 SIMPLE t1 range PRIMARY,b b 5 NULL 3 Using index condition; Using where +1 SIMPLE t2 ref c c 5 test.t1.a 2 DROP TABLE t1, t2; create table t1 ( a int unsigned not null auto_increment primary key, @@ -3616,7 +3616,7 @@ t3.a=t2.a AND t3.c IN ('bb','ee'); id select_type table type possible_keys key key_len ref rows Extra 1 SIMPLE t1 const PRIMARY PRIMARY 4 const 1 1 SIMPLE t2 range si si 5 NULL 4 Using index condition; Using where -1 SIMPLE t3 eq_ref|filter PRIMARY,ci PRIMARY|ci 4|5 test.t2.a 1 (30%) Using where; Using rowid filter +1 SIMPLE t3 eq_ref PRIMARY,ci PRIMARY 4 test.t2.a 1 Using where EXPLAIN SELECT t3.a FROM t1,t2,t3 WHERE t1.id = 8 AND t2.i BETWEEN t1.b AND t1.e AND @@ -3624,7 +3624,7 @@ t3.a=t2.a AND t3.c IN ('bb','ee') ; id select_type table type possible_keys key key_len ref rows Extra 1 SIMPLE t1 const PRIMARY PRIMARY 4 const 1 1 SIMPLE t2 range si,ai si 5 NULL 4 Using index condition; Using where -1 SIMPLE t3 eq_ref|filter PRIMARY,ci PRIMARY|ci 4|5 test.t2.a 1 (30%) Using where; Using rowid filter +1 SIMPLE t3 eq_ref PRIMARY,ci PRIMARY 4 test.t2.a 1 Using where EXPLAIN SELECT t3.a FROM t1,t2 FORCE INDEX (si),t3 WHERE t1.id = 8 AND (t2.i=t1.b OR t2.i=t1.e) AND t3.a=t2.a AND @@ -3744,7 +3744,7 @@ EXPLAIN SELECT * FROM t1 WHERE ID_better=1 AND ID1_with_null IS NULL AND (ID2_with_null=1 OR ID2_with_null=2); id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t1 ref|filter idx1,idx2 idx1|idx2 5|4 const 2 (1%) Using index condition; Using where; Using rowid filter +1 SIMPLE t1 ref idx1,idx2 idx2 4 const 2 Using where DROP TABLE t1; CREATE TABLE t1 (a INT, ts TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE 
CURRENT_TIMESTAMP, KEY ts(ts)); INSERT INTO t1 VALUES (30,"2006-01-03 23:00:00"), (31,"2006-01-03 23:00:00"); diff --git a/mysql-test/main/subselect2.result b/mysql-test/main/subselect2.result index c54d635230f..55ac483157f 100644 --- a/mysql-test/main/subselect2.result +++ b/mysql-test/main/subselect2.result @@ -132,7 +132,7 @@ id select_type table type possible_keys key key_len ref rows Extra 1 PRIMARY t3 eq_ref PRIMARY,FFOLDERID_IDX,CMFLDRPARNT_IDX PRIMARY 34 test.t3.PARENTID 1 Using where 1 PRIMARY t3 eq_ref PRIMARY,FFOLDERID_IDX,CMFLDRPARNT_IDX PRIMARY 34 test.t3.PARENTID 1 Using where 1 PRIMARY t3 eq_ref PRIMARY,FFOLDERID_IDX,CMFLDRPARNT_IDX PRIMARY 34 test.t3.PARENTID 1 Using where -1 PRIMARY t3 ref|filter PRIMARY,FFOLDERID_IDX,CMFLDRPARNT_IDX FFOLDERID_IDX|CMFLDRPARNT_IDX 34|35 test.t3.PARENTID 1 (29%) Using where; Using rowid filter +1 PRIMARY t3 eq_ref PRIMARY,FFOLDERID_IDX,CMFLDRPARNT_IDX PRIMARY 34 test.t3.PARENTID 1 Using where drop table t1, t2, t3, t4; CREATE TABLE t1 (a int(10) , PRIMARY KEY (a)) Engine=InnoDB; INSERT INTO t1 VALUES (1),(2); diff --git a/sql/handler.h b/sql/handler.h index cd999f30bc0..aa68c30480e 100644 --- a/sql/handler.h +++ b/sql/handler.h @@ -3156,6 +3156,11 @@ public: DBUG_ASSERT(m_lock_type == F_UNLCK); DBUG_ASSERT(inited == NONE); } + /* To check if table has been properely opened */ + bool is_open() + { + return ref != 0; + } virtual handler *clone(const char *name, MEM_ROOT *mem_root); /** This is called after create to allow us to set up cached variables */ void init() @@ -4804,6 +4809,7 @@ public: ha_share= arg_ha_share; return false; } + void set_table(TABLE* table_arg) { table= table_arg; } int get_lock_type() const { return m_lock_type; } public: /* XXX to be removed, see ha_partition::partition_ht() */ diff --git a/sql/item_func.cc b/sql/item_func.cc index f4596803c2d..9c29280970b 100644 --- a/sql/item_func.cc +++ b/sql/item_func.cc @@ -6019,7 +6019,7 @@ bool Item_func_match::init_search(THD *thd, bool no_order) { 
DBUG_ENTER("Item_func_match::init_search"); - if (!table->file->get_table()) // the handler isn't opened yet + if (!table->file->is_open()) DBUG_RETURN(0); /* Check if init_search() has been called before */ diff --git a/sql/rowid_filter.h b/sql/rowid_filter.h index 467b6884ca6..b76b8b1e635 100644 --- a/sql/rowid_filter.h +++ b/sql/rowid_filter.h @@ -192,6 +192,9 @@ public: */ virtual bool check(void *ctxt, char *elem) = 0; + /* True if the container does not contain any element */ + virtual bool is_empty() = 0; + virtual ~Rowid_filter_container() {} }; @@ -231,6 +234,8 @@ public: virtual ~Rowid_filter() {} + bool is_empty() { return container->is_empty(); } + Rowid_filter_container *get_container() { return container; } void set_tracker(Rowid_filter_tracker *track_arg) { tracker= track_arg; } @@ -268,6 +273,8 @@ public: bool check(char *elem) { + if (container->is_empty()) + return false; bool was_checked= container->check(table, elem); tracker->increment_checked_elements_count(was_checked); return was_checked; @@ -339,6 +346,8 @@ public: my_qsort2(array->front(), array->elements()/elem_size, elem_size, (qsort2_cmp) cmp, cmp_arg); } + + bool is_empty() { return elements() == 0; } }; @@ -368,6 +377,8 @@ public: bool add(void *ctxt, char *elem) { return refpos_container.add(elem); } bool check(void *ctxt, char *elem); + + bool is_empty() { return refpos_container.is_empty(); } }; /** diff --git a/sql/sql_analyze_stmt.h b/sql/sql_analyze_stmt.h index eec52822ae5..40876d178e0 100644 --- a/sql/sql_analyze_stmt.h +++ b/sql/sql_analyze_stmt.h @@ -355,11 +355,14 @@ public: uint get_container_elements() { return container_elements; } + uint get_container_lookups() { return n_checks; } + double get_r_selectivity_pct() { - return (double)n_positive_checks/(double)n_checks; + return n_checks ? 
(double)n_positive_checks/(double)n_checks : 0; } size_t get_container_buff_size() { return container_buff_size; } + }; diff --git a/sql/sql_explain.cc b/sql/sql_explain.cc index 1681da63ac1..70e300997f9 100644 --- a/sql/sql_explain.cc +++ b/sql/sql_explain.cc @@ -1676,6 +1676,7 @@ void Explain_rowid_filter::print_explain_json(Explain_query *query, if (is_analyze) { writer->add_member("r_rows").add_double(tracker->get_container_elements()); + writer->add_member("r_lookups").add_ll(tracker->get_container_lookups()); writer->add_member("r_selectivity_pct"). add_double(tracker->get_r_selectivity_pct() * 100.0); writer->add_member("r_buffer_size"). diff --git a/sql/sql_insert.cc b/sql/sql_insert.cc index 76fd6385041..ca3de361865 100644 --- a/sql/sql_insert.cc +++ b/sql/sql_insert.cc @@ -4171,7 +4171,7 @@ void select_insert::abort_result_set() table will be assigned with view table structure, but that table will not be opened really (it is dummy to check fields types & Co). */ - if (table && table->file->get_table()) + if (table && table->file->is_open()) { bool changed, transactional_table; /* diff --git a/sql/sql_select.cc b/sql/sql_select.cc index a91b4571b21..5ec88e5259c 100644 --- a/sql/sql_select.cc +++ b/sql/sql_select.cc @@ -7400,6 +7400,7 @@ best_access_path(JOIN *join, table_map best_ref_depends_map= 0; Range_rowid_filter_cost_info *best_filter= 0; double tmp; + double keyread_tmp= 0; ha_rows rec; bool best_uses_jbuf= FALSE; MY_BITMAP *eq_join_set= &s->table->eq_join_set; @@ -7666,11 +7667,16 @@ best_access_path(JOIN *join, tmp= records; set_if_smaller(tmp, (double) thd->variables.max_seeks_for_key); if (table->covering_keys.is_set(key)) - tmp= table->file->keyread_time(key, 1, (ha_rows) tmp); + keyread_tmp= + tmp= table->file->keyread_time(key, 1, (ha_rows) tmp); else + { + keyread_tmp= table->file->keyread_time(key, 1, (ha_rows) tmp); tmp= table->file->read_time(key, 1, (ha_rows) MY_MIN(tmp,s->worst_seeks)); + } tmp= COST_MULT(tmp, record_count); + 
keyread_tmp= COST_MULT(keyread_tmp, record_count); } } else @@ -7847,11 +7853,16 @@ best_access_path(JOIN *join, /* Limit the number of matched rows */ set_if_smaller(tmp, (double) thd->variables.max_seeks_for_key); if (table->covering_keys.is_set(key)) - tmp= table->file->keyread_time(key, 1, (ha_rows) tmp); + keyread_tmp= + tmp= table->file->keyread_time(key, 1, (ha_rows) tmp); else + { + keyread_tmp= table->file->keyread_time(key, 1, (ha_rows) tmp); tmp= table->file->read_time(key, 1, (ha_rows) MY_MIN(tmp,s->worst_seeks)); + } tmp= COST_MULT(tmp, record_count); + keyread_tmp= COST_MULT(keyread_tmp, record_count); } else { @@ -7870,7 +7881,35 @@ best_access_path(JOIN *join, (found_part & 1)) // start_key->key can be used for index access { double rows= record_count * records; - double access_cost_factor= MY_MIN(tmp / rows, 1.0); + + /* + If we use filter F with selectivity s the the cost of fetching data + by key using this filter will be + cost_of_fetching_1_row * rows * s + + cost_of_fetching_1_key_tuple * rows * (1 - s) + + cost_of_1_lookup_into_filter * rows + Without using any filter the cost would be just + cost_of_fetching_1_row * rows + + So the gain in access cost per row will be + cost_of_fetching_1_row * (1 - s) - + cost_of_fetching_1_key_tuple * (1 - s) - + cost_of_1_lookup_into_filter + = + (cost_of_fetching_1_row - cost_of_fetching_1_key_tuple) * (1 - s) + - cost_of_1_lookup_into_filter + + Here we have: + cost_of_fetching_1_row = tmp/rows + cost_of_fetching_1_key_tuple = keyread_tmp/rows + + Note that access_cost_factor may be greater than 1.0. In this case + we still can expect a gain of using rowid filter due to smaller number + of checks for conditions pushed to the joined table. 
+ */ + double rows_access_cost= MY_MIN(rows, s->worst_seeks); + double access_cost_factor= MY_MIN((rows_access_cost - keyread_tmp) / + rows, 1.0); filter= table->best_range_rowid_filter_for_partial_join(start_key->key, rows, access_cost_factor); @@ -8029,8 +8068,11 @@ best_access_path(JOIN *join, if ( s->quick->get_type() == QUICK_SELECT_I::QS_TYPE_RANGE) { double rows= record_count * s->found_records; - double access_cost_factor= MY_MIN(tmp / rows, 1.0); uint key_no= s->quick->index; + + /* See the comment concerning using rowid filter for with ref access */ + keyread_tmp= s->table->quick_index_only_costs[key_no]; + double access_cost_factor= MY_MIN((rows - keyread_tmp) / rows, 1.0); filter= s->table->best_range_rowid_filter_for_partial_join(key_no, rows, access_cost_factor); @@ -18810,6 +18852,7 @@ create_tmp_table(THD *thd, TMP_TABLE_PARAM *param, List &fields, delete table->file; goto err; } + table->file->set_table(table); if (!using_unique_constraint) reclength+= group_null_items; // null flag is stored separately @@ -20651,6 +20694,8 @@ sub_select(JOIN *join,JOIN_TAB *join_tab,bool end_of_records) DBUG_RETURN(NESTED_LOOP_ERROR); join_tab->build_range_rowid_filter_if_needed(); + if (join_tab->rowid_filter && join_tab->rowid_filter->is_empty()) + rc= NESTED_LOOP_NO_MORE_ROWS; join->return_tab= join_tab; -- cgit v1.2.1 From e910dff81ebaa84d0028705d20a40abe8f779afd Mon Sep 17 00:00:00 2001 From: Sergei Golubchik Date: Tue, 25 Oct 2022 21:21:19 +0200 Subject: MDEV-26161 crash in Gis_point::calculate_haversine return an error on invalid gis data --- mysql-test/main/gis.result | 5 +++++ mysql-test/main/gis.test | 5 +++++ sql/item_geofunc.cc | 20 +++++++++++--------- sql/spatial.cc | 9 ++++++--- 4 files changed, 27 insertions(+), 12 deletions(-) diff --git a/mysql-test/main/gis.result b/mysql-test/main/gis.result index bfe1d3f40a5..3d8c64b0ce8 100644 --- a/mysql-test/main/gis.result +++ b/mysql-test/main/gis.result @@ -4977,5 +4977,10 @@ ERROR HY000: Illegal 
parameter data type geometry for operation 'is_free_lock' SELECT IS_USED_LOCK(POINT(1,1)); ERROR HY000: Illegal parameter data type geometry for operation 'is_used_lock' # +# MDEV-26161 crash in Gis_point::calculate_haversine +# +select st_distance_sphere(x'01030000000400000004000000000000', multipoint(point(124,204)), 10); +ERROR 22003: Cannot get geometry object from data you send to the GEOMETRY field +# # End of 10.3 tests # diff --git a/mysql-test/main/gis.test b/mysql-test/main/gis.test index c7bdb366124..1d202e9be08 100644 --- a/mysql-test/main/gis.test +++ b/mysql-test/main/gis.test @@ -3090,6 +3090,11 @@ SELECT IS_FREE_LOCK(POINT(1,1)); --error ER_ILLEGAL_PARAMETER_DATA_TYPE_FOR_OPERATION SELECT IS_USED_LOCK(POINT(1,1)); +--echo # +--echo # MDEV-26161 crash in Gis_point::calculate_haversine +--echo # +--error ER_CANT_CREATE_GEOMETRY_OBJECT +select st_distance_sphere(x'01030000000400000004000000000000', multipoint(point(124,204)), 10); --echo # --echo # End of 10.3 tests diff --git a/sql/item_geofunc.cc b/sql/item_geofunc.cc index b40105aaf36..87c736ab94b 100644 --- a/sql/item_geofunc.cc +++ b/sql/item_geofunc.cc @@ -2596,7 +2596,7 @@ double Item_func_sphere_distance::spherical_distance_points(Geometry *g1, double res= 0.0; // Length for the single point (25 Bytes) uint32 len= SRID_SIZE + POINT_DATA_SIZE + WKB_HEADER_SIZE; - int error= 0; + int err_hv= 0, err_sph= 0; switch (g2->get_class_info()->m_type_id) { @@ -2606,21 +2606,21 @@ double Item_func_sphere_distance::spherical_distance_points(Geometry *g1, // Optimization for point-point case if (g1->get_class_info()->m_type_id == Geometry::wkb_point) { - res= g2p->calculate_haversine(g1, r, &error); + res= g2p->calculate_haversine(g1, r, &err_hv); } else { // Optimization for single point in Multipoint if (g1->get_data_size() == len) { - res= g2p->calculate_haversine(g1, r, &error); + res= g2p->calculate_haversine(g1, r, &err_hv); } else { // There are multipoints in g1 // g1 is MultiPoint and calculate 
MP.sphericaldistance from g2 Point if (g1->get_data_size() != GET_SIZE_ERROR) - g2p->spherical_distance_multipoints(g1, r, &res, &error); + err_sph= g2p->spherical_distance_multipoints(g1, r, &res, &err_hv); } } break; @@ -2634,20 +2634,20 @@ double Item_func_sphere_distance::spherical_distance_points(Geometry *g1, // Optimization for single point in Multipoint g2 if (g2->get_data_size() == len) { - res= g1p->calculate_haversine(g2, r, &error); + res= g1p->calculate_haversine(g2, r, &err_hv); } else { if (g2->get_data_size() != GET_SIZE_ERROR) // g1 is a point (casted to multi_point) and g2 multipoint - g1p->spherical_distance_multipoints(g2, r, &res, &error); + err_sph= g1p->spherical_distance_multipoints(g2, r, &res, &err_hv); } } else { Gis_multi_point *g1mp= static_cast(g1); // Multipoints in g1 and g2 - no optimization - g1mp->spherical_distance_multipoints(g2, r, &res, &error); + err_sph= g1mp->spherical_distance_multipoints(g2, r, &res, &err_hv); } break; @@ -2656,12 +2656,14 @@ double Item_func_sphere_distance::spherical_distance_points(Geometry *g1, break; } - if (error > 0) + if (err_hv > 0) my_error(ER_STD_OUT_OF_RANGE_ERROR, MYF(0), "Longitude should be [-180,180]", "ST_Distance_Sphere"); - else if(error < 0) + else if(err_hv < 0) my_error(ER_STD_OUT_OF_RANGE_ERROR, MYF(0), "Latitude should be [-90,90]", "ST_Distance_Sphere"); + else if (err_sph) + my_error(ER_CANT_CREATE_GEOMETRY_OBJECT, MYF(0)); return res; } diff --git a/sql/spatial.cc b/sql/spatial.cc index 53e8c4c8bdd..6e044a67161 100644 --- a/sql/spatial.cc +++ b/sql/spatial.cc @@ -1151,7 +1151,8 @@ int Gis_point::spherical_distance_multipoints(Geometry *g, const double r, POINT_DATA_SIZE*(i-1), POINT_DATA_SIZE); s[len-1]= '\0'; temp= Geometry::construct(&buff_temp, s, len); - DBUG_ASSERT(temp); + if (!temp) + return 1; temp_res= this->calculate_haversine(temp, r, err); if (res > temp_res) res= temp_res; @@ -2335,7 +2336,8 @@ int Gis_multi_point::spherical_distance_multipoints(Geometry *g, const 
double r, POINT_DATA_SIZE*(i-1), POINT_DATA_SIZE); s[len-1]= '\0'; temp= Geometry::construct(&buff_temp, s, len); - DBUG_ASSERT(temp); + if (!temp) + return 1; // Optimization for single Multipoint if (num_of_points2 == 1) { @@ -2354,7 +2356,8 @@ int Gis_multi_point::spherical_distance_multipoints(Geometry *g, const double r, POINT_DATA_SIZE*(j-1), POINT_DATA_SIZE); s2[len-1]= '\0'; temp2= Geometry::construct(&buff_temp2, s2, len); - DBUG_ASSERT(temp2); + if (!temp2) + return 1; temp_res= static_cast(temp)->calculate_haversine(temp2, r, err); if (res > temp_res) res= temp_res; -- cgit v1.2.1 From 77951dd7102381385093209a1f2597d28e39900a Mon Sep 17 00:00:00 2001 From: Alexey Botchkov Date: Tue, 25 Oct 2022 23:48:54 +0400 Subject: MDEV-26161 crash in Gis_point::calculate_haversine More checks for bad geometry data added. --- mysql-test/main/gis.result | 4 ++++ mysql-test/main/gis.test | 4 ++++ sql/item_geofunc.cc | 4 ++-- sql/spatial.cc | 32 +++++++++++++++++++++----------- sql/spatial.h | 2 ++ 5 files changed, 33 insertions(+), 13 deletions(-) diff --git a/mysql-test/main/gis.result b/mysql-test/main/gis.result index 3d8c64b0ce8..358be520b06 100644 --- a/mysql-test/main/gis.result +++ b/mysql-test/main/gis.result @@ -4981,6 +4981,10 @@ ERROR HY000: Illegal parameter data type geometry for operation 'is_used_lock' # select st_distance_sphere(x'01030000000400000004000000000000', multipoint(point(124,204)), 10); ERROR 22003: Cannot get geometry object from data you send to the GEOMETRY field +select st_distance_sphere(x'010300000004000000040000', multipoint(point(124,204)), 10); +ERROR 22003: Cannot get geometry object from data you send to the GEOMETRY field +select st_distance_sphere(x'010300000001000000040000', multipoint(point(124,204)), 10); +ERROR 22003: Cannot get geometry object from data you send to the GEOMETRY field # # End of 10.3 tests # diff --git a/mysql-test/main/gis.test b/mysql-test/main/gis.test index 1d202e9be08..716fab9bfeb 100644 --- 
a/mysql-test/main/gis.test +++ b/mysql-test/main/gis.test @@ -3095,6 +3095,10 @@ SELECT IS_USED_LOCK(POINT(1,1)); --echo # --error ER_CANT_CREATE_GEOMETRY_OBJECT select st_distance_sphere(x'01030000000400000004000000000000', multipoint(point(124,204)), 10); +--error ER_CANT_CREATE_GEOMETRY_OBJECT +select st_distance_sphere(x'010300000004000000040000', multipoint(point(124,204)), 10); +--error ER_CANT_CREATE_GEOMETRY_OBJECT +select st_distance_sphere(x'010300000001000000040000', multipoint(point(124,204)), 10); --echo # --echo # End of 10.3 tests diff --git a/sql/item_geofunc.cc b/sql/item_geofunc.cc index 87c736ab94b..6e65366f2e0 100644 --- a/sql/item_geofunc.cc +++ b/sql/item_geofunc.cc @@ -2656,13 +2656,13 @@ double Item_func_sphere_distance::spherical_distance_points(Geometry *g1, break; } - if (err_hv > 0) + if (err_hv == 1) my_error(ER_STD_OUT_OF_RANGE_ERROR, MYF(0), "Longitude should be [-180,180]", "ST_Distance_Sphere"); else if(err_hv < 0) my_error(ER_STD_OUT_OF_RANGE_ERROR, MYF(0), "Latitude should be [-90,90]", "ST_Distance_Sphere"); - else if (err_sph) + else if (err_sph || err_hv == 2) my_error(ER_CANT_CREATE_GEOMETRY_OBJECT, MYF(0)); return res; } diff --git a/sql/spatial.cc b/sql/spatial.cc index 6e044a67161..9a30d346a1c 100644 --- a/sql/spatial.cc +++ b/sql/spatial.cc @@ -1071,10 +1071,9 @@ double Gis_point::calculate_haversine(const Geometry *g, point_temp[point_size-1]= '\0'; Geometry_buffer gbuff; Geometry *gg= Geometry::construct(&gbuff, point_temp, point_size-1); - DBUG_ASSERT(gg); - if (static_cast(gg)->get_xy_radian(&x2r, &y2r)) + if (!gg || static_cast(gg)->get_xy_radian(&x2r, &y2r)) { - DBUG_ASSERT(0); + *error= 2; return -1; } } @@ -1082,15 +1081,16 @@ double Gis_point::calculate_haversine(const Geometry *g, { if (static_cast(g)->get_xy_radian(&x2r, &y2r)) { - DBUG_ASSERT(0); + *error= 2; return -1; } } if (this->get_xy_radian(&x1r, &y1r)) { - DBUG_ASSERT(0); + *error= 2; return -1; } + // // Check boundary conditions: longitude[-180,180] 
if (!((x2r >= -M_PI && x2r <= M_PI) && (x1r >= -M_PI && x1r <= M_PI))) { @@ -1143,12 +1143,16 @@ int Gis_point::spherical_distance_multipoints(Geometry *g, const double r, { Geometry_buffer buff_temp; Geometry *temp; + const char *pt_ptr= g->get_data_ptr()+ + 4+WKB_HEADER_SIZE*i + POINT_DATA_SIZE*(i-1); // First 4 bytes are handled already, make sure to create a Point memset(s + 4, Geometry::wkb_point, 1); + if (g->no_data(pt_ptr, POINT_DATA_SIZE)) + return 1; + memcpy(s + 5, g->get_data_ptr() + 5, 4); - memcpy(s + 4 + WKB_HEADER_SIZE, g->get_data_ptr() + 4 + WKB_HEADER_SIZE*i +\ - POINT_DATA_SIZE*(i-1), POINT_DATA_SIZE); + memcpy(s + 4 + WKB_HEADER_SIZE, pt_ptr, POINT_DATA_SIZE); s[len-1]= '\0'; temp= Geometry::construct(&buff_temp, s, len); if (!temp) @@ -2329,11 +2333,14 @@ int Gis_multi_point::spherical_distance_multipoints(Geometry *g, const double r, Geometry *temp; double temp_res= 0.0; char s[len]; + const char *pt_ptr= get_data_ptr()+ + 4+WKB_HEADER_SIZE*i + POINT_DATA_SIZE*(i-1); // First 4 bytes are handled already, make sure to create a Point memset(s + 4, Geometry::wkb_point, 1); + if (no_data(pt_ptr, POINT_DATA_SIZE)) + return 1; memcpy(s + 5, this->get_data_ptr() + 5, 4); - memcpy(s + 4 + WKB_HEADER_SIZE, this->get_data_ptr() + 4 + WKB_HEADER_SIZE*i +\ - POINT_DATA_SIZE*(i-1), POINT_DATA_SIZE); + memcpy(s + 4 + WKB_HEADER_SIZE, pt_ptr, POINT_DATA_SIZE); s[len-1]= '\0'; temp= Geometry::construct(&buff_temp, s, len); if (!temp) @@ -2349,11 +2356,14 @@ int Gis_multi_point::spherical_distance_multipoints(Geometry *g, const double r, Geometry_buffer buff_temp2; Geometry *temp2; char s2[len]; + const char *pt_ptr= g->get_data_ptr()+ + 4+WKB_HEADER_SIZE*j + POINT_DATA_SIZE*(j-1); // First 4 bytes are handled already, make sure to create a Point memset(s2 + 4, Geometry::wkb_point, 1); + if (g->no_data(pt_ptr, POINT_DATA_SIZE)) + return 1; memcpy(s2 + 5, g->get_data_ptr() + 5, 4); - memcpy(s2 + 4 + WKB_HEADER_SIZE, g->get_data_ptr() + 4 + WKB_HEADER_SIZE*j +\ 
- POINT_DATA_SIZE*(j-1), POINT_DATA_SIZE); + memcpy(s2 + 4 + WKB_HEADER_SIZE, pt_ptr, POINT_DATA_SIZE); s2[len-1]= '\0'; temp2= Geometry::construct(&buff_temp2, s2, len); if (!temp2) diff --git a/sql/spatial.h b/sql/spatial.h index 0c00482c09b..1a69b32bb1c 100644 --- a/sql/spatial.h +++ b/sql/spatial.h @@ -354,6 +354,7 @@ protected: const char *get_mbr_for_points(MBR *mbr, const char *data, uint offset) const; +public: /** Check if there're enough data remaining as requested @@ -384,6 +385,7 @@ protected: (expected_points > ((m_data_end - data) / (POINT_DATA_SIZE + extra_point_space)))); } +protected: const char *m_data; const char *m_data_end; }; -- cgit v1.2.1 From 5f296f3a181eb63b6112153c6d4f9186180e6c50 Mon Sep 17 00:00:00 2001 From: Oleg Smirnov Date: Tue, 25 Oct 2022 19:30:42 +0700 Subject: MDEV-29640 FederatedX does not properly handle pushdown in case of difference in local and remote table names FederatedX table may refer to a table with a different name on the remote server: test> CREATE TABLE t2 (...) ENGINE="FEDERATEDX" CONNECTION="mysql://user:pass@192.168.1.111:9308/federatedx/t1"; test> select * from t2 where ...; This could cause an issue with federated_pushdown=1, because FederatedX pushes the query (or derived table's) text to the remote server. The remote server will try to read from table t2 (while it should read from t1). Solution: do not allow pushing down queries with tables that have different db_name.table name on the local and remote server. This patch also fixes: MDEV-29863 Server crashes in federatedx_txn::acquire after select from the FederatedX table with partitions Solution: disallow pushdown when partitioned FederatedX tables are used. 
--- .../federated/federatedx_create_handlers.result | 49 ++++++++++++ .../federated/federatedx_create_handlers.test | 48 ++++++++++++ storage/federatedx/federatedx_pushdown.cc | 87 ++++++++++++++++++++-- storage/federatedx/ha_federatedx.cc | 2 +- storage/federatedx/ha_federatedx.h | 1 + 5 files changed, 178 insertions(+), 9 deletions(-) diff --git a/mysql-test/suite/federated/federatedx_create_handlers.result b/mysql-test/suite/federated/federatedx_create_handlers.result index b115cc73b87..29ce2c4348b 100644 --- a/mysql-test/suite/federated/federatedx_create_handlers.result +++ b/mysql-test/suite/federated/federatedx_create_handlers.result @@ -420,6 +420,55 @@ SELECT * FROM (SELECT * FROM federated.t1 LIMIT 70000) dt; SELECT COUNT(DISTINCT a) FROM federated.t2; COUNT(DISTINCT a) 70000 +# +# MDEV-29640 FederatedX does not properly handle pushdown +# in case of difference in local and remote table names +# +connection master; +# Use tables from the previous test. Make sure pushdown works: +EXPLAIN SELECT COUNT(DISTINCT a) FROM federated.t2; +id select_type table type possible_keys key key_len ref rows Extra +1 PUSHED SELECT NULL NULL NULL NULL NULL NULL NULL NULL +SELECT COUNT(DISTINCT a) FROM federated.t2; +COUNT(DISTINCT a) +70000 +# Link remote table `federated.t1` with the local table named `t1_local` +CREATE TABLE federated.t1_local ENGINE="FEDERATED" +CONNECTION='mysql://root@127.0.0.1:SLAVE_PORT/federated/t1'; +# No pushdown here due to table names mismatch, retrieve data as usual: +EXPLAIN SELECT COUNT(DISTINCT a) FROM federated.t1_local; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1_local ALL NULL NULL NULL NULL 70000 +SELECT COUNT(DISTINCT a) FROM federated.t1_local; +COUNT(DISTINCT a) +70000 +# +# MDEV-29863 Server crashes in federatedx_txn::acquire after select from +# the Federated table with partitions and federated_pushdown=1 +# in case of difference in local and remote table names +# +connection slave; +CREATE TABLE 
federated.t3 (a INT); +INSERT INTO federated.t3 VALUES (1),(2),(3); +CREATE TABLE federated.t4 (a INT); +connection master; +CREATE SERVER fedlink FOREIGN DATA WRAPPER mysql +OPTIONS (USER 'root', HOST '127.0.0.1', DATABASE 'federated', +PORT SLAVE_PORT); +CREATE TABLE federated.t3 (a INT) +ENGINE=FEDERATED +CONNECTION='mysql://root@127.0.0.1:$SLAVE_MYPORT/federated/t3' + PARTITION BY list (a) +(PARTITION p1 VALUES IN (1) CONNECTION='fedlink/t3', +PARTITION p2 VALUES IN (2) CONNECTION='fedlink/t4'); +EXPLAIN SELECT * FROM federated.t3; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t3 ALL NULL NULL NULL NULL 3 +SELECT * FROM federated.t3; +a +1 +2 +3 set global federated_pushdown=0; connection master; DROP TABLE IF EXISTS federated.t1; diff --git a/mysql-test/suite/federated/federatedx_create_handlers.test b/mysql-test/suite/federated/federatedx_create_handlers.test index 8863a057b47..2d6c2bc4197 100644 --- a/mysql-test/suite/federated/federatedx_create_handlers.test +++ b/mysql-test/suite/federated/federatedx_create_handlers.test @@ -1,6 +1,7 @@ --source have_federatedx.inc --source include/federated.inc --source include/no_valgrind_without_big.inc +--source include/have_partition.inc connection default; @@ -266,6 +267,53 @@ INSERT INTO federated.t2 SELECT * FROM (SELECT * FROM federated.t1 LIMIT 70000) dt; SELECT COUNT(DISTINCT a) FROM federated.t2; + +--echo # +--echo # MDEV-29640 FederatedX does not properly handle pushdown +--echo # in case of difference in local and remote table names +--echo # +connection master; +--echo # Use tables from the previous test. 
Make sure pushdown works: +EXPLAIN SELECT COUNT(DISTINCT a) FROM federated.t2; +SELECT COUNT(DISTINCT a) FROM federated.t2; + +--echo # Link remote table `federated.t1` with the local table named `t1_local` +--replace_result $SLAVE_MYPORT SLAVE_PORT +eval +CREATE TABLE federated.t1_local ENGINE="FEDERATED" +CONNECTION='mysql://root@127.0.0.1:$SLAVE_MYPORT/federated/t1'; + +--echo # No pushdown here due to table names mismatch, retrieve data as usual: +EXPLAIN SELECT COUNT(DISTINCT a) FROM federated.t1_local; +SELECT COUNT(DISTINCT a) FROM federated.t1_local; + + +--echo # +--echo # MDEV-29863 Server crashes in federatedx_txn::acquire after select from +--echo # the Federated table with partitions and federated_pushdown=1 +--echo # in case of difference in local and remote table names +--echo # +connection slave; +CREATE TABLE federated.t3 (a INT); +INSERT INTO federated.t3 VALUES (1),(2),(3); +CREATE TABLE federated.t4 (a INT); + +connection master; +--replace_result $SLAVE_MYPORT SLAVE_PORT +eval CREATE SERVER fedlink FOREIGN DATA WRAPPER mysql + OPTIONS (USER 'root', HOST '127.0.0.1', DATABASE 'federated', + PORT $SLAVE_MYPORT); + +CREATE TABLE federated.t3 (a INT) + ENGINE=FEDERATED + CONNECTION='mysql://root@127.0.0.1:$SLAVE_MYPORT/federated/t3' + PARTITION BY list (a) + (PARTITION p1 VALUES IN (1) CONNECTION='fedlink/t3', + PARTITION p2 VALUES IN (2) CONNECTION='fedlink/t4'); + +EXPLAIN SELECT * FROM federated.t3; +SELECT * FROM federated.t3; + set global federated_pushdown=0; source include/federated_cleanup.inc; diff --git a/storage/federatedx/federatedx_pushdown.cc b/storage/federatedx/federatedx_pushdown.cc index 15b0b0d3d4e..430bc961167 100644 --- a/storage/federatedx/federatedx_pushdown.cc +++ b/storage/federatedx/federatedx_pushdown.cc @@ -35,6 +35,64 @@ */ +/* + Check if table and database names are equal on local and remote servers + + SYNOPSIS + local_and_remote_names_match() + tbl_share Pointer to current table TABLE_SHARE structure + fshare Pointer 
to current table FEDERATEDX_SHARE structure + + DESCRIPTION + FederatedX table on the local server may refer to a table having another + name on the remote server. The remote table may even reside in a different + database. For example: + + -- Remote server + CREATE TABLE t1 (id int(32)); + + -- Local server + CREATE TABLE t2 ENGINE="FEDERATEDX" + CONNECTION="mysql://joe:joespass@192.168.1.111:9308/federatedx/t1"; + + It's not a problem while the federated_pushdown is disabled 'cause + the CONNECTION strings are being parsed for every table during + the execution, so the table names are translated from local to remote. + But in case of the federated_pushdown the whole query is pushed down + to the engine without any translation, so the remote server may try + to select data from a nonexistent table (for example, query + "SELECT * FROM t2" will try to retrieve data from nonexistent "t2"). + + This function checks whether there is a mismatch between local and remote + table/database names + + RETURN VALUE + false names are equal + true names are not equal + +*/ +bool local_and_remote_names_mismatch(const TABLE_SHARE *tbl_share, + const FEDERATEDX_SHARE *fshare) +{ + + if (lower_case_table_names) + { + if (strcasecmp(fshare->database, tbl_share->db.str) != 0) + return true; + } + else + { + if (strncmp(fshare->database, tbl_share->db.str, tbl_share->db.length) != 0) + return true; + } + + return my_strnncoll(system_charset_info, (uchar *) fshare->table_name, + strlen(fshare->table_name), + (uchar *) tbl_share->table_name.str, + tbl_share->table_name.length) != 0; +} + + static derived_handler* create_federatedx_derived_handler(THD* thd, TABLE_LIST *derived) { @@ -42,7 +100,6 @@ create_federatedx_derived_handler(THD* thd, TABLE_LIST *derived) return 0; ha_federatedx_derived_handler* handler = NULL; - handlerton *ht= 0; SELECT_LEX_UNIT *unit= derived->derived; @@ -54,9 +111,16 @@ create_federatedx_derived_handler(THD* thd, TABLE_LIST *derived) { if (!tbl->table) return 
0; - if (!ht) - ht= tbl->table->file->partition_ht(); - else if (ht != tbl->table->file->partition_ht()) + /* + We intentionally don't support partitioned federatedx tables here, so + use file->ht and not file->partition_ht(). + */ + if (tbl->table->file->ht != federatedx_hton) + return 0; + + const FEDERATEDX_SHARE *fshare= + ((ha_federatedx*)tbl->table->file)->get_federatedx_share(); + if (local_and_remote_names_mismatch(tbl->table->s, fshare)) return 0; } } @@ -170,15 +234,22 @@ create_federatedx_select_handler(THD* thd, SELECT_LEX *sel) return 0; ha_federatedx_select_handler* handler = NULL; - handlerton *ht= 0; for (TABLE_LIST *tbl= thd->lex->query_tables; tbl; tbl= tbl->next_global) { if (!tbl->table) return 0; - if (!ht) - ht= tbl->table->file->partition_ht(); - else if (ht != tbl->table->file->partition_ht()) + /* + We intentionally don't support partitioned federatedx tables here, so + use file->ht and not file->partition_ht(). + */ + if (tbl->table->file->ht != federatedx_hton) + return 0; + + const FEDERATEDX_SHARE *fshare= + ((ha_federatedx*)tbl->table->file)->get_federatedx_share(); + + if (local_and_remote_names_mismatch(tbl->table->s, fshare)) return 0; } diff --git a/storage/federatedx/ha_federatedx.cc b/storage/federatedx/ha_federatedx.cc index 085422e6016..e329f5df177 100644 --- a/storage/federatedx/ha_federatedx.cc +++ b/storage/federatedx/ha_federatedx.cc @@ -609,7 +609,7 @@ error: parse_url() mem_root MEM_ROOT pointer for memory allocation share pointer to FEDERATEDX share - table pointer to current TABLE class + table_s pointer to current TABLE_SHARE class table_create_flag determines what error to throw DESCRIPTION diff --git a/storage/federatedx/ha_federatedx.h b/storage/federatedx/ha_federatedx.h index c9d80dd8282..ce09fb2253b 100644 --- a/storage/federatedx/ha_federatedx.h +++ b/storage/federatedx/ha_federatedx.h @@ -463,6 +463,7 @@ public: int reset(void); int free_result(void); + const FEDERATEDX_SHARE *get_federatedx_share() const { 
return share; } friend class ha_federatedx_derived_handler; friend class ha_federatedx_select_handler; }; -- cgit v1.2.1 From 055cb3fcd1aec2a05908a312a2ae4dfd3a0d4d81 Mon Sep 17 00:00:00 2001 From: Brad Smith Date: Sat, 22 Oct 2022 23:08:06 -0400 Subject: Rocksdb: Add initial OpenBSD support --- storage/rocksdb/CMakeLists.txt | 4 ++-- storage/rocksdb/ha_rocksdb.cc | 10 +++++----- 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/storage/rocksdb/CMakeLists.txt b/storage/rocksdb/CMakeLists.txt index e0c1003b461..9b7058a5348 100644 --- a/storage/rocksdb/CMakeLists.txt +++ b/storage/rocksdb/CMakeLists.txt @@ -188,7 +188,7 @@ ADD_DEPENDENCIES(rocksdb_aux_lib GenError) # MARIAROCKS-TODO: how to properly depend on -lrt ? TARGET_LINK_LIBRARIES(rocksdb_aux_lib rocksdblib ${ZLIB_LIBRARY}) -if (UNIX AND NOT APPLE) +if (UNIX AND NOT APPLE AND NOT CMAKE_SYSTEM_NAME STREQUAL "OpenBSD") TARGET_LINK_LIBRARIES(rocksdb_aux_lib -lrt) endif() TARGET_LINK_LIBRARIES(rocksdb_aux_lib ${ATOMIC_EXTRA_LIBS}) @@ -259,7 +259,7 @@ ENDIF() # ADD_SUBDIRECTORY(unittest) #ENDIF() -if (UNIX AND NOT APPLE) +if (UNIX AND NOT APPLE AND NOT CMAKE_SYSTEM_NAME STREQUAL "OpenBSD") SET(rocksdb_static_libs ${rocksdb_static_libs} "-lrt") endif() diff --git a/storage/rocksdb/ha_rocksdb.cc b/storage/rocksdb/ha_rocksdb.cc index c38964680b1..cd366a12462 100644 --- a/storage/rocksdb/ha_rocksdb.cc +++ b/storage/rocksdb/ha_rocksdb.cc @@ -260,7 +260,7 @@ Rdb_cf_manager cf_manager; Rdb_ddl_manager ddl_manager; Rdb_binlog_manager binlog_manager; -#if !defined(_WIN32) && !defined(__APPLE__) +#if !defined(_WIN32) && !defined(__APPLE__) && !defined(__OpenBSD__) Rdb_io_watchdog *io_watchdog = nullptr; #endif /** @@ -844,7 +844,7 @@ static void rocksdb_set_io_write_timeout( void *const var_ptr MY_ATTRIBUTE((__unused__)), const void *const save) { DBUG_ASSERT(save != nullptr); DBUG_ASSERT(rdb != nullptr); -#if !defined(_WIN32) && !defined(__APPLE__) +#if !defined(_WIN32) && !defined(__APPLE__) && 
!defined(__OpenBSD__) DBUG_ASSERT(io_watchdog != nullptr); #endif @@ -853,7 +853,7 @@ static void rocksdb_set_io_write_timeout( const uint32_t new_val = *static_cast(save); rocksdb_io_write_timeout_secs = new_val; -#if !defined(_WIN32) && !defined(__APPLE__) +#if !defined(_WIN32) && !defined(__APPLE__) && !defined(__OpenBSD__) io_watchdog->reset_timeout(rocksdb_io_write_timeout_secs); #endif RDB_MUTEX_UNLOCK_CHECK(rdb_sysvars_mutex); @@ -5768,7 +5768,7 @@ static int rocksdb_init_func(void *const p) { directories.push_back(myrocks::rocksdb_wal_dir); } -#if !defined(_WIN32) && !defined(__APPLE__) +#if !defined(_WIN32) && !defined(__APPLE__) && !defined(__OpenBSD__) io_watchdog = new Rdb_io_watchdog(std::move(directories)); io_watchdog->reset_timeout(rocksdb_io_write_timeout_secs); #endif @@ -5875,7 +5875,7 @@ static int rocksdb_done_func(void *const p) { delete commit_latency_stats; commit_latency_stats = nullptr; -#if !defined(_WIN32) && !defined(__APPLE__) +#if !defined(_WIN32) && !defined(__APPLE__) && !defined(__OpenBSD__) delete io_watchdog; io_watchdog = nullptr; #endif -- cgit v1.2.1 From f90d9c347fdac35720f874070797559ede066598 Mon Sep 17 00:00:00 2001 From: Alexander Barkov Date: Tue, 4 Oct 2022 11:44:14 +0400 Subject: MDEV-28822 Table from older version requires table rebuild when adding column to table with multi-column index This problem was earlier fixed in 10.4 by the patch for MDEV-29481. Adding MTR tests only. 
--- .../mysql_upgrade/mdev28822_100427_innodb.frm | Bin 0 -> 1443 bytes mysql-test/suite/innodb/r/instant_alter.result | 11 +++++++++++ mysql-test/suite/innodb/t/instant_alter.test | 18 ++++++++++++++++++ 3 files changed, 29 insertions(+) create mode 100644 mysql-test/std_data/mysql_upgrade/mdev28822_100427_innodb.frm diff --git a/mysql-test/std_data/mysql_upgrade/mdev28822_100427_innodb.frm b/mysql-test/std_data/mysql_upgrade/mdev28822_100427_innodb.frm new file mode 100644 index 00000000000..d9cc5e6cc69 Binary files /dev/null and b/mysql-test/std_data/mysql_upgrade/mdev28822_100427_innodb.frm differ diff --git a/mysql-test/suite/innodb/r/instant_alter.result b/mysql-test/suite/innodb/r/instant_alter.result index 6744f0fe061..4a67e04205b 100644 --- a/mysql-test/suite/innodb/r/instant_alter.result +++ b/mysql-test/suite/innodb/r/instant_alter.result @@ -2929,3 +2929,14 @@ t1 CREATE TABLE `t1` ( KEY `f2` (`f2`) ) ENGINE=InnoDB DEFAULT CHARSET=latin1 COLLATE=latin1_swedish_ci DROP TABLE t1, t2; +# +# MDEV-28822 Table from older version requires table rebuild when adding column to table with multi-column index +# +CREATE TABLE mdev28822_100427_innodb ( +id int not null primary key, +msg varchar(10), +index(id, msg) +) ENGINE=InnoDB ROW_FORMAT=DYNAMIC; +FLUSH TABLES; +ALTER TABLE mdev28822_100427_innodb ADD i1 INTEGER, ALGORITHM=INSTANT; +DROP TABLE mdev28822_100427_innodb; diff --git a/mysql-test/suite/innodb/t/instant_alter.test b/mysql-test/suite/innodb/t/instant_alter.test index 22815798f69..8e333e3bb72 100644 --- a/mysql-test/suite/innodb/t/instant_alter.test +++ b/mysql-test/suite/innodb/t/instant_alter.test @@ -1,6 +1,8 @@ --source include/innodb_page_size.inc --source include/have_sequence.inc +let $datadir=`select @@datadir`; + --echo # --echo # MDEV-11369: Instant ADD COLUMN for InnoDB --echo # @@ -949,3 +951,19 @@ ALTER TABLE t1 DROP COLUMN f3, ADD FOREIGN KEY fk (f1) ALTER TABLE t1 DROP COLUMN f5; SHOW CREATE TABLE t1; DROP TABLE t1, t2; + + +--echo # 
+--echo # MDEV-28822 Table from older version requires table rebuild when adding column to table with multi-column index +--echo # + +CREATE TABLE mdev28822_100427_innodb ( + id int not null primary key, + msg varchar(10), + index(id, msg) +) ENGINE=InnoDB ROW_FORMAT=DYNAMIC; +FLUSH TABLES; +remove_file $datadir/test/mdev28822_100427_innodb.frm; +copy_file std_data/mysql_upgrade/mdev28822_100427_innodb.frm $datadir/test/mdev28822_100427_innodb.frm; +ALTER TABLE mdev28822_100427_innodb ADD i1 INTEGER, ALGORITHM=INSTANT; +DROP TABLE mdev28822_100427_innodb; -- cgit v1.2.1 From fa5f26b422531c449e6a0c464865226af3b24636 Mon Sep 17 00:00:00 2001 From: Andrew Hutchings Date: Thu, 22 Sep 2022 15:13:22 +0100 Subject: MDEV-29578 Fix CONNECT build warnings Fix a couple of build warnings that fire with CONNECT engine. --- storage/connect/bsonudf.cpp | 8 ++++---- storage/connect/filamdbf.cpp | 2 +- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/storage/connect/bsonudf.cpp b/storage/connect/bsonudf.cpp index e93f40eb509..2d9132e20ed 100644 --- a/storage/connect/bsonudf.cpp +++ b/storage/connect/bsonudf.cpp @@ -3571,14 +3571,14 @@ char *bson_item_merge(UDF_INIT *initid, UDF_ARGS *args, char *result, if (!CheckMemory(g, initid, args, 2, false, false, true)) { JTYP type; BJNX bnx(g); - PBVAL jvp, top = NULL; + PBVAL jvp = NULL, top = NULL; PBVAL jsp[2] = {NULL, NULL}; for (int i = 0; i < 2; i++) { jvp = bnx.MakeValue(args, i, true); if (i) { - if (jvp->Type != type) { + if (jvp && (jvp->Type != type)) { PUSH_WARNING("Argument types mismatch"); goto fin; } // endif type @@ -5722,14 +5722,14 @@ char *bbin_item_merge(UDF_INIT *initid, UDF_ARGS *args, char *result, if (!CheckMemory(g, initid, args, 2, false, false, true)) { JTYP type; BJNX bnx(g); - PBVAL jvp, top = NULL; + PBVAL jvp = NULL, top = NULL; PBVAL jsp[2] = {NULL, NULL}; for (int i = 0; i < 2; i++) { if (i) { jvp = bnx.MakeValue(args, i, true); - if (jvp->Type != type) { + if (jvp && (jvp->Type != type)) { 
PUSH_WARNING("Argument types mismatch"); goto fin; } // endif type diff --git a/storage/connect/filamdbf.cpp b/storage/connect/filamdbf.cpp index 4c3ec10062b..a4c2232b1bf 100644 --- a/storage/connect/filamdbf.cpp +++ b/storage/connect/filamdbf.cpp @@ -244,7 +244,7 @@ PQRYRES DBFColumns(PGLOBAL g, PCSZ dp, PCSZ fn, PTOS topt, bool info) int rc, type, len, field, fields; bool bad, mul; PCSZ target, pwd; - DBFHEADER mainhead, *hp; + DBFHEADER mainhead, *hp = NULL; DESCRIPTOR thisfield, *tfp; FILE *infile = NULL; UNZIPUTL *zutp = NULL; -- cgit v1.2.1 From 278fbe61d847337712c0f802cc8e0db85bf58bd7 Mon Sep 17 00:00:00 2001 From: Oleksandr Byelkin Date: Wed, 26 Oct 2022 10:14:34 +0200 Subject: Add skipped changes to oracle mode parser. --- sql/sql_yacc_ora.yy | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/sql/sql_yacc_ora.yy b/sql/sql_yacc_ora.yy index bcb6c982da3..ec8e4f4c946 100644 --- a/sql/sql_yacc_ora.yy +++ b/sql/sql_yacc_ora.yy @@ -5954,10 +5954,11 @@ opt_part_option: opt_versioning_rotation: /* empty */ {} - | INTERVAL_SYM expr interval opt_versioning_interval_start + | { Lex->expr_allows_subselect= false; } + INTERVAL_SYM expr interval opt_versioning_interval_start { partition_info *part_info= Lex->part_info; - if (unlikely(part_info->vers_set_interval(thd, $2, $3, $4))) + if (unlikely(part_info->vers_set_interval(thd, $3, $4, $5))) MYSQL_YYABORT; } | LIMIT ulonglong_num @@ -12866,11 +12867,16 @@ order_clause: */ DBUG_ASSERT(sel->master_unit()->fake_select_lex); lex->current_select= sel->master_unit()->fake_select_lex; + lex->push_context(&sel->master_unit()->fake_select_lex->context, thd->mem_root); } } order_list { - + if (Lex->current_select == + Lex->current_select->master_unit()->fake_select_lex) + { + Lex->pop_context(); + } } ; -- cgit v1.2.1 From b4a58581fdae689ec3facbe9c273a4789a869542 Mon Sep 17 00:00:00 2001 From: Sergei Golubchik Date: Wed, 26 Oct 2022 17:18:19 +0200 Subject: columnstore 5.6.8-1 --- 
storage/columnstore/columnstore | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/storage/columnstore/columnstore b/storage/columnstore/columnstore index bc98115df18..f91c12c9df0 160000 --- a/storage/columnstore/columnstore +++ b/storage/columnstore/columnstore @@ -1 +1 @@ -Subproject commit bc98115df183c3c47fa3d733fd29e30c8159e332 +Subproject commit f91c12c9df0597561b0d7f5cc6876e246d24763b -- cgit v1.2.1 From 1a3859fff09986a8ffc7b1b466ef565ce2b0bf42 Mon Sep 17 00:00:00 2001 From: Sergei Golubchik Date: Tue, 1 Nov 2022 13:22:34 +0100 Subject: MDEV-29924 Assertion `(((nr) % (1LL << 24)) % (int) log_10_int[6 - dec]) == 0' failed in my_time_packed_to_binary on SELECT when using TIME field when assigning the cached item to the Item_cache for the first time make sure to use Item_cache::setup(), not Item_cache::store(). Because the former copies the metadata (and allocates memory, in case of Item_cache_row), and Item_cache::decimal must be set for comparisons to work correctly. 
--- mysql-test/main/type_time_hires.result | 13 ++++++++++++- mysql-test/main/type_time_hires.test | 15 +++++++++++++-- sql/sql_class.cc | 4 +++- 3 files changed, 28 insertions(+), 4 deletions(-) diff --git a/mysql-test/main/type_time_hires.result b/mysql-test/main/type_time_hires.result index 5fa9d11591a..cf7dce59f1f 100644 --- a/mysql-test/main/type_time_hires.result +++ b/mysql-test/main/type_time_hires.result @@ -360,7 +360,7 @@ select cast(1e-6 as time(6)); cast(1e-6 as time(6)) 00:00:00.000001 # -# Start of 10.4 tests +# End of 5.5 tests # # # MDEV-20397 Support TIMESTAMP, DATETIME, TIME in ROUND() and TRUNCATE() @@ -907,3 +907,14 @@ a CEILING(a) CEILING_SP(a) CEILING(a)=CEILING_SP(a) DROP FUNCTION FLOOR_SP; DROP FUNCTION CEILING_SP; DROP TABLE t1; +# +# MDEV-29924 Assertion `(((nr) % (1LL << 24)) % (int) log_10_int[6 - dec]) == 0' failed in my_time_packed_to_binary on SELECT when using TIME field +# +create table t1 (c decimal(3,1),d time(6)); +insert into t1 values (null,0.1),(null,0.1), (0.1,0.2); +select c from t1 where c &items) { cache= val_item->get_cache(thd); set_op(val_item->type_handler()); + cache->setup(thd, val_item); } - cache->store(val_item); + else + cache->store(val_item); it->store(0, cache); } it->assigned(1); -- cgit v1.2.1 From 3303748fd13399ba39ce4d646153d086c5a09445 Mon Sep 17 00:00:00 2001 From: Oleksandr Byelkin Date: Wed, 2 Nov 2022 12:49:24 +0100 Subject: MDEV-29926: ASAN heap-use-after-free in Explain_query::~Explain_query Make sure that EXPLAIN object allocated on runtime arena. 
--- mysql-test/main/subselect.result | 11 +++++++++++ mysql-test/main/subselect.test | 14 ++++++++++++++ mysql-test/main/subselect_no_exists_to_in.result | 11 +++++++++++ mysql-test/main/subselect_no_mat.result | 11 +++++++++++ mysql-test/main/subselect_no_opts.result | 11 +++++++++++ mysql-test/main/subselect_no_scache.result | 11 +++++++++++ mysql-test/main/subselect_no_semijoin.result | 11 +++++++++++ sql/sql_select.cc | 10 +++++++++- 8 files changed, 89 insertions(+), 1 deletion(-) diff --git a/mysql-test/main/subselect.result b/mysql-test/main/subselect.result index 1d49e178c68..281ac022318 100644 --- a/mysql-test/main/subselect.result +++ b/mysql-test/main/subselect.result @@ -7369,3 +7369,14 @@ a 1 drop table t1,t2,t3; # End of 10.2 tests +# +# MDEV-29926: ASAN heap-use-after-free in Explain_query::~Explain_query +# +CREATE TABLE t (a VARCHAR(1)) CHARACTER SET utf8mb3; +EXECUTE IMMEDIATE "SELECT COUNT(*) FROM t WHERE a < (SELECT 'x')"; +COUNT(*) +0 +DROP TABLE t; +# +# End of 10.3 tests +# diff --git a/mysql-test/main/subselect.test b/mysql-test/main/subselect.test index 20358ed5d60..27092a60ca6 100644 --- a/mysql-test/main/subselect.test +++ b/mysql-test/main/subselect.test @@ -6308,3 +6308,17 @@ select a from t3 drop table t1,t2,t3; --echo # End of 10.2 tests + +--echo # +--echo # MDEV-29926: ASAN heap-use-after-free in Explain_query::~Explain_query +--echo # + +CREATE TABLE t (a VARCHAR(1)) CHARACTER SET utf8mb3; +EXECUTE IMMEDIATE "SELECT COUNT(*) FROM t WHERE a < (SELECT 'x')"; + +# Cleanup +DROP TABLE t; + +--echo # +--echo # End of 10.3 tests +--echo # diff --git a/mysql-test/main/subselect_no_exists_to_in.result b/mysql-test/main/subselect_no_exists_to_in.result index 737636359ba..7e694b52c85 100644 --- a/mysql-test/main/subselect_no_exists_to_in.result +++ b/mysql-test/main/subselect_no_exists_to_in.result @@ -7369,6 +7369,17 @@ a 1 drop table t1,t2,t3; # End of 10.2 tests +# +# MDEV-29926: ASAN heap-use-after-free in Explain_query::~Explain_query 
+# +CREATE TABLE t (a VARCHAR(1)) CHARACTER SET utf8mb3; +EXECUTE IMMEDIATE "SELECT COUNT(*) FROM t WHERE a < (SELECT 'x')"; +COUNT(*) +0 +DROP TABLE t; +# +# End of 10.3 tests +# set optimizer_switch=default; select @@optimizer_switch like '%exists_to_in=off%'; @@optimizer_switch like '%exists_to_in=off%' diff --git a/mysql-test/main/subselect_no_mat.result b/mysql-test/main/subselect_no_mat.result index 66586cf5f3a..fd3f234b4e0 100644 --- a/mysql-test/main/subselect_no_mat.result +++ b/mysql-test/main/subselect_no_mat.result @@ -7362,6 +7362,17 @@ a 1 drop table t1,t2,t3; # End of 10.2 tests +# +# MDEV-29926: ASAN heap-use-after-free in Explain_query::~Explain_query +# +CREATE TABLE t (a VARCHAR(1)) CHARACTER SET utf8mb3; +EXECUTE IMMEDIATE "SELECT COUNT(*) FROM t WHERE a < (SELECT 'x')"; +COUNT(*) +0 +DROP TABLE t; +# +# End of 10.3 tests +# set optimizer_switch=default; select @@optimizer_switch like '%materialization=on%'; @@optimizer_switch like '%materialization=on%' diff --git a/mysql-test/main/subselect_no_opts.result b/mysql-test/main/subselect_no_opts.result index f55978a591c..dc0e690b957 100644 --- a/mysql-test/main/subselect_no_opts.result +++ b/mysql-test/main/subselect_no_opts.result @@ -7360,4 +7360,15 @@ a 1 drop table t1,t2,t3; # End of 10.2 tests +# +# MDEV-29926: ASAN heap-use-after-free in Explain_query::~Explain_query +# +CREATE TABLE t (a VARCHAR(1)) CHARACTER SET utf8mb3; +EXECUTE IMMEDIATE "SELECT COUNT(*) FROM t WHERE a < (SELECT 'x')"; +COUNT(*) +0 +DROP TABLE t; +# +# End of 10.3 tests +# set @optimizer_switch_for_subselect_test=null; diff --git a/mysql-test/main/subselect_no_scache.result b/mysql-test/main/subselect_no_scache.result index 895a68338d8..6c8dbc40c3b 100644 --- a/mysql-test/main/subselect_no_scache.result +++ b/mysql-test/main/subselect_no_scache.result @@ -7375,6 +7375,17 @@ a 1 drop table t1,t2,t3; # End of 10.2 tests +# +# MDEV-29926: ASAN heap-use-after-free in Explain_query::~Explain_query +# +CREATE TABLE t (a 
VARCHAR(1)) CHARACTER SET utf8mb3; +EXECUTE IMMEDIATE "SELECT COUNT(*) FROM t WHERE a < (SELECT 'x')"; +COUNT(*) +0 +DROP TABLE t; +# +# End of 10.3 tests +# set optimizer_switch=default; select @@optimizer_switch like '%subquery_cache=on%'; @@optimizer_switch like '%subquery_cache=on%' diff --git a/mysql-test/main/subselect_no_semijoin.result b/mysql-test/main/subselect_no_semijoin.result index 8a0e6a6c325..1731e934c6c 100644 --- a/mysql-test/main/subselect_no_semijoin.result +++ b/mysql-test/main/subselect_no_semijoin.result @@ -7361,6 +7361,17 @@ a drop table t1,t2,t3; # End of 10.2 tests # +# MDEV-29926: ASAN heap-use-after-free in Explain_query::~Explain_query +# +CREATE TABLE t (a VARCHAR(1)) CHARACTER SET utf8mb3; +EXECUTE IMMEDIATE "SELECT COUNT(*) FROM t WHERE a < (SELECT 'x')"; +COUNT(*) +0 +DROP TABLE t; +# +# End of 10.3 tests +# +# # MDEV-19714: JOIN::pseudo_bits_cond is not visible in EXPLAIN FORMAT=JSON # CREATE TABLE t1 ( a INT ); diff --git a/sql/sql_select.cc b/sql/sql_select.cc index 70c0a80ba2a..0b330528452 100644 --- a/sql/sql_select.cc +++ b/sql/sql_select.cc @@ -1078,6 +1078,15 @@ JOIN::prepare(TABLE_LIST *tables_init, // simple check that we got usable conds dbug_print_item(conds); + /* + It is hack which force creating EXPLAIN object always on runt-time arena + (because very top JOIN::prepare executes always with runtime arena, but + constant subquery like (SELECT 'x') can be called with statement arena + during prepare phase of top SELECT). 
+ */ + if (!(thd->lex->context_analysis_only & CONTEXT_ANALYSIS_ONLY_PREPARE)) + create_explain_query_if_not_exists(thd->lex, thd->mem_root); + if (select_lex->handle_derived(thd->lex, DT_PREPARE)) DBUG_RETURN(-1); @@ -1521,7 +1530,6 @@ bool JOIN::build_explain() int JOIN::optimize() { int res= 0; - create_explain_query_if_not_exists(thd->lex, thd->mem_root); join_optimization_state init_state= optimization_state; if (optimization_state == JOIN::OPTIMIZATION_PHASE_1_DONE) res= optimize_stage2(); -- cgit v1.2.1