From b347396181018cedc946450cb49891f1a0aa4575 Mon Sep 17 00:00:00 2001
From: Sujatha
Date: Tue, 28 May 2019 14:20:39 +0530
Subject: MDEV-11094: Blackhole table updates on slave fail when row annotation
 is enabled

Problem:
=======
rpl_blackhole.test fails when executed with the following options
mysqld=--binlog_annotate_row_events=1, mysqld=--replicate_annotate_row_events=1

Test output:
------------
worker[1] Using MTR_BUILD_THREAD 300, with reserved ports 16000..16019
rpl.rpl_blackhole_bug 'mix'              [ pass ]    791
rpl.rpl_blackhole_bug 'row'              [ fail ]
Replicate_Wild_Ignore_Table
Last_Errno      1032
Last_Error      Could not execute Update_rows_v1 event on table test.t1;
Can't find record in 't1', Error_code: 1032; handler error
HA_ERR_END_OF_FILE; the event's master log master-bin.000001, end_log_pos 1510

Analysis:
=========
Enabling "replicate_annotate_row_events" on the slave tells it to write the
annotate rows events received from the master to its own binary log. The
received annotate events are applied after the Gtid event as shown below.
thd->query() will be set to the actual query received from the master,
through the annotate event.

The Annotate_rows event should not be deleted after the event is applied, as
thd->query will be used to generate a new Annotate_rows event while applying
the subsequent Rows events. After the last Rows event has been applied, the
saved Annotate_rows event (if any) will be deleted.

In the blackhole engine all DML operations are no-ops, as the engine does not
store any data; they simply return success without doing any operation. But
the existing code strictly expects thd->query() to be 'NULL' to identify that
row based replication is in use. This assumption fails when row annotations
are enabled, as the query is not 'NULL'. Hence various row based operations
like 'update', 'delete' and 'index lookup' fail when row annotations are
enabled.

Fix:
===
Extend the row based replication check to include row annotations as well,
i.e. either thd->query() is NULL, or thd->query() points to the query and row
annotations are in use.
---
 mysql-test/extra/rpl_tests/rpl_blackhole.test      |   2 +-
 .../extra/rpl_tests/rpl_blackhole_basic.test       |  97 +++++
 .../suite/rpl/r/rpl_blackhole_row_annotate.result  | 434 +++++++++++++++++++++
 mysql-test/suite/rpl/t/rpl_blackhole.test          |  77 +---
 .../rpl/t/rpl_blackhole_row_annotate-master.opt    |   1 +
 .../rpl/t/rpl_blackhole_row_annotate-slave.opt     |   1 +
 .../suite/rpl/t/rpl_blackhole_row_annotate.test    |  49 +++
 storage/blackhole/ha_blackhole.cc                  |  28 +-
 8 files changed, 606 insertions(+), 83 deletions(-)
 create mode 100644 mysql-test/extra/rpl_tests/rpl_blackhole_basic.test
 create mode 100644 mysql-test/suite/rpl/r/rpl_blackhole_row_annotate.result
 create mode 100644 mysql-test/suite/rpl/t/rpl_blackhole_row_annotate-master.opt
 create mode 100644 mysql-test/suite/rpl/t/rpl_blackhole_row_annotate-slave.opt
 create mode 100644 mysql-test/suite/rpl/t/rpl_blackhole_row_annotate.test

diff --git a/mysql-test/extra/rpl_tests/rpl_blackhole.test b/mysql-test/extra/rpl_tests/rpl_blackhole.test
index 1a0eeb3cf15..569a24e5252 100644
--- a/mysql-test/extra/rpl_tests/rpl_blackhole.test
+++ b/mysql-test/extra/rpl_tests/rpl_blackhole.test
@@ -11,7 +11,7 @@
 # executing statement. If difference is >0, then something was
 # written to the binary log on the slave.
-connection slave; +# On Connection Slave let $before = query_get_value("SHOW MASTER STATUS", Position, 1); --echo [on master] diff --git a/mysql-test/extra/rpl_tests/rpl_blackhole_basic.test b/mysql-test/extra/rpl_tests/rpl_blackhole_basic.test new file mode 100644 index 00000000000..f3fdc915080 --- /dev/null +++ b/mysql-test/extra/rpl_tests/rpl_blackhole_basic.test @@ -0,0 +1,97 @@ +# PURPOSE. Test that blackhole works with replication in all three +# modes: STATEMENT, MIXED, and ROW. +# +# METHOD. We start by creating a table on the master and then change +# the engine to use blackhole on the slave. +# +# After insert/update/delete of one or more rows, the test the +# proceeds to check that replication is running after replicating an +# change, that the blackhole engine does not contain anything (which +# is just a check that the correct engine is used), and that something +# is written to the binary log. +# +# Whe check INSERT, UPDATE, and DELETE statement for tables with no +# key (forcing a range search on the slave), primary keys (using a +# primary key lookup), and index/key with multiple matches (forcing an +# index search). + +# We start with no primary key +CREATE TABLE t1 (a INT, b INT, c INT); +CREATE TABLE t2 (a INT, b INT, c INT); + +sync_slave_with_master; +ALTER TABLE t1 ENGINE=BLACKHOLE; + +connection master; +INSERT INTO t2 VALUES (1,9,1), (2,9,2), (3,9,3), (4,9,4); +sync_slave_with_master; + +# Test insert, no primary key +let $statement = INSERT INTO t1 VALUES (1,1,1),(2,1,2),(3,1,3),(4,1,4); +source extra/rpl_tests/rpl_blackhole.test; + +# Test update, no primary key +let $statement = UPDATE t1 SET c = 2*c WHERE a % 2 = 0 AND b = 1; +source extra/rpl_tests/rpl_blackhole.test; + +# Test delete, no primary key +let $statement = DELETE FROM t1 WHERE a % 2 = 0 AND b = 1; +source extra/rpl_tests/rpl_blackhole.test; + +# Test INSERT-SELECT into Blackhole, no primary key +let $statement = INSERT INTO t1 SELECT * FROM t2; +source extra/rpl_tests/rpl_blackhole.test; + +# +# The MASTER has MyISAM as the engine for both tables. The SLAVE has Blackhole +# on t1 (transactional engine) and MyISAM on t2 (non-transactional engine). +# +# In MIXED mode, the command "INSERT INTO t2 SELECT * FROM t1" is logged as +# statement on the master. On the slave, it is tagged as unsafe because the +# statement mixes both transactional and non-transactional engines and as such +# its changes are logged as rows. However, due to the nature of the blackhole +# engine, no rows are returned and thus any chain replication would make the +# next master on the chain diverge. +# +# Fo this reason, we have disabled the statement. 
+# +# Test INSERT-SELECT from Blackhole, no primary key +# let $statement = INSERT INTO t2 SELECT * FROM t1; +# source extra/rpl_tests/rpl_blackhole.test; +# + +connection master; +ALTER TABLE t1 ADD PRIMARY KEY pk_t1 (a,b); +sync_slave_with_master; + +# Test insert, primary key +let $statement = INSERT INTO t1 VALUES (1,2,1),(2,2,2),(3,2,3),(4,2,4); +source extra/rpl_tests/rpl_blackhole.test; + +# Test update, primary key +let $statement = UPDATE t1 SET c = 2*c WHERE a % 2 = 0 AND b = 2; +source extra/rpl_tests/rpl_blackhole.test; + +# Test delete, primary key +let $statement = DELETE FROM t1 WHERE a % 2 = 0 AND b = 2; +source extra/rpl_tests/rpl_blackhole.test; + +connection master; +ALTER TABLE t1 DROP PRIMARY KEY, ADD KEY key_t1 (a); +sync_slave_with_master; + +# Test insert, key +let $statement = INSERT INTO t1 VALUES (1,3,1),(2,3,2),(3,3,3),(4,3,4); +source extra/rpl_tests/rpl_blackhole.test; + +# Test update, key +let $statement = UPDATE t1 SET c = 2*c WHERE a % 2 = 0 AND b = 3; +source extra/rpl_tests/rpl_blackhole.test; + +# Test delete, key +let $statement = DELETE FROM t1 WHERE a % 2 = 0 AND b = 3; +source extra/rpl_tests/rpl_blackhole.test; + +connection master; +DROP TABLE t1,t2; +sync_slave_with_master; diff --git a/mysql-test/suite/rpl/r/rpl_blackhole_row_annotate.result b/mysql-test/suite/rpl/r/rpl_blackhole_row_annotate.result new file mode 100644 index 00000000000..20ca37322dc --- /dev/null +++ b/mysql-test/suite/rpl/r/rpl_blackhole_row_annotate.result @@ -0,0 +1,434 @@ +include/master-slave.inc +[connection master] +SET timestamp=1000000000; +RESET MASTER; +SET timestamp=1000000000; +RESET MASTER; +CREATE TABLE t1 (a INT, b INT, c INT); +CREATE TABLE t2 (a INT, b INT, c INT); +ALTER TABLE t1 ENGINE=BLACKHOLE; +INSERT INTO t2 VALUES (1,9,1), (2,9,2), (3,9,3), (4,9,4); +[on master] +INSERT INTO t1 VALUES (1,1,1),(2,1,2),(3,1,3),(4,1,4); +[on slave] +# Expect 0 +SELECT COUNT(*) FROM t1; +COUNT(*) +0 +>>> Something was written to binary log <<< +[on master] +UPDATE t1 SET c = 2*c WHERE a % 2 = 0 AND b = 1; +[on slave] +# Expect 0 +SELECT COUNT(*) FROM t1; +COUNT(*) +0 +>>> Something was written to binary log <<< +[on master] +DELETE FROM t1 WHERE a % 2 = 0 AND b = 1; +[on slave] +# Expect 0 +SELECT COUNT(*) FROM t1; +COUNT(*) +0 +>>> Something was written to binary log <<< +[on master] +INSERT INTO t1 SELECT * FROM t2; +[on slave] +# Expect 0 +SELECT COUNT(*) FROM t1; +COUNT(*) +0 +>>> Something was written to binary log <<< +ALTER TABLE t1 ADD PRIMARY KEY pk_t1 (a,b); +[on master] +INSERT INTO t1 VALUES (1,2,1),(2,2,2),(3,2,3),(4,2,4); +[on slave] +# Expect 0 +SELECT COUNT(*) FROM t1; +COUNT(*) +0 +>>> Something was written to binary log <<< +[on master] +UPDATE t1 SET c = 2*c WHERE a % 2 = 0 AND b = 2; +[on slave] +# Expect 0 +SELECT COUNT(*) FROM t1; +COUNT(*) +0 +>>> Something was written to binary log <<< +[on master] +DELETE FROM t1 WHERE a % 2 = 0 AND b = 2; +[on slave] +# Expect 0 +SELECT COUNT(*) FROM t1; +COUNT(*) +0 +>>> Something was written to binary log <<< +ALTER TABLE t1 DROP PRIMARY KEY, ADD KEY key_t1 (a); +[on master] +INSERT INTO t1 VALUES (1,3,1),(2,3,2),(3,3,3),(4,3,4); +[on slave] +# Expect 0 +SELECT COUNT(*) FROM t1; +COUNT(*) +0 +>>> Something was written to binary log <<< +[on master] +UPDATE t1 SET c = 2*c WHERE a % 2 = 0 AND b = 3; +[on slave] +# Expect 0 +SELECT COUNT(*) FROM t1; +COUNT(*) +0 +>>> Something was written to binary log <<< +[on master] +DELETE FROM t1 WHERE a % 2 = 0 AND b = 3; +[on slave] +# Expect 0 +SELECT COUNT(*) FROM t1; 
+COUNT(*) +0 +>>> Something was written to binary log <<< +DROP TABLE t1,t2; +FLUSH LOGS; +show binlog events in 'slave-bin.000001' from ; +Log_name Pos Event_type Server_id End_log_pos Info +slave-bin.000001 # Gtid_list 2 # [] +slave-bin.000001 # Binlog_checkpoint 2 # slave-bin.000001 +slave-bin.000001 # Gtid 1 # GTID 0-1-1 +slave-bin.000001 # Query 1 # use `test`; CREATE TABLE t1 (a INT, b INT, c INT) +slave-bin.000001 # Gtid 1 # GTID 0-1-2 +slave-bin.000001 # Query 1 # use `test`; CREATE TABLE t2 (a INT, b INT, c INT) +slave-bin.000001 # Gtid 2 # GTID 0-2-3 +slave-bin.000001 # Query 2 # use `test`; ALTER TABLE t1 ENGINE=BLACKHOLE +slave-bin.000001 # Gtid 1 # BEGIN GTID 0-1-3 +slave-bin.000001 # Annotate_rows 1 # INSERT INTO t2 VALUES (1,9,1), (2,9,2), (3,9,3), (4,9,4) +slave-bin.000001 # Table_map 1 # table_id: # (test.t2) +slave-bin.000001 # Write_rows_v1 1 # table_id: # flags: STMT_END_F +slave-bin.000001 # Query 1 # COMMIT +slave-bin.000001 # Gtid 1 # BEGIN GTID 0-1-4 +slave-bin.000001 # Annotate_rows 1 # INSERT INTO t1 VALUES (1,1,1),(2,1,2),(3,1,3),(4,1,4) +slave-bin.000001 # Table_map 1 # table_id: # (test.t1) +slave-bin.000001 # Write_rows_v1 1 # table_id: # flags: STMT_END_F +slave-bin.000001 # Query 1 # COMMIT +slave-bin.000001 # Gtid 1 # BEGIN GTID 0-1-5 +slave-bin.000001 # Annotate_rows 1 # UPDATE t1 SET c = 2*c WHERE a % 2 = 0 AND b = 1 +slave-bin.000001 # Table_map 1 # table_id: # (test.t1) +slave-bin.000001 # Update_rows_v1 1 # table_id: # flags: STMT_END_F +slave-bin.000001 # Query 1 # COMMIT +slave-bin.000001 # Gtid 1 # BEGIN GTID 0-1-6 +slave-bin.000001 # Annotate_rows 1 # DELETE FROM t1 WHERE a % 2 = 0 AND b = 1 +slave-bin.000001 # Table_map 1 # table_id: # (test.t1) +slave-bin.000001 # Delete_rows_v1 1 # table_id: # flags: STMT_END_F +slave-bin.000001 # Query 1 # COMMIT +slave-bin.000001 # Gtid 1 # BEGIN GTID 0-1-7 +slave-bin.000001 # Annotate_rows 1 # INSERT INTO t1 SELECT * FROM t2 +slave-bin.000001 # Table_map 1 # table_id: # (test.t1) +slave-bin.000001 # Write_rows_v1 1 # table_id: # flags: STMT_END_F +slave-bin.000001 # Query 1 # COMMIT +slave-bin.000001 # Gtid 1 # GTID 0-1-8 +slave-bin.000001 # Query 1 # use `test`; ALTER TABLE t1 ADD PRIMARY KEY pk_t1 (a,b) +slave-bin.000001 # Gtid 1 # BEGIN GTID 0-1-9 +slave-bin.000001 # Annotate_rows 1 # INSERT INTO t1 VALUES (1,2,1),(2,2,2),(3,2,3),(4,2,4) +slave-bin.000001 # Table_map 1 # table_id: # (test.t1) +slave-bin.000001 # Write_rows_v1 1 # table_id: # flags: STMT_END_F +slave-bin.000001 # Query 1 # COMMIT +slave-bin.000001 # Gtid 1 # BEGIN GTID 0-1-10 +slave-bin.000001 # Annotate_rows 1 # UPDATE t1 SET c = 2*c WHERE a % 2 = 0 AND b = 2 +slave-bin.000001 # Table_map 1 # table_id: # (test.t1) +slave-bin.000001 # Update_rows_v1 1 # table_id: # flags: STMT_END_F +slave-bin.000001 # Query 1 # COMMIT +slave-bin.000001 # Gtid 1 # BEGIN GTID 0-1-11 +slave-bin.000001 # Annotate_rows 1 # DELETE FROM t1 WHERE a % 2 = 0 AND b = 2 +slave-bin.000001 # Table_map 1 # table_id: # (test.t1) +slave-bin.000001 # Delete_rows_v1 1 # table_id: # flags: STMT_END_F +slave-bin.000001 # Query 1 # COMMIT +slave-bin.000001 # Gtid 1 # GTID 0-1-12 +slave-bin.000001 # Query 1 # use `test`; ALTER TABLE t1 DROP PRIMARY KEY, ADD KEY key_t1 (a) +slave-bin.000001 # Gtid 1 # BEGIN GTID 0-1-13 +slave-bin.000001 # Annotate_rows 1 # INSERT INTO t1 VALUES (1,3,1),(2,3,2),(3,3,3),(4,3,4) +slave-bin.000001 # Table_map 1 # table_id: # (test.t1) +slave-bin.000001 # Write_rows_v1 1 # table_id: # flags: STMT_END_F +slave-bin.000001 # Query 1 # COMMIT 
+slave-bin.000001 # Gtid 1 # BEGIN GTID 0-1-14 +slave-bin.000001 # Annotate_rows 1 # UPDATE t1 SET c = 2*c WHERE a % 2 = 0 AND b = 3 +slave-bin.000001 # Table_map 1 # table_id: # (test.t1) +slave-bin.000001 # Update_rows_v1 1 # table_id: # flags: STMT_END_F +slave-bin.000001 # Query 1 # COMMIT +slave-bin.000001 # Gtid 1 # BEGIN GTID 0-1-15 +slave-bin.000001 # Annotate_rows 1 # DELETE FROM t1 WHERE a % 2 = 0 AND b = 3 +slave-bin.000001 # Table_map 1 # table_id: # (test.t1) +slave-bin.000001 # Delete_rows_v1 1 # table_id: # flags: STMT_END_F +slave-bin.000001 # Query 1 # COMMIT +slave-bin.000001 # Gtid 1 # GTID 0-1-16 +slave-bin.000001 # Query 1 # use `test`; DROP TABLE IF EXISTS `t1`,`t2` /* generated by server */ +slave-bin.000001 # Rotate 2 # slave-bin.000002;pos=4 +/*!50530 SET @@SESSION.PSEUDO_SLAVE_MODE=1*/; +/*!40019 SET @@session.max_insert_delayed_threads=0*/; +/*!50003 SET @OLD_COMPLETION_TYPE=@@COMPLETION_TYPE,COMPLETION_TYPE=0*/; +DELIMITER /*!*/; +# at # +#010909 4:46:40 server id # end_log_pos # Start: binlog v 4, server v #.##.## created 010909 4:46:40 at startup +ROLLBACK/*!*/; +# at # +#010909 4:46:40 server id # end_log_pos # Gtid list [] +# at # +#010909 4:46:40 server id # end_log_pos # Binlog checkpoint slave-bin.000001 +# at # +#010909 4:46:40 server id # end_log_pos # GTID 0-1-1 ddl +/*!100101 SET @@session.skip_parallel_replication=0*//*!*/; +/*!100001 SET @@session.gtid_domain_id=0*//*!*/; +/*!100001 SET @@session.server_id=1*//*!*/; +/*!100001 SET @@session.gtid_seq_no=1*//*!*/; +# at # +#010909 4:46:40 server id # end_log_pos # Query thread_id=# exec_time=# error_code=0 +use `test`/*!*/; +SET TIMESTAMP=1000000000/*!*/; +SET @@session.pseudo_thread_id=#/*!*/; +SET @@session.foreign_key_checks=1, @@session.sql_auto_is_null=0, @@session.unique_checks=1, @@session.autocommit=1/*!*/; +SET @@session.sql_mode=1342177280/*!*/; +SET @@session.auto_increment_increment=1, @@session.auto_increment_offset=1/*!*/; +/*!\C latin1 *//*!*/; +SET @@session.character_set_client=8,@@session.collation_connection=8,@@session.collation_server=8/*!*/; +SET @@session.lc_time_names=0/*!*/; +SET @@session.collation_database=DEFAULT/*!*/; +CREATE TABLE t1 (a INT, b INT, c INT) +/*!*/; +# at # +#010909 4:46:40 server id # end_log_pos # GTID 0-1-2 ddl +/*!100001 SET @@session.gtid_seq_no=2*//*!*/; +# at # +#010909 4:46:40 server id # end_log_pos # Query thread_id=# exec_time=# error_code=0 +SET TIMESTAMP=1000000000/*!*/; +CREATE TABLE t2 (a INT, b INT, c INT) +/*!*/; +# at # +#010909 4:46:40 server id # end_log_pos # GTID 0-2-3 ddl +/*!100001 SET @@session.server_id=2*//*!*/; +/*!100001 SET @@session.gtid_seq_no=3*//*!*/; +# at # +#010909 4:46:40 server id # end_log_pos # Query thread_id=# exec_time=# error_code=0 +SET TIMESTAMP=1000000000/*!*/; +ALTER TABLE t1 ENGINE=BLACKHOLE +/*!*/; +# at # +#010909 4:46:40 server id # end_log_pos # GTID 0-1-3 trans +/*!100001 SET @@session.server_id=1*//*!*/; +/*!100001 SET @@session.gtid_seq_no=3*//*!*/; +BEGIN +/*!*/; +# at # +# at # +#010909 4:46:40 server id # end_log_pos # Annotate_rows: +#Q> INSERT INTO t2 VALUES (1,9,1), (2,9,2), (3,9,3), (4,9,4) +#010909 4:46:40 server id # end_log_pos # Table_map: `test`.`t2` mapped to number # +# at # +#010909 4:46:40 server id # end_log_pos # Write_rows: table id # flags: STMT_END_F +# at # +#010909 4:46:40 server id # end_log_pos # Query thread_id=# exec_time=# error_code=0 +SET TIMESTAMP=1000000000/*!*/; +COMMIT +/*!*/; +# at # +#010909 4:46:40 server id # end_log_pos # GTID 0-1-4 trans +/*!100001 SET 
@@session.gtid_seq_no=4*//*!*/; +BEGIN +/*!*/; +# at # +# at # +#010909 4:46:40 server id # end_log_pos # Annotate_rows: +#Q> INSERT INTO t1 VALUES (1,1,1),(2,1,2),(3,1,3),(4,1,4) +#010909 4:46:40 server id # end_log_pos # Table_map: `test`.`t1` mapped to number # +# at # +#010909 4:46:40 server id # end_log_pos # Write_rows: table id # flags: STMT_END_F +# at # +#010909 4:46:40 server id # end_log_pos # Query thread_id=# exec_time=# error_code=0 +SET TIMESTAMP=1000000000/*!*/; +COMMIT +/*!*/; +# at # +#010909 4:46:40 server id # end_log_pos # GTID 0-1-5 trans +/*!100001 SET @@session.gtid_seq_no=5*//*!*/; +BEGIN +/*!*/; +# at # +# at # +#010909 4:46:40 server id # end_log_pos # Annotate_rows: +#Q> UPDATE t1 SET c = 2*c WHERE a % 2 = 0 AND b = 1 +#010909 4:46:40 server id # end_log_pos # Table_map: `test`.`t1` mapped to number # +# at # +#010909 4:46:40 server id # end_log_pos # Update_rows: table id # flags: STMT_END_F +# at # +#010909 4:46:40 server id # end_log_pos # Query thread_id=# exec_time=# error_code=0 +SET TIMESTAMP=1000000000/*!*/; +COMMIT +/*!*/; +# at # +#010909 4:46:40 server id # end_log_pos # GTID 0-1-6 trans +/*!100001 SET @@session.gtid_seq_no=6*//*!*/; +BEGIN +/*!*/; +# at # +# at # +#010909 4:46:40 server id # end_log_pos # Annotate_rows: +#Q> DELETE FROM t1 WHERE a % 2 = 0 AND b = 1 +#010909 4:46:40 server id # end_log_pos # Table_map: `test`.`t1` mapped to number # +# at # +#010909 4:46:40 server id # end_log_pos # Delete_rows: table id # flags: STMT_END_F +# at # +#010909 4:46:40 server id # end_log_pos # Query thread_id=# exec_time=# error_code=0 +SET TIMESTAMP=1000000000/*!*/; +COMMIT +/*!*/; +# at # +#010909 4:46:40 server id # end_log_pos # GTID 0-1-7 trans +/*!100001 SET @@session.gtid_seq_no=7*//*!*/; +BEGIN +/*!*/; +# at # +# at # +#010909 4:46:40 server id # end_log_pos # Annotate_rows: +#Q> INSERT INTO t1 SELECT * FROM t2 +#010909 4:46:40 server id # end_log_pos # Table_map: `test`.`t1` mapped to number # +# at # +#010909 4:46:40 server id # end_log_pos # Write_rows: table id # flags: STMT_END_F +# at # +#010909 4:46:40 server id # end_log_pos # Query thread_id=# exec_time=# error_code=0 +SET TIMESTAMP=1000000000/*!*/; +COMMIT +/*!*/; +# at # +#010909 4:46:40 server id # end_log_pos # GTID 0-1-8 ddl +/*!100001 SET @@session.gtid_seq_no=8*//*!*/; +# at # +#010909 4:46:40 server id # end_log_pos # Query thread_id=# exec_time=# error_code=0 +SET TIMESTAMP=1000000000/*!*/; +ALTER TABLE t1 ADD PRIMARY KEY pk_t1 (a,b) +/*!*/; +# at # +#010909 4:46:40 server id # end_log_pos # GTID 0-1-9 trans +/*!100001 SET @@session.gtid_seq_no=9*//*!*/; +BEGIN +/*!*/; +# at # +# at # +#010909 4:46:40 server id # end_log_pos # Annotate_rows: +#Q> INSERT INTO t1 VALUES (1,2,1),(2,2,2),(3,2,3),(4,2,4) +#010909 4:46:40 server id # end_log_pos # Table_map: `test`.`t1` mapped to number # +# at # +#010909 4:46:40 server id # end_log_pos # Write_rows: table id # flags: STMT_END_F +# at # +#010909 4:46:40 server id # end_log_pos # Query thread_id=# exec_time=# error_code=0 +SET TIMESTAMP=1000000000/*!*/; +COMMIT +/*!*/; +# at # +#010909 4:46:40 server id # end_log_pos # GTID 0-1-10 trans +/*!100001 SET @@session.gtid_seq_no=10*//*!*/; +BEGIN +/*!*/; +# at # +# at # +#010909 4:46:40 server id # end_log_pos # Annotate_rows: +#Q> UPDATE t1 SET c = 2*c WHERE a % 2 = 0 AND b = 2 +#010909 4:46:40 server id # end_log_pos # Table_map: `test`.`t1` mapped to number # +# at # +#010909 4:46:40 server id # end_log_pos # Update_rows: table id # flags: STMT_END_F +# at # +#010909 4:46:40 server id # 
end_log_pos # Query thread_id=# exec_time=# error_code=0 +SET TIMESTAMP=1000000000/*!*/; +COMMIT +/*!*/; +# at # +#010909 4:46:40 server id # end_log_pos # GTID 0-1-11 trans +/*!100001 SET @@session.gtid_seq_no=11*//*!*/; +BEGIN +/*!*/; +# at # +# at # +#010909 4:46:40 server id # end_log_pos # Annotate_rows: +#Q> DELETE FROM t1 WHERE a % 2 = 0 AND b = 2 +#010909 4:46:40 server id # end_log_pos # Table_map: `test`.`t1` mapped to number # +# at # +#010909 4:46:40 server id # end_log_pos # Delete_rows: table id # flags: STMT_END_F +# at # +#010909 4:46:40 server id # end_log_pos # Query thread_id=# exec_time=# error_code=0 +SET TIMESTAMP=1000000000/*!*/; +COMMIT +/*!*/; +# at # +#010909 4:46:40 server id # end_log_pos # GTID 0-1-12 ddl +/*!100001 SET @@session.gtid_seq_no=12*//*!*/; +# at # +#010909 4:46:40 server id # end_log_pos # Query thread_id=# exec_time=# error_code=0 +SET TIMESTAMP=1000000000/*!*/; +ALTER TABLE t1 DROP PRIMARY KEY, ADD KEY key_t1 (a) +/*!*/; +# at # +#010909 4:46:40 server id # end_log_pos # GTID 0-1-13 trans +/*!100001 SET @@session.gtid_seq_no=13*//*!*/; +BEGIN +/*!*/; +# at # +# at # +#010909 4:46:40 server id # end_log_pos # Annotate_rows: +#Q> INSERT INTO t1 VALUES (1,3,1),(2,3,2),(3,3,3),(4,3,4) +#010909 4:46:40 server id # end_log_pos # Table_map: `test`.`t1` mapped to number # +# at # +#010909 4:46:40 server id # end_log_pos # Write_rows: table id # flags: STMT_END_F +# at # +#010909 4:46:40 server id # end_log_pos # Query thread_id=# exec_time=# error_code=0 +SET TIMESTAMP=1000000000/*!*/; +COMMIT +/*!*/; +# at # +#010909 4:46:40 server id # end_log_pos # GTID 0-1-14 trans +/*!100001 SET @@session.gtid_seq_no=14*//*!*/; +BEGIN +/*!*/; +# at # +# at # +#010909 4:46:40 server id # end_log_pos # Annotate_rows: +#Q> UPDATE t1 SET c = 2*c WHERE a % 2 = 0 AND b = 3 +#010909 4:46:40 server id # end_log_pos # Table_map: `test`.`t1` mapped to number # +# at # +#010909 4:46:40 server id # end_log_pos # Update_rows: table id # flags: STMT_END_F +# at # +#010909 4:46:40 server id # end_log_pos # Query thread_id=# exec_time=# error_code=0 +SET TIMESTAMP=1000000000/*!*/; +COMMIT +/*!*/; +# at # +#010909 4:46:40 server id # end_log_pos # GTID 0-1-15 trans +/*!100001 SET @@session.gtid_seq_no=15*//*!*/; +BEGIN +/*!*/; +# at # +# at # +#010909 4:46:40 server id # end_log_pos # Annotate_rows: +#Q> DELETE FROM t1 WHERE a % 2 = 0 AND b = 3 +#010909 4:46:40 server id # end_log_pos # Table_map: `test`.`t1` mapped to number # +# at # +#010909 4:46:40 server id # end_log_pos # Delete_rows: table id # flags: STMT_END_F +# at # +#010909 4:46:40 server id # end_log_pos # Query thread_id=# exec_time=# error_code=0 +SET TIMESTAMP=1000000000/*!*/; +COMMIT +/*!*/; +# at # +#010909 4:46:40 server id # end_log_pos # GTID 0-1-16 ddl +/*!100001 SET @@session.gtid_seq_no=16*//*!*/; +# at # +#010909 4:46:40 server id # end_log_pos # Query thread_id=# exec_time=# error_code=0 +SET TIMESTAMP=1000000000/*!*/; +DROP TABLE IF EXISTS `t1`,`t2` /* generated by server */ +/*!*/; +# at # +#010909 4:46:40 server id # end_log_pos # Rotate to slave-bin.000002 pos: 4 +DELIMITER ; +# End of log file +ROLLBACK /* added by mysqlbinlog */; +/*!50003 SET COMPLETION_TYPE=@OLD_COMPLETION_TYPE*/; +/*!50530 SET @@SESSION.PSEUDO_SLAVE_MODE=0*/; +include/rpl_end.inc diff --git a/mysql-test/suite/rpl/t/rpl_blackhole.test b/mysql-test/suite/rpl/t/rpl_blackhole.test index 76b2e2421c9..9128382d12b 100644 --- a/mysql-test/suite/rpl/t/rpl_blackhole.test +++ b/mysql-test/suite/rpl/t/rpl_blackhole.test @@ -20,81 +20,6 @@ 
source include/master-slave.inc; call mtr.add_suppression("Unsafe statement written to the binary log using statement format since BINLOG_FORMAT = STATEMENT"); -# We start with no primary key -CREATE TABLE t1 (a INT, b INT, c INT); -CREATE TABLE t2 (a INT, b INT, c INT); +source extra/rpl_tests/rpl_blackhole_basic.test; -sync_slave_with_master; -ALTER TABLE t1 ENGINE=BLACKHOLE; - -connection master; -INSERT INTO t2 VALUES (1,9,1), (2,9,2), (3,9,3), (4,9,4); -sync_slave_with_master; - -# Test insert, no primary key -let $statement = INSERT INTO t1 VALUES (1,1,1),(2,1,2),(3,1,3),(4,1,4); -source extra/rpl_tests/rpl_blackhole.test; - -# Test update, no primary key -let $statement = UPDATE t1 SET c = 2*c WHERE a % 2 = 0 AND b = 1; -source extra/rpl_tests/rpl_blackhole.test; - -# Test delete, no primary key -let $statement = DELETE FROM t1 WHERE a % 2 = 0 AND b = 1; -source extra/rpl_tests/rpl_blackhole.test; - -# Test INSERT-SELECT into Blackhole, no primary key -let $statement = INSERT INTO t1 SELECT * FROM t2; -source extra/rpl_tests/rpl_blackhole.test; - -# -# The MASTER has MyISAM as the engine for both tables. The SLAVE has Blackhole -# on t1 (transactional engine) and MyISAM on t2 (non-transactional engine). -# -# In MIXED mode, the command "INSERT INTO t2 SELECT * FROM t1" is logged as -# statement on the master. On the slave, it is tagged as unsafe because the -# statement mixes both transactional and non-transactional engines and as such -# its changes are logged as rows. However, due to the nature of the blackhole -# engine, no rows are returned and thus any chain replication would make the -# next master on the chain diverge. -# -# Fo this reason, we have disabled the statement. -# -# Test INSERT-SELECT from Blackhole, no primary key -# let $statement = INSERT INTO t2 SELECT * FROM t1; -# source extra/rpl_tests/rpl_blackhole.test; -# - -connection master; -ALTER TABLE t1 ADD PRIMARY KEY pk_t1 (a,b); - -# Test insert, primary key -let $statement = INSERT INTO t1 VALUES (1,2,1),(2,2,2),(3,2,3),(4,2,4); -source extra/rpl_tests/rpl_blackhole.test; - -# Test update, primary key -let $statement = UPDATE t1 SET c = 2*c WHERE a % 2 = 0 AND b = 2; -source extra/rpl_tests/rpl_blackhole.test; - -# Test delete, primary key -let $statement = DELETE FROM t1 WHERE a % 2 = 0 AND b = 2; -source extra/rpl_tests/rpl_blackhole.test; - -connection master; -ALTER TABLE t1 DROP PRIMARY KEY, ADD KEY key_t1 (a); - -# Test insert, key -let $statement = INSERT INTO t1 VALUES (1,3,1),(2,3,2),(3,3,3),(4,3,4); -source extra/rpl_tests/rpl_blackhole.test; - -# Test update, key -let $statement = UPDATE t1 SET c = 2*c WHERE a % 2 = 0 AND b = 3; -source extra/rpl_tests/rpl_blackhole.test; - -# Test delete, key -let $statement = DELETE FROM t1 WHERE a % 2 = 0 AND b = 3; -source extra/rpl_tests/rpl_blackhole.test; - -connection master; -DROP TABLE t1,t2; --source include/rpl_end.inc diff --git a/mysql-test/suite/rpl/t/rpl_blackhole_row_annotate-master.opt b/mysql-test/suite/rpl/t/rpl_blackhole_row_annotate-master.opt new file mode 100644 index 00000000000..91302791099 --- /dev/null +++ b/mysql-test/suite/rpl/t/rpl_blackhole_row_annotate-master.opt @@ -0,0 +1 @@ +--binlog_annotate_row_events --timezone=GMT-3 diff --git a/mysql-test/suite/rpl/t/rpl_blackhole_row_annotate-slave.opt b/mysql-test/suite/rpl/t/rpl_blackhole_row_annotate-slave.opt new file mode 100644 index 00000000000..7ac6a84faa7 --- /dev/null +++ b/mysql-test/suite/rpl/t/rpl_blackhole_row_annotate-slave.opt @@ -0,0 +1 @@ +--binlog_annotate_row_events 
--replicate_annotate_row_events diff --git a/mysql-test/suite/rpl/t/rpl_blackhole_row_annotate.test b/mysql-test/suite/rpl/t/rpl_blackhole_row_annotate.test new file mode 100644 index 00000000000..77384d91475 --- /dev/null +++ b/mysql-test/suite/rpl/t/rpl_blackhole_row_annotate.test @@ -0,0 +1,49 @@ +# ==== Purpose ==== +# +# Test verifies that when "replicate_annotate_row_events" are enabled on slave +# the DML operations on blackhole engine will be successful. It also ensures +# that Annotate events are logged into slave's binary log. +# +# ==== Implementation ==== +# +# Steps: +# 0 - Enable "replicate_annotate_row_events" on slave and do DML operations +# on master. +# 1 - Slave server will successfully apply the DML operations and it is in +# sync with master. +# 2 - Verify that the "show binlog events" prints all annotate events. +# 3 - Stream the slave's binary log using "mysqlbinlog" tool and verify +# that the Annotate events are being displayed. +# +# ==== References ==== +# +# MDEV-11094: Blackhole table updates on slave fail when row annotation is +# enabled + +source include/have_blackhole.inc; +source include/have_binlog_format_row.inc; +source include/binlog_start_pos.inc; +source include/master-slave.inc; + +SET timestamp=1000000000; +RESET MASTER; +connection slave; +SET timestamp=1000000000; +RESET MASTER; + +connection master; +source extra/rpl_tests/rpl_blackhole_basic.test; + +# Verify on slave. +connection slave; +FLUSH LOGS; +--replace_column 2 # 5 # +--replace_result $binlog_start_pos +--replace_regex /table_id: [0-9]+/table_id: #/ /\/\* xid=.* \*\//\/* xid= *\// +--eval show binlog events in 'slave-bin.000001' from $binlog_start_pos + +let $MYSQLD_DATADIR= `select @@datadir`; +--replace_regex /server id [0-9]*/server id #/ /server v [^ ]*/server v #.##.##/ /exec_time=[0-9]*/exec_time=#/ /thread_id=[0-9]*/thread_id=#/ /table id [0-9]*/table id #/ /mapped to number [0-9]*/mapped to number #/ /end_log_pos [0-9]*/end_log_pos #/ /# at [0-9]*/# at #/ +--exec $MYSQL_BINLOG --base64-output=decode-rows $MYSQLD_DATADIR/slave-bin.000001 + +source include/rpl_end.inc; diff --git a/storage/blackhole/ha_blackhole.cc b/storage/blackhole/ha_blackhole.cc index 01aaa9ea15f..43bcdc541a1 100644 --- a/storage/blackhole/ha_blackhole.cc +++ b/storage/blackhole/ha_blackhole.cc @@ -25,6 +25,16 @@ #include "ha_blackhole.h" #include "sql_class.h" // THD, SYSTEM_THREAD_SLAVE_SQL +static bool is_row_based_replication(THD *thd) +{ + /* + A row event which has its thd->query() == NULL or a row event which has + replicate_annotate_row_events enabled. In the later case the thd->query() + will be pointing to the query, received through replicated annotate event + from master. 
+ */ + return ((thd->query() == NULL) || thd->variables.binlog_annotate_row_events); +} /* Static declarations for handlerton */ static handler *blackhole_create_handler(handlerton *hton, @@ -109,7 +119,8 @@ int ha_blackhole::update_row(const uchar *old_data, uchar *new_data) { DBUG_ENTER("ha_blackhole::update_row"); THD *thd= ha_thd(); - if (thd->system_thread == SYSTEM_THREAD_SLAVE_SQL && thd->query() == NULL) + if (thd->system_thread == SYSTEM_THREAD_SLAVE_SQL && + is_row_based_replication(thd)) DBUG_RETURN(0); DBUG_RETURN(HA_ERR_WRONG_COMMAND); } @@ -118,7 +129,8 @@ int ha_blackhole::delete_row(const uchar *buf) { DBUG_ENTER("ha_blackhole::delete_row"); THD *thd= ha_thd(); - if (thd->system_thread == SYSTEM_THREAD_SLAVE_SQL && thd->query() == NULL) + if (thd->system_thread == SYSTEM_THREAD_SLAVE_SQL && + is_row_based_replication(thd)) DBUG_RETURN(0); DBUG_RETURN(HA_ERR_WRONG_COMMAND); } @@ -135,7 +147,8 @@ int ha_blackhole::rnd_next(uchar *buf) int rc; DBUG_ENTER("ha_blackhole::rnd_next"); THD *thd= ha_thd(); - if (thd->system_thread == SYSTEM_THREAD_SLAVE_SQL && thd->query() == NULL) + if (thd->system_thread == SYSTEM_THREAD_SLAVE_SQL && + is_row_based_replication(thd)) rc= 0; else rc= HA_ERR_END_OF_FILE; @@ -220,7 +233,8 @@ int ha_blackhole::index_read_map(uchar * buf, const uchar * key, int rc; DBUG_ENTER("ha_blackhole::index_read"); THD *thd= ha_thd(); - if (thd->system_thread == SYSTEM_THREAD_SLAVE_SQL && thd->query() == NULL) + if (thd->system_thread == SYSTEM_THREAD_SLAVE_SQL && + is_row_based_replication(thd)) rc= 0; else rc= HA_ERR_END_OF_FILE; @@ -235,7 +249,8 @@ int ha_blackhole::index_read_idx_map(uchar * buf, uint idx, const uchar * key, int rc; DBUG_ENTER("ha_blackhole::index_read_idx"); THD *thd= ha_thd(); - if (thd->system_thread == SYSTEM_THREAD_SLAVE_SQL && thd->query() == NULL) + if (thd->system_thread == SYSTEM_THREAD_SLAVE_SQL && + is_row_based_replication(thd)) rc= 0; else rc= HA_ERR_END_OF_FILE; @@ -249,7 +264,8 @@ int ha_blackhole::index_read_last_map(uchar * buf, const uchar * key, int rc; DBUG_ENTER("ha_blackhole::index_read_last"); THD *thd= ha_thd(); - if (thd->system_thread == SYSTEM_THREAD_SLAVE_SQL && thd->query() == NULL) + if (thd->system_thread == SYSTEM_THREAD_SLAVE_SQL && + is_row_based_replication(thd)) rc= 0; else rc= HA_ERR_END_OF_FILE; -- cgit v1.2.1 From a47464d1c12d773364e78f50090b08484fe76129 Mon Sep 17 00:00:00 2001 From: Sujatha Date: Wed, 29 May 2019 17:35:29 +0530 Subject: MDEV-11094: Blackhole table updates on slave fail when row annotation is enabled Post push fix. Simplified the earlier fixes. --- storage/blackhole/ha_blackhole.cc | 40 ++++++++++++++++++++------------------- 1 file changed, 21 insertions(+), 19 deletions(-) diff --git a/storage/blackhole/ha_blackhole.cc b/storage/blackhole/ha_blackhole.cc index 43bcdc541a1..69182676c1e 100644 --- a/storage/blackhole/ha_blackhole.cc +++ b/storage/blackhole/ha_blackhole.cc @@ -25,15 +25,23 @@ #include "ha_blackhole.h" #include "sql_class.h" // THD, SYSTEM_THREAD_SLAVE_SQL +/** + Checks if the param 'thd' is pointing to slave applier thread and row based + replication is in use. + + A row event will have its thd->query() == NULL except in cases where + replicate_annotate_row_events is enabled. In the later case the thd->query() + will be pointing to the query, received through replicated annotate event + from master. 
+ + @param thd pointer to a THD instance + + @return TRUE if thread is slave applier and row based replication is in use +*/ static bool is_row_based_replication(THD *thd) { - /* - A row event which has its thd->query() == NULL or a row event which has - replicate_annotate_row_events enabled. In the later case the thd->query() - will be pointing to the query, received through replicated annotate event - from master. - */ - return ((thd->query() == NULL) || thd->variables.binlog_annotate_row_events); + return thd->system_thread == SYSTEM_THREAD_SLAVE_SQL && + (thd->query() == NULL || thd->variables.binlog_annotate_row_events); } /* Static declarations for handlerton */ @@ -119,8 +127,7 @@ int ha_blackhole::update_row(const uchar *old_data, uchar *new_data) { DBUG_ENTER("ha_blackhole::update_row"); THD *thd= ha_thd(); - if (thd->system_thread == SYSTEM_THREAD_SLAVE_SQL && - is_row_based_replication(thd)) + if (is_row_based_replication(thd)) DBUG_RETURN(0); DBUG_RETURN(HA_ERR_WRONG_COMMAND); } @@ -129,8 +136,7 @@ int ha_blackhole::delete_row(const uchar *buf) { DBUG_ENTER("ha_blackhole::delete_row"); THD *thd= ha_thd(); - if (thd->system_thread == SYSTEM_THREAD_SLAVE_SQL && - is_row_based_replication(thd)) + if (is_row_based_replication(thd)) DBUG_RETURN(0); DBUG_RETURN(HA_ERR_WRONG_COMMAND); } @@ -147,8 +153,7 @@ int ha_blackhole::rnd_next(uchar *buf) int rc; DBUG_ENTER("ha_blackhole::rnd_next"); THD *thd= ha_thd(); - if (thd->system_thread == SYSTEM_THREAD_SLAVE_SQL && - is_row_based_replication(thd)) + if (is_row_based_replication(thd)) rc= 0; else rc= HA_ERR_END_OF_FILE; @@ -233,8 +238,7 @@ int ha_blackhole::index_read_map(uchar * buf, const uchar * key, int rc; DBUG_ENTER("ha_blackhole::index_read"); THD *thd= ha_thd(); - if (thd->system_thread == SYSTEM_THREAD_SLAVE_SQL && - is_row_based_replication(thd)) + if (is_row_based_replication(thd)) rc= 0; else rc= HA_ERR_END_OF_FILE; @@ -249,8 +253,7 @@ int ha_blackhole::index_read_idx_map(uchar * buf, uint idx, const uchar * key, int rc; DBUG_ENTER("ha_blackhole::index_read_idx"); THD *thd= ha_thd(); - if (thd->system_thread == SYSTEM_THREAD_SLAVE_SQL && - is_row_based_replication(thd)) + if (is_row_based_replication(thd)) rc= 0; else rc= HA_ERR_END_OF_FILE; @@ -264,8 +267,7 @@ int ha_blackhole::index_read_last_map(uchar * buf, const uchar * key, int rc; DBUG_ENTER("ha_blackhole::index_read_last"); THD *thd= ha_thd(); - if (thd->system_thread == SYSTEM_THREAD_SLAVE_SQL && - is_row_based_replication(thd)) + if (is_row_based_replication(thd)) rc= 0; else rc= HA_ERR_END_OF_FILE; -- cgit v1.2.1 From 78c1be8b6b427e1331bbf0e5d5a24d57dc1477e3 Mon Sep 17 00:00:00 2001 From: Sujatha Date: Thu, 30 May 2019 12:11:57 +0530 Subject: MDEV-18913: typo in error log Problem: ======== Following typo in error log: 2019-03-13 15:58:10 0 [Note] Reading of all Master_info entries succeded Should be 'succeeded' Fix: === Fixed the typo with the right word 'succeeded'. 
--- sql/rpl_mi.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sql/rpl_mi.cc b/sql/rpl_mi.cc index 58131ff771c..70e60b1d4ad 100644 --- a/sql/rpl_mi.cc +++ b/sql/rpl_mi.cc @@ -1233,7 +1233,7 @@ bool Master_info_index::init_all_master_info() if (!err_num) // No Error on read Master_info { if (global_system_variables.log_warnings > 1) - sql_print_information("Reading of all Master_info entries succeded"); + sql_print_information("Reading of all Master_info entries succeeded"); DBUG_RETURN(0); } if (succ_num) // Have some Error and some Success -- cgit v1.2.1 From dd939d6f7e57e418a6f80fb9057eef8823beaff6 Mon Sep 17 00:00:00 2001 From: Sergey Vojtovich Date: Thu, 30 May 2019 19:34:08 +0400 Subject: MDEV-15734 - calculation inside sizeof() warning Reverted incorrect change introduced by 548d03d7. As result is char**, third qsort() parameter must be sizeof(char*). Not sizeof(result[0] + 2), which is same as sizeof(result[0]). Not even sizeof(result[0]) + 2, which would cause invalid memory access. Proper sorting is responsibility of logfilenamecompare() callback. --- storage/tokudb/PerconaFT/ft/logger/logger.cc | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-) diff --git a/storage/tokudb/PerconaFT/ft/logger/logger.cc b/storage/tokudb/PerconaFT/ft/logger/logger.cc index 3f13fe10feb..ddbbdcb25ab 100644 --- a/storage/tokudb/PerconaFT/ft/logger/logger.cc +++ b/storage/tokudb/PerconaFT/ft/logger/logger.cc @@ -667,12 +667,8 @@ int toku_logger_find_logfiles (const char *directory, char ***resultp, int *n_lo snprintf(fname, fnamelen, "%s/%s", directory, de->d_name); result[n_results++] = fname; } - // Return them in increasing order. Set width to allow for newer log file names ("xxx.tokulog13") - // which are one character longer than old log file names ("xxx.tokulog2"). The comparison function - // won't look beyond the terminating NUL, so an extra character in the comparison string doesn't matter. - // Allow room for terminating NUL after "xxx.tokulog13" even if result[0] is of form "xxx.tokulog2." - int width = sizeof(result[0]+2); - qsort(result, n_results, width, logfilenamecompare); + // Return them in increasing order. + qsort(result, n_results, sizeof(result[0]), logfilenamecompare); *resultp = result; *n_logfiles = n_results; result[n_results]=0; // make a trailing null -- cgit v1.2.1 From 5a19908b955a6aeeeb09fb2ce7744d1c52b65a3b Mon Sep 17 00:00:00 2001 From: Alexander Barkov Date: Fri, 31 May 2019 15:24:40 +0400 Subject: MDEV-19653 Add class Sql_cmd_create_table --- sql/handler.cc | 34 +++++++ sql/sql_alter.cc | 12 +++ sql/sql_alter.h | 5 +- sql/sql_cmd.h | 41 ++++++++ sql/sql_parse.cc | 269 +------------------------------------------------ sql/sql_table.cc | 300 +++++++++++++++++++++++++++++++++++++++++++++++++++++++ sql/sql_yacc.yy | 52 ++++------ sql/table.cc | 16 ++- 8 files changed, 425 insertions(+), 304 deletions(-) diff --git a/sql/handler.cc b/sql/handler.cc index 0b7fbc93cb5..3d091401f4b 100644 --- a/sql/handler.cc +++ b/sql/handler.cc @@ -211,6 +211,40 @@ redo: } +bool +Storage_engine_name::resolve_storage_engine_with_error(THD *thd, + handlerton **ha, + bool tmp_table) +{ +#if MYSQL_VERSION_ID < 100300 + /* + Please remove tmp_name when merging to 10.3 and pass m_storage_engine_name + directly to ha_resolve_by_name(). 
+ */ + LEX_STRING tmp_name; + tmp_name.str= const_cast(m_storage_engine_name.str); + tmp_name.length= m_storage_engine_name.length; +#endif + if (plugin_ref plugin= ha_resolve_by_name(thd, &tmp_name, tmp_table)) + { + *ha= plugin_hton(plugin); + return false; + } + + *ha= NULL; + if (thd->variables.sql_mode & MODE_NO_ENGINE_SUBSTITUTION) + { + my_error(ER_UNKNOWN_STORAGE_ENGINE, MYF(0), m_storage_engine_name.str); + return true; + } + push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN, + ER_UNKNOWN_STORAGE_ENGINE, + ER_THD(thd, ER_UNKNOWN_STORAGE_ENGINE), + m_storage_engine_name.str); + return false; +} + + plugin_ref ha_lock_engine(THD *thd, const handlerton *hton) { if (hton) diff --git a/sql/sql_alter.cc b/sql/sql_alter.cc index e37d5471f41..8ec68bf4876 100644 --- a/sql/sql_alter.cc +++ b/sql/sql_alter.cc @@ -193,6 +193,18 @@ bool Sql_cmd_alter_table::execute(THD *thd) SELECT_LEX *select_lex= &lex->select_lex; /* first table of first SELECT_LEX */ TABLE_LIST *first_table= (TABLE_LIST*) select_lex->table_list.first; + + const bool used_engine= lex->create_info.used_fields & HA_CREATE_USED_ENGINE; + DBUG_ASSERT((m_storage_engine_name.str != NULL) == used_engine); + if (used_engine) + { + if (resolve_storage_engine_with_error(thd, &lex->create_info.db_type, + lex->create_info.tmp_table())) + return true; // Engine not found, substitution is not allowed + if (!lex->create_info.db_type) // Not found, but substitution is allowed + lex->create_info.used_fields&= ~HA_CREATE_USED_ENGINE; + } + /* Code in mysql_alter_table() may modify its HA_CREATE_INFO argument, so we have to use a copy of this structure to make execution diff --git a/sql/sql_alter.h b/sql/sql_alter.h index a503837bfee..473208f0675 100644 --- a/sql/sql_alter.h +++ b/sql/sql_alter.h @@ -385,7 +385,8 @@ protected: Sql_cmd_alter_table represents the generic ALTER TABLE statement. @todo move Alter_info and other ALTER specific structures from Lex here. */ -class Sql_cmd_alter_table : public Sql_cmd_common_alter_table +class Sql_cmd_alter_table : public Sql_cmd_common_alter_table, + public Storage_engine_name { public: /** @@ -397,6 +398,8 @@ public: ~Sql_cmd_alter_table() {} + Storage_engine_name *option_storage_engine_name() { return this; } + bool execute(THD *thd); }; diff --git a/sql/sql_cmd.h b/sql/sql_cmd.h index 9583e015499..c237bb9270a 100644 --- a/sql/sql_cmd.h +++ b/sql/sql_cmd.h @@ -102,6 +102,31 @@ enum enum_sql_command { SQLCOM_END }; + +class Storage_engine_name +{ +protected: + LEX_CSTRING m_storage_engine_name; +public: + Storage_engine_name() + { + m_storage_engine_name.str= NULL; + m_storage_engine_name.length= 0; + } + Storage_engine_name(const LEX_CSTRING &name) + :m_storage_engine_name(name) + { } + Storage_engine_name(const LEX_STRING &name) + { + m_storage_engine_name.str= name.str; + m_storage_engine_name.length= name.length; + } + bool resolve_storage_engine_with_error(THD *thd, + handlerton **ha, + bool tmp_table); +}; + + /** @class Sql_cmd - Representation of an SQL command. 
@@ -145,6 +170,11 @@ public: */ virtual bool execute(THD *thd) = 0; + virtual Storage_engine_name *option_storage_engine_name() + { + return NULL; + } + protected: Sql_cmd() {} @@ -161,4 +191,15 @@ protected: } }; + +class Sql_cmd_create_table: public Sql_cmd, + public Storage_engine_name +{ +public: + enum_sql_command sql_command_code() const { return SQLCOM_CREATE_TABLE; } + Storage_engine_name *option_storage_engine_name() { return this; } + bool execute(THD *thd); +}; + + #endif // SQL_CMD_INCLUDED diff --git a/sql/sql_parse.cc b/sql/sql_parse.cc index 9cb65e82321..c8b9219c280 100644 --- a/sql/sql_parse.cc +++ b/sql/sql_parse.cc @@ -3280,274 +3280,6 @@ mysql_execute_command(THD *thd) res = ha_show_status(thd, lex->create_info.db_type, HA_ENGINE_MUTEX); break; } - case SQLCOM_CREATE_TABLE: - { - DBUG_ASSERT(first_table == all_tables && first_table != 0); - bool link_to_local; - TABLE_LIST *create_table= first_table; - TABLE_LIST *select_tables= lex->create_last_non_select_table->next_global; - - if (lex->tmp_table()) - { - status_var_decrement(thd->status_var.com_stat[SQLCOM_CREATE_TABLE]); - status_var_increment(thd->status_var.com_create_tmp_table); - } - - /* - Code below (especially in mysql_create_table() and select_create - methods) may modify HA_CREATE_INFO structure in LEX, so we have to - use a copy of this structure to make execution prepared statement- - safe. A shallow copy is enough as this code won't modify any memory - referenced from this structure. - */ - Table_specification_st create_info(lex->create_info); - /* - We need to copy alter_info for the same reasons of re-execution - safety, only in case of Alter_info we have to do (almost) a deep - copy. - */ - Alter_info alter_info(lex->alter_info, thd->mem_root); - - if (thd->is_fatal_error) - { - /* If out of memory when creating a copy of alter_info. */ - res= 1; - goto end_with_restore_list; - } - - /* Check privileges */ - if ((res= create_table_precheck(thd, select_tables, create_table))) - goto end_with_restore_list; - - /* Might have been updated in create_table_precheck */ - create_info.alias= create_table->alias; - - /* Fix names if symlinked or relocated tables */ - if (append_file_to_dir(thd, &create_info.data_file_name, - create_table->table_name) || - append_file_to_dir(thd, &create_info.index_file_name, - create_table->table_name)) - goto end_with_restore_list; - - /* - If no engine type was given, work out the default now - rather than at parse-time. - */ - if (!(create_info.used_fields & HA_CREATE_USED_ENGINE)) - create_info.use_default_db_type(thd); - /* - If we are using SET CHARSET without DEFAULT, add an implicit - DEFAULT to not confuse old users. (This may change). - */ - if ((create_info.used_fields & - (HA_CREATE_USED_DEFAULT_CHARSET | HA_CREATE_USED_CHARSET)) == - HA_CREATE_USED_CHARSET) - { - create_info.used_fields&= ~HA_CREATE_USED_CHARSET; - create_info.used_fields|= HA_CREATE_USED_DEFAULT_CHARSET; - create_info.default_table_charset= create_info.table_charset; - create_info.table_charset= 0; - } - - /* - If we are a slave, we should add OR REPLACE if we don't have - IF EXISTS. This will help a slave to recover from - CREATE TABLE OR EXISTS failures by dropping the table and - retrying the create. 
- */ - if (thd->slave_thread && - slave_ddl_exec_mode_options == SLAVE_EXEC_MODE_IDEMPOTENT && - !lex->create_info.if_not_exists()) - { - create_info.add(DDL_options_st::OPT_OR_REPLACE); - create_info.add(DDL_options_st::OPT_OR_REPLACE_SLAVE_GENERATED); - } - -#ifdef WITH_PARTITION_STORAGE_ENGINE - { - partition_info *part_info= thd->lex->part_info; - if (part_info && !(part_info= part_info->get_clone(thd))) - { - res= -1; - goto end_with_restore_list; - } - thd->work_part_info= part_info; - } -#endif - - if (select_lex->item_list.elements) // With select - { - select_result *result; - - /* - CREATE TABLE...IGNORE/REPLACE SELECT... can be unsafe, unless - ORDER BY PRIMARY KEY clause is used in SELECT statement. We therefore - use row based logging if mixed or row based logging is available. - TODO: Check if the order of the output of the select statement is - deterministic. Waiting for BUG#42415 - */ - if(lex->ignore) - lex->set_stmt_unsafe(LEX::BINLOG_STMT_UNSAFE_CREATE_IGNORE_SELECT); - - if(lex->duplicates == DUP_REPLACE) - lex->set_stmt_unsafe(LEX::BINLOG_STMT_UNSAFE_CREATE_REPLACE_SELECT); - - /* - If: - a) we inside an SP and there was NAME_CONST substitution, - b) binlogging is on (STMT mode), - c) we log the SP as separate statements - raise a warning, as it may cause problems - (see 'NAME_CONST issues' in 'Binary Logging of Stored Programs') - */ - if (thd->query_name_consts && mysql_bin_log.is_open() && - thd->wsrep_binlog_format() == BINLOG_FORMAT_STMT && - !mysql_bin_log.is_query_in_union(thd, thd->query_id)) - { - List_iterator_fast it(select_lex->item_list); - Item *item; - uint splocal_refs= 0; - /* Count SP local vars in the top-level SELECT list */ - while ((item= it++)) - { - if (item->get_item_splocal()) - splocal_refs++; - } - /* - If it differs from number of NAME_CONST substitution applied, - we may have a SOME_FUNC(NAME_CONST()) in the SELECT list, - that may cause a problem with binary log (see BUG#35383), - raise a warning. - */ - if (splocal_refs != thd->query_name_consts) - push_warning(thd, - Sql_condition::WARN_LEVEL_WARN, - ER_UNKNOWN_ERROR, -"Invoked routine ran a statement that may cause problems with " -"binary log, see 'NAME_CONST issues' in 'Binary Logging of Stored Programs' " -"section of the manual."); - } - - select_lex->options|= SELECT_NO_UNLOCK; - unit->set_limit(select_lex); - - /* - Disable non-empty MERGE tables with CREATE...SELECT. Too - complicated. See Bug #26379. Empty MERGE tables are read-only - and don't allow CREATE...SELECT anyway. - */ - if (create_info.used_fields & HA_CREATE_USED_UNION) - { - my_error(ER_WRONG_OBJECT, MYF(0), create_table->db, - create_table->table_name, "BASE TABLE"); - res= 1; - goto end_with_restore_list; - } - - /* Copy temporarily the statement flags to thd for lock_table_names() */ - uint save_thd_create_info_options= thd->lex->create_info.options; - thd->lex->create_info.options|= create_info.options; - res= open_and_lock_tables(thd, create_info, lex->query_tables, TRUE, 0); - thd->lex->create_info.options= save_thd_create_info_options; - if (res) - { - /* Got error or warning. Set res to 1 if error */ - if (!(res= thd->is_error())) - my_ok(thd); // CREATE ... 
IF NOT EXISTS - goto end_with_restore_list; - } - - /* Ensure we don't try to create something from which we select from */ - if (create_info.or_replace() && !create_info.tmp_table()) - { - TABLE_LIST *duplicate; - if ((duplicate= unique_table(thd, lex->query_tables, - lex->query_tables->next_global, - CHECK_DUP_FOR_CREATE))) - { - update_non_unique_table_error(lex->query_tables, "CREATE", - duplicate); - res= TRUE; - goto end_with_restore_list; - } - } - { - /* - Remove target table from main select and name resolution - context. This can't be done earlier as it will break view merging in - statements like "CREATE TABLE IF NOT EXISTS existing_view SELECT". - */ - lex->unlink_first_table(&link_to_local); - - /* Store reference to table in case of LOCK TABLES */ - create_info.table= create_table->table; - - /* - select_create is currently not re-execution friendly and - needs to be created for every execution of a PS/SP. - Note: In wsrep-patch, CTAS is handled like a regular transaction. - */ - if ((result= new (thd->mem_root) select_create(thd, create_table, - &create_info, - &alter_info, - select_lex->item_list, - lex->duplicates, - lex->ignore, - select_tables))) - { - /* - CREATE from SELECT give its SELECT_LEX for SELECT, - and item_list belong to SELECT - */ - if (!(res= handle_select(thd, lex, result, 0))) - { - if (create_info.tmp_table()) - thd->variables.option_bits|= OPTION_KEEP_LOG; - } - delete result; - } - lex->link_first_table_back(create_table, link_to_local); - } - } - else - { - /* regular create */ - if (create_info.like()) - { - /* CREATE TABLE ... LIKE ... */ - res= mysql_create_like_table(thd, create_table, select_tables, - &create_info); - } - else - { - /* - In STATEMENT format, we probably have to replicate also temporary - tables, like mysql replication does. Also check if the requested - engine is allowed/supported. 
- */ - if (WSREP(thd) && - !check_engine(thd, create_table->db, create_table->table_name, - &create_info) && - (!thd->is_current_stmt_binlog_format_row() || - !create_info.tmp_table())) - { - WSREP_TO_ISOLATION_BEGIN(create_table->db, create_table->table_name, NULL) - } - /* Regular CREATE TABLE */ - res= mysql_create_table(thd, create_table, &create_info, &alter_info); - } - if (!res) - { - /* So that CREATE TEMPORARY TABLE gets to binlog at commit/rollback */ - if (create_info.tmp_table()) - thd->variables.option_bits|= OPTION_KEEP_LOG; - my_ok(thd); - } - } - -end_with_restore_list: - break; - } case SQLCOM_CREATE_INDEX: case SQLCOM_DROP_INDEX: /* @@ -5699,6 +5431,7 @@ end_with_restore_list: case SQLCOM_OPTIMIZE: case SQLCOM_REPAIR: case SQLCOM_TRUNCATE: + case SQLCOM_CREATE_TABLE: case SQLCOM_ALTER_TABLE: DBUG_ASSERT(first_table == all_tables && first_table != 0); /* fall through */ diff --git a/sql/sql_table.cc b/sql/sql_table.cc index ca78c011866..2696513d95f 100644 --- a/sql/sql_table.cc +++ b/sql/sql_table.cc @@ -10116,3 +10116,303 @@ bool check_engine(THD *thd, const char *db_name, DBUG_RETURN(false); } + + +bool Sql_cmd_create_table::execute(THD *thd) +{ + DBUG_ENTER("Sql_cmd_create_table::execute"); + LEX *lex= thd->lex; + TABLE_LIST *all_tables= lex->query_tables; + SELECT_LEX *select_lex= &lex->select_lex; + TABLE_LIST *first_table= select_lex->table_list.first; + DBUG_ASSERT(first_table == all_tables && first_table != 0); + bool link_to_local; + TABLE_LIST *create_table= first_table; + TABLE_LIST *select_tables= lex->create_last_non_select_table->next_global; + /* most outer SELECT_LEX_UNIT of query */ + SELECT_LEX_UNIT *unit= &lex->unit; + int res= 0; + + const bool used_engine= lex->create_info.used_fields & HA_CREATE_USED_ENGINE; + DBUG_ASSERT((m_storage_engine_name.str != NULL) == used_engine); + if (used_engine) + { + if (resolve_storage_engine_with_error(thd, &lex->create_info.db_type, + lex->create_info.tmp_table())) + DBUG_RETURN(true); // Engine not found, substitution is not allowed + + if (!lex->create_info.db_type) // Not found, but substitution is allowed + { + lex->create_info.use_default_db_type(thd); + push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN, + ER_WARN_USING_OTHER_HANDLER, + ER_THD(thd, ER_WARN_USING_OTHER_HANDLER), + hton_name(lex->create_info.db_type)->str, + create_table->table_name); + } + } + + if (lex->tmp_table()) + { + status_var_decrement(thd->status_var.com_stat[SQLCOM_CREATE_TABLE]); + status_var_increment(thd->status_var.com_create_tmp_table); + } + + /* + Code below (especially in mysql_create_table() and select_create + methods) may modify HA_CREATE_INFO structure in LEX, so we have to + use a copy of this structure to make execution prepared statement- + safe. A shallow copy is enough as this code won't modify any memory + referenced from this structure. + */ + Table_specification_st create_info(lex->create_info); + /* + We need to copy alter_info for the same reasons of re-execution + safety, only in case of Alter_info we have to do (almost) a deep + copy. + */ + Alter_info alter_info(lex->alter_info, thd->mem_root); + + if (thd->is_fatal_error) + { + /* If out of memory when creating a copy of alter_info. 
*/ + res= 1; + goto end_with_restore_list; + } + + /* Check privileges */ + if ((res= create_table_precheck(thd, select_tables, create_table))) + goto end_with_restore_list; + + /* Might have been updated in create_table_precheck */ + create_info.alias= create_table->alias; + + /* Fix names if symlinked or relocated tables */ + if (append_file_to_dir(thd, &create_info.data_file_name, + create_table->table_name) || + append_file_to_dir(thd, &create_info.index_file_name, + create_table->table_name)) + goto end_with_restore_list; + + /* + If no engine type was given, work out the default now + rather than at parse-time. + */ + if (!(create_info.used_fields & HA_CREATE_USED_ENGINE)) + create_info.use_default_db_type(thd); + /* + If we are using SET CHARSET without DEFAULT, add an implicit + DEFAULT to not confuse old users. (This may change). + */ + if ((create_info.used_fields & + (HA_CREATE_USED_DEFAULT_CHARSET | HA_CREATE_USED_CHARSET)) == + HA_CREATE_USED_CHARSET) + { + create_info.used_fields&= ~HA_CREATE_USED_CHARSET; + create_info.used_fields|= HA_CREATE_USED_DEFAULT_CHARSET; + create_info.default_table_charset= create_info.table_charset; + create_info.table_charset= 0; + } + + /* + If we are a slave, we should add OR REPLACE if we don't have + IF EXISTS. This will help a slave to recover from + CREATE TABLE OR EXISTS failures by dropping the table and + retrying the create. + */ + if (thd->slave_thread && + slave_ddl_exec_mode_options == SLAVE_EXEC_MODE_IDEMPOTENT && + !lex->create_info.if_not_exists()) + { + create_info.add(DDL_options_st::OPT_OR_REPLACE); + create_info.add(DDL_options_st::OPT_OR_REPLACE_SLAVE_GENERATED); + } + +#ifdef WITH_PARTITION_STORAGE_ENGINE + { + partition_info *part_info= thd->lex->part_info; + if (part_info && !(part_info= part_info->get_clone(thd))) + { + res= -1; + goto end_with_restore_list; + } + thd->work_part_info= part_info; + } +#endif + + if (select_lex->item_list.elements) // With select + { + select_result *result; + + /* + CREATE TABLE...IGNORE/REPLACE SELECT... can be unsafe, unless + ORDER BY PRIMARY KEY clause is used in SELECT statement. We therefore + use row based logging if mixed or row based logging is available. + TODO: Check if the order of the output of the select statement is + deterministic. Waiting for BUG#42415 + */ + if(lex->ignore) + lex->set_stmt_unsafe(LEX::BINLOG_STMT_UNSAFE_CREATE_IGNORE_SELECT); + + if(lex->duplicates == DUP_REPLACE) + lex->set_stmt_unsafe(LEX::BINLOG_STMT_UNSAFE_CREATE_REPLACE_SELECT); + + /* + If: + a) we inside an SP and there was NAME_CONST substitution, + b) binlogging is on (STMT mode), + c) we log the SP as separate statements + raise a warning, as it may cause problems + (see 'NAME_CONST issues' in 'Binary Logging of Stored Programs') + */ + if (thd->query_name_consts && mysql_bin_log.is_open() && + thd->wsrep_binlog_format() == BINLOG_FORMAT_STMT && + !mysql_bin_log.is_query_in_union(thd, thd->query_id)) + { + List_iterator_fast it(select_lex->item_list); + Item *item; + uint splocal_refs= 0; + /* Count SP local vars in the top-level SELECT list */ + while ((item= it++)) + { + if (item->get_item_splocal()) + splocal_refs++; + } + /* + If it differs from number of NAME_CONST substitution applied, + we may have a SOME_FUNC(NAME_CONST()) in the SELECT list, + that may cause a problem with binary log (see BUG#35383), + raise a warning. 
+ */ + if (splocal_refs != thd->query_name_consts) + push_warning(thd, + Sql_condition::WARN_LEVEL_WARN, + ER_UNKNOWN_ERROR, +"Invoked routine ran a statement that may cause problems with " +"binary log, see 'NAME_CONST issues' in 'Binary Logging of Stored Programs' " +"section of the manual."); + } + + select_lex->options|= SELECT_NO_UNLOCK; + unit->set_limit(select_lex); + + /* + Disable non-empty MERGE tables with CREATE...SELECT. Too + complicated. See Bug #26379. Empty MERGE tables are read-only + and don't allow CREATE...SELECT anyway. + */ + if (create_info.used_fields & HA_CREATE_USED_UNION) + { + my_error(ER_WRONG_OBJECT, MYF(0), create_table->db, + create_table->table_name, "BASE TABLE"); + res= 1; + goto end_with_restore_list; + } + + /* Copy temporarily the statement flags to thd for lock_table_names() */ + uint save_thd_create_info_options= thd->lex->create_info.options; + thd->lex->create_info.options|= create_info.options; + res= open_and_lock_tables(thd, create_info, lex->query_tables, TRUE, 0); + thd->lex->create_info.options= save_thd_create_info_options; + if (res) + { + /* Got error or warning. Set res to 1 if error */ + if (!(res= thd->is_error())) + my_ok(thd); // CREATE ... IF NOT EXISTS + goto end_with_restore_list; + } + + /* Ensure we don't try to create something from which we select from */ + if (create_info.or_replace() && !create_info.tmp_table()) + { + TABLE_LIST *duplicate; + if ((duplicate= unique_table(thd, lex->query_tables, + lex->query_tables->next_global, + CHECK_DUP_FOR_CREATE))) + { + update_non_unique_table_error(lex->query_tables, "CREATE", + duplicate); + res= TRUE; + goto end_with_restore_list; + } + } + { + /* + Remove target table from main select and name resolution + context. This can't be done earlier as it will break view merging in + statements like "CREATE TABLE IF NOT EXISTS existing_view SELECT". + */ + lex->unlink_first_table(&link_to_local); + + /* Store reference to table in case of LOCK TABLES */ + create_info.table= create_table->table; + + /* + select_create is currently not re-execution friendly and + needs to be created for every execution of a PS/SP. + Note: In wsrep-patch, CTAS is handled like a regular transaction. + */ + if ((result= new (thd->mem_root) select_create(thd, create_table, + &create_info, + &alter_info, + select_lex->item_list, + lex->duplicates, + lex->ignore, + select_tables))) + { + /* + CREATE from SELECT give its SELECT_LEX for SELECT, + and item_list belong to SELECT + */ + if (!(res= handle_select(thd, lex, result, 0))) + { + if (create_info.tmp_table()) + thd->variables.option_bits|= OPTION_KEEP_LOG; + } + delete result; + } + lex->link_first_table_back(create_table, link_to_local); + } + } + else + { + /* regular create */ + if (create_info.like()) + { + /* CREATE TABLE ... LIKE ... */ + res= mysql_create_like_table(thd, create_table, select_tables, + &create_info); + } + else + { + /* + In STATEMENT format, we probably have to replicate also temporary + tables, like mysql replication does. Also check if the requested + engine is allowed/supported. 
+ */ + if (WSREP(thd) && + !check_engine(thd, create_table->db, create_table->table_name, + &create_info) && + (!thd->is_current_stmt_binlog_format_row() || + !create_info.tmp_table())) + { + WSREP_TO_ISOLATION_BEGIN(create_table->db, create_table->table_name, NULL) + } + /* Regular CREATE TABLE */ + res= mysql_create_table(thd, create_table, &create_info, &alter_info); + } + if (!res) + { + /* So that CREATE TEMPORARY TABLE gets to binlog at commit/rollback */ + if (create_info.tmp_table()) + thd->variables.option_bits|= OPTION_KEEP_LOG; + my_ok(thd); + } + } + +end_with_restore_list: + DBUG_RETURN(res); + +WSREP_ERROR_LABEL: + DBUG_RETURN(true); +} diff --git a/sql/sql_yacc.yy b/sql/sql_yacc.yy index 59cc73d88af..fe2c6b50f98 100644 --- a/sql/sql_yacc.yy +++ b/sql/sql_yacc.yy @@ -2454,6 +2454,8 @@ create: create_or_replace opt_temporary TABLE_SYM opt_if_not_exists table_ident { LEX *lex= thd->lex; + if (!(lex->m_sql_cmd= new (thd->mem_root) Sql_cmd_create_table())) + MYSQL_YYABORT; lex->create_info.init(); if (lex->set_command_with_check(SQLCOM_CREATE_TABLE, $2, $1 | $4)) MYSQL_YYABORT; @@ -2475,16 +2477,6 @@ create: { LEX *lex= thd->lex; lex->current_select= &lex->select_lex; - if ((lex->create_info.used_fields & HA_CREATE_USED_ENGINE) && - !lex->create_info.db_type) - { - lex->create_info.use_default_db_type(thd); - push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN, - ER_WARN_USING_OTHER_HANDLER, - ER_THD(thd, ER_WARN_USING_OTHER_HANDLER), - hton_name(lex->create_info.db_type)->str, - $5->table.str); - } create_table_set_open_action_and_adjust_tables(lex); } | create_or_replace opt_unique INDEX_SYM opt_if_not_exists ident @@ -5515,10 +5507,20 @@ create_table_options: ; create_table_option: - ENGINE_SYM opt_equal storage_engines + ENGINE_SYM opt_equal ident_or_text { - Lex->create_info.db_type= $3; - Lex->create_info.used_fields|= HA_CREATE_USED_ENGINE; + LEX *lex= Lex; + if (!lex->m_sql_cmd) + { + DBUG_ASSERT(lex->sql_command == SQLCOM_ALTER_TABLE); + if (!(lex->m_sql_cmd= new (thd->mem_root) Sql_cmd_alter_table())) + MYSQL_YYABORT; + } + Storage_engine_name *opt= + lex->m_sql_cmd->option_storage_engine_name(); + DBUG_ASSERT(opt); // Expect a proper Sql_cmd + *opt= Storage_engine_name($3); + lex->create_info.used_fields|= HA_CREATE_USED_ENGINE; } | MAX_ROWS opt_equal ulonglong_num { @@ -5783,21 +5785,10 @@ default_collation: storage_engines: ident_or_text { - plugin_ref plugin= ha_resolve_by_name(thd, &$1, - thd->lex->create_info.tmp_table()); - - if (plugin) - $$= plugin_hton(plugin); - else - { - if (thd->variables.sql_mode & MODE_NO_ENGINE_SUBSTITUTION) - my_yyabort_error((ER_UNKNOWN_STORAGE_ENGINE, MYF(0), $1.str)); - $$= 0; - push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN, - ER_UNKNOWN_STORAGE_ENGINE, - ER_THD(thd, ER_UNKNOWN_STORAGE_ENGINE), - $1.str); - } + if (Storage_engine_name($1). + resolve_storage_engine_with_error(thd, &$$, + thd->lex->create_info.tmp_table())) + MYSQL_YYABORT; } ; @@ -7533,11 +7524,6 @@ alter_list_item: { LEX *lex=Lex; lex->alter_info.flags|= Alter_info::ALTER_OPTIONS; - if ((lex->create_info.used_fields & HA_CREATE_USED_ENGINE) && - !lex->create_info.db_type) - { - lex->create_info.used_fields&= ~HA_CREATE_USED_ENGINE; - } } | FORCE_SYM { diff --git a/sql/table.cc b/sql/table.cc index 414196f2fb1..55b505a0cc0 100644 --- a/sql/table.cc +++ b/sql/table.cc @@ -2177,8 +2177,20 @@ static bool sql_unusable_for_discovery(THD *thd, handlerton *engine, if (create_info->data_file_name || create_info->index_file_name) return 1; // ... 
engine - if (create_info->db_type && create_info->db_type != engine) - return 1; + DBUG_ASSERT(lex->m_sql_cmd); + if (lex->create_info.used_fields & HA_CREATE_USED_ENGINE) + { + /* + TODO: we could just compare engine names here, without resolving. + But this optimization is too late for 10.1. + */ + Storage_engine_name *opt= lex->m_sql_cmd->option_storage_engine_name(); + DBUG_ASSERT(opt); // lex->m_sql_cmd must be an Sql_cmd_create_table instance + if (opt->resolve_storage_engine_with_error(thd, &create_info->db_type, + false) || + (create_info->db_type && create_info->db_type != engine)) + return 1; + } return 0; } -- cgit v1.2.1 From b003b0c934cf6b59358e31144c4f69cf34622fb8 Mon Sep 17 00:00:00 2001 From: Alexander Barkov Date: Mon, 3 Jun 2019 12:42:36 +0400 Subject: MDEV-19675 Wrong charset is chosen when opening a pre-4.1 table --- mysql-test/r/ctype_utf8_def_upgrade.result | 99 ++++++++++++++++++++++++++++++ mysql-test/t/ctype_utf8_def_upgrade.opt | 1 + mysql-test/t/ctype_utf8_def_upgrade.test | 61 ++++++++++++++++++ sql/table.cc | 5 +- 4 files changed, 164 insertions(+), 2 deletions(-) create mode 100644 mysql-test/r/ctype_utf8_def_upgrade.result create mode 100644 mysql-test/t/ctype_utf8_def_upgrade.opt create mode 100644 mysql-test/t/ctype_utf8_def_upgrade.test diff --git a/mysql-test/r/ctype_utf8_def_upgrade.result b/mysql-test/r/ctype_utf8_def_upgrade.result new file mode 100644 index 00000000000..1b921289af6 --- /dev/null +++ b/mysql-test/r/ctype_utf8_def_upgrade.result @@ -0,0 +1,99 @@ +# +# Start of 10.1 tests +# +# +# MDEV-19675 Wrong charset is chosen when opening a pre-4.1 table +# +# Test with a saved table from 3.23 +SELECT @@character_set_database; +@@character_set_database +utf8 +SET @@character_set_database="latin1"; +SELECT COUNT(*) FROM t1; +ERROR HY000: Got error 190 "Incompatible key or row definition between the MariaDB .frm file and the information in the storage engine. You have to dump an" from storage engine MyISAM +CHECK TABLE t1; +Table Op Msg_type Msg_text +test.t1 check Error Got error 190 "Incompatible key or row definition between the MariaDB .frm file and the information in the storage engine. You have to dump an" from storage engine MyISAM +test.t1 check error Corrupt +REPAIR TABLE t1; +Table Op Msg_type Msg_text +test.t1 repair Error Got error 190 "Incompatible key or row definition between the MariaDB .frm file and the information in the storage engine. 
You have to dump an" from storage engine MyISAM +test.t1 repair error Corrupt +REPAIR TABLE t1 USE_FRM; +Table Op Msg_type Msg_text +test.t1 repair status OK +SELECT COUNT(*) FROM t1; +COUNT(*) +0 +CHECK TABLE t1; +Table Op Msg_type Msg_text +test.t1 check status OK +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `Host` char(60) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL DEFAULT '', + `Db` char(64) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL DEFAULT '', + `Select_priv` enum('N','Y') NOT NULL DEFAULT 'N', + `Insert_priv` enum('N','Y') NOT NULL DEFAULT 'N', + `Update_priv` enum('N','Y') NOT NULL DEFAULT 'N', + `Delete_priv` enum('N','Y') NOT NULL DEFAULT 'N', + `Create_priv` enum('N','Y') NOT NULL DEFAULT 'N', + `Drop_priv` enum('N','Y') NOT NULL DEFAULT 'N', + `Grant_priv` enum('N','Y') NOT NULL DEFAULT 'N', + `References_priv` enum('N','Y') NOT NULL DEFAULT 'N', + `Index_priv` enum('N','Y') NOT NULL DEFAULT 'N', + `Alter_priv` enum('N','Y') NOT NULL DEFAULT 'N', + PRIMARY KEY (`Host`,`Db`) +) ENGINE=MyISAM DEFAULT CHARSET=latin1 COMMENT='Host privileges; Merged with database privileges' +DROP TABLE t1; +SET @@character_set_database=DEFAULT; +# Now do the same, but doing 'ALTER DATABASE' to create the db.opt file, +# instead of setting variables directly. +# Emulate a pre-4.1 database without db.opt +SHOW CREATE DATABASE db1; +Database Create Database +db1 CREATE DATABASE `db1` /*!40100 DEFAULT CHARACTER SET utf8 */ +USE db1; +SELECT @@character_set_database, 'taken from defaults' AS comment; +@@character_set_database comment +utf8 taken from defaults +USE test; +ALTER DATABASE db1 DEFAULT CHARACTER SET latin1; +USE db1; +SELECT @@character_set_database, 'taken from db.opt' AS comment; +@@character_set_database comment +latin1 taken from db.opt +SELECT COUNT(*) FROM t1; +ERROR HY000: Got error 190 "Incompatible key or row definition between the MariaDB .frm file and the information in the storage engine. 
You have to dump an" from storage engine MyISAM +REPAIR TABLE t1 USE_FRM; +Table Op Msg_type Msg_text +db1.t1 repair status OK +SELECT COUNT(*) FROM t1; +COUNT(*) +0 +CHECK TABLE t1; +Table Op Msg_type Msg_text +db1.t1 check status OK +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `Host` char(60) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL DEFAULT '', + `Db` char(64) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL DEFAULT '', + `Select_priv` enum('N','Y') NOT NULL DEFAULT 'N', + `Insert_priv` enum('N','Y') NOT NULL DEFAULT 'N', + `Update_priv` enum('N','Y') NOT NULL DEFAULT 'N', + `Delete_priv` enum('N','Y') NOT NULL DEFAULT 'N', + `Create_priv` enum('N','Y') NOT NULL DEFAULT 'N', + `Drop_priv` enum('N','Y') NOT NULL DEFAULT 'N', + `Grant_priv` enum('N','Y') NOT NULL DEFAULT 'N', + `References_priv` enum('N','Y') NOT NULL DEFAULT 'N', + `Index_priv` enum('N','Y') NOT NULL DEFAULT 'N', + `Alter_priv` enum('N','Y') NOT NULL DEFAULT 'N', + PRIMARY KEY (`Host`,`Db`) +) ENGINE=MyISAM DEFAULT CHARSET=latin1 COMMENT='Host privileges; Merged with database privileges' +DROP TABLE t1; +DROP DATABASE db1; +USE test; +# +# End of 10.1 tests +# diff --git a/mysql-test/t/ctype_utf8_def_upgrade.opt b/mysql-test/t/ctype_utf8_def_upgrade.opt new file mode 100644 index 00000000000..61a472b45c5 --- /dev/null +++ b/mysql-test/t/ctype_utf8_def_upgrade.opt @@ -0,0 +1 @@ +--character-set-server=utf8 diff --git a/mysql-test/t/ctype_utf8_def_upgrade.test b/mysql-test/t/ctype_utf8_def_upgrade.test new file mode 100644 index 00000000000..4751faa0622 --- /dev/null +++ b/mysql-test/t/ctype_utf8_def_upgrade.test @@ -0,0 +1,61 @@ +let $MYSQLD_DATADIR= `select @@datadir`; + +--echo # +--echo # Start of 10.1 tests +--echo # + +--echo # +--echo # MDEV-19675 Wrong charset is chosen when opening a pre-4.1 table +--echo # + +--echo # Test with a saved table from 3.23 + +SELECT @@character_set_database; +SET @@character_set_database="latin1"; +--copy_file std_data/host_old.frm $MYSQLD_DATADIR/test/t1.frm +--copy_file std_data/host_old.MYD $MYSQLD_DATADIR/test/t1.MYD +--copy_file std_data/host_old.MYI $MYSQLD_DATADIR/test/t1.MYI + +--error ER_GET_ERRNO +SELECT COUNT(*) FROM t1; +CHECK TABLE t1; +REPAIR TABLE t1; +REPAIR TABLE t1 USE_FRM; +SELECT COUNT(*) FROM t1; +CHECK TABLE t1; +SHOW CREATE TABLE t1; +DROP TABLE t1; +SET @@character_set_database=DEFAULT; + + +--echo # Now do the same, but doing 'ALTER DATABASE' to create the db.opt file, +--echo # instead of setting variables directly. 
+ +--echo # Emulate a pre-4.1 database without db.opt +--mkdir $MYSQLD_DATADIR/db1 +SHOW CREATE DATABASE db1; +USE db1; +SELECT @@character_set_database, 'taken from defaults' AS comment; +USE test; +ALTER DATABASE db1 DEFAULT CHARACTER SET latin1; +USE db1; +SELECT @@character_set_database, 'taken from db.opt' AS comment; + +--copy_file std_data/host_old.frm $MYSQLD_DATADIR/db1/t1.frm +--copy_file std_data/host_old.MYD $MYSQLD_DATADIR/db1/t1.MYD +--copy_file std_data/host_old.MYI $MYSQLD_DATADIR/db1/t1.MYI + +--error ER_GET_ERRNO +SELECT COUNT(*) FROM t1; +REPAIR TABLE t1 USE_FRM; +SELECT COUNT(*) FROM t1; +CHECK TABLE t1; +SHOW CREATE TABLE t1; +DROP TABLE t1; + +DROP DATABASE db1; +USE test; + +--echo # +--echo # End of 10.1 tests +--echo # diff --git a/sql/table.cc b/sql/table.cc index 55b505a0cc0..12b23304acd 100644 --- a/sql/table.cc +++ b/sql/table.cc @@ -1104,8 +1104,9 @@ int TABLE_SHARE::init_from_binary_frm_image(THD *thd, bool write, } if (!share->table_charset) { + const CHARSET_INFO *cs= thd->variables.collation_database; /* unknown charset in frm_image[38] or pre-3.23 frm */ - if (use_mb(default_charset_info)) + if (use_mb(cs)) { /* Warn that we may be changing the size of character columns */ sql_print_warning("'%s' had no or invalid character set, " @@ -1113,7 +1114,7 @@ int TABLE_SHARE::init_from_binary_frm_image(THD *thd, bool write, "so character column sizes may have changed", share->path.str); } - share->table_charset= default_charset_info; + share->table_charset= cs; } share->db_record_offset= 1; -- cgit v1.2.1 From c97c8c28b5c8b33d9b1b8563f4ce015c1668b7f1 Mon Sep 17 00:00:00 2001 From: Vladislav Vaintroub Date: Wed, 5 Jun 2019 19:42:21 +0200 Subject: MDEV-17103 MY_CHECK_{C,CXX}_COMPILER_FLAG do not work on with localized gcc messages Force LANG=C for MY_CHECK_{C,CXX}_COMPILER_FLAG --- cmake/check_compiler_flag.cmake | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/cmake/check_compiler_flag.cmake b/cmake/check_compiler_flag.cmake index 673361ab8fe..547325e4fa2 100644 --- a/cmake/check_compiler_flag.cmake +++ b/cmake/check_compiler_flag.cmake @@ -13,7 +13,8 @@ SET(fail_patterns FAIL_REGEX "warning:.*redefined" FAIL_REGEX "[Ww]arning: [Oo]ption" ) - +#The regex patterns above are not localized, thus LANG=C +SET(ENV{LANG} C) MACRO (MY_CHECK_C_COMPILER_FLAG flag) STRING(REGEX REPLACE "[-,= +]" "_" result "HAVE_C_${flag}") SET(SAVE_CMAKE_REQUIRED_FLAGS "${CMAKE_REQUIRED_FLAGS}") -- cgit v1.2.1 From e7695f95ae714f3168ce953fd022ddfb40f03e67 Mon Sep 17 00:00:00 2001 From: Sergey Vojtovich Date: Fri, 7 Jun 2019 12:24:27 +0400 Subject: MDEV-19360 - Disable _FORTIFY_SOURCE for ASAN builds Those two may work incorrectly together. Namely, ASAN may produce false positives or false negatives. For details see https://github.com/google/sanitizers/wiki/AddressSanitizer#faq Make SECURITY_HARDENED disabled by default if WITH_ASAN=ON Based on contribution by Eugene Kosov. 
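For illustration only (not part of this patch): the kind of interaction the
sanitizers FAQ warns about can be seen with a small stand-alone program. This
is a minimal C++ sketch and assumes a glibc-based toolchain, where
_FORTIFY_SOURCE rewrites memcpy() into a fortified __memcpy_chk() call that
may preempt or bypass the AddressSanitizer interceptor; the overflow below can
then be reported by glibc's fortification abort, or missed, instead of
producing ASAN's detailed report.

    // sketch.cc -- deliberate overflow, to compare ASAN vs. _FORTIFY_SOURCE behaviour
    // build e.g.: g++ -O2 -fsanitize=address sketch.cc
    //        vs.: g++ -O2 -D_FORTIFY_SOURCE=2 -fsanitize=address sketch.cc
    #include <cstring>

    int main()
    {
      char dst[8];
      const char src[16] = "0123456789ABCDE";
      std::memcpy(dst, src, sizeof(src));  // writes 16 bytes into an 8-byte buffer
      return dst[0];
    }

Hence the patch both undefines _FORTIFY_SOURCE for sanitized builds and turns
the SECURITY_HARDENED default to OFF when WITH_ASAN or WITH_UBSAN is enabled,
making the two options explicitly mutually exclusive.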
--- CMakeLists.txt | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 32e9b1e9498..083fa753b88 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -191,6 +191,8 @@ INCLUDE(check_compiler_flag) OPTION(WITH_ASAN "Enable address sanitizer" OFF) IF (WITH_ASAN) + # this flag might be set by default on some OS + MY_CHECK_AND_SET_COMPILER_FLAG("-U_FORTIFY_SOURCE" DEBUG RELWITHDEBINFO) # gcc 4.8.1 and new versions of clang MY_CHECK_AND_SET_COMPILER_FLAG("-fsanitize=address -O1 -Wno-error -fPIC" DEBUG RELWITHDEBINFO) @@ -216,22 +218,22 @@ ENDIF() OPTION(WITH_UBSAN "Enable undefined behavior sanitizer" OFF) IF (WITH_UBSAN) - IF(SECURITY_HARDENED) - MESSAGE(FATAL_ERROR "WITH_UBSAN and SECURITY_HARDENED are mutually exclusive") - ENDIF() - MY_CHECK_AND_SET_COMPILER_FLAG("-fsanitize=undefined" DEBUG RELWITHDEBINFO) + MY_CHECK_AND_SET_COMPILER_FLAG("-fsanitize=undefined -U_FORTIFY_SOURCE" DEBUG RELWITHDEBINFO) ENDIF() # enable security hardening features, like most distributions do # in our benchmarks that costs about ~1% of performance, depending on the load -IF(CMAKE_C_COMPILER_VERSION VERSION_LESS "4.6") +IF(CMAKE_C_COMPILER_VERSION VERSION_LESS "4.6" OR WITH_ASAN OR WITH_UBSAN) SET(security_default OFF) ELSE() SET(security_default ON) ENDIF() OPTION(SECURITY_HARDENED "Use security-enhancing compiler features (stack protector, relro, etc)" ${security_default}) IF(SECURITY_HARDENED) + IF(WITH_ASAN OR WITH_UBSAN) + MESSAGE(FATAL_ERROR "WITH_ASAN/WITH_UBSAN and SECURITY_HARDENED are mutually exclusive") + ENDIF() # security-enhancing flags MY_CHECK_AND_SET_COMPILER_FLAG("-pie -fPIC") MY_CHECK_AND_SET_COMPILER_FLAG("-Wl,-z,relro,-z,now") -- cgit v1.2.1 From c5fe1b8fc1ba401791d0c0e0c38ec5082fa97891 Mon Sep 17 00:00:00 2001 From: Thirunarayanan Balathandayuthapani Date: Wed, 12 Jun 2019 12:17:13 +0530 Subject: MDEV-16866 InnoDB fails to start upon crash recovery with "[ERROR] InnoDB: Redo log crypto: failed to decrypt log block" - If InnoDB encounters garbage or incomplete written log block during recovery then don't throw the error. Treat it as end of the log. - This kind of incomplete or empty block can be result of killing InnoDB when writing the redo log. --- storage/innobase/log/log0recv.cc | 38 +++++++++++++++++++------------------- storage/xtradb/log/log0recv.cc | 39 +++++++++++++++++++-------------------- 2 files changed, 38 insertions(+), 39 deletions(-) diff --git a/storage/innobase/log/log0recv.cc b/storage/innobase/log/log0recv.cc index a8a5f7b79e3..a86fd9fd8fd 100644 --- a/storage/innobase/log/log0recv.cc +++ b/storage/innobase/log/log0recv.cc @@ -2631,30 +2631,30 @@ recv_scan_log_recs( fprintf(stderr, "Scanned lsn no %lu\n", log_block_convert_lsn_to_no(scanned_lsn)); */ - if (no != log_block_convert_lsn_to_no(scanned_lsn) - || !log_block_checksum_is_ok_or_old_format(log_block, true)) { + if (no != log_block_convert_lsn_to_no(scanned_lsn)) { + /* Garbage or an incompletely written log block. + We will not report any error; because this can happen + when InnoDB was killed while it was writing + redo log. We simply treat this as an abrupt end of the + redo log. 
*/ + finished = true; + break; + } else if (!log_block_checksum_is_ok_or_old_format( + log_block, true)) { - if (no == log_block_convert_lsn_to_no(scanned_lsn) - && !log_block_checksum_is_ok_or_old_format( - log_block, true)) { - fprintf(stderr, - "InnoDB: Log block no %lu at" - " lsn " LSN_PF " has\n" - "InnoDB: ok header, but checksum field" - " contains %lu, should be %lu\n", - (ulong) no, - scanned_lsn, - (ulong) log_block_get_checksum( - log_block), - (ulong) log_block_calc_checksum( - log_block)); - } + fprintf(stderr, + "InnoDB: Log block no %lu at" + " lsn " LSN_PF " has\n" + "InnoDB: ok header, but checksum field" + " contains %lu, should be %lu\n", + (ulong) no, + scanned_lsn, + (ulong) log_block_get_checksum(log_block), + (ulong) log_block_calc_checksum(log_block)); maybe_encrypted = log_crypt_block_maybe_encrypted(log_block, &log_crypt_err); - /* Garbage or an incompletely written log block */ - /* Print checkpoint encryption keys if present */ log_crypt_print_checkpoint_keys(log_block); finished = TRUE; diff --git a/storage/xtradb/log/log0recv.cc b/storage/xtradb/log/log0recv.cc index 2f43a1a42a8..91a8424ebf1 100644 --- a/storage/xtradb/log/log0recv.cc +++ b/storage/xtradb/log/log0recv.cc @@ -2720,30 +2720,30 @@ recv_scan_log_recs( log_block_convert_lsn_to_no(scanned_lsn)); */ - if (no != log_block_convert_lsn_to_no(scanned_lsn) - || !log_block_checksum_is_ok_or_old_format(log_block, true)) { + if (no != log_block_convert_lsn_to_no(scanned_lsn)) { + /* Garbage or an incompletely written log block. + We will not report any error; because this can happen + when InnoDB was killed while it was writing + redo log. We simply treat this as an abrupt end of the + redo log. */ + finished = true; + break; + } else if (!log_block_checksum_is_ok_or_old_format( + log_block, true)) { - if (no == log_block_convert_lsn_to_no(scanned_lsn) - && !log_block_checksum_is_ok_or_old_format( - log_block, true)) { - fprintf(stderr, - "InnoDB: Log block no %lu at" - " lsn " LSN_PF " has\n" - "InnoDB: ok header, but checksum field" - " contains %lu, should be %lu\n", - (ulong) no, - scanned_lsn, - (ulong) log_block_get_checksum( - log_block), - (ulong) log_block_calc_checksum( - log_block)); - } + fprintf(stderr, + "InnoDB: Log block no %lu at" + " lsn " LSN_PF " has\n" + "InnoDB: ok header, but checksum field" + " contains %lu, should be %lu\n", + (ulong) no, + scanned_lsn, + (ulong) log_block_get_checksum(log_block), + (ulong) log_block_calc_checksum(log_block)); maybe_encrypted = log_crypt_block_maybe_encrypted(log_block, &log_crypt_err); - /* Garbage or an incompletely written log block */ - /* Print checkpoint encryption keys if present */ log_crypt_print_checkpoint_keys(log_block); finished = TRUE; @@ -2764,7 +2764,6 @@ recv_scan_log_recs( } break; - } if (log_block_get_flush_bit(log_block)) { -- cgit v1.2.1 From b2f76bac035b62899207d443c48da8cd614cd05b Mon Sep 17 00:00:00 2001 From: Thirunarayanan Balathandayuthapani Date: Wed, 12 Jun 2019 12:25:00 +0530 Subject: MDEV-16866 InnoDB fails to start upon crash recovery with "[ERROR] InnoDB: Redo log crypto: failed to decrypt log block" - Post-push fix to change the copyright of both xtradb and innodb file. 
--- storage/innobase/log/log0recv.cc | 2 +- storage/xtradb/log/log0recv.cc | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/storage/innobase/log/log0recv.cc b/storage/innobase/log/log0recv.cc index a86fd9fd8fd..d68f9236610 100644 --- a/storage/innobase/log/log0recv.cc +++ b/storage/innobase/log/log0recv.cc @@ -2,7 +2,7 @@ Copyright (c) 1997, 2017, Oracle and/or its affiliates. All Rights Reserved. Copyright (c) 2012, Facebook Inc. -Copyright (c) 2013, 2017, MariaDB Corporation. +Copyright (c) 2013, 2019, MariaDB Corporation. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software diff --git a/storage/xtradb/log/log0recv.cc b/storage/xtradb/log/log0recv.cc index 91a8424ebf1..0bec6f9577d 100644 --- a/storage/xtradb/log/log0recv.cc +++ b/storage/xtradb/log/log0recv.cc @@ -2,7 +2,7 @@ Copyright (c) 1997, 2017, Oracle and/or its affiliates. All Rights Reserved. Copyright (c) 2012, Facebook Inc. -Copyright (c) 2013, 2017, MariaDB Corporation. +Copyright (c) 2013, 2019, MariaDB Corporation. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software -- cgit v1.2.1