path: root/mysql-test/extra/rpl_tests
author     Sergei Golubchik <serg@mariadb.org>    2016-12-29 13:23:18 +0100
committer  Sergei Golubchik <serg@mariadb.org>    2016-12-29 13:23:18 +0100
commit     4a5d25c338a5d1d2cc16343380193d6bf25ae6ae (patch)
tree       73b84a9c8f3d5e3e3383fa79465b11f9ded512d3 /mysql-test/extra/rpl_tests
parent     48dc7cc66ef5b69fcf28ec0b2ecf0338c188cf4e (diff)
parent     c13b5011629b5ff7b969d648265002e4d1ba94c2 (diff)
download   mariadb-git-4a5d25c338a5d1d2cc16343380193d6bf25ae6ae.tar.gz
Merge branch '10.1' into 10.2
Diffstat (limited to 'mysql-test/extra/rpl_tests')
-rw-r--r--  mysql-test/extra/rpl_tests/multisource.inc  304
-rw-r--r--  mysql-test/extra/rpl_tests/rpl_binlog_errors.inc  422
-rw-r--r--  mysql-test/extra/rpl_tests/rpl_cant_read_event_incident.inc  83
-rw-r--r--  mysql-test/extra/rpl_tests/rpl_checksum.inc  334
-rw-r--r--  mysql-test/extra/rpl_tests/rpl_checksum_cache.inc  261
-rw-r--r--  mysql-test/extra/rpl_tests/rpl_corruption.inc  176
-rw-r--r--  mysql-test/extra/rpl_tests/rpl_gtid_basic.inc  568
-rw-r--r--  mysql-test/extra/rpl_tests/rpl_incident.inc  63
-rw-r--r--  mysql-test/extra/rpl_tests/rpl_init_slave_errors.inc  95
-rw-r--r--  mysql-test/extra/rpl_tests/rpl_loaddata_local.inc  232
-rw-r--r--  mysql-test/extra/rpl_tests/rpl_loadfile.inc  120
-rw-r--r--  mysql-test/extra/rpl_tests/rpl_packet.inc  183
-rw-r--r--  mysql-test/extra/rpl_tests/rpl_parallel.inc  2219
-rw-r--r--  mysql-test/extra/rpl_tests/rpl_parallel_show_binlog_events_purge_logs.inc  37
-rw-r--r--  mysql-test/extra/rpl_tests/rpl_relayrotate.inc  18
-rw-r--r--  mysql-test/extra/rpl_tests/rpl_semi_sync.inc  551
-rw-r--r--  mysql-test/extra/rpl_tests/rpl_skip_replication.inc  402
-rw-r--r--  mysql-test/extra/rpl_tests/rpl_special_charset.inc  32
-rw-r--r--  mysql-test/extra/rpl_tests/rpl_sporadic_master.inc  32
-rw-r--r--  mysql-test/extra/rpl_tests/rpl_ssl.inc  115
-rw-r--r--  mysql-test/extra/rpl_tests/rpl_stm_relay_ign_space.inc  107
-rw-r--r--  mysql-test/extra/rpl_tests/rpl_switch_stm_row_mixed.inc  631
-rw-r--r--  mysql-test/extra/rpl_tests/rpl_sync.inc  159
-rw-r--r--  mysql-test/extra/rpl_tests/rpl_temporal_format_default_to_default.inc  82
-rw-r--r--  mysql-test/extra/rpl_tests/rpl_typeconv.inc  78
25 files changed, 7304 insertions, 0 deletions
diff --git a/mysql-test/extra/rpl_tests/multisource.inc b/mysql-test/extra/rpl_tests/multisource.inc
new file mode 100644
index 00000000000..adaae775f48
--- /dev/null
+++ b/mysql-test/extra/rpl_tests/multisource.inc
@@ -0,0 +1,304 @@
+#
+# This include file is used by more than one test suite
+# (currently multisource and binlog_encryption).
+# Please check all dependent tests after modifying it
+#
+# Usage:
+# --source extra/rpl_tests/multisource.inc
+#
+# By default, the script expects the length of the 2nd binary log to be
+# $binlog_start_pos + length(Gtid_list event) + 2 x length(Binlog_checkpoint event)
+# Some tests may have a specific configuration which changes this value.
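+#
+# For example (illustrative only, using the event sizes assumed later in this
+# file: a 43-byte Gtid_list event and 44-byte Binlog_checkpoint events), that
+# expected length works out to $binlog_start_pos + 131.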
+
+#
+# Test basic replication functionality
+# in multi-source setup
+#
+
+--source include/not_embedded.inc
+--source include/have_innodb.inc
+--source include/binlog_start_pos.inc
+--let $rpl_server_count= 0
+
+--connect (slave,127.0.0.1,root,,,$SERVER_MYPORT_3)
+
+# MDEV-3984: crash/read of freed memory when changing master with named connection
+# This fails after adding the new master 'abc'; check that we do not free it twice.
+--error ER_RELAY_LOG_INIT
+change master 'abc' to relay_log_file='';
+# This fails before adding the new master; check that we do free it.
+--error ER_WRONG_ARGUMENTS
+change master 'abc2' to master_host='';
+
+
+# Start replication from the first master
+
+--replace_result $SERVER_MYPORT_1 MYPORT_1
+eval change master 'master1' to
+master_port=$SERVER_MYPORT_1,
+master_host='127.0.0.1',
+master_user='root';
+
+start slave 'master1';
+set default_master_connection = 'master1';
+--source include/wait_for_slave_to_start.inc
+
+--connect (master1,127.0.0.1,root,,,$SERVER_MYPORT_1)
+--save_master_pos
+
+--connection slave
+--sync_with_master 0,'master1'
+
+# Here and further: add an extra check on SQL thread status
+# as the normal sync is not always enough
+--source include/wait_for_sql_thread_read_all.inc
+
+# each of the 3 commands should produce
+# 'master1' status
+
+let $wait_for_all= 1;
+let $show_statement= SHOW ALL SLAVES STATUS;
+let $field= Slave_IO_State;
+let $condition= = 'Waiting for master to send event';
+--source include/wait_show_condition.inc
+
+--echo #
+--echo # Checking SHOW SLAVE 'master1' STATUS
+--echo #
+--let $status_items= Master_Port, Relay_Log_File, Slave_IO_Running, Slave_SQL_Running, Last_Errno, Last_SQL_Errno
+--let $slave_field_result_replace= /$SERVER_MYPORT_1/MYPORT_1/
+--let $slave_name= 'master1'
+--source include/show_slave_status.inc
+--let $slave_name=
+
+--echo #
+--echo # Checking SHOW SLAVE STATUS
+--echo #
+--source include/show_slave_status.inc
+
+--echo #
+--echo # Checking SHOW ALL SLAVES STATUS
+--echo #
+--let $all_slaves_status= 1
+--let $status_items= Connection_name, Master_Port, Relay_Log_File, Slave_IO_Running, Slave_SQL_Running, Last_Errno, Last_SQL_Errno, Slave_heartbeat_period
+--source include/show_slave_status.inc
+--let $all_slaves_status=
+--echo #
+
+
+# Check that replication actually works
+
+--connection master1
+
+--disable_warnings
+drop database if exists db1;
+--enable_warnings
+create database db1;
+use db1;
+create table t1 (i int auto_increment, f1 varchar(16), primary key pk (i,f1)) engine=MyISAM;
+insert into t1 (f1) values ('one'),('two');
+--save_master_pos
+
+--connection slave
+--sync_with_master 0,'master1'
+
+--sorted_result
+select * from db1.t1;
+
+--let $datadir = `SELECT @@datadir`
+
+--echo # List of relay log files in the datadir
+--list_files $datadir mysqld-relay-bin-master1.*
+
+# Check that relay logs are recognizable
+
+let binlog_start=4;
+let binlog_file=;
+source include/show_relaylog_events.inc;
+let binlog_file= mysqld-relay-bin-master1.000002;
+source include/show_relaylog_events.inc;
+
+# Try to configure connection with the same name again,
+# should get an error because the slave is running
+
+--replace_result $SERVER_MYPORT_2 MYPORT_2
+--error ER_SLAVE_MUST_STOP
+eval change master 'master1' to
+master_port=$SERVER_MYPORT_2,
+master_host='127.0.0.1',
+master_user='root';
+
+# Try to configure using the default connection name
+# (which is 'master1' at the moment),
+# again, should get an error
+
+--replace_result $SERVER_MYPORT_2 MYPORT_2
+--error ER_SLAVE_MUST_STOP
+eval change master to
+master_port=$SERVER_MYPORT_2,
+master_host='127.0.0.1',
+master_user='root';
+
+# Try to configure a connection with the same master
+# using a different name, should get a conflict
+
+--replace_result $SERVER_MYPORT_1 MYPORT_1
+--error ER_CONNECTION_ALREADY_EXISTS
+eval change master 'master2' to
+master_port=$SERVER_MYPORT_1,
+master_host='127.0.0.1',
+master_user='root';
+
+
+# Set up a proper 'default' connection to master2
+
+set default_master_connection = '';
+
+--replace_result $SERVER_MYPORT_2 MYPORT_2
+eval change master to
+master_port=$SERVER_MYPORT_2,
+master_host='127.0.0.1',
+master_user='root';
+
+start slave;
+--source include/wait_for_slave_to_start.inc
+
+--source include/wait_for_sql_thread_read_all.inc
+
+# See both connections in the same status output
+
+let $wait_for_all= 1;
+let $show_statement= SHOW ALL SLAVES STATUS;
+let $field= Slave_IO_State;
+let $condition= = 'Waiting for master to send event';
+--source include/wait_show_condition.inc
+
+--echo #
+--echo # Checking SHOW ALL SLAVES STATUS
+--echo #
+--let $all_slaves_status= 1
+--let $status_items= Connection_name, Master_Port, Relay_Log_File, Slave_IO_Running, Slave_SQL_Running, Last_Errno, Last_SQL_Errno, Slave_heartbeat_period
+--let $slave_field_result_replace= /$SERVER_MYPORT_1/MYPORT_1/ /$SERVER_MYPORT_2/MYPORT_2/
+--source include/show_slave_status.inc
+--let $all_slaves_status=
+--echo #
+
+# Check that replication from two servers actually works
+
+--connection master1
+
+insert into t1 (f1) values ('three');
+--save_master_pos
+
+--connect (master2,127.0.0.1,root,,,$SERVER_MYPORT_2)
+
+--disable_warnings
+drop database if exists db2;
+--enable_warnings
+create database db2;
+use db2;
+create table t1 (pk int auto_increment primary key, f1 int) engine=InnoDB;
+begin;
+insert into t1 (f1) values (1),(2);
+
+--connection slave
+--sync_with_master 0,'master1'
+
+--connection master2
+--save_master_pos
+
+--connection slave
+--sync_with_master 0
+--sorted_result
+select * from db1.t1;
+select * from db2.t1;
+
+--connection master2
+commit;
+--save_master_pos
+
+--connection slave
+--sync_with_master 0
+--sorted_result
+select * from db2.t1;
+
+# Flush and purge logs on one master,
+# make sure slaves don't get confused
+
+--connection master1
+flush logs;
+--source include/wait_for_binlog_checkpoint.inc
+--save_master_pos
+--connection slave
+--sync_with_master 0, 'master1'
+
+--connection master1
+purge binary logs to 'master-bin.000002';
+# Additional events: 43 (Gtid_list) + 2 x 44 (Binlog_checkpoint) = 131
+let filesize=`select $binlog_start_pos+131`;
+--replace_result $filesize filesize
+show binary logs;
+insert into t1 (f1) values ('four');
+create table db1.t3 (f1 int) engine=InnoDB;
+--save_master_pos
+
+--connection slave
+--sync_with_master 0,'master1'
+
+--source include/wait_for_sql_thread_read_all.inc
+
+let $wait_for_all= 1;
+let $show_statement= SHOW ALL SLAVES STATUS;
+let $field= Slave_IO_State;
+let $condition= = 'Waiting for master to send event';
+--source include/wait_show_condition.inc
+
+--echo #
+--echo # Checking SHOW ALL SLAVES STATUS
+--echo #
+--let $all_slaves_status= 1
+--let $status_items= Connection_name, Master_Port, Relay_Log_File, Slave_IO_Running, Slave_SQL_Running, Last_Errno, Last_SQL_Errno, Slave_heartbeat_period
+--let $slave_field_result_replace= /$SERVER_MYPORT_1/MYPORT_1/ /$SERVER_MYPORT_2/MYPORT_2/
+--source include/show_slave_status.inc
+--let $all_slaves_status=
+--echo #
+
+--sorted_result
+select * from db1.t1;
+
+# This should show relay log events for the default master
+# (the one with the empty name)
+let binlog_file=;
+source include/show_relaylog_events.inc;
+let binlog_file= mysqld-relay-bin.000002;
+source include/show_relaylog_events.inc;
+
+# Make sure we don't lose control over replication connections
+# after reconnecting to the slave
+
+--disconnect slave
+--connect (slave,127.0.0.1,root,,,$SERVER_MYPORT_3)
+
+stop slave io_thread;
+show status like 'Slave_running';
+set default_master_connection = 'master1';
+show status like 'Slave_running';
+
+# Cleanup
+
+drop database db1;
+drop database db2;
+
+--source include/reset_master_slave.inc
+--disconnect slave
+
+--connection master1
+drop database db1;
+--source include/reset_master_slave.inc
+--disconnect master1
+
+--connection master2
+drop database db2;
+--source include/reset_master_slave.inc
+--disconnect master2
+
diff --git a/mysql-test/extra/rpl_tests/rpl_binlog_errors.inc b/mysql-test/extra/rpl_tests/rpl_binlog_errors.inc
new file mode 100644
index 00000000000..c595d70daa1
--- /dev/null
+++ b/mysql-test/extra/rpl_tests/rpl_binlog_errors.inc
@@ -0,0 +1,422 @@
+#
+# This include file is used by more than one test suite
+# (currently rpl and binlog_encryption).
+# Please check all dependent tests after modifying it
+#
+# Usage:
+# --let $binlog_limit= X[,Y] # optional
+#
+# The semantics of the value are the same as in include/show_binlog_events.inc,
+# which the script calls as a part of the test flow.
+# The goal is to print the event demonstrating the triggered error,
+# so normally Y should be 1 (print the exact event only);
+# however, depending on test-specific server options, the offset X
+# can be different.
+#
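+# A minimal calling sketch (illustrative; 4,1 is also the default used below
+# when the caller does not set the variable):
+#
+#   --let $binlog_limit= 4,1
+#   --source extra/rpl_tests/rpl_binlog_errors.inc
+#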
+
+# BUG#46166: MYSQL_BIN_LOG::new_file_impl is not propagating error
+# when generating new name.
+#
+# WHY
+# ===
+#
+# We want to check whether error is reported or not when
+# new_file_impl fails (this may happen when rotation is not
+# possible because there is some problem finding a
+# unique filename).
+#
+# HOW
+# ===
+#
+# Test cases are documented inline.
+
+-- source include/have_innodb.inc
+-- source include/have_debug.inc
+-- source include/master-slave.inc
+
+-- echo #######################################################################
+-- echo ####################### PART 1: MASTER TESTS ##########################
+-- echo #######################################################################
+
+
+### ACTION: stopping slave as it is not needed for the first part of
+### the test
+
+-- connection slave
+-- source include/stop_slave.inc
+-- connection master
+
+call mtr.add_suppression("Can't generate a unique log-filename");
+call mtr.add_suppression("Writing one row to the row-based binary log failed.*");
+call mtr.add_suppression("Error writing file .*");
+
+SET @old_debug= @@global.debug;
+
+### ACTION: create a large file (> 4096 bytes) that will be later used
+### in LOAD DATA INFILE to check binlog errors in its vicinity
+-- let $load_file= $MYSQLTEST_VARDIR/tmp/bug_46166.data
+-- let $MYSQLD_DATADIR= `select @@datadir`
+-- replace_result $MYSQLTEST_VARDIR MYSQLTEST_VARDIR
+-- eval SELECT repeat('x',8192) INTO OUTFILE '$load_file'
+
+### ACTION: create a small file (< 4096 bytes) that will be later used
+### in LOAD DATA INFILE to check for absence of binlog errors
+### when loading this file does not force flushing and
+### rotating the binary log
+-- let $load_file2= $MYSQLTEST_VARDIR/tmp/bug_46166-2.data
+-- let $MYSQLD_DATADIR= `select @@datadir`
+-- replace_result $MYSQLTEST_VARDIR MYSQLTEST_VARDIR
+-- eval SELECT repeat('x',10) INTO OUTFILE '$load_file2'
+
+RESET MASTER;
+
+-- echo ###################### TEST #1
+
+### ASSERTION: no problem flushing logs (should show two binlogs)
+FLUSH LOGS;
+-- echo # assert: must show two binlogs
+-- source include/show_binary_logs.inc
+
+-- echo ###################### TEST #2
+
+### ASSERTION: check that FLUSH LOGS actually fails and reports
+### failure back to the user if find_uniq_filename fails
+### (should show just one binlog)
+
+RESET MASTER;
+SET GLOBAL debug_dbug="+d,error_unique_log_filename";
+-- error ER_NO_UNIQUE_LOGFILE
+FLUSH LOGS;
+-- echo # assert: must show one binlog
+-- source include/show_binary_logs.inc
+
+### ACTION: clean up and move to next test
+SET GLOBAL debug_dbug=@old_debug;
+RESET MASTER;
+
+-- echo ###################### TEST #3
+
+### ACTION: create some tables (t1, t2, t4) and insert some values in
+### table t1
+CREATE TABLE t1 (a INT);
+CREATE TABLE t2 (a VARCHAR(16384)) Engine=InnoDB;
+CREATE TABLE t4 (a VARCHAR(16384));
+INSERT INTO t1 VALUES (1);
+RESET MASTER;
+
+### ASSERTION: we force rotation of the binary log because it exceeds
+### the max_binlog_size option (should show two binary
+### logs)
+
+-- replace_result $MYSQLTEST_VARDIR MYSQLTEST_VARDIR
+-- eval LOAD DATA INFILE '$load_file' INTO TABLE t2
+
+# shows two binary logs
+-- echo # assert: must show two binlog
+-- source include/show_binary_logs.inc
+
+# clean up the table and the binlog to be used in next part of test
+SET GLOBAL debug_dbug=@old_debug;
+DELETE FROM t2;
+RESET MASTER;
+
+-- echo ###################### TEST #4
+
+### ASSERTION: load the big file into a transactional table and check
+### that it reports an error. The table will contain the
+### changes performed despite the fact that it reported an
+### error.
+
+SET GLOBAL debug_dbug="+d,error_unique_log_filename";
+-- replace_result $MYSQLTEST_VARDIR MYSQLTEST_VARDIR
+-- error ER_NO_UNIQUE_LOGFILE
+-- eval LOAD DATA INFILE '$load_file' INTO TABLE t2
+
+# show table
+-- echo # assert: must show one entry
+SELECT count(*) FROM t2;
+
+# clean up the table and the binlog to be used in next part of test
+SET GLOBAL debug_dbug=@old_debug;
+DELETE FROM t2;
+RESET MASTER;
+
+-- echo ###################### TEST #5
+
+### ASSERTION: load the small file into a transactional table and
+### check that it succeeds
+
+SET GLOBAL debug_dbug="+d,error_unique_log_filename";
+-- replace_result $MYSQLTEST_VARDIR MYSQLTEST_VARDIR
+-- eval LOAD DATA INFILE '$load_file2' INTO TABLE t2
+
+# show table
+-- echo # assert: must show one entry
+SELECT count(*) FROM t2;
+
+# clean up the table and the binlog to be used in next part of test
+SET GLOBAL debug_dbug=@old_debug;
+DELETE FROM t2;
+RESET MASTER;
+
+-- echo ###################### TEST #6
+
+### ASSERTION: check that even when one is using a transactional table
+### and explicit transactions (no autocommit), if rotation
+### fails we get the error. The transaction is not rolled back
+### because rotation happens after the commit.
+
+SET GLOBAL debug_dbug="+d,error_unique_log_filename";
+SET AUTOCOMMIT=0;
+INSERT INTO t2 VALUES ('muse');
+-- replace_result $MYSQLTEST_VARDIR MYSQLTEST_VARDIR
+-- eval LOAD DATA INFILE '$load_file' INTO TABLE t2
+INSERT INTO t2 VALUES ('muse');
+-- error ER_NO_UNIQUE_LOGFILE
+COMMIT;
+
+### ACTION: Show the contents of the table after the test
+-- echo # assert: must show three entries
+SELECT count(*) FROM t2;
+
+### ACTION: clean up and move to the next test
+SET AUTOCOMMIT= 1;
+SET GLOBAL debug_dbug=@old_debug;
+DELETE FROM t2;
+RESET MASTER;
+
+-- echo ###################### TEST #7
+
+### ASSERTION: check that on a non-transactional table, if rotation
+### fails then an error is reported and an incident event
+### is written to the current binary log.
+
+SET GLOBAL debug_dbug="+d,error_unique_log_filename";
+SELECT count(*) FROM t4;
+-- replace_result $MYSQLTEST_VARDIR MYSQLTEST_VARDIR
+-- error ER_NO_UNIQUE_LOGFILE
+-- eval LOAD DATA INFILE '$load_file' INTO TABLE t4
+
+-- echo # assert: must show 1 entry
+SELECT count(*) FROM t4;
+
+-- echo ### check that the incident event is written to the current log
+SET GLOBAL debug_dbug=@old_debug;
+if (!$binlog_limit)
+{
+ -- let $binlog_limit= 4,1
+}
+-- source include/show_binlog_events.inc
+
+# clean up and move to next test
+DELETE FROM t4;
+RESET MASTER;
+
+-- echo ###################### TEST #8
+
+### ASSERTION: check that the statements fail with an error but still
+### succeed in changing the data.
+
+SET GLOBAL debug_dbug="+d,error_unique_log_filename";
+-- echo # must show 0 entries
+SELECT count(*) FROM t4;
+SELECT count(*) FROM t2;
+
+-- replace_result $MYSQLTEST_VARDIR MYSQLTEST_VARDIR
+-- error ER_NO_UNIQUE_LOGFILE
+-- eval LOAD DATA INFILE '$load_file' INTO TABLE t4
+-- replace_result $MYSQLTEST_VARDIR MYSQLTEST_VARDIR
+-- error ER_NO_UNIQUE_LOGFILE
+-- eval LOAD DATA INFILE '$load_file' INTO TABLE t2
+-- error ER_NO_UNIQUE_LOGFILE
+INSERT INTO t2 VALUES ('aaa'), ('bbb'), ('ccc');
+
+-- echo # INFO: Count(*) Before Offending DELETEs
+-- echo # assert: must show 1 entry
+SELECT count(*) FROM t4;
+-- echo # assert: must show 4 entries
+SELECT count(*) FROM t2;
+
+-- error ER_NO_UNIQUE_LOGFILE
+DELETE FROM t4;
+-- error ER_NO_UNIQUE_LOGFILE
+DELETE FROM t2;
+
+-- echo # INFO: Count(*) After Offending DELETEs
+-- echo # assert: must show zero entries
+SELECT count(*) FROM t4;
+SELECT count(*) FROM t2;
+
+# remove fault injection
+SET GLOBAL debug_dbug=@old_debug;
+
+-- echo ###################### TEST #9
+
+### ASSERTION: check that if we disable binlogging, then statements
+### succeed.
+SET GLOBAL debug_dbug="+d,error_unique_log_filename";
+SET SQL_LOG_BIN=0;
+INSERT INTO t2 VALUES ('aaa'), ('bbb'), ('ccc'), ('ddd');
+INSERT INTO t4 VALUES ('eee'), ('fff'), ('ggg'), ('hhh');
+-- echo # assert: must show four entries
+SELECT count(*) FROM t2;
+SELECT count(*) FROM t4;
+DELETE FROM t2;
+DELETE FROM t4;
+-- echo # assert: must show zero entries
+SELECT count(*) FROM t2;
+SELECT count(*) FROM t4;
+SET SQL_LOG_BIN=1;
+SET GLOBAL debug_dbug=@old_debug;
+
+-- echo ###################### TEST #10
+
+### ASSERTION: check that error is reported if there is a failure
+### while registering the index file and the binary log
+### file or failure to write the rotate event.
+
+call mtr.add_suppression("MYSQL_BIN_LOG::open failed to sync the index file.");
+call mtr.add_suppression("Could not open .*");
+
+RESET MASTER;
+SHOW WARNINGS;
+
+# +d,fault_injection_registering_index => injects fault on MYSQL_BIN_LOG::open
+SET GLOBAL debug_dbug="+d,fault_injection_registering_index";
+-- replace_regex /\.[\\\/]master/master/
+-- error ER_CANT_OPEN_FILE
+FLUSH LOGS;
+SET GLOBAL debug_dbug="-d,fault_injection_registering_index";
+
+-- error ER_NO_BINARY_LOGGING
+SHOW BINARY LOGS;
+
+# issue some statements and check that they don't fail
+CREATE TABLE t5 (a INT);
+INSERT INTO t4 VALUES ('bbbbb');
+INSERT INTO t2 VALUES ('aaaaa');
+DELETE FROM t4;
+DELETE FROM t2;
+DROP TABLE t5;
+
+-- echo ###################### TEST #11
+
+### ASSERTION: check that error is reported if there is a failure
+### while opening the index file and the binary log file or
+### failure to write the rotate event.
+
+# restart the server so that we have binlog again
+--let $rpl_server_number= 1
+--source include/rpl_restart_server.inc
+
+# +d,fault_injection_openning_index => injects fault on MYSQL_BIN_LOG::open_index_file
+SET GLOBAL debug_dbug="+d,fault_injection_openning_index";
+-- replace_regex /\.[\\\/]master/master/
+-- error ER_CANT_OPEN_FILE
+FLUSH LOGS;
+SET GLOBAL debug_dbug="-d,fault_injection_openning_index";
+
+-- error ER_FLUSH_MASTER_BINLOG_CLOSED
+RESET MASTER;
+
+# issue some statements and check that they don't fail
+CREATE TABLE t5 (a INT);
+INSERT INTO t4 VALUES ('bbbbb');
+INSERT INTO t2 VALUES ('aaaaa');
+DELETE FROM t4;
+DELETE FROM t2;
+DROP TABLE t5;
+
+# restart the server so that we have binlog again
+--let $rpl_server_number= 1
+--source include/rpl_restart_server.inc
+
+-- echo ###################### TEST #12
+
+### ASSERTION: check that error is reported if there is a failure
+### while writing the rotate event when creating a new log
+### file.
+
+# +d,fault_injection_new_file_rotate_event => injects fault on MYSQL_BIN_LOG::new_file_impl
+SET GLOBAL debug_dbug="+d,fault_injection_new_file_rotate_event";
+-- error ER_ERROR_ON_WRITE
+FLUSH LOGS;
+SET GLOBAL debug_dbug="-d,fault_injection_new_file_rotate_event";
+
+-- error ER_FLUSH_MASTER_BINLOG_CLOSED
+RESET MASTER;
+
+# issue some statements and check that they don't fail
+CREATE TABLE t5 (a INT);
+INSERT INTO t4 VALUES ('bbbbb');
+INSERT INTO t2 VALUES ('aaaaa');
+DELETE FROM t4;
+DELETE FROM t2;
+DROP TABLE t5;
+
+# restart the server so that we have binlog again
+--let $rpl_server_number= 1
+--source include/rpl_restart_server.inc
+
+## clean up
+DROP TABLE t1, t2, t4;
+RESET MASTER;
+
+# restart slave again
+-- connection slave
+-- source include/start_slave.inc
+-- connection master
+
+-- echo #######################################################################
+-- echo ####################### PART 2: SLAVE TESTS ###########################
+-- echo #######################################################################
+
+### setup
+--source include/rpl_reset.inc
+-- connection slave
+
+# slave suppressions
+
+call mtr.add_suppression("Slave I/O: Relay log write failure: could not queue event from master.*");
+call mtr.add_suppression("Error writing file .*");
+call mtr.add_suppression("Could not open .*");
+call mtr.add_suppression("MYSQL_BIN_LOG::open failed to sync the index file.");
+call mtr.add_suppression("Can't generate a unique log-filename .*");
+-- echo ###################### TEST #13
+
+#### ASSERTION: check against unique log filename error
+-- let $io_thd_injection_fault_flag= error_unique_log_filename
+-- let $slave_io_errno= 1595
+-- let $show_slave_io_error= 1
+-- source include/io_thd_fault_injection.inc
+
+-- echo ###################### TEST #14
+
+#### ASSERTION: check against rotate failing
+-- let $io_thd_injection_fault_flag= fault_injection_new_file_rotate_event
+-- let $slave_io_errno= 1595
+-- let $show_slave_io_error= 1
+-- source include/io_thd_fault_injection.inc
+
+-- echo ###################### TEST #15
+
+#### ASSERTION: check against relay log open failure
+-- let $io_thd_injection_fault_flag= fault_injection_registering_index
+-- let $slave_io_errno= 1595
+-- let $show_slave_io_error= 1
+-- source include/io_thd_fault_injection.inc
+
+-- echo ###################### TEST #16
+
+#### ASSERTION: check against relay log index open failure
+-- let $io_thd_injection_fault_flag= fault_injection_openning_index
+-- let $slave_io_errno= 1595
+-- let $show_slave_io_error= 1
+-- source include/io_thd_fault_injection.inc
+
+### clean up
+-- source include/stop_slave_sql.inc
+RESET SLAVE;
+RESET MASTER;
+--let $rpl_only_running_threads= 1
+--source include/rpl_end.inc
diff --git a/mysql-test/extra/rpl_tests/rpl_cant_read_event_incident.inc b/mysql-test/extra/rpl_tests/rpl_cant_read_event_incident.inc
new file mode 100644
index 00000000000..a9534a999e2
--- /dev/null
+++ b/mysql-test/extra/rpl_tests/rpl_cant_read_event_incident.inc
@@ -0,0 +1,83 @@
+#
+# This include file is used by more than one test suite
+# (currently rpl and binlog_encryption).
+# Please check all dependent tests after modifying it
+#
+
+#
+# Bug#11747416 : 32228 A disk full makes binary log corrupt.
+#
+#
+# The test demonstrates how an error in reading from the binlog is propagated
+# to the slave and reported there.
+# The conditions for the bug include a crash at a time when the last event was
+# only partly written to the binlog. With the fixes such an event is no longer
+# sent out; instead the dump thread sends a proper error message.
+#
+# The crash itself is not simulated. A binlog with a partly written event at
+# its end is installed and replication is started from it.
+#
+
+--source include/master-slave.inc
+--source include/have_binlog_format_mixed.inc
+
+--connection slave
+# Make sure the slave is stopped while we are messing with master.
+# Otherwise we get occasional failures as the slave manages to re-connect
+# to the newly started master and we get extra events applied, causing
+# conflicts.
+--source include/stop_slave.inc
+
+--connection master
+call mtr.add_suppression("Error in Log_event::read_log_event()");
+--let $datadir= `SELECT @@datadir`
+
+--let $rpl_server_number= 1
+--source include/rpl_stop_server.inc
+
+--remove_file $datadir/master-bin.000001
+--copy_file $MYSQL_TEST_DIR/std_data/bug11747416_32228_binlog.000001 $datadir/master-bin.000001
+
+--let $rpl_server_number= 1
+--source include/rpl_start_server.inc
+
+--source include/wait_until_connected_again.inc
+
+# evidence of the partial binlog
+--error ER_ERROR_WHEN_EXECUTING_COMMAND
+show binlog events;
+
+--connection slave
+call mtr.add_suppression("Slave I/O: Got fatal error 1236 from master when reading data from binary log");
+reset slave;
+start slave;
+
+# ER_MASTER_FATAL_ERROR_READING_BINLOG 1236
+--let $slave_param=Last_IO_Errno
+--let $slave_param_value=1236
+--source include/wait_for_slave_param.inc
+
+--let $slave_field_result_replace= / at [0-9]*/ at XXX/
+--let $status_items= Last_IO_Errno, Last_IO_Error
+--source include/show_slave_status.inc
+
+#
+# Cleanup
+#
+
+--connection master
+reset master;
+
+--connection slave
+stop slave;
+reset slave;
+# The table was created from the binlog; it may not exist if the SQL thread is
+# running slowly and the IO thread reaches the incident before the SQL thread
+# applies the event that creates it.
+--disable_warnings
+drop table if exists t;
+--enable_warnings
+reset master;
+
+--echo End of the tests
+--let $rpl_only_running_threads= 1
+--source include/rpl_end.inc
diff --git a/mysql-test/extra/rpl_tests/rpl_checksum.inc b/mysql-test/extra/rpl_tests/rpl_checksum.inc
new file mode 100644
index 00000000000..8423d2fc1cb
--- /dev/null
+++ b/mysql-test/extra/rpl_tests/rpl_checksum.inc
@@ -0,0 +1,334 @@
+#
+# This include file is used by more than one test suite
+# (currently rpl and binlog_encryption).
+# Please check all dependent tests after modifying it
+#
+
+# WL2540 replication events checksum
+# Testing configuration parameters
+
+--source include/master-slave.inc
+--source include/have_debug.inc
+--source include/have_binlog_format_mixed.inc
+
+call mtr.add_suppression('Slave can not handle replication events with the checksum that master is configured to log');
+call mtr.add_suppression('Replication event checksum verification failed');
+# due to C failure simulation
+call mtr.add_suppression('Relay log write failure: could not queue event from master');
+call mtr.add_suppression('Master is configured to log replication events with checksum, but will not send such events to slaves that cannot process them');
+
+# A. read/write access to the global vars:
+# binlog_checksum master_verify_checksum slave_sql_verify_checksum
+
+connection master;
+
+set @master_save_binlog_checksum= @@global.binlog_checksum;
+set @save_master_verify_checksum = @@global.master_verify_checksum;
+
+select @@global.binlog_checksum as 'must be CRC32 because of the command line option';
+--error ER_INCORRECT_GLOBAL_LOCAL_VAR
+select @@session.binlog_checksum as 'no session var';
+
+select @@global.master_verify_checksum as 'must be zero because of default';
+--error ER_INCORRECT_GLOBAL_LOCAL_VAR
+select @@session.master_verify_checksum as 'no session var';
+
+connection slave;
+
+set @slave_save_binlog_checksum= @@global.binlog_checksum;
+set @save_slave_sql_verify_checksum = @@global.slave_sql_verify_checksum;
+
+select @@global.slave_sql_verify_checksum as 'must be one because of default';
+--error ER_INCORRECT_GLOBAL_LOCAL_VAR
+select @@session.slave_sql_verify_checksum as 'no session var';
+
+connection master;
+
+source include/show_binary_logs.inc;
+set @@global.binlog_checksum = NONE;
+select @@global.binlog_checksum;
+--echo *** must be rotations seen ***
+source include/show_binary_logs.inc;
+
+set @@global.binlog_checksum = default;
+select @@global.binlog_checksum;
+
+# testing lack of side-effects in non-effective update of binlog_checksum:
+set @@global.binlog_checksum = CRC32;
+select @@global.binlog_checksum;
+set @@global.binlog_checksum = CRC32;
+
+set @@global.master_verify_checksum = 0;
+set @@global.master_verify_checksum = default;
+
+--error ER_WRONG_VALUE_FOR_VAR
+set @@global.binlog_checksum = ADLER32;
+--error ER_WRONG_VALUE_FOR_VAR
+set @@global.master_verify_checksum = 2; # the var is of bool type
+
+connection slave;
+
+set @@global.slave_sql_verify_checksum = 0;
+set @@global.slave_sql_verify_checksum = default;
+--error ER_WRONG_VALUE_FOR_VAR
+set @@global.slave_sql_verify_checksum = 2; # the var is of bool type
+
+#
+# B. Old Slave to New master conditions
+#
+# while the master does not send a checksummed binlog, the Old Slave can
+# work with the New Master
+
+connection master;
+
+set @@global.binlog_checksum = NONE;
+create table t1 (a int);
+
+# testing that binlog rotation preserves opt_binlog_checksum value
+flush logs;
+flush logs;
+-- source include/wait_for_binlog_checkpoint.inc
+flush logs;
+
+sync_slave_with_master;
+#connection slave;
+# checking that rotation on the slave side leaves slave stable
+flush logs;
+flush logs;
+flush logs;
+select count(*) as zero from t1;
+
+source include/stop_slave.inc;
+
+connection master;
+set @@global.binlog_checksum = CRC32;
+-- source include/wait_for_binlog_checkpoint.inc
+insert into t1 values (1) /* will not be applied on slave due to simulation */;
+
+# instruction to the dump thread
+
+connection slave;
+set @@global.debug_dbug='d,simulate_slave_unaware_checksum';
+start slave;
+--let $slave_io_errno= 1236
+--let $show_slave_io_error= 1
+source include/wait_for_slave_io_error.inc;
+
+select count(*) as zero from t1;
+
+###connection master;
+set @@global.debug_dbug='';
+
+connection slave;
+source include/start_slave.inc;
+
+#
+# C. checksum failure simulations
+#
+
+# C1. Failure by a client thread
+connection master;
+set @@global.master_verify_checksum = 1;
+set @@session.debug_dbug='d,simulate_checksum_test_failure';
+--error ER_ERROR_WHEN_EXECUTING_COMMAND
+show binlog events;
+set @@session.debug_dbug='';
+set @@global.master_verify_checksum = default;
+
+#connection master;
+sync_slave_with_master;
+
+connection slave;
+source include/stop_slave.inc;
+
+connection master;
+create table t2 (a int);
+let $pos_master= query_get_value(SHOW MASTER STATUS, Position, 1);
+
+connection slave;
+
+# C2. Failure by IO thread
+# instruction to io thread
+set @@global.debug_dbug='d,simulate_checksum_test_failure';
+start slave io_thread;
+# When the checksum error is detected, the slave sets error code 1913
+# (ER_NETWORK_READ_EVENT_CHECKSUM_FAILURE) in queue_event(), then immediately
+# sets error 1595 (ER_SLAVE_RELAY_LOG_WRITE_FAILURE) in handle_slave_io().
+# So we usually get 1595, but it is occasionally possible to get 1913.
+--let $slave_io_errno= 1595,1913
+--let $show_slave_io_error= 0
+source include/wait_for_slave_io_error.inc;
+set @@global.debug_dbug='';
+
+# to make IO thread re-read it again w/o the failure
+start slave io_thread;
+let $slave_param= Read_Master_Log_Pos;
+let $slave_param_value= $pos_master;
+source include/wait_for_slave_param.inc;
+
+# C3. Failure by SQL thread
+# instruction to sql thread;
+set @@global.slave_sql_verify_checksum = 1;
+
+set @@global.debug_dbug='d,simulate_checksum_test_failure';
+
+start slave sql_thread;
+--let $slave_sql_errno= 1593
+--let $show_slave_sql_error= 1
+source include/wait_for_slave_sql_error.inc;
+
+# resuming SQL thread to parse out the event w/o the failure
+
+set @@global.debug_dbug='';
+source include/start_slave.inc;
+
+connection master;
+sync_slave_with_master;
+
+#connection slave;
+select count(*) as 'must be zero' from t2;
+
+#
+# D. Reset slave, Change-Master, Binlog & Relay-log rotations with
+# random value on binlog_checksum on both master and slave
+#
+connection slave;
+stop slave;
+reset slave;
+
+# randomize slave server's own checksum policy
+set @@global.binlog_checksum= IF(floor((rand()*1000)%2), "CRC32", "NONE");
+flush logs;
+
+connection master;
+set @@global.binlog_checksum= CRC32;
+reset master;
+flush logs;
+create table t3 (a int, b char(5));
+
+connection slave;
+source include/start_slave.inc;
+
+connection master;
+sync_slave_with_master;
+
+#connection slave;
+select count(*) as 'must be zero' from t3;
+source include/stop_slave.inc;
+--replace_result $MASTER_MYPORT MASTER_PORT
+eval change master to master_host='127.0.0.1',master_port=$MASTER_MYPORT, master_user='root';
+
+connection master;
+flush logs;
+reset master;
+insert into t3 value (1, @@global.binlog_checksum);
+
+connection slave;
+source include/start_slave.inc;
+flush logs;
+
+connection master;
+sync_slave_with_master;
+
+#connection slave;
+select count(*) as 'must be one' from t3;
+
+connection master;
+set @@global.binlog_checksum= IF(floor((rand()*1000)%2), "CRC32", "NONE");
+insert into t3 value (1, @@global.binlog_checksum);
+sync_slave_with_master;
+
+#connection slave;
+
+#clean-up
+
+connection master;
+drop table t1, t2, t3;
+set @@global.binlog_checksum = @master_save_binlog_checksum;
+set @@global.master_verify_checksum = @save_master_verify_checksum;
+
+#
+# BUG#58564: flush_read_lock fails in mysql-trunk-bugfixing after merging with WL#2540
+#
+# Sanity check that verifies that no assertions are triggered because
+# of old FD events (generated by server versions released before the
+# checksum feature)
+#
+# There is no need for the query log; if something is wrong this should
+# trigger an assertion
+
+--disable_query_log
+
+BINLOG '
+MfmqTA8BAAAAZwAAAGsAAAABAAQANS41LjctbTMtZGVidWctbG9nAAAAAAAAAAAAAAAAAAAAAAAA
+AAAAAAAAAAAAAAAAAAAx+apMEzgNAAgAEgAEBAQEEgAAVAAEGggAAAAICAgCAA==
+';
+
+--enable_query_log
+
+#connection slave;
+sync_slave_with_master;
+
+
+--echo *** Bug#59123 / MDEV-5799: INCIDENT_EVENT checksum written to error log as garbage characters ***
+
+--connection master
+
+--source include/wait_for_binlog_checkpoint.inc
+CREATE TABLE t4 (a INT PRIMARY KEY);
+INSERT INTO t4 VALUES (1);
+
+SET sql_log_bin=0;
+CALL mtr.add_suppression("\\[ERROR\\] Can't generate a unique log-filename");
+SET sql_log_bin=1;
+SET @old_dbug= @@GLOBAL.debug_dbug;
+SET debug_dbug= '+d,binlog_inject_new_name_error';
+--error ER_NO_UNIQUE_LOGFILE
+FLUSH LOGS;
+SET debug_dbug= @old_dbug;
+
+INSERT INTO t4 VALUES (2);
+
+--connection slave
+--let $slave_sql_errno= 1590
+--source include/wait_for_slave_sql_error.inc
+
+# Search the error log for the error message.
+# The bug was that 4 garbage bytes were output in the middle of the error
+# message; by searching for a pattern that spans that location, we can
+# catch the error.
+let $log_error_= `SELECT @@GLOBAL.log_error`;
+if(!$log_error_)
+{
+ # MySQL Server on windows is started with --console and thus
+ # does not know the location of its .err log, use default location
+ let $log_error_ = $MYSQLTEST_VARDIR/log/mysqld.2.err;
+}
+--let SEARCH_FILE= $log_error_
+--let SEARCH_RANGE=-50000
+--let SEARCH_PATTERN= Slave SQL: The incident LOST_EVENTS occurred on the master\. Message: error writing to the binary log, Internal MariaDB error code: 1590
+--source include/search_pattern_in_file.inc
+
+SELECT * FROM t4 ORDER BY a;
+STOP SLAVE IO_THREAD;
+SET sql_slave_skip_counter= 1;
+--source include/start_slave.inc
+
+--connection master
+--save_master_pos
+
+--connection slave
+--sync_with_master
+SELECT * FROM t4 ORDER BY a;
+
+
+--connection slave
+set @@global.binlog_checksum = @slave_save_binlog_checksum;
+set @@global.slave_sql_verify_checksum = @save_slave_sql_verify_checksum;
+
+--echo End of tests
+
+--connection master
+DROP TABLE t4;
+
+--source include/rpl_end.inc
diff --git a/mysql-test/extra/rpl_tests/rpl_checksum_cache.inc b/mysql-test/extra/rpl_tests/rpl_checksum_cache.inc
new file mode 100644
index 00000000000..a10c9721f70
--- /dev/null
+++ b/mysql-test/extra/rpl_tests/rpl_checksum_cache.inc
@@ -0,0 +1,261 @@
+#
+# This include file is used by more than one test suite
+# (currently rpl and binlog_encryption).
+# Please check all dependent tests after modifying it
+#
+
+-- source include/have_innodb.inc
+-- source include/master-slave.inc
+
+--disable_warnings
+call mtr.add_suppression("Unsafe statement written to the binary log using statement format since BINLOG_FORMAT = STATEMENT. Statement is unsafe because it uses a system function that may return a different value on the slave. Statement: insert into t2 set data=repeat.*'a', @act_size.*");
+call mtr.add_suppression("Unsafe statement written to the binary log using statement format since BINLOG_FORMAT = STATEMENT. Statement is unsafe because it uses a system function that may return a different value on the slave. Statement: insert into t1 values.* NAME_CONST.*'n',.*, @data .*");
+--enable_warnings
+
+connection master;
+set @save_binlog_cache_size = @@global.binlog_cache_size;
+set @save_binlog_checksum = @@global.binlog_checksum;
+set @save_master_verify_checksum = @@global.master_verify_checksum;
+set @@global.binlog_cache_size = 4096;
+set @@global.binlog_checksum = CRC32;
+set @@global.master_verify_checksum = 1;
+
+# restart slave to force the dump thread to verify events (on master side)
+connection slave;
+source include/stop_slave.inc;
+source include/start_slave.inc;
+
+connection master;
+
+#
+# Testing a critical part of checksum handling dealing with transaction cache.
+# The cache's buffer size is set to be less than the transaction's footprint
+# in binlog.
+#
+# To verify the combined buffer-by-buffer read-out of the file and the fixing
+# of the CRC per event, there are the following parts:
+#
+# 1. the event size is much less than the cache's buffer
+# 2. the event size is bigger than the cache's buffer
+# 3. the event size is approximately the same as the cache's buffer
+# 4. all of the above
+
+#
+# 1. the event size is much less than the cache's buffer
+#
+
+flush status;
+show status like "binlog_cache_use";
+show status like "binlog_cache_disk_use";
+--disable_warnings
+drop table if exists t1;
+--enable_warnings
+
+#
+# parameter to ensure the test slightly varies binlog content
+# between different invocations
+#
+let $deviation_size=32;
+eval create table t1 (a int PRIMARY KEY, b CHAR($deviation_size)) engine=innodb;
+
+# Now we are going to create a transaction which is long enough that its
+# binlog transaction cache will be flushed to disk...
+
+delimiter |;
+create procedure test.p_init (n int, size int)
+begin
+ while n > 0 do
+ select round(RAND() * size) into @act_size;
+ set @data = repeat('a', @act_size);
+ insert into t1 values(n, @data );
+ set n= n-1;
+ end while;
+end|
+
+delimiter ;|
+
+let $1 = 4000; # PB2 can run slowly enough to time out on the following sync_slave_with_master calls
+
+begin;
+--disable_warnings
+# todo: check if it is really so.
+#+Note 1592 Unsafe statement binlogged in statement format since BINLOG_FORMAT = STATEMENT. Reason for unsafeness: Statement uses a system function whose value may differ on slave.
+eval call test.p_init($1, $deviation_size);
+--enable_warnings
+commit;
+
+show status like "binlog_cache_use";
+--echo *** binlog_cache_disk_use must be non-zero ***
+show status like "binlog_cache_disk_use";
+
+sync_slave_with_master;
+
+let $diff_tables=master:test.t1, slave:test.t1;
+source include/diff_tables.inc;
+
+# undo the changes, verifying the above once again
+connection master;
+
+begin;
+delete from t1;
+commit;
+
+sync_slave_with_master;
+
+
+#
+# 2. the event size is bigger than the cache's buffer
+#
+connection master;
+
+flush status;
+let $t2_data_size= `select 3 * @@global.binlog_cache_size`;
+let $t2_aver_size= `select 2 * @@global.binlog_cache_size`;
+let $t2_max_rand= `select 1 * @@global.binlog_cache_size`;
+
+eval create table t2(a int auto_increment primary key, data VARCHAR($t2_data_size)) ENGINE=Innodb;
+let $1=100;
+--disable_query_log
+begin;
+while ($1)
+{
+ eval select round($t2_aver_size + RAND() * $t2_max_rand) into @act_size;
+ set @data = repeat('a', @act_size);
+ insert into t2 set data = @data;
+ dec $1;
+}
+commit;
+--enable_query_log
+show status like "binlog_cache_use";
+--echo *** binlog_cache_disk_use must be non-zero ***
+show status like "binlog_cache_disk_use";
+
+sync_slave_with_master;
+
+let $diff_tables=master:test.t2, slave:test.t2;
+source include/diff_tables.inc;
+
+# undo the changes, verifying the above once again
+connection master;
+
+begin;
+delete from t2;
+commit;
+
+sync_slave_with_master;
+
+#
+# 3. the event size is approximately the same as the cache's buffer
+#
+
+connection master;
+
+flush status;
+let $t3_data_size= `select 2 * @@global.binlog_cache_size`;
+let $t3_aver_size= `select (9 * @@global.binlog_cache_size) / 10`;
+let $t3_max_rand= `select (2 * @@global.binlog_cache_size) / 10`;
+
+eval create table t3(a int auto_increment primary key, data VARCHAR($t3_data_size)) engine=innodb;
+
+let $1= 300;
+--disable_query_log
+begin;
+while ($1)
+{
+ eval select round($t3_aver_size + RAND() * $t3_max_rand) into @act_size;
+ insert into t3 set data= repeat('a', @act_size);
+ dec $1;
+}
+commit;
+--enable_query_log
+show status like "binlog_cache_use";
+--echo *** binlog_cache_disk_use must be non-zero ***
+show status like "binlog_cache_disk_use";
+
+sync_slave_with_master;
+
+let $diff_tables=master:test.t3, slave:test.t3;
+source include/diff_tables.inc;
+
+# undo the changes, verifying the above once again
+connection master;
+
+begin;
+delete from t3;
+commit;
+
+sync_slave_with_master;
+
+
+#
+# 4. all of the above
+#
+
+connection master;
+flush status;
+
+delimiter |;
+eval create procedure test.p1 (n int)
+begin
+ while n > 0 do
+ case (select (round(rand()*100) % 3) + 1)
+ when 1 then
+ select round(RAND() * $deviation_size) into @act_size;
+ set @data = repeat('a', @act_size);
+ insert into t1 values(n, @data);
+ when 2 then
+ begin
+ select round($t2_aver_size + RAND() * $t2_max_rand) into @act_size;
+ insert into t2 set data=repeat('a', @act_size);
+ end;
+ when 3 then
+ begin
+ select round($t3_aver_size + RAND() * $t3_max_rand) into @act_size;
+ insert into t3 set data= repeat('a', @act_size);
+ end;
+ end case;
+ set n= n-1;
+ end while;
+end|
+delimiter ;|
+
+let $1= 1000;
+set autocommit= 0;
+begin;
+--disable_warnings
+eval call test.p1($1);
+--enable_warnings
+commit;
+
+show status like "binlog_cache_use";
+--echo *** binlog_cache_disk_use must be non-zero ***
+show status like "binlog_cache_disk_use";
+
+sync_slave_with_master;
+
+let $diff_tables=master:test.t1, slave:test.t1;
+source include/diff_tables.inc;
+
+let $diff_tables=master:test.t2, slave:test.t2;
+source include/diff_tables.inc;
+
+let $diff_tables=master:test.t3, slave:test.t3;
+source include/diff_tables.inc;
+
+
+connection master;
+
+begin;
+delete from t1;
+delete from t2;
+delete from t3;
+commit;
+
+drop table t1, t2, t3;
+set @@global.binlog_cache_size = @save_binlog_cache_size;
+set @@global.binlog_checksum = @save_binlog_checksum;
+set @@global.master_verify_checksum = @save_master_verify_checksum;
+drop procedure test.p_init;
+drop procedure test.p1;
+
+--source include/rpl_end.inc
diff --git a/mysql-test/extra/rpl_tests/rpl_corruption.inc b/mysql-test/extra/rpl_tests/rpl_corruption.inc
new file mode 100644
index 00000000000..048a9d74249
--- /dev/null
+++ b/mysql-test/extra/rpl_tests/rpl_corruption.inc
@@ -0,0 +1,176 @@
+#
+# This include file is used by more than one test suite
+# (currently rpl and binlog_encryption).
+# Please check all dependent tests after modifying it
+#
+
+############################################################
+# Purpose: WL#5064 Testing with corrupted events.
+#          The test emulates the corruption at the various stages
+# of replication:
+# - in binlog file
+# - in network
+# - in relay log
+############################################################
+
+#
+# The tests intensively use @@global.debug. Note that, per
+# Bug#11765758 - 58754,
+# @@global.debug is read by the slave threads through the dbug interface.
+# Hence, before a client thread sets @@global.debug we have to ensure that:
+# (a) the slave threads are stopped, or (b) the slave threads are in
+# sync and waiting.
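+#
+# A sketch of the pattern this implies (illustrative only; the debug flag name
+# is a placeholder):
+#
+#   --source include/stop_slave.inc            # (a) slave threads are stopped
+#   SET GLOBAL debug_dbug="+d,some_fault_flag";
+#   START SLAVE IO_THREAD;
+#   --source include/wait_for_slave_io_error.inc
+#   SET GLOBAL debug_dbug="";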
+
+--source include/have_debug.inc
+--source include/master-slave.inc
+
+# Block legal errors for MTR
+call mtr.add_suppression('Found invalid event in binary log');
+call mtr.add_suppression('Slave I/O: Relay log write failure: could not queue event from master');
+call mtr.add_suppression('event read from binlog did not pass crc check');
+call mtr.add_suppression('Replication event checksum verification failed');
+call mtr.add_suppression('Event crc check failed! Most likely there is event corruption');
+call mtr.add_suppression('Slave SQL: Error initializing relay log position: I/O error reading event at position .*, error.* 1593');
+
+SET @old_master_verify_checksum = @@master_verify_checksum;
+
+# Creating test table/data and set corruption position for testing
+--echo # 1. Creating test table/data and set corruption position for testing
+--connection master
+--echo * insert/update/delete rows in table t1 *
+# The corruption algorithm modifies only the first event and
+# is then reset. To avoid always checking the first event
+# from the binlog (usually it is the FD event) we randomly execute different
+# statements and set the position for corruption inside the events.
+
+CREATE TABLE t1 (a INT NOT NULL PRIMARY KEY, b VARCHAR(10), c VARCHAR(100));
+--disable_query_log
+let $i=`SELECT 3+CEILING(10*RAND())`;
+let $j=1;
+let $pos=0;
+while ($i) {
+ eval INSERT INTO t1 VALUES ($j, 'a', NULL);
+ if (`SELECT RAND() > 0.7`)
+ {
+ eval UPDATE t1 SET c = REPEAT('a', 20) WHERE a = $j;
+ }
+ if (`SELECT RAND() > 0.8`)
+ {
+ eval DELETE FROM t1 WHERE a = $j;
+ }
+ if (!$pos) {
+ let $pos= query_get_value(SHOW MASTER STATUS, Position, 1);
+ --sync_slave_with_master
+ --source include/stop_slave.inc
+ --disable_query_log
+ --connection master
+ }
+ dec $i;
+ inc $j;
+}
+--enable_query_log
+
+
+# Emulate corruption in binlog file when SHOW BINLOG EVENTS is executing
+--echo # 2. Corruption in master binlog and SHOW BINLOG EVENTS
+SET GLOBAL debug_dbug="+d,corrupt_read_log_event_char";
+--echo SHOW BINLOG EVENTS;
+--disable_query_log
+send_eval SHOW BINLOG EVENTS FROM $pos;
+--enable_query_log
+--error ER_ERROR_WHEN_EXECUTING_COMMAND
+reap;
+
+SET GLOBAL debug_dbug="-d,corrupt_read_log_event_char";
+
+# Emulate corruption on master with crc checking on master
+--echo # 3. Master read a corrupted event from binlog and send the error to slave
+
+# We have a rare but nasty potential race here: if the dump thread on
+# the master for the _old_ slave connection has not yet discovered
+# that the slave has disconnected, we will inject the corrupt event on
+# the wrong connection, and the test will fail
+# (+d,corrupt_read_log_event2 corrupts only one event).
+# So kill any lingering dump thread (we need to kill; otherwise dump thread
+# could manage to send all events down the socket before seeing it close, and
+# hang forever waiting for new binlog events to be created).
+let $id= `select id from information_schema.processlist where command = "Binlog Dump"`;
+if ($id)
+{
+ --disable_query_log
+ --error 0,1094
+ eval kill $id;
+ --enable_query_log
+}
+let $wait_condition=
+ SELECT COUNT(*)=0 FROM INFORMATION_SCHEMA.PROCESSLIST WHERE command = 'Binlog Dump';
+--source include/wait_condition.inc
+
+SET GLOBAL debug_dbug="+d,corrupt_read_log_event2_set";
+--connection slave
+START SLAVE IO_THREAD;
+let $slave_io_errno= 1236;
+--let $slave_timeout= 10
+--source include/wait_for_slave_io_error.inc
+--connection master
+SET GLOBAL debug_dbug="-d,corrupt_read_log_event2_set";
+
+# Emulate corruption on master without crc checking on master
+--echo # 4. Master read a corrupted event from binlog and send it to slave
+--connection master
+SET GLOBAL master_verify_checksum=0;
+SET GLOBAL debug_dbug="+d,corrupt_read_log_event2_set";
+--connection slave
+START SLAVE IO_THREAD;
+# When the checksum error is detected, the slave sets error code 1913
+# (ER_NETWORK_READ_EVENT_CHECKSUM_FAILURE) in queue_event(), then immediately
+# sets error 1595 (ER_SLAVE_RELAY_LOG_WRITE_FAILURE) in handle_slave_io().
+# So we usually get 1595, but it is occasionally possible to get 1913.
+let $slave_io_errno= 1595,1913;
+--source include/wait_for_slave_io_error.inc
+--connection master
+SET GLOBAL debug_dbug="-d,corrupt_read_log_event2_set";
+SET GLOBAL debug_dbug= "";
+SET GLOBAL master_verify_checksum=1;
+
+# Emulate corruption in network
+--echo # 5. Slave. Corruption in network
+--connection slave
+SET GLOBAL debug_dbug="+d,corrupt_queue_event";
+START SLAVE IO_THREAD;
+let $slave_io_errno= 1595,1913;
+--source include/wait_for_slave_io_error.inc
+SET GLOBAL debug_dbug="-d,corrupt_queue_event";
+
+# Emulate corruption in relay log
+--echo # 6. Slave. Corruption in relay log
+
+SET GLOBAL debug_dbug="+d,corrupt_read_log_event_char";
+
+START SLAVE SQL_THREAD;
+let $slave_sql_errno= 1593;
+--source include/wait_for_slave_sql_error.inc
+
+SET GLOBAL debug_dbug="-d,corrupt_read_log_event_char";
+SET GLOBAL debug_dbug= "";
+
+# Start normal replication and compare same table on master
+# and slave
+--echo # 7. Seek diff for tables on master and slave
+--connection slave
+--source include/start_slave.inc
+--connection master
+--sync_slave_with_master
+let $diff_tables= master:test.t1, slave:test.t1;
+--source include/diff_tables.inc
+
+# Clean up
+--echo # 8. Clean up
+--connection master
+SET GLOBAL debug_dbug= "";
+SET GLOBAL master_verify_checksum = @old_master_verify_checksum;
+DROP TABLE t1;
+--sync_slave_with_master
+SET GLOBAL debug_dbug= "";
+
+--source include/rpl_end.inc
diff --git a/mysql-test/extra/rpl_tests/rpl_gtid_basic.inc b/mysql-test/extra/rpl_tests/rpl_gtid_basic.inc
new file mode 100644
index 00000000000..ab7d23f70ac
--- /dev/null
+++ b/mysql-test/extra/rpl_tests/rpl_gtid_basic.inc
@@ -0,0 +1,568 @@
+#
+# This include file is used by more than one test suite
+# (currently rpl and binlog_encryption).
+# Please check all dependent tests after modifying it
+#
+
+--source include/have_innodb.inc
+--let $rpl_topology=1->2->3->4
+--source include/rpl_init.inc
+
+# Set up a 4-deep replication topology, then test various fail-overs
+# using GTID.
+#
+# A -> B -> C -> D
+
+connection server_1;
+--source include/wait_for_binlog_checkpoint.inc
+--let $binlog_file = query_get_value(SHOW MASTER STATUS,File,1)
+--let $binlog_pos = query_get_value(SHOW MASTER STATUS,Position,1)
+--echo *** GTID position should be empty here ***
+--replace_result $binlog_file <BINLOG_FILE> $binlog_pos <BINLOG_POS>
+eval SELECT BINLOG_GTID_POS('$binlog_file',$binlog_pos);
+
+CREATE TABLE t1 (a INT PRIMARY KEY, b VARCHAR(10)) ENGINE=MyISAM;
+CREATE TABLE t2 (a INT PRIMARY KEY, b VARCHAR(10)) ENGINE=InnoDB;
+INSERT INTO t1 VALUES (1, "m1");
+INSERT INTO t1 VALUES (2, "m2"), (3, "m3"), (4, "m4");
+INSERT INTO t2 VALUES (1, "i1");
+BEGIN;
+INSERT INTO t2 VALUES (2, "i2"), (3, "i3");
+INSERT INTO t2 VALUES (4, "i4");
+COMMIT;
+save_master_pos;
+source include/wait_for_binlog_checkpoint.inc;
+--let $binlog_file = query_get_value(SHOW MASTER STATUS,File,1)
+--let $binlog_pos = query_get_value(SHOW MASTER STATUS,Position,1)
+--let $gtid_pos_server_1 = `SELECT @@gtid_binlog_pos`
+--echo *** GTID position should be non-empty here ***
+--replace_result $binlog_file <BINLOG_FILE> $binlog_pos <BINLOG_POS> $gtid_pos_server_1 <GTID_POS_SERVER_1>
+eval SELECT BINLOG_GTID_POS('$binlog_file',$binlog_pos);
+
+connection server_2;
+sync_with_master;
+source include/wait_for_binlog_checkpoint.inc;
+--let $binlog_file = query_get_value(SHOW MASTER STATUS,File,1)
+--let $binlog_pos = query_get_value(SHOW MASTER STATUS,Position,1)
+--echo *** GTID position should be the same as on server_1 ***
+--replace_result $binlog_file <BINLOG_FILE> $binlog_pos <BINLOG_POS> $gtid_pos_server_1 <GTID_POS_SERVER_1>
+eval SELECT BINLOG_GTID_POS('$binlog_file',$binlog_pos);
+SELECT * FROM t1 ORDER BY a;
+SELECT * FROM t2 ORDER BY a;
+save_master_pos;
+
+connection server_3;
+sync_with_master;
+SELECT * FROM t1 ORDER BY a;
+SELECT * FROM t2 ORDER BY a;
+save_master_pos;
+
+connection server_4;
+sync_with_master;
+SELECT * FROM t1 ORDER BY a;
+SELECT * FROM t2 ORDER BY a;
+
+
+--echo *** Now take out D, let it fall behind a bit, and then test re-attaching it to A ***
+connection server_4;
+--source include/stop_slave.inc
+
+connection server_1;
+INSERT INTO t1 VALUES (5, "m1a");
+INSERT INTO t2 VALUES (5, "i1a");
+save_master_pos;
+
+connection server_4;
+--replace_result $MASTER_MYPORT MASTER_PORT
+eval CHANGE MASTER TO master_host = '127.0.0.1', master_port = $MASTER_MYPORT,
+ MASTER_USE_GTID=CURRENT_POS;
+--source include/start_slave.inc
+sync_with_master;
+SELECT * FROM t1 ORDER BY a;
+SELECT * FROM t2 ORDER BY a;
+
+--echo *** Now move B to D (C is still replicating from B) ***
+connection server_2;
+--source include/stop_slave.inc
+--replace_result $SERVER_MYPORT_4 SERVER_MYPORT_4
+eval CHANGE MASTER TO master_host = '127.0.0.1', master_port = $SERVER_MYPORT_4,
+ MASTER_USE_GTID=CURRENT_POS;
+--source include/start_slave.inc
+
+connection server_4;
+UPDATE t2 SET b="j1a" WHERE a=5;
+save_master_pos;
+
+connection server_2;
+sync_with_master;
+SELECT * FROM t1 ORDER BY a;
+SELECT * FROM t2 ORDER BY a;
+
+--echo *** Now move C to D, after letting it fall a little behind ***
+connection server_3;
+--source include/stop_slave.inc
+
+connection server_1;
+INSERT INTO t2 VALUES (6, "i6b");
+INSERT INTO t2 VALUES (7, "i7b");
+--source include/save_master_gtid.inc
+
+connection server_3;
+--replace_result $SERVER_MYPORT_4 SERVER_MYPORT_4
+eval CHANGE MASTER TO master_host = '127.0.0.1', master_port = $SERVER_MYPORT_4,
+ MASTER_USE_GTID=CURRENT_POS;
+--source include/start_slave.inc
+--source include/sync_with_master_gtid.inc
+SELECT * FROM t2 ORDER BY a;
+
+--echo *** Now change everything back to what it was, to make rpl_end.inc happy
+# Also check that MASTER_USE_GTID=CURRENT_POS is still enabled.
+connection server_2;
+# We need to sync up server_2 before switching. If it happened to have reached
+# the point 'UPDATE t2 SET b="j1a" WHERE a=5' it will fail to connect to
+# server_1, which is (deliberately) missing that transaction.
+--source include/sync_with_master_gtid.inc
+--source include/stop_slave.inc
+--replace_result $MASTER_MYPORT MASTER_MYPORT
+eval CHANGE MASTER TO master_host = '127.0.0.1', master_port = $MASTER_MYPORT;
+--source include/start_slave.inc
+--source include/wait_for_slave_to_start.inc
+
+connection server_3;
+--source include/stop_slave.inc
+--replace_result $SLAVE_MYPORT SLAVE_MYPORT
+eval CHANGE MASTER TO master_host = '127.0.0.1', master_port = $SLAVE_MYPORT;
+--source include/start_slave.inc
+--source include/sync_with_master_gtid.inc
+
+connection server_4;
+--source include/stop_slave.inc
+--replace_result $SERVER_MYPORT_3 SERVER_MYPORT_3
+eval CHANGE MASTER TO master_host = '127.0.0.1', master_port = $SERVER_MYPORT_3;
+--source include/start_slave.inc
+
+connection server_1;
+DROP TABLE t1,t2;
+--source include/save_master_gtid.inc
+
+--echo *** A few more checks for BINLOG_GTID_POS function ***
+--let $valid_binlog_name = query_get_value(SHOW BINARY LOGS,Log_name,1)
+--error ER_WRONG_PARAMCOUNT_TO_NATIVE_FCT
+SELECT BINLOG_GTID_POS();
+--error ER_WRONG_PARAMCOUNT_TO_NATIVE_FCT
+SELECT BINLOG_GTID_POS('a');
+--error ER_WRONG_PARAMCOUNT_TO_NATIVE_FCT
+SELECT BINLOG_GTID_POS('a',1,NULL);
+SELECT BINLOG_GTID_POS(1,'a');
+SELECT BINLOG_GTID_POS(NULL,NULL);
+SELECT BINLOG_GTID_POS('',1);
+SELECT BINLOG_GTID_POS('a',1);
+eval SELECT BINLOG_GTID_POS('$valid_binlog_name',-1);
+eval SELECT BINLOG_GTID_POS('$valid_binlog_name',0);
+eval SELECT BINLOG_GTID_POS('$valid_binlog_name',18446744073709551615);
+eval SELECT BINLOG_GTID_POS('$valid_binlog_name',18446744073709551616);
+
+
+--echo *** Some tests of @@GLOBAL.gtid_binlog_state ***
+--connection server_2
+--source include/sync_with_master_gtid.inc
+--source include/stop_slave.inc
+
+--connection server_1
+SET @old_state= @@GLOBAL.gtid_binlog_state;
+
+--error ER_BINLOG_MUST_BE_EMPTY
+SET GLOBAL gtid_binlog_state = '';
+RESET MASTER;
+SET GLOBAL gtid_binlog_state = '';
+FLUSH LOGS;
+--source include/show_binary_logs.inc
+SET GLOBAL gtid_binlog_state = '0-1-10,1-2-20,0-3-30';
+--source include/show_binary_logs.inc
+--let $binlog_file= master-bin.000001
+--let $binlog_start= 4
+--source include/show_binlog_events.inc
+#SELECT @@GLOBAL.gtid_binlog_pos;
+#SELECT @@GLOBAL.gtid_binlog_state;
+--error ER_BINLOG_MUST_BE_EMPTY
+SET GLOBAL gtid_binlog_state = @old_state;
+RESET MASTER;
+SET GLOBAL gtid_binlog_state = @old_state;
+
+# Check that the slave can reconnect again, despite the RESET MASTER, as we
+# restored the state.
+
+CREATE TABLE t1 (a INT PRIMARY KEY);
+SET gtid_seq_no=100;
+INSERT INTO t1 VALUES (1);
+--source include/save_master_gtid.inc
+
+--connection server_2
+--source include/start_slave.inc
+# We cannot just use sync_with_master, as we have done RESET MASTER, so the
+# slave's old-style position is wrong.
+# Sync on the GTID position instead.
+--source include/sync_with_master_gtid.inc
+
+SELECT * FROM t1;
+# Check that the IO gtid position in SHOW SLAVE STATUS is also correct.
+--let $status_items= Gtid_IO_Pos
+--source include/show_slave_status.inc
+
+--echo *** Test @@LAST_GTID and MASTER_GTID_WAIT() ***
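+#
+# A minimal sketch of the semantics exercised below (comments only, not run
+# here; the GTID value is just an example):
+#   SELECT @@last_gtid;                      -- GTID of the last transaction
+#                                            -- this session wrote to the binlog
+#   SELECT master_gtid_wait('0-1-110', 1.0); -- 0 once the slave has reached
+#                                            -- the position, -1 on timeout
+#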
+
+--connection server_1
+DROP TABLE t1;
+CREATE TABLE t1 (a INT PRIMARY KEY) ENGINE=InnoDB;
+--save_master_pos
+
+--connection server_2
+--sync_with_master
+--source include/stop_slave.inc
+
+--connect (m1,127.0.0.1,root,,test,$SERVER_MYPORT_1,)
+SELECT @@last_gtid;
+SET gtid_seq_no=110;
+SELECT @@last_gtid;
+BEGIN;
+SELECT @@last_gtid;
+INSERT INTO t1 VALUES (2);
+SELECT @@last_gtid;
+COMMIT;
+SELECT @@last_gtid;
+--let $pos= `SELECT @@gtid_binlog_pos`
+
+--connect (s1,127.0.0.1,root,,test,$SERVER_MYPORT_2,)
+eval SET @pos= '$pos';
+# Check NULL argument.
+SELECT master_gtid_wait(NULL);
+# Check empty argument returns immediately.
+SELECT master_gtid_wait('', NULL);
+# Check this gets counted
+SHOW STATUS LIKE 'Master_gtid_wait_count';
+SHOW STATUS LIKE 'Master_gtid_wait_timeouts';
+SHOW STATUS LIKE 'Master_gtid_wait_time';
+# Let's check that we get a timeout
+SELECT master_gtid_wait(@pos, 0.5);
+SELECT * FROM t1 ORDER BY a;
+# Now actually wait until the slave reaches the position
+send SELECT master_gtid_wait(@pos);
+
+--connection server_2
+--source include/start_slave.inc
+
+--connection s1
+reap;
+SELECT * FROM t1 ORDER BY a;
+
+# Test waiting on a domain that does not exist yet.
+--source include/stop_slave.inc
+
+--connection server_1
+SET gtid_domain_id= 1;
+INSERT INTO t1 VALUES (3);
+--let $pos= `SELECT @@gtid_binlog_pos`
+
+--connection s1
+--replace_result $pos POS
+eval SET @pos= '$pos';
+SELECT master_gtid_wait(@pos, 0);
+SELECT * FROM t1 WHERE a >= 3;
+send SELECT master_gtid_wait(@pos, -1);
+
+--connection server_2
+--source include/start_slave.inc
+
+--connection s1
+reap;
+SELECT * FROM t1 WHERE a >= 3;
+# Waiting for only part of the position.
+SELECT master_gtid_wait('1-1-1', 0);
+
+# Now test a lot of parallel master_gtid_wait() calls, completing in different
+# order, and some of which time out or get killed on the way.
+
+--connection s1
+send SELECT master_gtid_wait('2-1-1,1-1-4,0-1-110');
+
+--connect (s2,127.0.0.1,root,,test,$SERVER_MYPORT_2,)
+# This will time out. No event 0-1-1000 exists
+send SELECT master_gtid_wait('0-1-1000', 0.5);
+
+--connect (s3,127.0.0.1,root,,test,$SERVER_MYPORT_2,)
+# This one we will kill
+--let $kill1_id= `SELECT connection_id()`
+send SELECT master_gtid_wait('0-1-2000');
+
+--connect (s4,127.0.0.1,root,,test,$SERVER_MYPORT_2,)
+send SELECT master_gtid_wait('2-1-10');
+
+--connect (s5,127.0.0.1,root,,test,$SERVER_MYPORT_2,)
+send SELECT master_gtid_wait('2-1-6', 1);
+
+# This one we will kill also.
+--connect (s6,127.0.0.1,root,,test,$SERVER_MYPORT_2,)
+--let $kill2_id= `SELECT connection_id()`
+send SELECT master_gtid_wait('2-1-5');
+
+--connect (s7,127.0.0.1,root,,test,$SERVER_MYPORT_2,)
+send SELECT master_gtid_wait('2-1-10');
+
+--connect (s8,127.0.0.1,root,,test,$SERVER_MYPORT_2,)
+send SELECT master_gtid_wait('2-1-5,1-1-4,0-1-110');
+
+--connect (s9,127.0.0.1,root,,test,$SERVER_MYPORT_2,)
+send SELECT master_gtid_wait('2-1-2');
+
+--connection server_2
+# This one completes immediately.
+SHOW STATUS LIKE 'Master_gtid_wait_timeouts';
+SHOW STATUS LIKE 'Master_gtid_wait_count';
+SELECT master_gtid_wait('1-1-1');
+SHOW STATUS LIKE 'Master_gtid_wait_timeouts';
+SHOW STATUS LIKE 'Master_gtid_wait_count';
+let $wait_time = query_get_value(SHOW STATUS LIKE 'Master_gtid_wait_time', Value, 1);
+--replace_result $wait_time MASTER_GTID_WAIT_TIME
+eval SET @a= $wait_time;
+SELECT IF(@a <= 100*1000*1000, "OK", CONCAT("Error: wait time ", @a, " is larger than expected"))
+ AS Master_gtid_wait_time_as_expected;
+
+
+--connect (s10,127.0.0.1,root,,test,$SERVER_MYPORT_2,)
+send SELECT master_gtid_wait('0-1-109');
+
+--connection server_2
+# This one should time out.
+SHOW STATUS LIKE 'Master_gtid_wait_timeouts';
+SHOW STATUS LIKE 'Master_gtid_wait_count';
+SELECT master_gtid_wait('2-1-2', 0.5);
+SHOW STATUS LIKE 'Master_gtid_wait_timeouts';
+SHOW STATUS LIKE 'Master_gtid_wait_count';
+let $wait_time = query_get_value(SHOW STATUS LIKE 'Master_gtid_wait_time', Value, 1);
+--replace_result $wait_time MASTER_GTID_WAIT_TIME
+eval SET @a= $wait_time;
+# We expect a wait time of just a bit over 0.5 seconds. But thread scheduling
+# and timer inaccuracies could introduce significant jitter. So allow a
+# generous interval.
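+# (Master_gtid_wait_time is counted in microseconds, so the ~0.5 second
+#  timeout above should add roughly 500*1000 to it; the check below accepts
+#  anything between 0.4 million and 100 million microseconds.)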
+SELECT IF(@a BETWEEN 0.4*1000*1000 AND 100*1000*1000, "OK", CONCAT("Error: wait time ", @a, " not as expected")) AS Master_gtid_wait_time_as_expected;
+
+--replace_result $kill1_id KILL_ID
+eval KILL QUERY $kill1_id;
+--connection s3
+--error ER_QUERY_INTERRUPTED
+reap;
+
+--connection server_1
+SET gtid_domain_id=2;
+SET gtid_seq_no=2;
+INSERT INTO t1 VALUES (4);
+
+--connection s9
+reap;
+
+--connection server_2
+--replace_result $kill2_id KILL_ID
+eval KILL CONNECTION $kill2_id;
+
+--connection s6
+--error 2013,ER_CONNECTION_KILLED
+reap;
+
+--connection server_1
+SET gtid_domain_id=1;
+SET gtid_seq_no=4;
+INSERT INTO t1 VALUES (5);
+SET gtid_domain_id=2;
+SET gtid_seq_no=5;
+INSERT INTO t1 VALUES (6);
+
+--connection s8
+reap;
+--connection s1
+reap;
+--connection s2
+reap;
+--connection s5
+reap;
+--connection s10
+reap;
+
+--connection server_1
+SET gtid_domain_id=2;
+SET gtid_seq_no=10;
+INSERT INTO t1 VALUES (7);
+
+--connection s4
+reap;
+--connection s7
+reap;
+
+
+--echo *** Test gtid_slave_pos when used with GTID ***
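+#
+# Sketch of what is being verified (not run here; the values match the first
+# round of the test below): with master transactions 2-1-1000 and 2-1-1001
+# pending on the slave,
+#   SET sql_slave_skip_counter= 1;
+#   START SLAVE;
+# skips the whole event group of 2-1-1000 and applies 2-1-1001, so "2-1-1001"
+# must show up in @@GLOBAL.gtid_slave_pos.
+#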
+
+--connection server_2
+--source include/stop_slave.inc
+
+--connection server_1
+SET gtid_domain_id=2;
+SET gtid_seq_no=1000;
+INSERT INTO t1 VALUES (10);
+INSERT INTO t1 VALUES (11);
+--save_master_pos
+
+--connection server_2
+SET sql_slave_skip_counter= 1;
+--source include/start_slave.inc
+--sync_with_master
+SELECT * FROM t1 WHERE a >= 10 ORDER BY a;
+SELECT IF(LOCATE("2-1-1001", @@GLOBAL.gtid_slave_pos)>0, "Ok", CONCAT("ERROR! expected GTID 2-1-1001 not found in gtid_slave_pos: ", @@GLOBAL.gtid_slave_pos)) AS status;
+
+--source include/stop_slave.inc
+
+--connection server_1
+SET gtid_domain_id=2;
+SET gtid_seq_no=1010;
+INSERT INTO t1 VALUES (12);
+INSERT INTO t1 VALUES (13);
+--save_master_pos
+
+--connection server_2
+SET sql_slave_skip_counter= 2;
+--source include/start_slave.inc
+--sync_with_master
+SELECT * FROM t1 WHERE a >= 10 ORDER BY a;
+SELECT IF(LOCATE("2-1-1011", @@GLOBAL.gtid_slave_pos)>0, "Ok", CONCAT("ERROR! expected GTID 2-1-1011 not found in gtid_slave_pos: ", @@GLOBAL.gtid_slave_pos)) AS status;
+
+--source include/stop_slave.inc
+
+--connection server_1
+SET gtid_domain_id=2;
+SET gtid_seq_no=1020;
+INSERT INTO t1 VALUES (14);
+INSERT INTO t1 VALUES (15);
+INSERT INTO t1 VALUES (16);
+--save_master_pos
+
+--connection server_2
+SET sql_slave_skip_counter= 3;
+--source include/start_slave.inc
+--sync_with_master
+SELECT * FROM t1 WHERE a >= 10 ORDER BY a;
+SELECT IF(LOCATE("2-1-1022", @@GLOBAL.gtid_slave_pos)>0, "Ok", CONCAT("ERROR! expected GTID 2-1-1022 not found in gtid_slave_pos: ", @@GLOBAL.gtid_slave_pos)) AS status;
+
+--source include/stop_slave.inc
+
+--connection server_1
+SET gtid_domain_id=2;
+SET gtid_seq_no=1030;
+INSERT INTO t1 VALUES (17);
+INSERT INTO t1 VALUES (18);
+INSERT INTO t1 VALUES (19);
+--save_master_pos
+
+--connection server_2
+SET sql_slave_skip_counter= 5;
+--source include/start_slave.inc
+--sync_with_master
+SELECT * FROM t1 WHERE a >= 10 ORDER BY a;
+SELECT IF(LOCATE("2-1-1032", @@GLOBAL.gtid_slave_pos)>0, "Ok", CONCAT("ERROR! expected GTID 2-1-1032 not found in gtid_slave_pos: ", @@GLOBAL.gtid_slave_pos)) AS status;
+
+
+--source include/stop_slave.inc
+
+--connection server_1
+SET gtid_domain_id=3;
+SET gtid_seq_no=100;
+CREATE TABLE t2 (a INT PRIMARY KEY);
+DROP TABLE t2;
+SET gtid_domain_id=2;
+SET gtid_seq_no=1040;
+INSERT INTO t1 VALUES (20);
+--save_master_pos
+
+--connection server_2
+SET @saved_mode= @@GLOBAL.slave_ddl_exec_mode;
+SET GLOBAL slave_ddl_exec_mode=STRICT;
+SET sql_slave_skip_counter=1;
+START SLAVE UNTIL master_gtid_pos="3-1-100";
+--let $master_pos=3-1-100
+--source include/sync_with_master_gtid.inc
+--source include/wait_for_slave_to_stop.inc
+--error ER_NO_SUCH_TABLE
+SELECT * FROM t2;
+SELECT IF(LOCATE("3-1-100", @@GLOBAL.gtid_slave_pos)>0, "Ok", CONCAT("ERROR! expected GTID 3-1-100 not found in gtid_slave_pos: ", @@GLOBAL.gtid_slave_pos)) AS status;
+
+# Start the slave again, it should fail on the DROP TABLE as the table is not there.
+SET sql_log_bin=0;
+CALL mtr.add_suppression("Slave: Unknown table 'test\\.t2' Error_code: 1051");
+SET sql_log_bin=1;
+START SLAVE;
+--let $slave_sql_errno=1051
+--source include/wait_for_slave_sql_error.inc
+SELECT IF(LOCATE("3-1-100", @@GLOBAL.gtid_slave_pos)>0, "Ok", CONCAT("ERROR! expected GTID 3-1-100 not found in gtid_slave_pos: ", @@GLOBAL.gtid_slave_pos)) AS status;
+
+STOP SLAVE IO_THREAD;
+SET sql_slave_skip_counter=2;
+--source include/start_slave.inc
+--sync_with_master
+
+SELECT * FROM t1 WHERE a >= 20 ORDER BY a;
+SELECT IF(LOCATE("3-1-101", @@GLOBAL.gtid_slave_pos)>0, "Ok", CONCAT("ERROR! expected GTID 3-1-101 not found in gtid_slave_pos: ", @@GLOBAL.gtid_slave_pos)) AS status;
+SELECT IF(LOCATE("2-1-1040", @@GLOBAL.gtid_slave_pos)>0, "Ok", CONCAT("ERROR! expected GTID 2-1-1040 not found in gtid_slave_pos: ", @@GLOBAL.gtid_slave_pos)) AS status;
+
+SET GLOBAL slave_ddl_exec_mode= @saved_mode;
+
+
+--echo *** Test GTID-connecting to a master with out-of-order sequence numbers in the binlog. ***
+
+# Create an out-of-order binlog on server 2.
+# Let server 3 replicate to an out-of-order point, stop it, restart it,
+# and check that it replicates correctly despite the out-of-order sequence
+# numbers.
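+#
+# (How the out-of-order binlog arises, for illustration: server_2 interleaves
+#  transactions replicated from server_1 with its own local transactions in
+#  the same replication domain, so the sequence numbers written to its binlog
+#  are not monotonically increasing.)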
+
+--connection server_1
+SET gtid_domain_id= @@GLOBAL.gtid_domain_id;
+INSERT INTO t1 VALUES (31);
+--save_master_pos
+
+--connection server_2
+--sync_with_master
+SET gtid_domain_id= @@GLOBAL.gtid_domain_id;
+INSERT INTO t1 VALUES (32);
+
+--connection server_1
+INSERT INTO t1 VALUES (33);
+--save_master_pos
+
+--connection server_2
+--sync_with_master
+--save_master_pos
+
+--connection server_3
+--sync_with_master
+--source include/stop_slave.inc
+
+--connection server_1
+INSERT INTO t1 VALUES (34);
+--save_master_pos
+
+--connection server_2
+--sync_with_master
+--save_master_pos
+
+--connection server_3
+--source include/start_slave.inc
+--sync_with_master
+SELECT * FROM t1 WHERE a >= 30 ORDER BY a;
+--save_master_pos
+
+--connection server_4
+--sync_with_master
+SELECT * FROM t1 WHERE a >= 30 ORDER BY a;
+
+
+# Clean up.
+--connection server_1
+DROP TABLE t1;
+
+
+--source include/rpl_end.inc
diff --git a/mysql-test/extra/rpl_tests/rpl_incident.inc b/mysql-test/extra/rpl_tests/rpl_incident.inc
new file mode 100644
index 00000000000..350a2086681
--- /dev/null
+++ b/mysql-test/extra/rpl_tests/rpl_incident.inc
@@ -0,0 +1,63 @@
+#
+# This include file is used by more than one test suite
+# (currently rpl and binlog_encryption).
+# Please check all dependent tests after modifying it
+#
+
+--source include/have_debug.inc
+--source include/master-slave.inc
+
+SET @old_binlog_checksum=@@binlog_checksum;
+SET GLOBAL BINLOG_CHECKSUM=none;
+connection slave;
+SET @old_binlog_checksum=@@binlog_checksum;
+SET GLOBAL BINLOG_CHECKSUM=none;
+connection master;
+
+--echo **** On Master ****
+CREATE TABLE t1 (a INT);
+
+INSERT INTO t1 VALUES (1),(2),(3);
+SELECT * FROM t1;
+
+let $debug_save= `SELECT @@GLOBAL.debug`;
+SET GLOBAL debug_dbug= '+d,incident_database_resync_on_replace,*';
+
+# This will generate an incident log event and store it in the binary
+# log before the replace statement.
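+# (For illustration, not executed here: the incident shows up in
+#  SHOW BINLOG EVENTS as an Incident event with info "#1 (LOST_EVENTS)",
+#  and it makes the slave SQL thread stop with error 1590.)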
+REPLACE INTO t1 VALUES (4);
+--save_master_pos
+SELECT * FROM t1;
+
+--disable_query_log
+eval SET GLOBAL debug_dbug= '$debug_save';
+--enable_query_log
+
+connection slave;
+# Wait until the SQL thread stops with the LOST_EVENTS incident error from the master
+call mtr.add_suppression("Slave SQL.*The incident LOST_EVENTS occurred on the master.* 1590");
+let $slave_sql_errno= 1590;
+let $show_slave_sql_error= 1;
+source include/wait_for_slave_sql_error.inc;
+
+# The 4 should not be inserted into the table, since the incident log
+# event should have stopped the slave.
+--echo **** On Slave ****
+SELECT * FROM t1;
+
+SET GLOBAL SQL_SLAVE_SKIP_COUNTER=1;
+START SLAVE;
+--sync_with_master
+
+# Now, we should have inserted the row into the table and the slave
+# should be running. We should also have rotated to a new binary log.
+
+SELECT * FROM t1;
+source include/check_slave_is_running.inc;
+
+connection master;
+SET GLOBAL BINLOG_CHECKSUM=@old_binlog_checksum;
+DROP TABLE t1;
+--sync_slave_with_master
+SET GLOBAL BINLOG_CHECKSUM=@old_binlog_checksum;
+--source include/rpl_end.inc
diff --git a/mysql-test/extra/rpl_tests/rpl_init_slave_errors.inc b/mysql-test/extra/rpl_tests/rpl_init_slave_errors.inc
new file mode 100644
index 00000000000..a8ac4e3cd6e
--- /dev/null
+++ b/mysql-test/extra/rpl_tests/rpl_init_slave_errors.inc
@@ -0,0 +1,95 @@
+#
+# This include file is used by more than one test suite
+# (currently rpl and binlog_encryption).
+# Please check all dependent tests after modifying it
+#
+
+######################################################################
+# Some errors that cause the slave SQL thread to stop are not shown in
+# the Slave_SQL_Error column of "SHOW SLAVE STATUS". Instead, the error
+# is only in the server's error log.
+#
+# Two failures and their respective reporting are verified:
+#
+# 1 - Failures during slave thread initialization
+# 2 - Failures while processing queries passed through the init_slave
+# option.
+#
+# In order to check the first type of failure, we inject a fault in the
+# SQL/IO Threads through SET GLOBAL debug.
+#
+# To check the second type, we set @@global.init_slave to an invalid
+# command thus preventing the initialization of the SQL Thread.
+#
+# Obs:
+# 1 - Note that testing failures while initializing the relay log position
+# is hard as the same function is called before the code reaches the point
+# that we want to test.
+#
+# 2 - This test does not target failures that are reported while applying
+# events such as duplicate keys, errors while reading the relay-log.bin*,
+# etc. Such errors are already checked in other tests.
+######################################################################
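+# For illustration (not executed): a valid init_slave value is an ordinary
+# statement, e.g.
+#   SET GLOBAL init_slave= "SET sql_mode=''";
+# The test below instead assigns the unparsable string "garbage", so the SQL
+# thread stops with ER_PARSE_ERROR (1064) when it tries to execute it at
+# startup.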
+
+######################################################################
+# Configuring the Environment
+######################################################################
+source include/have_debug.inc;
+source include/master-slave.inc;
+source include/have_log_bin.inc;
+
+connection slave;
+
+--disable_warnings
+stop slave;
+--enable_warnings
+reset slave;
+
+######################################################################
+# Injecting faults in the threads' initialization
+######################################################################
+connection slave;
+
+# Set debug flags on slave to force errors to occur
+SET GLOBAL debug_dbug= "d,simulate_io_slave_error_on_init,simulate_sql_slave_error_on_init";
+
+start slave;
+
+#
+# The slave is going to stop because of the emulated failures,
+# but there should be no crashes and no asserts hit.
+#
+# 1593 = ER_SLAVE_FATAL_ERROR
+--let $slave_sql_errno= 1593
+--let $show_slave_sql_error= 1
+--source include/wait_for_slave_sql_error.inc
+
+call mtr.add_suppression("Failed during slave.* thread initialization");
+
+SET GLOBAL debug_dbug= "";
+
+######################################################################
+# Injecting faults in the init_slave option
+######################################################################
+connection slave;
+
+reset slave;
+
+SET GLOBAL init_slave= "garbage";
+
+start slave;
+# 1064 = ER_PARSE_ERROR
+--let $slave_sql_errno= 1064
+--let $show_slave_sql_error= 1
+--source include/wait_for_slave_sql_error.inc
+
+######################################################################
+# Clean up
+######################################################################
+SET GLOBAL init_slave= "";
+
+# Clean up Last_SQL_Error
+--source include/stop_slave_io.inc
+RESET SLAVE;
+--let $rpl_only_running_threads= 1
+--source include/rpl_end.inc
diff --git a/mysql-test/extra/rpl_tests/rpl_loaddata_local.inc b/mysql-test/extra/rpl_tests/rpl_loaddata_local.inc
new file mode 100644
index 00000000000..20962d74e98
--- /dev/null
+++ b/mysql-test/extra/rpl_tests/rpl_loaddata_local.inc
@@ -0,0 +1,232 @@
+#
+# This include file is used by more than one test suite
+# (currently rpl and binlog_encryption).
+# Please check all dependent tests after modifying it
+#
+
+# See if "LOAD DATA LOCAL INFILE" is well replicated
+# (LOAD DATA LOCAL INFILE is not written to the binlog
+# the same way as LOAD DATA INFILE : Append_blocks are smaller).
+# In MySQL 4.0 <4.0.12 there were 2 bugs with LOAD DATA LOCAL INFILE :
+# - the loaded file was not written entirely to the master's binlog,
+# only the first 4KB, 8KB or 16KB usually.
+# - the loaded file's first line was not written entirely to the
+# master's binlog (1st char was absent)
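+#
+# (Roughly, and for illustration only: with statement-based logging the loaded
+#  file's contents travel inside the binlog itself, as Begin_load_query /
+#  Append_block / Execute_load_query events, so the slave never needs access
+#  to the client-side file.)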
+source include/master-slave.inc;
+
+create table t1(a int);
+let $1=10000;
+disable_query_log;
+set SQL_LOG_BIN=0;
+while ($1)
+{
+ insert into t1 values(1);
+ dec $1;
+}
+set SQL_LOG_BIN=1;
+enable_query_log;
+let $MYSQLD_DATADIR= `select @@datadir`;
+--replace_result $MYSQLD_DATADIR MYSQLD_DATADIR
+eval select * into outfile '$MYSQLD_DATADIR/rpl_loaddatalocal.select_outfile' from t1;
+# This will generate a 20KB file; now test LOAD DATA LOCAL
+truncate table t1;
+--replace_result $MYSQLD_DATADIR MYSQLD_DATADIR
+eval load data local infile '$MYSQLD_DATADIR/rpl_loaddatalocal.select_outfile' into table t1;
+--remove_file $MYSQLD_DATADIR/rpl_loaddatalocal.select_outfile
+sync_slave_with_master;
+select a,count(*) from t1 group by a;
+connection master;
+drop table t1;
+sync_slave_with_master;
+
+# End of 4.1 tests
+
+#
+# Now let us test how well we replicate LOAD DATA LOCAL in a situation where
+# we encounter duplicates in the table to which we are adding rows.
+# (LOAD DATA LOCAL is supposed to ignore such errors.)
+#
+connection master;
+create table t1(a int);
+insert into t1 values (1), (2), (2), (3);
+--replace_result $MYSQLD_DATADIR MYSQLD_DATADIR
+eval select * into outfile '$MYSQLD_DATADIR/rpl_loaddatalocal.select_outfile' from t1;
+drop table t1;
+create table t1(a int primary key);
+--replace_result $MYSQLD_DATADIR MYSQLD_DATADIR
+eval load data local infile '$MYSQLD_DATADIR/rpl_loaddatalocal.select_outfile' into table t1;
+--remove_file $MYSQLD_DATADIR/rpl_loaddatalocal.select_outfile
+SELECT * FROM t1 ORDER BY a;
+save_master_pos;
+connection slave;
+sync_with_master;
+SELECT * FROM t1 ORDER BY a;
+connection master;
+drop table t1;
+save_master_pos;
+connection slave;
+sync_with_master;
+
+
+#
+# Bug22504 load data infile sql statement in replication architecture get error
+#
+--echo ==== Bug22504 Initialize ====
+
+--connection master
+
+SET sql_mode='ignore_space';
+CREATE TABLE t1(a int);
+insert into t1 values (1), (2), (3), (4);
+--replace_result $MYSQLD_DATADIR MYSQLD_DATADIR
+eval select * into outfile '$MYSQLD_DATADIR/rpl_loaddatalocal.select_outfile' from t1;
+truncate table t1;
+--replace_result $MYSQLD_DATADIR MYSQLD_DATADIR
+eval load data local infile '$MYSQLD_DATADIR/rpl_loaddatalocal.select_outfile' into table t1;
+--remove_file $MYSQLD_DATADIR/rpl_loaddatalocal.select_outfile
+SELECT * FROM t1 ORDER BY a;
+
+sync_slave_with_master;
+SELECT * FROM t1 ORDER BY a;
+
+--echo ==== Clean up ====
+
+connection master;
+DROP TABLE t1;
+
+sync_slave_with_master;
+
+--echo
+--echo Bug #43746:
+--echo "return wrong query string when parse 'load data infile' sql statement"
+--echo
+
+connection master;
+let $MYSQLD_DATADIR= `select @@datadir`;
+SELECT @@SESSION.sql_mode INTO @old_mode;
+
+SET sql_mode='ignore_space';
+
+CREATE TABLE t1(a int);
+INSERT INTO t1 VALUES (1), (2), (3), (4);
+
+--replace_result $MYSQLD_DATADIR MYSQLD_DATADIR
+eval SELECT * INTO OUTFILE '$MYSQLD_DATADIR/bug43746.sql' FROM t1;
+TRUNCATE TABLE t1;
+
+--replace_result $MYSQLD_DATADIR MYSQLD_DATADIR
+eval LOAD DATA LOCAL INFILE '$MYSQLD_DATADIR/bug43746.sql' INTO TABLE t1;
+
+--replace_result $MYSQLD_DATADIR MYSQLD_DATADIR
+eval LOAD/* look mum, with comments in weird places! */DATA/* oh hai */LOCAL INFILE '$MYSQLD_DATADIR/bug43746.sql'/* we are */INTO/* from the internets */TABLE t1;
+
+--replace_result $MYSQLD_DATADIR MYSQLD_DATADIR
+eval LOAD DATA/*!10000 LOCAL */INFILE '$MYSQLD_DATADIR/bug43746.sql' INTO TABLE t1;
+
+--replace_result $MYSQLD_DATADIR MYSQLD_DATADIR
+eval LOAD DATA LOCAL INFILE '$MYSQLD_DATADIR/bug43746.sql' /*!10000 INTO */ TABLE t1;
+
+--replace_result $MYSQLD_DATADIR MYSQLD_DATADIR
+eval LOAD DATA LOCAL INFILE '$MYSQLD_DATADIR/bug43746.sql' /*!10000 INTO TABLE */ t1;
+
+--replace_result $MYSQLD_DATADIR MYSQLD_DATADIR
+eval LOAD DATA /*!10000 LOCAL INFILE '$MYSQLD_DATADIR/bug43746.sql' INTO TABLE */ t1;
+
+--replace_result $MYSQLD_DATADIR MYSQLD_DATADIR
+eval LOAD DATA/*!10000 LOCAL */INFILE '$MYSQLD_DATADIR/bug43746.sql'/*!10000 INTO*/TABLE t1;
+
+--replace_result $MYSQLD_DATADIR MYSQLD_DATADIR
+eval LOAD DATA/*!10000 LOCAL */INFILE '$MYSQLD_DATADIR/bug43746.sql'/* empty */INTO TABLE t1;
+
+--replace_result $MYSQLD_DATADIR MYSQLD_DATADIR
+eval LOAD DATA/*!10000 LOCAL */INFILE '$MYSQLD_DATADIR/bug43746.sql' INTO/* empty */TABLE t1;
+
+--replace_result $MYSQLD_DATADIR MYSQLD_DATADIR
+eval LOAD/*!999999 special comments that do not expand */DATA/*!999999 code from the future */LOCAL INFILE '$MYSQLD_DATADIR/bug43746.sql'/*!999999 have flux capacitor */INTO/*!999999 will travel */TABLE t1;
+
+SET sql_mode='PIPES_AS_CONCAT,ANSI_QUOTES,NO_KEY_OPTIONS,NO_TABLE_OPTIONS,NO_FIELD_OPTIONS,STRICT_TRANS_TABLES,STRICT_ALL_TABLES,NO_ZERO_IN_DATE,NO_ZERO_DATE,ERROR_FOR_DIVISION_BY_ZERO,TRADITIONAL,NO_AUTO_CREATE_USER';
+
+--replace_result $MYSQLD_DATADIR MYSQLD_DATADIR
+eval LOAD DATA LOCAL INFILE '$MYSQLD_DATADIR/bug43746.sql' INTO TABLE t1;
+
+sync_slave_with_master;
+
+--echo
+--echo Bug #59267:
+--echo "LOAD DATA LOCAL INFILE not executed on slave with SBR"
+--echo
+
+connection master;
+
+--replace_result $MYSQLD_DATADIR MYSQLD_DATADIR
+eval SELECT * INTO OUTFILE '$MYSQLD_DATADIR/bug59267.sql' FROM t1;
+TRUNCATE TABLE t1;
+
+--replace_result $MYSQLD_DATADIR MYSQLD_DATADIR
+eval LOAD DATA LOCAL INFILE '$MYSQLD_DATADIR/bug59267.sql' INTO TABLE t1;
+
+SELECT 'Master', COUNT(*) FROM t1;
+
+--sync_slave_with_master
+SELECT 'Slave', COUNT(*) FROM t1;
+
+# cleanup
+connection master;
+
+--remove_file $MYSQLD_DATADIR/bug43746.sql
+--remove_file $MYSQLD_DATADIR/bug59267.sql
+
+DROP TABLE t1;
+SET SESSION sql_mode=@old_mode;
+
+sync_slave_with_master;
+
+connection master;
+
+--echo
+--echo Bug #60580/#11902767:
+--echo "statement improperly replicated crashes slave sql thread"
+--echo
+
+connection master;
+let $MYSQLD_DATADIR= `select @@datadir`;
+
+CREATE TABLE t1(f1 INT, f2 INT);
+CREATE TABLE t2(f1 INT, f2 TIMESTAMP);
+
+INSERT INTO t2 VALUES(1, '2011-03-22 21:01:28');
+INSERT INTO t2 VALUES(2, '2011-03-21 21:01:28');
+INSERT INTO t2 VALUES(3, '2011-03-20 21:01:28');
+
+CREATE TABLE t3 AS SELECT * FROM t2;
+
+CREATE VIEW v1 AS SELECT * FROM t2
+ WHERE f1 IN (SELECT f1 FROM t3 WHERE (t3.f2 IS NULL));
+
+--replace_result $MYSQLD_DATADIR MYSQLD_DATADIR
+eval SELECT 1 INTO OUTFILE '$MYSQLD_DATADIR/bug60580.csv' FROM DUAL;
+
+--replace_result $MYSQLD_DATADIR MYSQLD_DATADIR
+eval LOAD DATA LOCAL INFILE '$MYSQLD_DATADIR/bug60580.csv' INTO TABLE t1 (@f1) SET f2 = (SELECT f1 FROM v1 WHERE f1=@f1);
+
+SELECT * FROM t1;
+
+sleep 1;
+
+sync_slave_with_master;
+
+SELECT * FROM t1;
+
+--remove_file $MYSQLD_DATADIR/bug60580.csv
+
+connection master;
+
+DROP VIEW v1;
+DROP TABLE t1, t2, t3;
+
+sync_slave_with_master;
+
+connection master;
+--source include/rpl_end.inc
+
+--echo # End of 5.1 tests
diff --git a/mysql-test/extra/rpl_tests/rpl_loadfile.inc b/mysql-test/extra/rpl_tests/rpl_loadfile.inc
new file mode 100644
index 00000000000..e43c003b29c
--- /dev/null
+++ b/mysql-test/extra/rpl_tests/rpl_loadfile.inc
@@ -0,0 +1,120 @@
+#
+# This include file is used by more than one test suite
+# (currently rpl and binlog_encryption).
+# Please check all dependent tests after modifying it
+#
+
+#############################################################################
+# Original Author: JBM #
+# Original Date: Aug/18/2005 #
+#############################################################################
+# TEST: To test the LOAD_FILE() in rbr #
+#############################################################################
+# Change Author: JBM
+# Change Date: 2006-01-16
+##########
+
+# Includes
+-- source include/have_binlog_format_mixed_or_row.inc
+-- source include/master-slave.inc
+
+-- source extra/rpl_tests/rpl_loadfile.test
+
+# BUG#39701: Mixed binlog format does not switch to row mode on LOAD_FILE
+#
+# DESCRIPTION
+#
+# Problem: when using the LOAD_FILE string function with mixed binlogging
+# format, there was no switch to row-based binlogging format. This leads to
+# scenarios in which the slave replicates the statement and tries to load the
+# file from its local file system, where it most likely does not exist.
+#
+# Solution:
+# Marking this function as unsafe for statement format makes any statement
+# using it be logged in row-based format. As such, the data replicated from
+# the master contains the content of the loaded file. Consequently, the slave
+# receives the necessary data to complete the LOAD_FILE instruction correctly.
+#
+# IMPLEMENTATION
+#
+# The test is implemented as follows:
+#
+# On Master,
+#  i)    write the desired content to a file.
+#  ii)   create a table and a stored procedure using LOAD_FILE.
+#  iii)  stop the slave.
+#  iv)   execute the LOAD_FILE (via the procedure).
+#  v)    remove the file.
+#
+# On Slave,
+#  vi)   start the slave.
+#  vii)  sync it with the master so that it gets the updates from the binlog
+#        (which should have been logged in row format).
+#
+# If the binlog format does not change to row, then the assertion done in the
+# last step fails. This happens because the tables differ: the file does not
+# exist anymore, so when the slave attempts to execute the LOAD_FILE statement
+# it inserts NULL into the table instead of the contents that the master
+# loaded when it executed the procedure (while the file still existed).
+#
+#  viii) assert that the contents of the master and slave tables are the same.
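+#
+# A minimal sketch of the behaviour being asserted (not run here;
+# '/tmp/some_file.txt' is only a placeholder path):
+#   SET binlog_format=MIXED;
+#   INSERT INTO t1 VALUES (LOAD_FILE('/tmp/some_file.txt'));
+# The statement is unsafe for statement-based logging, so it is written to the
+# binlog as row events carrying the file contents, and the slave never tries
+# to read the file from its own filesystem.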
+
+--source include/rpl_reset.inc
+
+connection master;
+let $file= $MYSQLTEST_VARDIR/tmp/bug_39701.data;
+
+--replace_result $MYSQLTEST_VARDIR MYSQLTEST_VARDIR
+--eval SELECT repeat('x',20) INTO OUTFILE '$file'
+
+disable_warnings;
+DROP TABLE IF EXISTS t1;
+enable_warnings;
+
+CREATE TABLE t1 (t text);
+DELIMITER |;
+CREATE PROCEDURE p(file varchar(4096))
+ BEGIN
+ INSERT INTO t1 VALUES (LOAD_FILE(file));
+ END|
+DELIMITER ;|
+
+# stop slave before issuing the load_file on master
+connection slave;
+source include/stop_slave.inc;
+
+connection master;
+
+# test: check that logging falls back to rbr.
+--replace_result $MYSQLTEST_VARDIR MYSQLTEST_VARDIR
+--eval CALL p('$file')
+
+# test: remove the file from the filesystem and assert that slave still
+# gets the loaded file
+remove_file $file;
+
+# now that the file is removed it is safe (regarding what we want to test)
+# to start slave
+connection slave;
+source include/start_slave.inc;
+
+connection master;
+sync_slave_with_master;
+
+# assertion: assert that the slave got the updates even
+# if the file was removed before the slave started,
+# meaning that the contents were indeed transferred
+# through the binlog (in row format)
+let $diff_tables= master:t1, slave:t1;
+source include/diff_tables.inc;
+
+# CLEAN UP
+--connection master
+DROP TABLE t1;
+DROP PROCEDURE p;
+
+--source include/rpl_end.inc
diff --git a/mysql-test/extra/rpl_tests/rpl_packet.inc b/mysql-test/extra/rpl_tests/rpl_packet.inc
new file mode 100644
index 00000000000..41bb374b802
--- /dev/null
+++ b/mysql-test/extra/rpl_tests/rpl_packet.inc
@@ -0,0 +1,183 @@
+#
+# This include file is used by more than one test suite
+# (currently rpl and binlog_encryption).
+# Please check all dependent tests after modifying it
+#
+
+# ==== Purpose ====
+#
+# Check replication protocol packet size handling
+#
+# ==== Related bugs ====
+# Bug#19402 SQL close to the size of the max_allowed_packet fails on slave
+# BUG#23755: Replicated event larger than max_allowed_packet infinitely re-transmits
+# BUG#42914: No LAST_IO_ERROR for max_allowed_packet errors
+# BUG#55322: SHOW BINLOG EVENTS increases @@SESSION.MAX_ALLOWED_PACKET
+
+# Use a database name of maximal (64-byte) length
+source include/master-slave.inc;
+source include/have_binlog_format_row.inc;
+call mtr.add_suppression("Slave I/O: Got a packet bigger than 'slave_max_allowed_packet' bytes, .*error.* 1153");
+call mtr.add_suppression("Log entry on master is longer than slave_max_allowed_packet");
+let $db= DB_NAME_OF_MAX_LENGTH_AKA_NAME_LEN_64_BYTES_____________________;
+disable_warnings;
+eval drop database if exists $db;
+enable_warnings;
+eval create database $db;
+
+connection master;
+let $old_max_allowed_packet= `SELECT @@global.max_allowed_packet`;
+let $old_net_buffer_length= `SELECT @@global.net_buffer_length`;
+let $old_slave_max_allowed_packet= `SELECT @@global.slave_max_allowed_packet`;
+SET @@global.max_allowed_packet=1024;
+SET @@global.net_buffer_length=1024;
+
+sync_slave_with_master;
+# Restart slave for setting to take effect
+source include/stop_slave.inc;
+source include/start_slave.inc;
+
+# Reconnect to master for new setting to take effect
+disconnect master;
+
+# Alas, we cannot use eval here; if the db name changes, apply the change here too
+connect (master,localhost,root,,DB_NAME_OF_MAX_LENGTH_AKA_NAME_LEN_64_BYTES_____________________);
+
+connection master;
+select @@net_buffer_length, @@max_allowed_packet;
+
+create table `t1` (`f1` LONGTEXT) ENGINE=MyISAM;
+
+INSERT INTO `t1`(`f1`) VALUES ('aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa1023');
+sync_slave_with_master;
+
+eval select count(*) from `$db`.`t1` /* must be 1 */;
+
+SHOW STATUS LIKE 'Slave_running';
+select * from information_schema.session_status where variable_name= 'SLAVE_RUNNING';
+connection master;
+eval drop database $db;
+sync_slave_with_master;
+
+#
+# Bug #23755: Replicated event larger than max_allowed_packet infinitely re-transmits
+#
+# Check that an event on the master that is larger than max_allowed_packet on
+# the slave does not lead to infinite re-transmission attempts.
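+#
+# (For illustration: the INSERT further below produces an event bigger than
+#  the slave is willing to accept, so the slave I/O thread stops with error
+#  1153, ER_NET_PACKET_TOO_LARGE, instead of re-requesting the same event
+#  forever.)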
+
+connection master;
+
+# Change the max packet size on master
+
+SET @@global.max_allowed_packet=4096;
+SET @@global.net_buffer_length=4096;
+
+# Restart slave for new setting to take effect
+connection slave;
+source include/stop_slave.inc;
+source include/start_slave.inc;
+
+# Reconnect to master for new setting to take effect
+disconnect master;
+connect (master, localhost, root);
+connection master;
+
+CREATE TABLE `t1` (`f1` LONGTEXT) ENGINE=MyISAM;
+
+sync_slave_with_master;
+
+connection master;
+
+INSERT INTO `t1`(`f1`) VALUES ('aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa2048');
+
+
+#
+# Bug#42914: The slave I/O thread must stop after trying to read the above
+# event. However, there is no Last_IO_Error report.
+#
+
+# The slave I/O thread must stop after trying to read the above event
+connection slave;
+# 1153 = ER_NET_PACKET_TOO_LARGE
+--let $slave_io_errno= 1153
+--let $show_slave_io_error= 1
+--source include/wait_for_slave_io_error.inc
+
+# TODO: this is needed because of BUG#55790. Remove once that is fixed.
+--source include/stop_slave_sql.inc
+
+#
+# Bug#42914: On the master, if a binary log event is larger than
+# max_allowed_packet, the error message ER_MASTER_FATAL_ERROR_READING_BINLOG
+# is sent to a slave when it requests a dump from the master, thus leading the
+# I/O thread to stop. However, there is no Last_IO_Error reported.
+#
+
+--let $rpl_only_running_threads= 1
+--source include/rpl_reset.inc
+--connection master
+DROP TABLE t1;
+--sync_slave_with_master
+
+
+connection master;
+CREATE TABLE t1 (f1 int PRIMARY KEY, f2 LONGTEXT, f3 LONGTEXT) ENGINE=MyISAM;
+sync_slave_with_master;
+
+connection master;
+INSERT INTO t1(f1, f2, f3) VALUES(1, REPEAT('a', @@global.max_allowed_packet), REPEAT('b', @@global.max_allowed_packet));
+
+connection slave;
+# The slave I/O thread must stop after receiving the oversized event, with
+# 1153 = ER_NET_PACKET_TOO_LARGE
+--let $slave_io_errno= 1153
+--let $show_slave_io_error= 1
+--source include/wait_for_slave_io_error.inc
+
+# Remove the bad binlog and clear error status on slave.
+STOP SLAVE;
+RESET SLAVE;
+--connection master
+RESET MASTER;
+
+
+#
+# BUG#55322: SHOW BINLOG EVENTS increases @@SESSION.MAX_ALLOWED_PACKET
+#
+# In BUG#55322, @@session.max_allowed_packet increased each time SHOW
+# BINLOG EVENTS was issued. To verify that this bug is fixed, we
+# execute SHOW BINLOG EVENTS twice and check that max_allowed_packet
+# never changes. We turn off the result log because we don't care
+# about the contents of the binlog.
+
+--disable_result_log
+SET @max_allowed_packet_0= @@session.max_allowed_packet;
+SHOW BINLOG EVENTS;
+SET @max_allowed_packet_1= @@session.max_allowed_packet;
+SHOW BINLOG EVENTS;
+SET @max_allowed_packet_2= @@session.max_allowed_packet;
+--enable_result_log
+if (`SELECT NOT(@max_allowed_packet_0 = @max_allowed_packet_1 AND @max_allowed_packet_1 = @max_allowed_packet_2)`)
+{
+ --echo ERROR: max_allowed_packet changed after executing SHOW BINLOG EVENTS
+ --source include/show_rpl_debug_info.inc
+ SELECT @max_allowed_packet_0, @max_allowed_packet_1, @max_allowed_packet_2;
+ --die @max_allowed_packet changed after executing SHOW BINLOG EVENTS
+}
+
+
+--echo ==== clean up ====
+connection master;
+DROP TABLE t1;
+eval SET @@global.max_allowed_packet= $old_max_allowed_packet;
+eval SET @@global.net_buffer_length= $old_net_buffer_length;
+eval SET @@global.slave_max_allowed_packet= $old_slave_max_allowed_packet;
+# slave is stopped
+connection slave;
+DROP TABLE t1;
+
+# Clear Last_IO_Error
+RESET SLAVE;
+
+--source include/rpl_end.inc
+# End of tests
diff --git a/mysql-test/extra/rpl_tests/rpl_parallel.inc b/mysql-test/extra/rpl_tests/rpl_parallel.inc
new file mode 100644
index 00000000000..42354343084
--- /dev/null
+++ b/mysql-test/extra/rpl_tests/rpl_parallel.inc
@@ -0,0 +1,2219 @@
+#
+# This include file is used by more than one test suite
+# (currently rpl and binlog_encryption).
+# Please check all dependent tests after modifying it
+#
+
+--source include/have_innodb.inc
+--source include/have_debug.inc
+--source include/have_debug_sync.inc
+--source include/master-slave.inc
+
+# Test various aspects of parallel replication.
+
+--connection server_2
+SET @old_parallel_threads=@@GLOBAL.slave_parallel_threads;
+--error ER_SLAVE_MUST_STOP
+SET GLOBAL slave_parallel_threads=10;
+--source include/stop_slave.inc
+SET GLOBAL slave_parallel_threads=10;
+
+# Check that we do not spawn any worker threads when no slave is running.
+SELECT IF(COUNT(*) < 10, "OK", CONCAT("Found too many system user processes: ", COUNT(*))) FROM information_schema.processlist WHERE user = "system user";
+
+CHANGE MASTER TO master_use_gtid=slave_pos;
+--source include/start_slave.inc
+
+# Check that worker threads get spawned when slave starts.
+SELECT IF(COUNT(*) >= 10, "OK", CONCAT("Found too few system user processes: ", COUNT(*))) FROM information_schema.processlist WHERE user = "system user";
+# ... and that worker threads get removed when slave stops.
+--source include/stop_slave.inc
+SELECT IF(COUNT(*) < 10, "OK", CONCAT("Found too many system user processes: ", COUNT(*))) FROM information_schema.processlist WHERE user = "system user";
+--source include/start_slave.inc
+SELECT IF(COUNT(*) >= 10, "OK", CONCAT("Found too few system user processes: ", COUNT(*))) FROM information_schema.processlist WHERE user = "system user";
+
+--echo *** Test long-running query in domain 1 can run in parallel with short queries in domain 0 ***
+
+--connection server_1
+ALTER TABLE mysql.gtid_slave_pos ENGINE=InnoDB;
+CREATE TABLE t1 (a int PRIMARY KEY) ENGINE=MyISAM;
+CREATE TABLE t2 (a int PRIMARY KEY) ENGINE=InnoDB;
+INSERT INTO t1 VALUES (1);
+INSERT INTO t2 VALUES (1);
+--save_master_pos
+
+--connection server_2
+--sync_with_master
+
+# Block the table t1 to simulate a replicated query taking a long time.
+--connect (con_temp1,127.0.0.1,root,,test,$SERVER_MYPORT_2,)
+LOCK TABLE t1 WRITE;
+
+--connection server_1
+SET gtid_domain_id=1;
+# This query will be blocked on the slave until UNLOCK TABLES.
+INSERT INTO t1 VALUES (2);
+SET gtid_domain_id=0;
+# These t2 queries can be replicated in parallel with the prior t1 query, as
+# they are in a separate replication domain.
+INSERT INTO t2 VALUES (2);
+INSERT INTO t2 VALUES (3);
+BEGIN;
+INSERT INTO t2 VALUES (4);
+INSERT INTO t2 VALUES (5);
+COMMIT;
+INSERT INTO t2 VALUES (6);
+
+--connection server_2
+--let $wait_condition= SELECT COUNT(*) = 6 FROM t2
+--source include/wait_condition.inc
+
+SELECT * FROM t2 ORDER by a;
+
+--connection con_temp1
+SELECT * FROM t1;
+UNLOCK TABLES;
+
+--connection server_2
+--let $wait_condition= SELECT COUNT(*) = 2 FROM t1
+--source include/wait_condition.inc
+
+SELECT * FROM t1 ORDER BY a;
+
+
+--echo *** Test two transactions in different domains committed in opposite order on slave but in a single group commit. ***
+--connection server_2
+--source include/stop_slave.inc
+
+--connection server_1
+# Use a stored function to inject a debug_sync into the appropriate THD.
+# The function does nothing on the master, and on the slave it injects the
+# desired debug_sync action(s).
+SET sql_log_bin=0;
+--delimiter ||
+CREATE FUNCTION foo(x INT, d1 VARCHAR(500), d2 VARCHAR(500))
+ RETURNS INT DETERMINISTIC
+ BEGIN
+ RETURN x;
+ END
+||
+--delimiter ;
+SET sql_log_bin=1;
+
+SET @old_format= @@SESSION.binlog_format;
+SET binlog_format='statement';
+SET gtid_domain_id=1;
+INSERT INTO t2 VALUES (foo(10,
+ 'commit_before_enqueue SIGNAL ready1 WAIT_FOR cont1',
+ 'commit_after_release_LOCK_prepare_ordered SIGNAL ready2'));
+
+--connection server_2
+FLUSH LOGS;
+--source include/wait_for_binlog_checkpoint.inc
+SET sql_log_bin=0;
+--delimiter ||
+CREATE FUNCTION foo(x INT, d1 VARCHAR(500), d2 VARCHAR(500))
+ RETURNS INT DETERMINISTIC
+ BEGIN
+ IF d1 != '' THEN
+ SET debug_sync = d1;
+ END IF;
+ IF d2 != '' THEN
+ SET debug_sync = d2;
+ END IF;
+ RETURN x;
+ END
+||
+--delimiter ;
+SET sql_log_bin=1;
+SET @old_format=@@GLOBAL.binlog_format;
+SET GLOBAL binlog_format=statement;
+# We need to restart all parallel threads for the new global setting to
+# be copied to the session-level values.
+SET GLOBAL slave_parallel_threads=0;
+SET GLOBAL slave_parallel_threads=10;
+--source include/start_slave.inc
+
+# First make sure the first insert is ready to commit, but not queued yet.
+SET debug_sync='now WAIT_FOR ready1';
+
+--connection server_1
+SET gtid_domain_id=2;
+INSERT INTO t2 VALUES (foo(11,
+ 'commit_before_enqueue SIGNAL ready3 WAIT_FOR cont3',
+ 'commit_after_release_LOCK_prepare_ordered SIGNAL ready4 WAIT_FOR cont4'));
+SET gtid_domain_id=0;
+SELECT * FROM t2 WHERE a >= 10 ORDER BY a;
+
+--connection server_2
+# Now wait for the second insert to queue itself as the leader, and then
+# wait for more commits to queue up.
+SET debug_sync='now WAIT_FOR ready3';
+SET debug_sync='now SIGNAL cont3';
+SET debug_sync='now WAIT_FOR ready4';
+# Now allow the first insert to queue up to participate in group commit.
+SET debug_sync='now SIGNAL cont1';
+SET debug_sync='now WAIT_FOR ready2';
+# Finally allow the second insert to proceed and do the group commit.
+SET debug_sync='now SIGNAL cont4';
+
+--let $wait_condition= SELECT COUNT(*) = 2 FROM t2 WHERE a >= 10
+--source include/wait_condition.inc
+SELECT * FROM t2 WHERE a >= 10 ORDER BY a;
+# The two INSERT transactions should have been committed in opposite order,
+# but in the same group commit (seen by the presence of cid=# in the SHOW
+# BINLOG EVENTS output).
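+# (For illustration: in the show_binlog_events output below, GTID events that
+#  were part of one group commit all carry the same commit id, shown as
+#  "cid=#" after the test's normalization.)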
+--let $binlog_file= slave-bin.000002
+--source include/show_binlog_events.inc
+FLUSH LOGS;
+--source include/wait_for_binlog_checkpoint.inc
+
+# Restart all the slave parallel worker threads, to clear all debug_sync actions.
+--connection server_2
+--source include/stop_slave.inc
+SET GLOBAL slave_parallel_threads=0;
+SET GLOBAL slave_parallel_threads=10;
+SET debug_sync='RESET';
+--source include/start_slave.inc
+
+
+--echo *** Test that group-committed transactions on the master can replicate in parallel on the slave. ***
+--connection server_1
+SET debug_sync='RESET';
+FLUSH LOGS;
+--source include/wait_for_binlog_checkpoint.inc
+CREATE TABLE t3 (a INT PRIMARY KEY, b INT) ENGINE=InnoDB;
+# Create some sentinel rows so that the rows inserted in parallel fall into
+# separate gaps and do not cause gap lock conflicts.
+INSERT INTO t3 VALUES (1,1), (3,3), (5,5), (7,7);
+--save_master_pos
+--connection server_2
+--sync_with_master
+
+# We want to test that the transactions can execute out-of-order on
+# the slave, but still end up committing in-order, and in a single
+# group commit.
+#
+# The idea is to group-commit three transactions together on the master:
+# A, B, and C. On the slave, C will execute the insert first, then A,
+# and then B. But B manages to complete before A has time to commit, so
+# all three end up committing together.
+#
+# So we start by setting up some row locks that will block transactions
+# A and B from executing, allowing C to run first.
+
+--connection con_temp1
+BEGIN;
+INSERT INTO t3 VALUES (2,102);
+--connect (con_temp2,127.0.0.1,root,,test,$SERVER_MYPORT_2,)
+BEGIN;
+INSERT INTO t3 VALUES (4,104);
+
+# On the master, queue three INSERT transactions as a single group commit.
+--connect (con_temp3,127.0.0.1,root,,test,$SERVER_MYPORT_1,)
+SET debug_sync='commit_after_release_LOCK_prepare_ordered SIGNAL master_queued1 WAIT_FOR master_cont1';
+SET binlog_format=statement;
+send INSERT INTO t3 VALUES (2, foo(12,
+ 'commit_after_release_LOCK_prepare_ordered SIGNAL slave_queued1 WAIT_FOR slave_cont1',
+ ''));
+
+--connection server_1
+SET debug_sync='now WAIT_FOR master_queued1';
+
+--connect (con_temp4,127.0.0.1,root,,test,$SERVER_MYPORT_1,)
+SET debug_sync='commit_after_release_LOCK_prepare_ordered SIGNAL master_queued2';
+SET binlog_format=statement;
+send INSERT INTO t3 VALUES (4, foo(14,
+ 'commit_after_release_LOCK_prepare_ordered SIGNAL slave_queued2',
+ ''));
+
+--connection server_1
+SET debug_sync='now WAIT_FOR master_queued2';
+
+--connect (con_temp5,127.0.0.1,root,,test,$SERVER_MYPORT_1,)
+SET debug_sync='commit_after_release_LOCK_prepare_ordered SIGNAL master_queued3';
+SET binlog_format=statement;
+send INSERT INTO t3 VALUES (6, foo(16,
+ 'group_commit_waiting_for_prior SIGNAL slave_queued3',
+ ''));
+
+--connection server_1
+SET debug_sync='now WAIT_FOR master_queued3';
+SET debug_sync='now SIGNAL master_cont1';
+
+--connection con_temp3
+REAP;
+--connection con_temp4
+REAP;
+--connection con_temp5
+REAP;
+SET debug_sync='RESET';
+
+--connection server_1
+SELECT * FROM t3 ORDER BY a;
+--let $binlog_file= master-bin.000002
+--source include/show_binlog_events.inc
+
+# First, wait until insert 3 is ready to queue up for group commit, but is
+# waiting for insert 2 to commit before it can do so itself.
+--connection server_2
+SET debug_sync='now WAIT_FOR slave_queued3';
+
+# Next, let insert 1 proceed, and allow it to queue up as the group commit
+# leader, but let it wait for insert 2 to also queue up before proceeding.
+--connection con_temp1
+ROLLBACK;
+--connection server_2
+SET debug_sync='now WAIT_FOR slave_queued1';
+
+# Now let insert 2 proceed and queue up.
+--connection con_temp2
+ROLLBACK;
+--connection server_2
+SET debug_sync='now WAIT_FOR slave_queued2';
+# And finally, we can let insert 1 proceed and do the group commit with all
+# three insert transactions together.
+SET debug_sync='now SIGNAL slave_cont1';
+
+# Wait for the commit to complete and check that all three transactions
+# group-committed together (will be seen in the binlog as all three having
+# cid=# on their GTID event).
+--let $wait_condition= SELECT COUNT(*) = 3 FROM t3 WHERE a IN (2,4,6)
+--source include/wait_condition.inc
+SELECT * FROM t3 ORDER BY a;
+--let $binlog_file= slave-bin.000003
+--source include/show_binlog_events.inc
+
+
+--echo *** Test STOP SLAVE in parallel mode ***
+--connection server_2
+--source include/stop_slave.inc
+# Respawn all worker threads to clear any left-over debug_sync or other stuff.
+SET debug_sync='RESET';
+SET GLOBAL slave_parallel_threads=0;
+SET GLOBAL slave_parallel_threads=10;
+
+--connection server_1
+# Set up a couple of transactions. The first will be blocked halfway
+# through on a lock, and while it is blocked we initiate STOP SLAVE.
+# We then test that the halfway-initiated transaction is allowed to
+# complete, but no subsequent ones.
+# We have to use statement-based mode and set
+# binlog_direct_non_transactional_updates=0; otherwise the binlog will
+# be split into two event groups, one for the MyISAM part and one for the
+# InnoDB part.
+SET binlog_direct_non_transactional_updates=0;
+SET sql_log_bin=0;
+CALL mtr.add_suppression("Statement is unsafe because it accesses a non-transactional table after accessing a transactional table within the same transaction");
+SET sql_log_bin=1;
+BEGIN;
+INSERT INTO t2 VALUES (20);
+--disable_warnings
+INSERT INTO t1 VALUES (20);
+--enable_warnings
+INSERT INTO t2 VALUES (21);
+INSERT INTO t3 VALUES (20, 20);
+COMMIT;
+INSERT INTO t3 VALUES(21, 21);
+INSERT INTO t3 VALUES(22, 22);
+SET binlog_format=@old_format;
+--save_master_pos
+
+# Start a connection that will block the replicated transaction halfway.
+--connection con_temp1
+BEGIN;
+INSERT INTO t2 VALUES (21);
+
+--connection server_2
+START SLAVE;
+# Wait for the MyISAM change to be visible, after which replication will wait
+# for con_temp1 to roll back.
+--let $wait_condition= SELECT COUNT(*) = 1 FROM t1 WHERE a=20
+--source include/wait_condition.inc
+
+--connection con_temp2
+# Initiate slave stop. It will have to wait for the current event group
+# to complete.
+# The dbug injection causes debug_sync to signal 'wait_for_done_waiting'
+# when the SQL driver thread is ready.
+SET @old_dbug= @@GLOBAL.debug_dbug;
+SET GLOBAL debug_dbug="+d,rpl_parallel_wait_for_done_trigger";
+send STOP SLAVE;
+
+--connection con_temp1
+SET debug_sync='now WAIT_FOR wait_for_done_waiting';
+ROLLBACK;
+
+--connection con_temp2
+reap;
+SET GLOBAL debug_dbug=@old_dbug;
+SET debug_sync='RESET';
+
+--connection server_2
+--source include/wait_for_slave_to_stop.inc
+# We should see the first transaction applied, but not the two others.
+SELECT * FROM t1 WHERE a >= 20 ORDER BY a;
+SELECT * FROM t2 WHERE a >= 20 ORDER BY a;
+SELECT * FROM t3 WHERE a >= 20 ORDER BY a;
+
+--source include/start_slave.inc
+--sync_with_master
+SELECT * FROM t1 WHERE a >= 20 ORDER BY a;
+SELECT * FROM t2 WHERE a >= 20 ORDER BY a;
+SELECT * FROM t3 WHERE a >= 20 ORDER BY a;
+
+
+--connection server_2
+# Respawn all worker threads to clear any left-over debug_sync or other stuff.
+--source include/stop_slave.inc
+SET GLOBAL binlog_format=@old_format;
+SET GLOBAL slave_parallel_threads=0;
+SET GLOBAL slave_parallel_threads=10;
+--source include/start_slave.inc
+
+
+--echo *** Test killing slave threads at various wait points ***
+--echo *** 1. Test killing transaction waiting in commit for previous transaction to commit ***
+
+# Set up three transactions on the master that will be group-committed
+# together so they can be replicated in parallel on the slave.
+--connection con_temp3
+SET debug_sync='commit_after_release_LOCK_prepare_ordered SIGNAL master_queued1 WAIT_FOR master_cont1';
+SET binlog_format=statement;
+send INSERT INTO t3 VALUES (31, foo(31,
+ 'commit_before_prepare_ordered WAIT_FOR t2_waiting',
+ 'commit_after_prepare_ordered SIGNAL t1_ready WAIT_FOR t1_cont'));
+
+--connection server_1
+SET debug_sync='now WAIT_FOR master_queued1';
+
+--connection con_temp4
+SET debug_sync='commit_after_release_LOCK_prepare_ordered SIGNAL master_queued2';
+SET binlog_format=statement;
+BEGIN;
+# This insert is just there so that T2 waits while running a query that is
+# visible in SHOW PROCESSLIST, so we can find its thread_id to KILL later.
+INSERT INTO t3 VALUES (32, foo(32,
+ 'ha_write_row_end SIGNAL t2_query WAIT_FOR t2_cont',
+ ''));
+# This insert sets up debug_sync points so that T2 signals when it is at the
+# wait point where we want to kill it, and again when it has been killed.
+INSERT INTO t3 VALUES (33, foo(33,
+ 'group_commit_waiting_for_prior SIGNAL t2_waiting',
+ 'group_commit_waiting_for_prior_killed SIGNAL t2_killed'));
+send COMMIT;
+
+--connection server_1
+SET debug_sync='now WAIT_FOR master_queued2';
+
+--connection con_temp5
+SET debug_sync='commit_after_release_LOCK_prepare_ordered SIGNAL master_queued3';
+SET binlog_format=statement;
+send INSERT INTO t3 VALUES (34, foo(34,
+ '',
+ ''));
+
+--connection server_1
+SET debug_sync='now WAIT_FOR master_queued3';
+SET debug_sync='now SIGNAL master_cont1';
+
+--connection con_temp3
+REAP;
+--connection con_temp4
+REAP;
+--connection con_temp5
+REAP;
+
+--connection server_1
+SELECT * FROM t3 WHERE a >= 30 ORDER BY a;
+SET debug_sync='RESET';
+
+--connection server_2
+SET sql_log_bin=0;
+CALL mtr.add_suppression("Query execution was interrupted");
+CALL mtr.add_suppression("Commit failed due to failure of an earlier commit on which this one depends");
+CALL mtr.add_suppression("Slave: Connection was killed");
+SET sql_log_bin=1;
+# Wait until T2 is executing its insert of 32, then find it in SHOW
+# PROCESSLIST to learn its thread id for the KILL later.
+SET debug_sync='now WAIT_FOR t2_query';
+--let $thd_id= `SELECT ID FROM INFORMATION_SCHEMA.PROCESSLIST WHERE INFO LIKE '%foo(32%' AND INFO NOT LIKE '%LIKE%'`
+SET debug_sync='now SIGNAL t2_cont';
+
+# Wait until T2 has entered its wait for T1 to commit, and T1 has
+# progressed into its commit phase.
+SET debug_sync='now WAIT_FOR t1_ready';
+
+# Now kill the transaction T2.
+--replace_result $thd_id THD_ID
+eval KILL $thd_id;
+
+# Wait until T2 has reacted on the kill.
+SET debug_sync='now WAIT_FOR t2_killed';
+
+# Now we can allow T1 to proceed.
+SET debug_sync='now SIGNAL t1_cont';
+
+--let $slave_sql_errno= 1317,1927,1964
+--source include/wait_for_slave_sql_error.inc
+STOP SLAVE IO_THREAD;
+SELECT * FROM t3 WHERE a >= 30 ORDER BY a;
+
+# Now we have to disable the debug_sync statements, so they do not trigger
+# when the events are retried.
+SET debug_sync='RESET';
+SET GLOBAL slave_parallel_threads=0;
+SET GLOBAL slave_parallel_threads=10;
+SET sql_log_bin=0;
+DROP FUNCTION foo;
+--delimiter ||
+CREATE FUNCTION foo(x INT, d1 VARCHAR(500), d2 VARCHAR(500))
+ RETURNS INT DETERMINISTIC
+ BEGIN
+ RETURN x;
+ END
+||
+--delimiter ;
+SET sql_log_bin=1;
+
+--connection server_1
+INSERT INTO t3 VALUES (39,0);
+--save_master_pos
+
+--connection server_2
+--source include/start_slave.inc
+--sync_with_master
+SELECT * FROM t3 WHERE a >= 30 ORDER BY a;
+# Restore the foo() function.
+SET sql_log_bin=0;
+DROP FUNCTION foo;
+--delimiter ||
+CREATE FUNCTION foo(x INT, d1 VARCHAR(500), d2 VARCHAR(500))
+ RETURNS INT DETERMINISTIC
+ BEGIN
+ IF d1 != '' THEN
+ SET debug_sync = d1;
+ END IF;
+ IF d2 != '' THEN
+ SET debug_sync = d2;
+ END IF;
+ RETURN x;
+ END
+||
+--delimiter ;
+SET sql_log_bin=1;
+
+
+--connection server_2
+# Respawn all worker threads to clear any left-over debug_sync or other stuff.
+--source include/stop_slave.inc
+SET GLOBAL binlog_format=@old_format;
+SET GLOBAL slave_parallel_threads=0;
+SET GLOBAL slave_parallel_threads=10;
+--source include/start_slave.inc
+
+
+--echo *** 2. Same as (1), but without restarting IO thread after kill of SQL threads ***
+
+# Set up three transactions on the master that will be group-committed
+# together so they can be replicated in parallel on the slave.
+--connection con_temp3
+SET debug_sync='commit_after_release_LOCK_prepare_ordered SIGNAL master_queued1 WAIT_FOR master_cont1';
+SET binlog_format=statement;
+send INSERT INTO t3 VALUES (41, foo(41,
+ 'commit_before_prepare_ordered WAIT_FOR t2_waiting',
+ 'commit_after_prepare_ordered SIGNAL t1_ready WAIT_FOR t1_cont'));
+
+--connection server_1
+SET debug_sync='now WAIT_FOR master_queued1';
+
+--connection con_temp4
+SET debug_sync='commit_after_release_LOCK_prepare_ordered SIGNAL master_queued2';
+SET binlog_format=statement;
+BEGIN;
+# This insert is only there so that T2 waits while running a query that we can
+# see in SHOW PROCESSLIST, letting us grab its thread_id for the later KILL.
+INSERT INTO t3 VALUES (42, foo(42,
+ 'ha_write_row_end SIGNAL t2_query WAIT_FOR t2_cont',
+ ''));
+# This insert sets up debug_sync points so that T2 will signal when it reaches
+# the wait point where we want to kill it - and again when it has been killed.
+INSERT INTO t3 VALUES (43, foo(43,
+ 'group_commit_waiting_for_prior SIGNAL t2_waiting',
+ 'group_commit_waiting_for_prior_killed SIGNAL t2_killed'));
+send COMMIT;
+
+--connection server_1
+SET debug_sync='now WAIT_FOR master_queued2';
+
+--connection con_temp5
+SET debug_sync='commit_after_release_LOCK_prepare_ordered SIGNAL master_queued3';
+SET binlog_format=statement;
+send INSERT INTO t3 VALUES (44, foo(44,
+ '',
+ ''));
+
+--connection server_1
+SET debug_sync='now WAIT_FOR master_queued3';
+SET debug_sync='now SIGNAL master_cont1';
+
+--connection con_temp3
+REAP;
+--connection con_temp4
+REAP;
+--connection con_temp5
+REAP;
+
+--connection server_1
+SELECT * FROM t3 WHERE a >= 40 ORDER BY a;
+SET debug_sync='RESET';
+
+--connection server_2
+# Wait until T2 is executing its insert of 42, then find it in SHOW
+# PROCESSLIST to learn its thread id for the KILL later.
+SET debug_sync='now WAIT_FOR t2_query';
+--let $thd_id= `SELECT ID FROM INFORMATION_SCHEMA.PROCESSLIST WHERE INFO LIKE '%foo(42%' AND INFO NOT LIKE '%LIKE%'`
+SET debug_sync='now SIGNAL t2_cont';
+
+# Wait until T2 has entered its wait for T1 to commit, and T1 has
+# progressed into its commit phase.
+SET debug_sync='now WAIT_FOR t1_ready';
+
+# Now kill the transaction T2.
+--replace_result $thd_id THD_ID
+eval KILL $thd_id;
+
+# Wait until T2 has reacted on the kill.
+SET debug_sync='now WAIT_FOR t2_killed';
+
+# Now we can allow T1 to proceed.
+SET debug_sync='now SIGNAL t1_cont';
+
+--let $slave_sql_errno= 1317,1927,1964
+--source include/wait_for_slave_sql_error.inc
+
+# Now we have to disable the debug_sync statements, so they do not trigger
+# when the events are retried.
+SET debug_sync='RESET';
+SET GLOBAL slave_parallel_threads=0;
+SET GLOBAL slave_parallel_threads=10;
+SET sql_log_bin=0;
+DROP FUNCTION foo;
+--delimiter ||
+CREATE FUNCTION foo(x INT, d1 VARCHAR(500), d2 VARCHAR(500))
+ RETURNS INT DETERMINISTIC
+ BEGIN
+ RETURN x;
+ END
+||
+--delimiter ;
+SET sql_log_bin=1;
+
+--connection server_1
+INSERT INTO t3 VALUES (49,0);
+--save_master_pos
+
+--connection server_2
+START SLAVE SQL_THREAD;
+--sync_with_master
+SELECT * FROM t3 WHERE a >= 40 ORDER BY a;
+# Restore the foo() function.
+SET sql_log_bin=0;
+DROP FUNCTION foo;
+--delimiter ||
+CREATE FUNCTION foo(x INT, d1 VARCHAR(500), d2 VARCHAR(500))
+ RETURNS INT DETERMINISTIC
+ BEGIN
+ IF d1 != '' THEN
+ SET debug_sync = d1;
+ END IF;
+ IF d2 != '' THEN
+ SET debug_sync = d2;
+ END IF;
+ RETURN x;
+ END
+||
+--delimiter ;
+SET sql_log_bin=1;
+
+
+--connection server_2
+# Respawn all worker threads to clear any left-over debug_sync or other stuff.
+--source include/stop_slave.inc
+SET GLOBAL binlog_format=@old_format;
+SET GLOBAL slave_parallel_threads=0;
+SET GLOBAL slave_parallel_threads=10;
+--source include/start_slave.inc
+
+
+--echo *** 3. Same as (2), but not using gtid mode ***
+
+--connection server_2
+--source include/stop_slave.inc
+CHANGE MASTER TO master_use_gtid=no;
+--source include/start_slave.inc
+
+--connection server_1
+# Set up three transactions on the master that will be group-committed
+# together so they can be replicated in parallel on the slave.
+--connection con_temp3
+SET debug_sync='commit_after_release_LOCK_prepare_ordered SIGNAL master_queued1 WAIT_FOR master_cont1';
+SET binlog_format=statement;
+send INSERT INTO t3 VALUES (51, foo(51,
+ 'commit_before_prepare_ordered WAIT_FOR t2_waiting',
+ 'commit_after_prepare_ordered SIGNAL t1_ready WAIT_FOR t1_cont'));
+
+--connection server_1
+SET debug_sync='now WAIT_FOR master_queued1';
+
+--connection con_temp4
+SET debug_sync='commit_after_release_LOCK_prepare_ordered SIGNAL master_queued2';
+SET binlog_format=statement;
+BEGIN;
+# This insert is only there so that T2 waits while running a query that we can
+# see in SHOW PROCESSLIST, letting us grab its thread_id for the later KILL.
+INSERT INTO t3 VALUES (52, foo(52,
+ 'ha_write_row_end SIGNAL t2_query WAIT_FOR t2_cont',
+ ''));
+# This insert sets up debug_sync points so that T2 will signal when it reaches
+# the wait point where we want to kill it - and again when it has been killed.
+INSERT INTO t3 VALUES (53, foo(53,
+ 'group_commit_waiting_for_prior SIGNAL t2_waiting',
+ 'group_commit_waiting_for_prior_killed SIGNAL t2_killed'));
+send COMMIT;
+
+--connection server_1
+SET debug_sync='now WAIT_FOR master_queued2';
+
+--connection con_temp5
+SET debug_sync='commit_after_release_LOCK_prepare_ordered SIGNAL master_queued3';
+SET binlog_format=statement;
+send INSERT INTO t3 VALUES (54, foo(54,
+ '',
+ ''));
+
+--connection server_1
+SET debug_sync='now WAIT_FOR master_queued3';
+SET debug_sync='now SIGNAL master_cont1';
+
+--connection con_temp3
+REAP;
+--connection con_temp4
+REAP;
+--connection con_temp5
+REAP;
+
+--connection server_1
+SELECT * FROM t3 WHERE a >= 50 ORDER BY a;
+SET debug_sync='RESET';
+
+--connection server_2
+# Wait until T2 is executing its insert of 52, then find it in SHOW
+# PROCESSLIST to learn its thread id for the KILL later.
+SET debug_sync='now WAIT_FOR t2_query';
+--let $thd_id= `SELECT ID FROM INFORMATION_SCHEMA.PROCESSLIST WHERE INFO LIKE '%foo(52%' AND INFO NOT LIKE '%LIKE%'`
+SET debug_sync='now SIGNAL t2_cont';
+
+# Wait until T2 has entered its wait for T1 to commit, and T1 has
+# progressed into its commit phase.
+SET debug_sync='now WAIT_FOR t1_ready';
+
+# Now kill the transaction T2.
+--replace_result $thd_id THD_ID
+eval KILL $thd_id;
+
+# Wait until T2 has reacted on the kill.
+SET debug_sync='now WAIT_FOR t2_killed';
+
+# Now we can allow T1 to proceed.
+SET debug_sync='now SIGNAL t1_cont';
+
+--let $slave_sql_errno= 1317,1927,1964
+--source include/wait_for_slave_sql_error.inc
+SELECT * FROM t3 WHERE a >= 50 ORDER BY a;
+
+# Now we have to disable the debug_sync statements, so they do not trigger
+# when the events are retried.
+SET debug_sync='RESET';
+SET GLOBAL slave_parallel_threads=0;
+SET GLOBAL slave_parallel_threads=10;
+SET sql_log_bin=0;
+DROP FUNCTION foo;
+--delimiter ||
+CREATE FUNCTION foo(x INT, d1 VARCHAR(500), d2 VARCHAR(500))
+ RETURNS INT DETERMINISTIC
+ BEGIN
+ RETURN x;
+ END
+||
+--delimiter ;
+SET sql_log_bin=1;
+
+--connection server_1
+INSERT INTO t3 VALUES (59,0);
+--save_master_pos
+
+--connection server_2
+START SLAVE SQL_THREAD;
+--sync_with_master
+SELECT * FROM t3 WHERE a >= 50 ORDER BY a;
+# Restore the foo() function.
+SET sql_log_bin=0;
+DROP FUNCTION foo;
+--delimiter ||
+CREATE FUNCTION foo(x INT, d1 VARCHAR(500), d2 VARCHAR(500))
+ RETURNS INT DETERMINISTIC
+ BEGIN
+ IF d1 != '' THEN
+ SET debug_sync = d1;
+ END IF;
+ IF d2 != '' THEN
+ SET debug_sync = d2;
+ END IF;
+ RETURN x;
+ END
+||
+--delimiter ;
+SET sql_log_bin=1;
+
+
+--source include/stop_slave.inc
+CHANGE MASTER TO master_use_gtid=slave_pos;
+--source include/start_slave.inc
+
+--connection server_2
+# Respawn all worker threads to clear any left-over debug_sync or other stuff.
+--source include/stop_slave.inc
+SET GLOBAL binlog_format=@old_format;
+SET GLOBAL slave_parallel_threads=0;
+SET GLOBAL slave_parallel_threads=4;
+--source include/start_slave.inc
+
+
+--echo *** 4. Test killing thread that is waiting to start transaction until previous transaction commits ***
+
+# We set up four transactions T1, T2, T3, and T4 on the master. T2, T3, and T4
+# can run in parallel with each other (same group commit and commit id),
+# but not in parallel with T1.
+#
+# We use four worker threads, and each Ti will be queued on its own
+# worker thread. We will delay T1's commit; T3 will wait for T1 to begin
+# its commit before it can start. We will kill T3 during this wait, and
+# check that everything works correctly.
+#
+# It is rather tricky to get the correct thread id of the worker to kill.
+# We start by injecting four dummy transactions in a debug_sync-controlled
+# manner to be able to get known thread ids for the workers in a pool with
+# just 4 worker threads. Then we let in each of the real test transactions
+# T1-T4 one at a time in a way which allows us to know which transaction
+# ends up with which thread id.
+
+--connection server_1
+SET binlog_format=statement;
+SET gtid_domain_id=2;
+BEGIN;
+# This debug_sync will linger on and be used to control T4 later.
+INSERT INTO t3 VALUES (70, foo(70,
+ 'rpl_parallel_start_waiting_for_prior SIGNAL t4_waiting', ''));
+INSERT INTO t3 VALUES (60, foo(60,
+ 'ha_write_row_end SIGNAL d2_query WAIT_FOR d2_cont2',
+ 'rpl_parallel_end_of_group SIGNAL d2_done WAIT_FOR d2_cont'));
+COMMIT;
+SET gtid_domain_id=0;
+
+--connection server_2
+SET debug_sync='now WAIT_FOR d2_query';
+--let $d2_thd_id= `SELECT ID FROM INFORMATION_SCHEMA.PROCESSLIST WHERE INFO LIKE '%foo(60%' AND INFO NOT LIKE '%LIKE%'`
+
+--connection server_1
+SET gtid_domain_id=1;
+BEGIN;
+# These debug_sync's will linger on and be used to control T3 later.
+INSERT INTO t3 VALUES (61, foo(61,
+ 'rpl_parallel_start_waiting_for_prior SIGNAL t3_waiting',
+ 'rpl_parallel_start_waiting_for_prior_killed SIGNAL t3_killed'));
+INSERT INTO t3 VALUES (62, foo(62,
+ 'ha_write_row_end SIGNAL d1_query WAIT_FOR d1_cont2',
+ 'rpl_parallel_end_of_group SIGNAL d1_done WAIT_FOR d1_cont'));
+COMMIT;
+SET gtid_domain_id=0;
+
+--connection server_2
+SET debug_sync='now WAIT_FOR d1_query';
+--let $d1_thd_id= `SELECT ID FROM INFORMATION_SCHEMA.PROCESSLIST WHERE INFO LIKE '%foo(62%' AND INFO NOT LIKE '%LIKE%'`
+
+--connection server_1
+SET gtid_domain_id=0;
+INSERT INTO t3 VALUES (63, foo(63,
+ 'ha_write_row_end SIGNAL d0_query WAIT_FOR d0_cont2',
+ 'rpl_parallel_end_of_group SIGNAL d0_done WAIT_FOR d0_cont'));
+
+--connection server_2
+SET debug_sync='now WAIT_FOR d0_query';
+--let $d0_thd_id= `SELECT ID FROM INFORMATION_SCHEMA.PROCESSLIST WHERE INFO LIKE '%foo(63%' AND INFO NOT LIKE '%LIKE%'`
+
+--connection server_1
+SET gtid_domain_id=3;
+BEGIN;
+# These debug_sync's will linger on and be used to control T2 later.
+INSERT INTO t3 VALUES (68, foo(68,
+ 'rpl_parallel_start_waiting_for_prior SIGNAL t2_waiting', ''));
+INSERT INTO t3 VALUES (69, foo(69,
+ 'ha_write_row_end SIGNAL d3_query WAIT_FOR d3_cont2',
+ 'rpl_parallel_end_of_group SIGNAL d3_done WAIT_FOR d3_cont'));
+COMMIT;
+SET gtid_domain_id=0;
+
+--connection server_2
+SET debug_sync='now WAIT_FOR d3_query';
+--let $d3_thd_id= `SELECT ID FROM INFORMATION_SCHEMA.PROCESSLIST WHERE INFO LIKE '%foo(69%' AND INFO NOT LIKE '%LIKE%'`
+
+SET debug_sync='now SIGNAL d2_cont2';
+SET debug_sync='now WAIT_FOR d2_done';
+SET debug_sync='now SIGNAL d1_cont2';
+SET debug_sync='now WAIT_FOR d1_done';
+SET debug_sync='now SIGNAL d0_cont2';
+SET debug_sync='now WAIT_FOR d0_done';
+SET debug_sync='now SIGNAL d3_cont2';
+SET debug_sync='now WAIT_FOR d3_done';
+
+# Now prepare the real transactions T1, T2, T3, T4 on the master.
+
+--connection con_temp3
+# Create transaction T1.
+SET binlog_format=statement;
+INSERT INTO t3 VALUES (64, foo(64,
+ 'rpl_parallel_before_mark_start_commit SIGNAL t1_waiting WAIT_FOR t1_cont', ''));
+
+# Create transaction T2, as a group commit leader on the master.
+SET debug_sync='commit_after_release_LOCK_prepare_ordered SIGNAL master_queued2 WAIT_FOR master_cont2';
+send INSERT INTO t3 VALUES (65, foo(65, '', ''));
+
+--connection server_1
+SET debug_sync='now WAIT_FOR master_queued2';
+
+--connection con_temp4
+# Create transaction T3, participating in T2's group commit.
+SET debug_sync='commit_after_release_LOCK_prepare_ordered SIGNAL master_queued3';
+send INSERT INTO t3 VALUES (66, foo(66, '', ''));
+
+--connection server_1
+SET debug_sync='now WAIT_FOR master_queued3';
+
+--connection con_temp5
+# Create transaction T4, participating in group commit with T2 and T3.
+SET debug_sync='commit_after_release_LOCK_prepare_ordered SIGNAL master_queued4';
+send INSERT INTO t3 VALUES (67, foo(67, '', ''));
+
+--connection server_1
+SET debug_sync='now WAIT_FOR master_queued4';
+SET debug_sync='now SIGNAL master_cont2';
+
+--connection con_temp3
+REAP;
+--connection con_temp4
+REAP;
+--connection con_temp5
+REAP;
+
+--connection server_1
+SELECT * FROM t3 WHERE a >= 60 ORDER BY a;
+SET debug_sync='RESET';
+
+--connection server_2
+# Now we have the four transactions pending for replication on the slave.
+# Let them be queued for our four worker threads in a controlled fashion.
+# We put them at a stage where T1 is delayed and T3 is waiting for T1 to
+# commit before T3 can start. Then we kill T3.
+
+# Make the worker D0 free, and wait for T1 to be queued in it.
+SET debug_sync='now SIGNAL d0_cont';
+SET debug_sync='now WAIT_FOR t1_waiting';
+
+# Make the worker D3 free, and wait for T2 to be queued in it.
+SET debug_sync='now SIGNAL d3_cont';
+SET debug_sync='now WAIT_FOR t2_waiting';
+
+# Now release worker D1, and wait for T3 to be queued in it.
+# T3 will wait for T1 to commit before it can start.
+SET debug_sync='now SIGNAL d1_cont';
+SET debug_sync='now WAIT_FOR t3_waiting';
+
+# Release worker D2. Wait for T4 to be queued, so we are sure it has
+# received the debug_sync signal (else we might overwrite it with the
+# next debug_sync).
+SET debug_sync='now SIGNAL d2_cont';
+SET debug_sync='now WAIT_FOR t4_waiting';
+
+# Now we kill the waiting transaction T3 in worker D1.
+--replace_result $d1_thd_id THD_ID
+eval KILL $d1_thd_id;
+
+# Wait until T3 has reacted on the kill.
+SET debug_sync='now WAIT_FOR t3_killed';
+
+# Now we can allow T1 to proceed.
+SET debug_sync='now SIGNAL t1_cont';
+
+--let $slave_sql_errno= 1317,1927,1964
+--source include/wait_for_slave_sql_error.inc
+STOP SLAVE IO_THREAD;
+# Since T2, T3, and T4 run in parallel, we cannot be sure whether T2 will have
+# time to commit before the stop. However, T1 should commit, and T3/T4 may
+# not have committed. (After the slave restart we check that all of them
+# eventually become committed.)
+SELECT * FROM t3 WHERE a >= 60 AND a != 65 ORDER BY a;
+
+# Now we have to disable the debug_sync statements, so they do not trigger
+# when the events are retried.
+SET debug_sync='RESET';
+SET GLOBAL slave_parallel_threads=0;
+SET GLOBAL slave_parallel_threads=10;
+SET sql_log_bin=0;
+DROP FUNCTION foo;
+--delimiter ||
+CREATE FUNCTION foo(x INT, d1 VARCHAR(500), d2 VARCHAR(500))
+ RETURNS INT DETERMINISTIC
+ BEGIN
+ RETURN x;
+ END
+||
+--delimiter ;
+SET sql_log_bin=1;
+
+--connection server_1
+UPDATE t3 SET b=b+1 WHERE a=60;
+--save_master_pos
+
+--connection server_2
+--source include/start_slave.inc
+--sync_with_master
+SELECT * FROM t3 WHERE a >= 60 ORDER BY a;
+# Restore the foo() function.
+SET sql_log_bin=0;
+DROP FUNCTION foo;
+--delimiter ||
+CREATE FUNCTION foo(x INT, d1 VARCHAR(500), d2 VARCHAR(500))
+ RETURNS INT DETERMINISTIC
+ BEGIN
+ IF d1 != '' THEN
+ SET debug_sync = d1;
+ END IF;
+ IF d2 != '' THEN
+ SET debug_sync = d2;
+ END IF;
+ RETURN x;
+ END
+||
+--delimiter ;
+SET sql_log_bin=1;
+
+--connection server_2
+# Respawn all worker threads to clear any left-over debug_sync or other stuff.
+--source include/stop_slave.inc
+SET GLOBAL binlog_format=@old_format;
+SET GLOBAL slave_parallel_threads=0;
+SET GLOBAL slave_parallel_threads=10;
+--source include/start_slave.inc
+
+
+--echo *** 5. Test killing thread that is waiting for queue of max length to shorten ***
+
+# Find the thread id of the driver SQL thread that we want to kill.
+--let $wait_condition= SELECT COUNT(*) = 1 FROM INFORMATION_SCHEMA.PROCESSLIST WHERE STATE LIKE '%Slave has read all relay log%'
+--source include/wait_condition.inc
+--let $thd_id= `SELECT ID FROM INFORMATION_SCHEMA.PROCESSLIST WHERE STATE LIKE '%Slave has read all relay log%'`
+SET @old_max_queued= @@GLOBAL.slave_parallel_max_queued;
+SET GLOBAL slave_parallel_max_queued=9000;
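+# The 9000-byte limit is deliberately smaller than the ~10000-byte event
+# injected below, so the SQL driver thread will have to wait for the worker
+# queue to drain - which is where we will kill it.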
+
+--connection server_1
+--let bigstring= `SELECT REPEAT('x', 10000)`
+SET binlog_format=statement;
+# Create an event that will wait to be signalled.
+INSERT INTO t3 VALUES (80, foo(0,
+ 'ha_write_row_end SIGNAL query_waiting WAIT_FOR query_cont', ''));
+
+--connection server_2
+SET debug_sync='now WAIT_FOR query_waiting';
+# Inject that the SQL driver thread will signal `wait_queue_ready' to debug_sync
+# as it goes to wait for the event queue to become smaller than the value of
+# @@slave_parallel_max_queued.
+SET @old_dbug= @@GLOBAL.debug_dbug;
+SET GLOBAL debug_dbug="+d,rpl_parallel_wait_queue_max";
+
+--connection server_1
+--disable_query_log
+# Create an event that will fill up the queue.
+# The Xid event at the end of the event group will have to wait for the Query
+# event with the INSERT to drain so the queue becomes shorter. However that in
+# turn waits for the prior event group to continue.
+eval INSERT INTO t3 VALUES (81, LENGTH('$bigstring'));
+--enable_query_log
+SELECT * FROM t3 WHERE a >= 80 ORDER BY a;
+
+--connection server_2
+SET debug_sync='now WAIT_FOR wait_queue_ready';
+
+--replace_result $thd_id THD_ID
+eval KILL $thd_id;
+
+SET debug_sync='now WAIT_FOR wait_queue_killed';
+SET debug_sync='now SIGNAL query_cont';
+
+--let $slave_sql_errno= 1317,1927,1964
+--source include/wait_for_slave_sql_error.inc
+STOP SLAVE IO_THREAD;
+
+SET GLOBAL debug_dbug=@old_dbug;
+SET GLOBAL slave_parallel_max_queued= @old_max_queued;
+
+--connection server_1
+INSERT INTO t3 VALUES (82,0);
+SET binlog_format=@old_format;
+--save_master_pos
+
+--connection server_2
+SET debug_sync='RESET';
+--source include/start_slave.inc
+--sync_with_master
+SELECT * FROM t3 WHERE a >= 80 ORDER BY a;
+
+
+--connection server_2
+--source include/stop_slave.inc
+SET GLOBAL binlog_format=@old_format;
+SET GLOBAL slave_parallel_threads=0;
+SET GLOBAL slave_parallel_threads=10;
+--source include/start_slave.inc
+
+--echo *** MDEV-5788 Incorrect free of rgi->deferred_events in parallel replication ***
+
+--connection server_2
+# Use just two worker threads, so we are sure to get the rpl_group_info added
+# to the free list, which is what triggered the bug.
+--source include/stop_slave.inc
+SET GLOBAL replicate_ignore_table="test.t3";
+SET GLOBAL slave_parallel_threads=2;
+--source include/start_slave.inc
+
+--connection server_1
+INSERT INTO t3 VALUES (100, rand());
+INSERT INTO t3 VALUES (101, rand());
+
+--save_master_pos
+
+--connection server_2
+--sync_with_master
+
+--connection server_1
+INSERT INTO t3 VALUES (102, rand());
+INSERT INTO t3 VALUES (103, rand());
+INSERT INTO t3 VALUES (104, rand());
+INSERT INTO t3 VALUES (105, rand());
+
+--save_master_pos
+
+--connection server_2
+--sync_with_master
+--source include/stop_slave.inc
+SET GLOBAL replicate_ignore_table="";
+--source include/start_slave.inc
+
+--connection server_1
+INSERT INTO t3 VALUES (106, rand());
+INSERT INTO t3 VALUES (107, rand());
+--save_master_pos
+
+--connection server_2
+--sync_with_master
+--replace_column 2 #
+SELECT * FROM t3 WHERE a >= 100 ORDER BY a;
+
+
+--echo *** MDEV-5921: In parallel replication, an error is not correctly signalled to the next transaction ***
+
+--connection server_2
+--source include/stop_slave.inc
+SET GLOBAL slave_parallel_threads=10;
+--source include/start_slave.inc
+
+--connection server_1
+INSERT INTO t3 VALUES (110, 1);
+--save_master_pos
+
+--connection server_2
+--sync_with_master
+SELECT * FROM t3 WHERE a >= 110 ORDER BY a;
+# Inject a duplicate key error.
+SET sql_log_bin=0;
+INSERT INTO t3 VALUES (111, 666);
+SET sql_log_bin=1;
+
+--connection server_1
+
+# Create a group commit with two inserts; the first one conflicts with a row on the slave.
+--connect (con1,127.0.0.1,root,,test,$SERVER_MYPORT_1,)
+SET debug_sync='commit_after_release_LOCK_prepare_ordered SIGNAL master_queued1 WAIT_FOR master_cont1';
+send INSERT INTO t3 VALUES (111, 2);
+--connection server_1
+SET debug_sync='now WAIT_FOR master_queued1';
+
+--connect (con2,127.0.0.1,root,,test,$SERVER_MYPORT_1,)
+SET debug_sync='commit_after_release_LOCK_prepare_ordered SIGNAL master_queued2';
+send INSERT INTO t3 VALUES (112, 3);
+
+--connection server_1
+SET debug_sync='now WAIT_FOR master_queued2';
+SET debug_sync='now SIGNAL master_cont1';
+
+--connection con1
+REAP;
+--connection con2
+REAP;
+SET debug_sync='RESET';
+--save_master_pos
+
+--connection server_2
+--let $slave_sql_errno= 1062
+--source include/wait_for_slave_sql_error.inc
+--source include/wait_for_slave_sql_to_stop.inc
+# We should not see the row (112,3) here; it should be rolled back due to the
+# error signal from the prior transaction.
+SELECT * FROM t3 WHERE a >= 110 ORDER BY a;
+SET sql_log_bin=0;
+DELETE FROM t3 WHERE a=111 AND b=666;
+SET sql_log_bin=1;
+START SLAVE SQL_THREAD;
+--sync_with_master
+SELECT * FROM t3 WHERE a >= 110 ORDER BY a;
+
+
+--echo ***MDEV-5914: Parallel replication deadlock due to InnoDB lock conflicts ***
+--connection server_2
+--source include/stop_slave.inc
+
+--connection server_1
+CREATE TABLE t4 (a INT PRIMARY KEY, b INT, KEY b_idx(b)) ENGINE=InnoDB;
+INSERT INTO t4 VALUES (1,NULL), (2,2), (3,NULL), (4,4), (5, NULL), (6, 6);
+
+# Create a group commit with UPDATE and DELETE, in that order.
+# The bug was that while the UPDATE's row lock does not block the DELETE, the
+# DELETE's gap lock _does_ block the UPDATE. This could cause a deadlock
+# on the slave.
+--connection con1
+SET debug_sync='commit_after_release_LOCK_prepare_ordered SIGNAL master_queued1 WAIT_FOR master_cont1';
+send UPDATE t4 SET b=NULL WHERE a=6;
+--connection server_1
+SET debug_sync='now WAIT_FOR master_queued1';
+
+--connection con2
+SET debug_sync='commit_after_release_LOCK_prepare_ordered SIGNAL master_queued2';
+send DELETE FROM t4 WHERE b <= 3;
+
+--connection server_1
+SET debug_sync='now WAIT_FOR master_queued2';
+SET debug_sync='now SIGNAL master_cont1';
+
+--connection con1
+REAP;
+--connection con2
+REAP;
+SET debug_sync='RESET';
+--save_master_pos
+
+--connection server_2
+--source include/start_slave.inc
+--sync_with_master
+--source include/stop_slave.inc
+
+SELECT * FROM t4 ORDER BY a;
+
+
+# Another example, this one with INSERT vs. DELETE
+--connection server_1
+DELETE FROM t4;
+INSERT INTO t4 VALUES (1,NULL), (2,2), (3,NULL), (4,4), (5, NULL), (6, 6);
+
+# Create a group commit with INSERT and DELETE, in that order.
+# The bug was that while the INSERT's insert intention lock does not block
+# the DELETE, the DELETE's gap lock _does_ block the INSERT. This could cause
+# a deadlock on the slave.
+--connection con1
+SET debug_sync='commit_after_release_LOCK_prepare_ordered SIGNAL master_queued1 WAIT_FOR master_cont1';
+send INSERT INTO t4 VALUES (7, NULL);
+--connection server_1
+SET debug_sync='now WAIT_FOR master_queued1';
+
+--connection con2
+SET debug_sync='commit_after_release_LOCK_prepare_ordered SIGNAL master_queued2';
+send DELETE FROM t4 WHERE b <= 3;
+
+--connection server_1
+SET debug_sync='now WAIT_FOR master_queued2';
+SET debug_sync='now SIGNAL master_cont1';
+
+--connection con1
+REAP;
+--connection con2
+REAP;
+SET debug_sync='RESET';
+--save_master_pos
+
+--connection server_2
+--source include/start_slave.inc
+--sync_with_master
+--source include/stop_slave.inc
+
+SELECT * FROM t4 ORDER BY a;
+
+
+# MDEV-6549, failing to update gtid_slave_pos for a transaction that was retried.
+# The problem was that when a transaction updates the mysql.gtid_slave_pos
+# table, it clears the flag that marks that there is a GTID position that
+# needs to be updated. Then, if the transaction got killed after that due
+# to a deadlock, the subsequent retry would fail to notice that the GTID needs
+# to be recorded in gtid_slave_pos.
+#
+# (In the original bug report, the symptom was an assertion; this was however
+# just a side effect of the missing update of gtid_slave_pos, which also
+# happened to cause a missing clear of OPTION_GTID_BEGIN).
+--connection server_1
+DELETE FROM t4;
+INSERT INTO t4 VALUES (1,NULL), (2,2), (3,NULL), (4,4), (5, NULL), (6, 6);
+
+# Create two transactions that can run in parallel on the slave but cause
+# a deadlock if the second runs before the first.
+--connection con1
+SET debug_sync='commit_after_release_LOCK_prepare_ordered SIGNAL master_queued1 WAIT_FOR master_cont1';
+send UPDATE t4 SET b=NULL WHERE a=6;
+--connection server_1
+SET debug_sync='now WAIT_FOR master_queued1';
+
+--connection con2
+# Must use statement-based binlogging. Otherwise the transaction will not be
+# binlogged at all, as it modifies no rows.
+SET @old_format= @@SESSION.binlog_format;
+SET binlog_format='statement';
+SET debug_sync='commit_after_release_LOCK_prepare_ordered SIGNAL master_queued2';
+send DELETE FROM t4 WHERE b <= 1;
+
+--connection server_1
+SET debug_sync='now WAIT_FOR master_queued2';
+SET debug_sync='now SIGNAL master_cont1';
+
+--connection con1
+REAP;
+--connection con2
+REAP;
+SET @old_format=@@GLOBAL.binlog_format;
+SET debug_sync='RESET';
+--save_master_pos
+--let $last_gtid= `SELECT @@last_gtid`
+
+--connection server_2
+# Disable the usual skip of gap locks for transactions that are run in
+# parallel, using DBUG. This allows the deadlock to occur, and this in turn
+# triggers a retry of the second transaction, exercising the code that was
+# buggy and caused the gtid_slave_pos update to be skipped during the retry.
+SET @old_dbug= @@GLOBAL.debug_dbug;
+SET GLOBAL debug_dbug="+d,disable_thd_need_ordering_with";
+--source include/start_slave.inc
+--sync_with_master
+SET GLOBAL debug_dbug=@old_dbug;
+
+SELECT * FROM t4 ORDER BY a;
+# Check that the GTID of the second transaction was correctly recorded in
+# gtid_slave_pos, in the variable as well as in the table.
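+# A MariaDB GTID has the form domain_id-server_id-seq_no; the second check
+# below rebuilds it with CONCAT from the mysql.gtid_slave_pos columns.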
+--replace_result $last_gtid GTID
+eval SET @last_gtid= '$last_gtid';
+SELECT IF(@@gtid_slave_pos LIKE CONCAT('%',@last_gtid,'%'), "GTID found ok",
+ CONCAT("GTID ", @last_gtid, " not found in gtid_slave_pos=", @@gtid_slave_pos))
+ AS result;
+SELECT "ROW FOUND" AS `Is the row found?`
+ FROM mysql.gtid_slave_pos
+ WHERE CONCAT(domain_id, "-", server_id, "-", seq_no) = @last_gtid;
+
+
+--echo *** MDEV-5938: Exec_master_log_pos not updated at log rotate in parallel replication ***
+--connection server_2
+--source include/stop_slave.inc
+SET GLOBAL slave_parallel_threads=1;
+SET DEBUG_SYNC= 'RESET';
+--source include/start_slave.inc
+
+--connection server_1
+CREATE TABLE t5 (a INT PRIMARY KEY, b INT);
+INSERT INTO t5 VALUES (1,1);
+INSERT INTO t5 VALUES (2,2), (3,8);
+INSERT INTO t5 VALUES (4,16);
+--save_master_pos
+
+--connection server_2
+--sync_with_master
+let $io_file= query_get_value(SHOW SLAVE STATUS, Master_Log_File, 1);
+let $io_pos= query_get_value(SHOW SLAVE STATUS, Read_Master_Log_Pos, 1);
+let $sql_file= query_get_value(SHOW SLAVE STATUS, Relay_Master_Log_File, 1);
+let $sql_pos= query_get_value(SHOW SLAVE STATUS, Exec_Master_Log_Pos, 1);
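+# With the slave fully caught up, the executed (SQL thread) file and position
+# must match the read (IO thread) file and position; the hidden evals below
+# print OK or Not ok accordingly.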
+--disable_query_log
+eval SELECT IF('$io_file' = '$sql_file', "OK", "Not ok, $io_file <> $sql_file") AS test_check;
+eval SELECT IF('$io_pos' = '$sql_pos', "OK", "Not ok, $io_pos <> $sql_pos") AS test_check;
+--enable_query_log
+
+--connection server_1
+FLUSH LOGS;
+--source include/wait_for_binlog_checkpoint.inc
+--save_master_pos
+
+--connection server_2
+--sync_with_master
+let $io_file= query_get_value(SHOW SLAVE STATUS, Master_Log_File, 1);
+let $io_pos= query_get_value(SHOW SLAVE STATUS, Read_Master_Log_Pos, 1);
+let $sql_file= query_get_value(SHOW SLAVE STATUS, Relay_Master_Log_File, 1);
+let $sql_pos= query_get_value(SHOW SLAVE STATUS, Exec_Master_Log_Pos, 1);
+--disable_query_log
+eval SELECT IF('$io_file' = '$sql_file', "OK", "Not ok, $io_file <> $sql_file") AS test_check;
+eval SELECT IF('$io_pos' = '$sql_pos', "OK", "Not ok, $io_pos <> $sql_pos") AS test_check;
+--enable_query_log
+
+
+--echo *** MDEV_6435: Incorrect error handling when query binlogged partially on master with "killed" error ***
+
+--connection server_1
+CREATE TABLE t6 (a INT) ENGINE=MyISAM;
+CREATE TRIGGER tr AFTER INSERT ON t6 FOR EACH ROW SET @a = 1;
+
+--connection con1
+SET @old_format= @@binlog_format;
+SET binlog_format= statement;
+--let $conid = `SELECT CONNECTION_ID()`
+SET debug_sync='sp_head_execute_before_loop SIGNAL ready WAIT_FOR cont';
+send INSERT INTO t6 VALUES (1), (2), (3);
+
+--connection server_1
+SET debug_sync='now WAIT_FOR ready';
+--replace_result $conid CONID
+eval KILL QUERY $conid;
+SET debug_sync='now SIGNAL cont';
+
+--connection con1
+--error ER_QUERY_INTERRUPTED
+--reap
+SET binlog_format= @old_format;
+SET debug_sync='RESET';
+--let $after_error_gtid_pos= `SELECT @@gtid_binlog_pos`
+
+--connection server_1
+SET debug_sync='RESET';
+
+
+--connection server_2
+--let $slave_sql_errno= 1317
+--source include/wait_for_slave_sql_error.inc
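+# The INSERT was killed on the master after being only partially binlogged,
+# so the slave SQL thread stops with an error. To continue, set gtid_slave_pos
+# to the master's GTID position recorded just after the failed statement, so
+# that the restarted slave does not try to execute the broken event group
+# again.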
+STOP SLAVE IO_THREAD;
+--replace_result $after_error_gtid_pos AFTER_ERROR_GTID_POS
+eval SET GLOBAL gtid_slave_pos= '$after_error_gtid_pos';
+--source include/start_slave.inc
+
+--connection server_1
+INSERT INTO t6 VALUES (4);
+SELECT * FROM t6 ORDER BY a;
+--save_master_pos
+
+--connection server_2
+--sync_with_master
+SELECT * FROM t6 ORDER BY a;
+
+
+--echo *** MDEV-6551: Some replication errors are ignored if slave_parallel_threads > 0 ***
+
+--connection server_1
+INSERT INTO t2 VALUES (31);
+--let $gtid1= `SELECT @@LAST_GTID`
+--source include/save_master_gtid.inc
+
+--connection server_2
+--source include/sync_with_master_gtid.inc
+--source include/stop_slave.inc
+SET GLOBAL slave_parallel_threads= 0;
+--source include/start_slave.inc
+
+# Force a duplicate key error on the slave.
+SET sql_log_bin= 0;
+INSERT INTO t2 VALUES (32);
+SET sql_log_bin= 1;
+
+--connection server_1
+INSERT INTO t2 VALUES (32);
+--let $gtid2= `SELECT @@LAST_GTID`
+# Rotate the binlog; the bug is triggered when the master binlog file changes
+# after the event group that causes the duplicate key error.
+FLUSH LOGS;
+INSERT INTO t2 VALUES (33);
+INSERT INTO t2 VALUES (34);
+SELECT * FROM t2 WHERE a >= 30 ORDER BY a;
+--source include/save_master_gtid.inc
+
+--connection server_2
+--let $slave_sql_errno= 1062
+--source include/wait_for_slave_sql_error.inc
+
+--connection server_2
+--source include/stop_slave_io.inc
+SET GLOBAL slave_parallel_threads=10;
+START SLAVE;
+
+--let $slave_sql_errno= 1062
+--source include/wait_for_slave_sql_error.inc
+
+# Note: the IO thread is still running at this point.
+# The bug seems to have been that restarting the SQL thread after an error,
+# with the IO thread still running, somehow picked up a later relay log
+# position and thus ended up skipping the failing event rather than
+# re-executing it.
+
+START SLAVE SQL_THREAD;
+--let $slave_sql_errno= 1062
+--source include/wait_for_slave_sql_error.inc
+
+SELECT * FROM t2 WHERE a >= 30 ORDER BY a;
+
+# Skip the duplicate error, so we can proceed.
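+# With GTID-based replication, sql_slave_skip_counter is not supported (the
+# attempt below fails with ER_SLAVE_SKIP_NOT_IN_GTID); instead, the failing
+# transaction's GTID is spliced into gtid_slave_pos so the slave steps past it.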
+--error ER_SLAVE_SKIP_NOT_IN_GTID
+SET sql_slave_skip_counter= 1;
+--source include/stop_slave_io.inc
+--disable_query_log
+eval SET GLOBAL gtid_slave_pos = REPLACE(@@gtid_slave_pos, "$gtid1", "$gtid2");
+--enable_query_log
+--source include/start_slave.inc
+--source include/sync_with_master_gtid.inc
+
+SELECT * FROM t2 WHERE a >= 30 ORDER BY a;
+
+
+--echo *** MDEV-6775: Wrong binlog order in parallel replication ***
+--connection server_1
+# A bit tricky bug to reproduce. On the master, we binlog in statement-mode
+# two transactions, an UPDATE followed by a DELETE. On the slave, we replicate
+# with binlog-mode set to ROW, which means the DELETE, which modifies no rows,
+# is not binlogged. Then we inject a wait in the group commit code on the
+# slave, shortly before the actual commit of the UPDATE. The bug was that the
+# DELETE could wake up from wait_for_prior_commit() before the commit of the
+# UPDATE. So the test could see the slave position updated to after DELETE,
+# while the UPDATE was still not visible.
+DELETE FROM t4;
+INSERT INTO t4 VALUES (1,NULL), (3,NULL), (4,4), (5, NULL), (6, 6);
+--source include/save_master_gtid.inc
+
+--connection server_2
+--source include/sync_with_master_gtid.inc
+--source include/stop_slave.inc
+SET @old_dbug= @@GLOBAL.debug_dbug;
+SET GLOBAL debug_dbug="+d,inject_binlog_commit_before_get_LOCK_log";
+SET @old_format=@@GLOBAL.binlog_format;
+SET GLOBAL binlog_format=ROW;
+# Re-spawn the worker threads to be sure they pick up the new binlog format
+SET GLOBAL slave_parallel_threads=0;
+SET GLOBAL slave_parallel_threads=10;
+
+--connection con1
+SET @old_format= @@binlog_format;
+SET binlog_format= statement;
+SET debug_sync='commit_after_release_LOCK_prepare_ordered SIGNAL master_queued1 WAIT_FOR master_cont1';
+send UPDATE t4 SET b=NULL WHERE a=6;
+--connection server_1
+SET debug_sync='now WAIT_FOR master_queued1';
+
+--connection con2
+SET @old_format= @@binlog_format;
+SET binlog_format= statement;
+SET debug_sync='commit_after_release_LOCK_prepare_ordered SIGNAL master_queued2';
+send DELETE FROM t4 WHERE b <= 3;
+
+--connection server_1
+SET debug_sync='now WAIT_FOR master_queued2';
+SET debug_sync='now SIGNAL master_cont1';
+
+--connection con1
+REAP;
+SET binlog_format= @old_format;
+--connection con2
+REAP;
+SET binlog_format= @old_format;
+SET debug_sync='RESET';
+--save_master_pos
+SELECT * FROM t4 ORDER BY a;
+
+--connection server_2
+--source include/start_slave.inc
+SET debug_sync= 'now WAIT_FOR waiting';
+--sync_with_master
+SELECT * FROM t4 ORDER BY a;
+SET debug_sync= 'now SIGNAL cont';
+
+# Re-spawn the worker threads to remove any DBUG injections or DEBUG_SYNC.
+--source include/stop_slave.inc
+SET GLOBAL debug_dbug=@old_dbug;
+SET GLOBAL binlog_format= @old_format;
+SET GLOBAL slave_parallel_threads=0;
+SET GLOBAL slave_parallel_threads=10;
+--source include/start_slave.inc
+
+
+--echo *** MDEV-7237: Parallel replication: incorrect relaylog position after stop/start the slave ***
+--connection server_1
+INSERT INTO t2 VALUES (40);
+--save_master_pos
+
+--connection server_2
+--sync_with_master
+--source include/stop_slave.inc
+CHANGE MASTER TO master_use_gtid=no;
+SET @old_dbug= @@GLOBAL.debug_dbug;
+# This DBUG injection causes a DEBUG_SYNC signal "scheduled_gtid_0_x_100" when
+# GTID 0-1-100 has been scheduled for and fetched by a worker thread.
+SET GLOBAL debug_dbug="+d,rpl_parallel_scheduled_gtid_0_x_100";
+# This DBUG injection causes a DEBUG_SYNC signal "wait_for_done_waiting" when
+# STOP SLAVE has signalled all worker threads to stop.
+SET GLOBAL debug_dbug="+d,rpl_parallel_wait_for_done_trigger";
+# Reset worker threads to make DBUG setting catch on.
+SET GLOBAL slave_parallel_threads=0;
+SET GLOBAL slave_parallel_threads=10;
+
+
+--connection server_1
+# Setup some transaction for the slave to replicate.
+INSERT INTO t2 VALUES (41);
+INSERT INTO t2 VALUES (42);
+# Need to log the DELETE in statement format, so we can see it in processlist.
+SET @old_format= @@binlog_format;
+SET binlog_format= statement;
+DELETE FROM t2 WHERE a=40;
+SET binlog_format= @old_format;
+INSERT INTO t2 VALUES (43);
+INSERT INTO t2 VALUES (44);
+# Force the slave to switch to a new relay log file.
+FLUSH LOGS;
+INSERT INTO t2 VALUES (45);
+# Inject a GTID 0-1-100, which will trigger a DEBUG_SYNC signal when this
+# transaction has been fetched by a worker thread.
+SET gtid_seq_no=100;
+INSERT INTO t2 VALUES (46);
+--save_master_pos
+
+--connection con_temp2
+# Temporarily block the DELETE on a=40 from completing.
+BEGIN;
+SELECT * FROM t2 WHERE a=40 FOR UPDATE;
+
+
+--connection server_2
+--source include/start_slave.inc
+
+# Wait for a worker thread to start on the DELETE that will be blocked
+# temporarily by the SELECT FOR UPDATE.
+--let $wait_condition= SELECT count(*) > 0 FROM information_schema.processlist WHERE state='updating' and info LIKE '%DELETE FROM t2 WHERE a=40%'
+--source include/wait_condition.inc
+
+# The DBUG injection set above makes a worker thread signal the following
+# debug_sync once it has fetched the transaction with GTID 0-1-100.
+# Thus, at this point, the SQL driver thread has reached the next
+# relay log file name, while a worker thread is still processing a
+# transaction in the previous relay log file, blocked on the SELECT FOR
+# UPDATE.
+SET debug_sync= 'now WAIT_FOR scheduled_gtid_0_x_100';
+# At this point, the SQL driver thread is in the new relay log file, while
+# the DELETE from the old relay log file is not yet complete. We will stop
+# the slave at this point. The bug was that the DELETE statement would
+# update the slave position to the _new_ relay log file name instead of
+# its own old file name. Thus, by stopping and restarting the slave at this
+# point, we would get an error at restart due to the incorrect position. (If
+# we let the slave catch up before stopping, the incorrect position would
+# be corrected by a later transaction).
+
+send STOP SLAVE;
+
+--connection con_temp2
+# Wait for STOP SLAVE to have proceeded sufficiently that it has signalled
+# all worker threads to stop; this ensures that we will stop after the DELETE
+# transaction (and not after a later transaction that might have been able
+# to set a fixed position).
+SET debug_sync= 'now WAIT_FOR wait_for_done_waiting';
+# Now release the row lock that was blocking the replication of DELETE.
+ROLLBACK;
+
+--connection server_2
+reap;
+--source include/wait_for_slave_sql_to_stop.inc
+SELECT * FROM t2 WHERE a >= 40 ORDER BY a;
+# Now restart the slave. With the bug present, this would start at an
+# incorrect relay log position, causing a relay log read error (or, if
+# unlucky, silently skipping a number of events).
+--source include/start_slave.inc
+--sync_with_master
+SELECT * FROM t2 WHERE a >= 40 ORDER BY a;
+--source include/stop_slave.inc
+SET GLOBAL debug_dbug=@old_dbug;
+SET DEBUG_SYNC= 'RESET';
+SET GLOBAL slave_parallel_threads=0;
+SET GLOBAL slave_parallel_threads=10;
+CHANGE MASTER TO master_use_gtid=slave_pos;
+--source include/start_slave.inc
+
+
+--echo *** MDEV-7326 Server deadlock in connection with parallel replication ***
+# We use three transactions, each in a separate group commit.
+# T1 does mark_start_commit(), then gets a deadlock error.
+# T2 wakes up and starts running
+# T1 does unmark_start_commit()
+# T3 goes to wait for T2 to start its commit
+# T2 does mark_start_commit()
+# The bug was that at this point, T3 got deadlocked. Because T1 has unmarked(),
+# T3 did not see count_committing_event_groups reach its target value yet.
+# But when T1 later re-did mark_start_commit(), it failed to send a wakeup
+# to T3.
+
+--connection server_2
+--source include/stop_slave.inc
+SET GLOBAL slave_parallel_threads=0;
+SET GLOBAL slave_parallel_threads=3;
+SET GLOBAL debug_dbug="+d,rpl_parallel_simulate_temp_err_xid";
+--source include/start_slave.inc
+
+--connection server_1
+SET @old_format= @@SESSION.binlog_format;
+SET binlog_format= STATEMENT;
+# This debug_sync will linger on and be used to control T3 later.
+INSERT INTO t1 VALUES (foo(50,
+ "rpl_parallel_start_waiting_for_prior SIGNAL t3_ready",
+ "rpl_parallel_end_of_group SIGNAL prep_ready WAIT_FOR prep_cont"));
+--save_master_pos
+--connection server_2
+# Wait for the debug_sync point for T3 to be set. But let the preparation
+# transaction remain hanging, so that T1 and T2 will be scheduled for the
+# remaining two worker threads.
+SET DEBUG_SYNC= "now WAIT_FOR prep_ready";
+
+--connection server_1
+INSERT INTO t2 VALUES (foo(50,
+ "rpl_parallel_simulate_temp_err_xid SIGNAL t1_ready1 WAIT_FOR t1_cont1",
+ "rpl_parallel_retry_after_unmark SIGNAL t1_ready2 WAIT_FOR t1_cont2"));
+--save_master_pos
+
+--connection server_2
+SET DEBUG_SYNC= "now WAIT_FOR t1_ready1";
+# T1 has now done mark_start_commit(). It will later do a rollback and retry.
+
+--connection server_1
+# Use a MyISAM table for T2 and T3, so they do not trigger the
+# rpl_parallel_simulate_temp_err_xid DBUG insertion on XID event.
+INSERT INTO t1 VALUES (foo(51,
+ "rpl_parallel_before_mark_start_commit SIGNAL t2_ready1 WAIT_FOR t2_cont1",
+ "rpl_parallel_after_mark_start_commit SIGNAL t2_ready2"));
+
+--connection server_2
+SET DEBUG_SYNC= "now WAIT_FOR t2_ready1";
+# T2 has now started running, but has not yet done mark_start_commit()
+SET DEBUG_SYNC= "now SIGNAL t1_cont1";
+SET DEBUG_SYNC= "now WAIT_FOR t1_ready2";
+# T1 has now done unmark_start_commit() in preparation for its retry.
+
+--connection server_1
+INSERT INTO t1 VALUES (52);
+SET BINLOG_FORMAT= @old_format;
+SELECT * FROM t2 WHERE a>=50 ORDER BY a;
+SELECT * FROM t1 WHERE a>=50 ORDER BY a;
+
+--connection server_2
+# Let the preparation transaction complete, so that the same worker thread
+# can continue with the transaction T3.
+SET DEBUG_SYNC= "now SIGNAL prep_cont";
+SET DEBUG_SYNC= "now WAIT_FOR t3_ready";
+# T3 has now gone to wait for T2 to start committing
+SET DEBUG_SYNC= "now SIGNAL t2_cont1";
+SET DEBUG_SYNC= "now WAIT_FOR t2_ready2";
+# T2 has now done mark_start_commit().
+# Let things run, and check that T3 does not get deadlocked.
+SET DEBUG_SYNC= "now SIGNAL t1_cont2";
+--sync_with_master
+
+--connection server_1
+--save_master_pos
+--connection server_2
+--sync_with_master
+SELECT * FROM t2 WHERE a>=50 ORDER BY a;
+SELECT * FROM t1 WHERE a>=50 ORDER BY a;
+SET DEBUG_SYNC="reset";
+
+# Re-spawn the worker threads to remove any DBUG injections or DEBUG_SYNC.
+--source include/stop_slave.inc
+SET GLOBAL debug_dbug=@old_dbug;
+SET GLOBAL slave_parallel_threads=0;
+SET GLOBAL slave_parallel_threads=10;
+--source include/start_slave.inc
+
+
+--echo *** MDEV-7326 Server deadlock in connection with parallel replication ***
+# Similar to the previous test, but with T2 and T3 in the same GCO.
+# We use three transactions, T1 in one group commit and T2/T3 in another.
+# T1 does mark_start_commit(), then gets a deadlock error.
+# T2 wakes up and starts running
+# T1 does unmark_start_commit()
+# T3 goes to wait for T1 to start its commit
+# T2 does mark_start_commit()
+# The bug was that at this point, T3 got deadlocked. T2 increments the
+# count_committing_event_groups but does not signal T3, as they are in
+# the same GCO. Then later when T1 increments, it would also not signal
+# T3, because now the count_committing_event_groups is not equal to the
+# wait_count of T3 (it is one larger).
+
+--connection server_2
+--source include/stop_slave.inc
+SET GLOBAL slave_parallel_threads=0;
+SET GLOBAL slave_parallel_threads=3;
+SET GLOBAL debug_dbug="+d,rpl_parallel_simulate_temp_err_xid";
+--source include/start_slave.inc
+
+--connection server_1
+SET @old_format= @@SESSION.binlog_format;
+SET binlog_format= STATEMENT;
+# This debug_sync will linger on and be used to control T3 later.
+INSERT INTO t1 VALUES (foo(60,
+ "rpl_parallel_start_waiting_for_prior SIGNAL t3_ready",
+ "rpl_parallel_end_of_group SIGNAL prep_ready WAIT_FOR prep_cont"));
+--save_master_pos
+--connection server_2
+# Wait for the debug_sync point for T3 to be set. But let the preparation
+# transaction remain hanging, so that T1 and T2 will be scheduled for the
+# remaining two worker threads.
+SET DEBUG_SYNC= "now WAIT_FOR prep_ready";
+
+--connection server_1
+INSERT INTO t2 VALUES (foo(60,
+ "rpl_parallel_simulate_temp_err_xid SIGNAL t1_ready1 WAIT_FOR t1_cont1",
+ "rpl_parallel_retry_after_unmark SIGNAL t1_ready2 WAIT_FOR t1_cont2"));
+--save_master_pos
+
+--connection server_2
+SET DEBUG_SYNC= "now WAIT_FOR t1_ready1";
+# T1 has now done mark_start_commit(). It will later do a rollback and retry.
+
+# Do T2 and T3 in a single group commit.
+# Use a MyISAM table for T2 and T3, so they do not trigger the
+# rpl_parallel_simulate_temp_err_xid DBUG insertion on XID event.
+--connection con_temp3
+SET debug_sync='commit_after_release_LOCK_prepare_ordered SIGNAL master_queued1 WAIT_FOR master_cont1';
+SET binlog_format=statement;
+send INSERT INTO t1 VALUES (foo(61,
+ "rpl_parallel_before_mark_start_commit SIGNAL t2_ready1 WAIT_FOR t2_cont1",
+ "rpl_parallel_after_mark_start_commit SIGNAL t2_ready2"));
+
+--connection server_1
+SET debug_sync='now WAIT_FOR master_queued1';
+
+--connection con_temp4
+SET debug_sync='commit_after_release_LOCK_prepare_ordered SIGNAL master_queued2';
+send INSERT INTO t6 VALUES (62);
+
+--connection server_1
+SET debug_sync='now WAIT_FOR master_queued2';
+SET debug_sync='now SIGNAL master_cont1';
+
+--connection con_temp3
+REAP;
+--connection con_temp4
+REAP;
+
+--connection server_1
+SET debug_sync='RESET';
+SET BINLOG_FORMAT= @old_format;
+SELECT * FROM t2 WHERE a>=60 ORDER BY a;
+SELECT * FROM t1 WHERE a>=60 ORDER BY a;
+SELECT * FROM t6 WHERE a>=60 ORDER BY a;
+
+--connection server_2
+SET DEBUG_SYNC= "now WAIT_FOR t2_ready1";
+# T2 has now started running, but has not yet done mark_start_commit()
+SET DEBUG_SYNC= "now SIGNAL t1_cont1";
+SET DEBUG_SYNC= "now WAIT_FOR t1_ready2";
+# T1 has now done unmark_start_commit() in preparation for its retry.
+
+--connection server_2
+# Let the preparation transaction complete, so that the same worker thread
+# can continue with the transaction T3.
+SET DEBUG_SYNC= "now SIGNAL prep_cont";
+SET DEBUG_SYNC= "now WAIT_FOR t3_ready";
+# T3 has now gone to wait for T2 to start committing
+SET DEBUG_SYNC= "now SIGNAL t2_cont1";
+SET DEBUG_SYNC= "now WAIT_FOR t2_ready2";
+# T2 has now done mark_start_commit().
+# Let things run, and check that T3 does not get deadlocked.
+SET DEBUG_SYNC= "now SIGNAL t1_cont2";
+--sync_with_master
+
+--connection server_1
+--save_master_pos
+--connection server_2
+--sync_with_master
+SELECT * FROM t2 WHERE a>=60 ORDER BY a;
+SELECT * FROM t1 WHERE a>=60 ORDER BY a;
+SELECT * FROM t6 WHERE a>=60 ORDER BY a;
+SET DEBUG_SYNC="reset";
+
+# Re-spawn the worker threads to remove any DBUG injections or DEBUG_SYNC.
+--source include/stop_slave.inc
+SET GLOBAL debug_dbug=@old_dbug;
+SET GLOBAL slave_parallel_threads=0;
+SET GLOBAL slave_parallel_threads=10;
+--source include/start_slave.inc
+
+--echo *** MDEV-7335: Potential parallel slave deadlock with specific binlog corruption ***
+
+--connection server_2
+--source include/stop_slave.inc
+SET GLOBAL slave_parallel_threads=1;
+SET @old_dbug= @@GLOBAL.debug_dbug;
+SET GLOBAL debug_dbug="+d,slave_discard_xid_for_gtid_0_x_1000";
+
+--connection server_1
+INSERT INTO t2 VALUES (101);
+INSERT INTO t2 VALUES (102);
+INSERT INTO t2 VALUES (103);
+INSERT INTO t2 VALUES (104);
+INSERT INTO t2 VALUES (105);
+# Inject a partial event group (missing XID at the end). The bug was that such
+# a partial group was not handled appropriately, leading to a server deadlock.
+SET gtid_seq_no=1000;
+INSERT INTO t2 VALUES (106);
+INSERT INTO t2 VALUES (107);
+INSERT INTO t2 VALUES (108);
+INSERT INTO t2 VALUES (109);
+INSERT INTO t2 VALUES (110);
+INSERT INTO t2 VALUES (111);
+INSERT INTO t2 VALUES (112);
+INSERT INTO t2 VALUES (113);
+INSERT INTO t2 VALUES (114);
+INSERT INTO t2 VALUES (115);
+INSERT INTO t2 VALUES (116);
+INSERT INTO t2 VALUES (117);
+INSERT INTO t2 VALUES (118);
+INSERT INTO t2 VALUES (119);
+INSERT INTO t2 VALUES (120);
+INSERT INTO t2 VALUES (121);
+INSERT INTO t2 VALUES (122);
+INSERT INTO t2 VALUES (123);
+INSERT INTO t2 VALUES (124);
+INSERT INTO t2 VALUES (125);
+INSERT INTO t2 VALUES (126);
+INSERT INTO t2 VALUES (127);
+INSERT INTO t2 VALUES (128);
+INSERT INTO t2 VALUES (129);
+INSERT INTO t2 VALUES (130);
+--source include/save_master_gtid.inc
+
+--connection server_2
+--source include/start_slave.inc
+--source include/sync_with_master_gtid.inc
+# The partial event group (a=106) should be rolled back and thus missing.
+SELECT * FROM t2 WHERE a >= 100 ORDER BY a;
+
+--source include/stop_slave.inc
+SET GLOBAL debug_dbug=@old_dbug;
+SET GLOBAL slave_parallel_threads=10;
+--source include/start_slave.inc
+
+--echo *** MDEV-6676 - test syntax of @@slave_parallel_mode ***
+--connection server_2
+
+--let $status_items= Parallel_Mode
+--source include/show_slave_status.inc
+--source include/stop_slave.inc
+SET GLOBAL slave_parallel_mode='aggressive';
+--let $status_items= Parallel_Mode
+--source include/show_slave_status.inc
+SET GLOBAL slave_parallel_mode='conservative';
+--let $status_items= Parallel_Mode
+--source include/show_slave_status.inc
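+# (Only the Parallel_Mode column of SHOW SLAVE STATUS is checked here; the
+# 'none' and 'minimal' modes are exercised in the next two test cases.)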
+
+
+--echo *** MDEV-6676 - test that empty parallel_mode does not replicate in parallel ***
+--connection server_1
+INSERT INTO t2 VALUES (1040);
+--source include/save_master_gtid.inc
+
+--connection server_2
+SET GLOBAL slave_parallel_mode='none';
+# Test that we do not use parallel apply, by injecting an unconditional
+# crash in the parallel apply code.
+SET @old_dbug= @@GLOBAL.debug_dbug;
+SET GLOBAL debug_dbug="+d,slave_crash_if_parallel_apply";
+--source include/start_slave.inc
+--source include/sync_with_master_gtid.inc
+SELECT * FROM t2 WHERE a >= 1040 ORDER BY a;
+--source include/stop_slave.inc
+SET GLOBAL debug_dbug=@old_dbug;
+
+
+--echo *** MDEV-6676 - test disabling domain-based parallel replication ***
+--connection server_1
+# Let's do a bunch of transactions that will conflict if run out-of-order in
+# domain-based parallel replication mode.
+SET gtid_domain_id = 1;
+INSERT INTO t2 VALUES (1041);
+INSERT INTO t2 VALUES (1042);
+INSERT INTO t2 VALUES (1043);
+INSERT INTO t2 VALUES (1044);
+INSERT INTO t2 VALUES (1045);
+INSERT INTO t2 VALUES (1046);
+DELETE FROM t2 WHERE a >= 1041;
+SET gtid_domain_id = 2;
+INSERT INTO t2 VALUES (1041);
+INSERT INTO t2 VALUES (1042);
+INSERT INTO t2 VALUES (1043);
+INSERT INTO t2 VALUES (1044);
+INSERT INTO t2 VALUES (1045);
+INSERT INTO t2 VALUES (1046);
+SET gtid_domain_id = 0;
+--source include/save_master_gtid.inc
+--connection server_2
+SET GLOBAL slave_parallel_mode=minimal;
+--source include/start_slave.inc
+--source include/sync_with_master_gtid.inc
+SELECT * FROM t2 WHERE a >= 1040 ORDER BY a;
+
+--echo *** MDEV-7888: ANALYZE TABLE does wakeup_subsequent_commits(), causing wrong binlog order and parallel replication hang ***
+
+--connection server_2
+--source include/stop_slave.inc
+SET GLOBAL slave_parallel_mode='conservative';
+SET GLOBAL slave_parallel_threads=10;
+
+SET @old_dbug= @@GLOBAL.debug_dbug;
+SET GLOBAL debug_dbug= '+d,inject_analyze_table_sleep';
+
+--connection server_1
+# Inject two group commits. The bug was that ANALYZE TABLE would call
+# wakeup_subsequent_commits() too early, allowing the following transaction
+# in the same group to run ahead, binlog, and free the GCO. We would then get
+# the wrong binlog order and later access a freed GCO, causing a lost wakeup
+# of the following GCO and thus a replication hang.
+# We inject a small sleep in ANALYZE to make the race easier to hit (this
+# can only cause false negatives in versions with the bug, not false positives,
+# so the sleep is ok here; it is in general not possible to reliably trigger
+# the race with debug_sync, since the bugfix makes the race impossible).
+
+SET @old_dbug= @@SESSION.debug_dbug;
+SET SESSION debug_dbug="+d,binlog_force_commit_id";
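+# (With the binlog_force_commit_id injection, the user variable @commit_id
+# supplies the commit id of the binlogged group commit, so statements below
+# that share a @commit_id value end up in one group commit that the slave can
+# apply in parallel.)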
+
+# Group commit with cid=10000, two event groups.
+SET @commit_id= 10000;
+ANALYZE TABLE t2;
+INSERT INTO t3 VALUES (120, 0);
+
+# Group commit with cid=10001, one event group.
+SET @commit_id= 10001;
+INSERT INTO t3 VALUES (121, 0);
+
+SET SESSION debug_dbug=@old_dbug;
+
+SELECT * FROM t3 WHERE a >= 120 ORDER BY a;
+--source include/save_master_gtid.inc
+
+--connection server_2
+--source include/start_slave.inc
+--source include/sync_with_master_gtid.inc
+
+SELECT * FROM t3 WHERE a >= 120 ORDER BY a;
+
+--source include/stop_slave.inc
+SET GLOBAL debug_dbug= @old_dbug;
+--source include/start_slave.inc
+
+
+--echo *** MDEV-7929: record_gtid() for non-transactional event group calls wakeup_subsequent_commits() too early, causing slave hang. ***
+
+--connection server_2
+--source include/stop_slave.inc
+SET @old_dbug= @@GLOBAL.debug_dbug;
+SET GLOBAL debug_dbug= '+d,inject_record_gtid_serverid_100_sleep';
+
+--connection server_1
+# Inject two group commits. The bug was that record_gtid for a
+# non-transactional event group would commit its own transaction, which would
+# cause ha_commit_trans() to call wakeup_subsequent_commits() too early. This
+# in turn led to access of a freed group_commit_orderer object, losing a wakeup
+# and causing slave threads to hang.
+# We inject a small sleep in the corresponding record_gtid() to make the race
+# easier to hit.
+
+SET @old_dbug= @@SESSION.debug_dbug;
+SET SESSION debug_dbug="+d,binlog_force_commit_id";
+
+# Group commit with cid=10010, two event groups.
+SET @old_server_id= @@SESSION.server_id;
+SET SESSION server_id= 100;
+SET @commit_id= 10010;
+ALTER TABLE t1 COMMENT "Hulubulu!";
+SET SESSION server_id= @old_server_id;
+INSERT INTO t3 VALUES (130, 0);
+
+# Group commit with cid=10011, one event group.
+SET @commit_id= 10011;
+INSERT INTO t3 VALUES (131, 0);
+
+SET SESSION debug_dbug=@old_dbug;
+
+SELECT * FROM t3 WHERE a >= 130 ORDER BY a;
+--source include/save_master_gtid.inc
+
+--connection server_2
+--source include/start_slave.inc
+--source include/sync_with_master_gtid.inc
+
+SELECT * FROM t3 WHERE a >= 130 ORDER BY a;
+
+--source include/stop_slave.inc
+SET GLOBAL debug_dbug= @old_dbug;
+--source include/start_slave.inc
+
+
+--echo *** MDEV-8031: Parallel replication stops on "connection killed" error (probably incorrectly handled deadlock kill) ***
+
+--connection server_1
+INSERT INTO t3 VALUES (201,0), (202,0);
+--source include/save_master_gtid.inc
+
+--connection server_2
+--source include/sync_with_master_gtid.inc
+--source include/stop_slave.inc
+SET @old_dbug= @@GLOBAL.debug_dbug;
+SET GLOBAL debug_dbug= '+d,inject_mdev8031';
+
+--connection server_1
+# We artificially create a situation that hopefully resembles the original
+# bug, which was seen only once, "in the wild".
+# Set up a fake group commit with lots of conflicts that will lead to a
+# deadlock kill. The slave DBUG injection causes the slave to be deadlock
+# killed at a particular point during the retry, and then later to do a small
+# sleep at another critical point where the prior transaction then has a
+# chance to complete. Finally an extra KILL check catches an unhandled,
+# lingering deadlock kill. So rather artificial, but at least it exercises the
+# relevant code paths.
+SET @old_dbug= @@SESSION.debug_dbug;
+SET SESSION debug_dbug="+d,binlog_force_commit_id";
+
+SET @commit_id= 10200;
+INSERT INTO t3 VALUES (203, 1);
+INSERT INTO t3 VALUES (204, 1);
+INSERT INTO t3 VALUES (205, 1);
+UPDATE t3 SET b=b+1 WHERE a=201;
+UPDATE t3 SET b=b+1 WHERE a=201;
+UPDATE t3 SET b=b+1 WHERE a=201;
+UPDATE t3 SET b=b+1 WHERE a=202;
+UPDATE t3 SET b=b+1 WHERE a=202;
+UPDATE t3 SET b=b+1 WHERE a=202;
+UPDATE t3 SET b=b+1 WHERE a=202;
+UPDATE t3 SET b=b+1 WHERE a=203;
+UPDATE t3 SET b=b+1 WHERE a=203;
+UPDATE t3 SET b=b+1 WHERE a=204;
+UPDATE t3 SET b=b+1 WHERE a=204;
+UPDATE t3 SET b=b+1 WHERE a=204;
+UPDATE t3 SET b=b+1 WHERE a=203;
+UPDATE t3 SET b=b+1 WHERE a=205;
+UPDATE t3 SET b=b+1 WHERE a=205;
+SET SESSION debug_dbug=@old_dbug;
+
+SELECT * FROM t3 WHERE a>=200 ORDER BY a;
+--source include/save_master_gtid.inc
+
+--connection server_2
+--source include/start_slave.inc
+--source include/sync_with_master_gtid.inc
+
+SELECT * FROM t3 WHERE a>=200 ORDER BY a;
+--source include/stop_slave.inc
+SET GLOBAL debug_dbug= @old_dbug;
+--source include/start_slave.inc
+
+
+--echo *** Check getting deadlock killed inside open_binlog() during retry. ***
+
+--connection server_2
+--source include/stop_slave.inc
+SET @old_dbug= @@GLOBAL.debug_dbug;
+SET GLOBAL debug_dbug= '+d,inject_retry_event_group_open_binlog_kill';
+SET @old_max= @@GLOBAL.max_relay_log_size;
+SET GLOBAL max_relay_log_size= 4096;
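+# With a 4 KB relay log limit and the ~8 KB statements below (padded with a
+# large comment), every event group spans a relay log rotation, so a retried
+# transaction has to re-open a relay log via open_binlog() - the code path in
+# which the injected deadlock kill is checked.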
+
+--connection server_1
+SET @old_dbug= @@SESSION.debug_dbug;
+SET SESSION debug_dbug="+d,binlog_force_commit_id";
+
+--let $large= `SELECT REPEAT("*", 8192)`
+SET @commit_id= 10210;
+--echo Omit long queries that cause relaylog rotations and transaction retries...
+--disable_query_log
+eval UPDATE t3 SET b=b+1 WHERE a=201 /* $large */;
+eval UPDATE t3 SET b=b+1 WHERE a=201 /* $large */;
+eval UPDATE t3 SET b=b+1 WHERE a=201 /* $large */;
+eval UPDATE t3 SET b=b+1 WHERE a=202 /* $large */;
+eval UPDATE t3 SET b=b+1 WHERE a=202 /* $large */;
+eval UPDATE t3 SET b=b+1 WHERE a=202 /* $large */;
+eval UPDATE t3 SET b=b+1 WHERE a=202 /* $large */;
+eval UPDATE t3 SET b=b+1 WHERE a=203 /* $large */;
+eval UPDATE t3 SET b=b+1 WHERE a=203 /* $large */;
+eval UPDATE t3 SET b=b+1 WHERE a=204 /* $large */;
+eval UPDATE t3 SET b=b+1 WHERE a=204 /* $large */;
+eval UPDATE t3 SET b=b+1 WHERE a=204 /* $large */;
+eval UPDATE t3 SET b=b+1 WHERE a=203 /* $large */;
+eval UPDATE t3 SET b=b+1 WHERE a=205 /* $large */;
+eval UPDATE t3 SET b=b+1 WHERE a=205 /* $large */;
+--enable_query_log
+SET SESSION debug_dbug=@old_dbug;
+
+SELECT * FROM t3 WHERE a>=200 ORDER BY a;
+--source include/save_master_gtid.inc
+
+--connection server_2
+--source include/start_slave.inc
+--source include/sync_with_master_gtid.inc
+
+SELECT * FROM t3 WHERE a>=200 ORDER BY a;
+--source include/stop_slave.inc
+SET GLOBAL debug_dbug= @old_dbug;
+SET GLOBAL max_relay_log_size= @old_max;
+--source include/start_slave.inc
+
+--echo *** MDEV-8725: Assertion on ROLLBACK statement in the binary log ***
+--connection server_1
+# Inject an event group terminated by ROLLBACK, by mixing MyISAM and InnoDB
+# in a transaction. The bug was an assertion on the ROLLBACK due to
+# mark_start_commit() being already called.
+--disable_warnings
+BEGIN;
+INSERT INTO t2 VALUES (2000);
+INSERT INTO t1 VALUES (2000);
+INSERT INTO t2 VALUES (2001);
+ROLLBACK;
+--enable_warnings
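+# The non-transactional changes survive the ROLLBACK while the transactional
+# ones are undone, so the binlogged event group ends in ROLLBACK rather than
+# COMMIT; the SELECTs below verify that master and slave still agree on the
+# resulting rows.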
+SELECT * FROM t1 WHERE a>=2000 ORDER BY a;
+SELECT * FROM t2 WHERE a>=2000 ORDER BY a;
+--source include/save_master_gtid.inc
+
+--connection server_2
+--source include/sync_with_master_gtid.inc
+SELECT * FROM t1 WHERE a>=2000 ORDER BY a;
+SELECT * FROM t2 WHERE a>=2000 ORDER BY a;
+
+
+# Clean up.
+--connection server_2
+--source include/stop_slave.inc
+SET GLOBAL slave_parallel_threads=@old_parallel_threads;
+--source include/start_slave.inc
+SET DEBUG_SYNC= 'RESET';
+
+--connection server_1
+DROP function foo;
+DROP TABLE t1,t2,t3,t4,t5,t6;
+SET DEBUG_SYNC= 'RESET';
+
+--source include/rpl_end.inc
diff --git a/mysql-test/extra/rpl_tests/rpl_parallel_show_binlog_events_purge_logs.inc b/mysql-test/extra/rpl_tests/rpl_parallel_show_binlog_events_purge_logs.inc
new file mode 100644
index 00000000000..7801498adb4
--- /dev/null
+++ b/mysql-test/extra/rpl_tests/rpl_parallel_show_binlog_events_purge_logs.inc
@@ -0,0 +1,37 @@
+#
+# This include file is used by more than one test suite
+# (currently rpl and binlog_encryption).
+# Please check all dependent tests after modifying it
+#
+
+# BUG#13979418: SHOW BINLOG EVENTS MAY CRASH THE SERVER
+#
+# The function mysql_show_binlog_events has a local stack variable
+# 'LOG_INFO linfo;', which is assigned to thd->current_linfo; however,
+# this variable goes out of scope and is destroyed before
+# thd->current_linfo is cleared.
+#
+# This test case runs SHOW BINLOG EVENTS and FLUSH LOGS to make sure
+# that, with the fix, the local variable linfo remains valid throughout
+# the whole scope of mysql_show_binlog_events.
+#
+--source include/have_debug_sync.inc
+--source include/master-slave.inc
+
+--connection slave
+SET DEBUG_SYNC= 'after_show_binlog_events SIGNAL on_show_binlog_events WAIT_FOR end';
+--send SHOW BINLOG EVENTS
+
+--connection slave1
+SET DEBUG_SYNC= 'now WAIT_FOR on_show_binlog_events';
+FLUSH LOGS;
+SET DEBUG_SYNC= 'now SIGNAL end';
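+# At this point SHOW BINLOG EVENTS on the 'slave' connection is parked at the
+# after_show_binlog_events sync point while FLUSH LOGS has rotated the binary
+# log underneath it; releasing it with SIGNAL end checks that the statement
+# still completes without touching a stale LOG_INFO.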
+
+--connection slave
+--disable_result_log
+--reap
+--enable_result_log
+SET DEBUG_SYNC= 'RESET';
+
+--connection master
+--source include/rpl_end.inc
diff --git a/mysql-test/extra/rpl_tests/rpl_relayrotate.inc b/mysql-test/extra/rpl_tests/rpl_relayrotate.inc
new file mode 100644
index 00000000000..ce638e419ff
--- /dev/null
+++ b/mysql-test/extra/rpl_tests/rpl_relayrotate.inc
@@ -0,0 +1,18 @@
+#
+# This include file is used by more than one test suite
+# (currently rpl and binlog_encryption).
+# Please check all dependent tests after modifying it
+#
+
+#######################################################
+# Wrapper for rpl_relayrotate.test to allow multi #
+# Engines to reuse test code. By JBM 2006-02-15 #
+#######################################################
+-- source include/have_innodb.inc
+# Slow test, don't run during staging part
+-- source include/not_staging.inc
+-- source include/master-slave.inc
+
+let $engine_type=innodb;
+-- source extra/rpl_tests/rpl_relayrotate.test
+--source include/rpl_end.inc
diff --git a/mysql-test/extra/rpl_tests/rpl_semi_sync.inc b/mysql-test/extra/rpl_tests/rpl_semi_sync.inc
new file mode 100644
index 00000000000..12053c54f4e
--- /dev/null
+++ b/mysql-test/extra/rpl_tests/rpl_semi_sync.inc
@@ -0,0 +1,551 @@
+#
+# This include file is used by more than one test suite
+# (currently rpl and binlog_encryption).
+# Please check all dependent tests after modifying it
+#
+
+source include/have_semisync.inc;
+source include/not_embedded.inc;
+source include/have_innodb.inc;
+source include/master-slave.inc;
+
+let $engine_type= InnoDB;
+#let $engine_type= MyISAM;
+
+# Suppress warnings that might be generated during the test
+connection master;
+call mtr.add_suppression("Timeout waiting for reply of binlog");
+call mtr.add_suppression("Read semi-sync reply");
+call mtr.add_suppression("Unsafe statement written to the binary log using statement format since BINLOG_FORMAT = STATEMENT.");
+connection slave;
+call mtr.add_suppression("Master server does not support semi-sync");
+call mtr.add_suppression("Semi-sync slave .* reply");
+call mtr.add_suppression("Slave SQL.*Request to stop slave SQL Thread received while applying a group that has non-transactional changes; waiting for completion of the group");
+connection master;
+
+# wait for dying connections (if any) to disappear
+let $wait_condition= select count(*) = 0 from information_schema.processlist where command='killed';
+--source include/wait_condition.inc
+
+# After the fix of BUG#45848, the semi-sync slave should not create any extra
+# connections on the master. Save the count of connections before starting
+# the semi-sync slave, for comparison below.
+let $_connections_normal_slave= query_get_value(SHOW STATUS LIKE 'Threads_connected', Value, 1);
+
+--echo #
+--echo # Uninstall semi-sync plugins on master and slave
+--echo #
+connection slave;
+source include/stop_slave.inc;
+reset slave;
+set global rpl_semi_sync_master_enabled= 0;
+set global rpl_semi_sync_slave_enabled= 0;
+
+connection master;
+reset master;
+set global rpl_semi_sync_master_enabled= 0;
+set global rpl_semi_sync_slave_enabled= 0;
+
+--echo #
+--echo # Main test of semi-sync replication start here
+--echo #
+
+connection master;
+
+set global rpl_semi_sync_master_timeout= 60000; # 60s
+
+echo [ default state of semi-sync on master should be OFF ];
+show variables like 'rpl_semi_sync_master_enabled';
+
+echo [ enable semi-sync on master ];
+set global rpl_semi_sync_master_enabled = 1;
+show variables like 'rpl_semi_sync_master_enabled';
+
+echo [ status of semi-sync on master should be ON even without any semi-sync slaves ];
+show status like 'Rpl_semi_sync_master_clients';
+show status like 'Rpl_semi_sync_master_status';
+show status like 'Rpl_semi_sync_master_yes_tx';
+
+--echo #
+--echo # BUG#45672 Semisync repl: ActiveTranx:insert_tranx_node: transaction node allocation failed
+--echo # BUG#45673 Semisynch reports correct operation even if no slave is connected
+--echo #
+
+# BUG#45672 When semi-sync is enabled on the master, it would allocate a
+# transaction node even without a semi-sync slave connected, and would
+# eventually result in a transaction node allocation error.
+#
+# The semi-sync master pre-allocates 'max_connections' transaction
+# nodes, so here we run more than that many transactions to check
+# whether it fails.
+# select @@global.max_connections + 1;
+let $i= `select @@global.max_connections + 1`;
+disable_query_log;
+eval create table t1 (a int) engine=$engine_type;
+while ($i)
+{
+ eval insert into t1 values ($i);
+ dec $i;
+}
+drop table t1;
+enable_query_log;
+
+# BUG#45673
+echo [ status of semi-sync on master should be OFF ];
+show status like 'Rpl_semi_sync_master_clients';
+show status like 'Rpl_semi_sync_master_status';
+--replace_result 305 304
+show status like 'Rpl_semi_sync_master_yes_tx';
+
+# reset master to make sure the following test will start with a clean environment
+reset master;
+
+connection slave;
+
+echo [ default state of semi-sync on slave should be OFF ];
+show variables like 'rpl_semi_sync_slave_enabled';
+
+echo [ enable semi-sync on slave ];
+set global rpl_semi_sync_slave_enabled = 1;
+show variables like 'rpl_semi_sync_slave_enabled';
+source include/start_slave.inc;
+
+connection master;
+
+# NOTE: Rpl_semi_sync_master_clients will only be updated when the
+# semi-sync slave has started its binlog dump request
+let $status_var= Rpl_semi_sync_master_clients;
+let $status_var_value= 1;
+source include/wait_for_status_var.inc;
+
+echo [ initial master state after the semi-sync slave connected ];
+show status like 'Rpl_semi_sync_master_clients';
+show status like 'Rpl_semi_sync_master_status';
+show status like 'Rpl_semi_sync_master_no_tx';
+show status like 'Rpl_semi_sync_master_yes_tx';
+
+replace_result $engine_type ENGINE_TYPE;
+eval create table t1(a int) engine = $engine_type;
+
+echo [ master state after CREATE TABLE statement ];
+show status like 'Rpl_semi_sync_master_status';
+show status like 'Rpl_semi_sync_master_no_tx';
+show status like 'Rpl_semi_sync_master_yes_tx';
+
+# After the fix of BUG#45848, the semi-sync slave should not create any
+# extra connections on the master.
+let $_connections_semisync_slave= query_get_value(SHOW STATUS LIKE 'Threads_connected', Value, 1);
+replace_result $_connections_normal_slave CONNECTIONS_NORMAL_SLAVE $_connections_semisync_slave CONNECTIONS_SEMISYNC_SLAVE;
+eval select $_connections_semisync_slave - $_connections_normal_slave as 'Should be 0';
+
+echo [ insert records to table ];
+insert t1 values (10);
+insert t1 values (9);
+insert t1 values (8);
+insert t1 values (7);
+insert t1 values (6);
+insert t1 values (5);
+insert t1 values (4);
+insert t1 values (3);
+insert t1 values (2);
+insert t1 values (1);
+
+echo [ master status after inserts ];
+show status like 'Rpl_semi_sync_master_status';
+show status like 'Rpl_semi_sync_master_no_tx';
+show status like 'Rpl_semi_sync_master_yes_tx';
+
+sync_slave_with_master;
+
+echo [ slave status after replicated inserts ];
+show status like 'Rpl_semi_sync_slave_status';
+
+select count(distinct a) from t1;
+select min(a) from t1;
+select max(a) from t1;
+
+--echo
+--echo # BUG#50157
+--echo # semi-sync replication crashes when replicating a transaction which
+--echo # include 'CREATE TEMPORARY TABLE `MyISAM_t` SELECT * FROM `Innodb_t` ;
+
+connection master;
+SET SESSION AUTOCOMMIT= 0;
+CREATE TABLE t2(c1 INT) ENGINE=innodb;
+sync_slave_with_master;
+
+connection master;
+BEGIN;
+--echo
+--echo # Even though it is in a transaction, this statement is binlogged into binlog
+--echo # file immediately.
+--disable_warnings
+CREATE TEMPORARY TABLE t3 SELECT c1 FROM t2 where 1=1;
+--enable_warnings
+--echo
+--echo # These statements will not be binlogged until the transaction is committed
+INSERT INTO t2 VALUES(11);
+INSERT INTO t2 VALUES(22);
+COMMIT;
+
+DROP TABLE t2, t3;
+SET SESSION AUTOCOMMIT= 1;
+sync_slave_with_master;
+
+
+--echo #
+--echo # Test semi-sync master will switch OFF after one transaction
+--echo # timeout waiting for slave reply.
+--echo #
+connection slave;
+source include/stop_slave.inc;
+
+connection master;
+set global rpl_semi_sync_master_timeout= 5000;
+
+# The first semi-sync check should be on because after slave stop,
+# there are no transactions on the master.
+echo [ master status should be ON ];
+show status like 'Rpl_semi_sync_master_status';
+show status like 'Rpl_semi_sync_master_no_tx';
+--replace_result 305 304
+show status like 'Rpl_semi_sync_master_yes_tx';
+show status like 'Rpl_semi_sync_master_clients';
+
+echo [ semi-sync replication of these transactions will fail ];
+insert into t1 values (500);
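+# (This INSERT blocks for up to rpl_semi_sync_master_timeout (5 seconds)
+# waiting for a slave acknowledgement that never arrives, after which the
+# master falls back to asynchronous replication.)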
+
+# Wait for the semi-sync replication of this transaction to timeout
+let $status_var= Rpl_semi_sync_master_status;
+let $status_var_value= OFF;
+source include/wait_for_status_var.inc;
+
+# The second semi-sync check should be off because one transaction
+# times out during waiting.
+echo [ master status should be OFF ];
+show status like 'Rpl_semi_sync_master_status';
+show status like 'Rpl_semi_sync_master_no_tx';
+--replace_result 305 304
+show status like 'Rpl_semi_sync_master_yes_tx';
+
+# Semi-sync status on master is now OFF, so all these transactions
+# will be replicated asynchronously.
+delete from t1 where a=10;
+delete from t1 where a=9;
+delete from t1 where a=8;
+delete from t1 where a=7;
+delete from t1 where a=6;
+delete from t1 where a=5;
+delete from t1 where a=4;
+delete from t1 where a=3;
+delete from t1 where a=2;
+delete from t1 where a=1;
+
+insert into t1 values (100);
+
+echo [ master status should be OFF ];
+show status like 'Rpl_semi_sync_master_status';
+show status like 'Rpl_semi_sync_master_no_tx';
+--replace_result 305 304
+show status like 'Rpl_semi_sync_master_yes_tx';
+
+--echo #
+--echo # Test semi-sync status on master will be ON again when slave catches up
+--echo #
+
+# Save the master position for later use.
+save_master_pos;
+
+connection slave;
+
+echo [ slave status should be OFF ];
+show status like 'Rpl_semi_sync_slave_status';
+source include/start_slave.inc;
+sync_with_master;
+
+echo [ slave status should be ON ];
+show status like 'Rpl_semi_sync_slave_status';
+
+select count(distinct a) from t1;
+select min(a) from t1;
+select max(a) from t1;
+
+connection master;
+
+# The master semi-sync status should be on again after slave catches up.
+echo [ master status should be ON again after slave catches up ];
+show status like 'Rpl_semi_sync_master_status';
+show status like 'Rpl_semi_sync_master_no_tx';
+--replace_result 305 304
+show status like 'Rpl_semi_sync_master_yes_tx';
+show status like 'Rpl_semi_sync_master_clients';
+
+--echo #
+--echo # Test disable/enable master semi-sync on the fly.
+--echo #
+
+drop table t1;
+sync_slave_with_master;
+
+source include/stop_slave.inc;
+
+--echo #
+--echo # Flush status
+--echo #
+connection master;
+echo [ Semi-sync master status variables before FLUSH STATUS ];
+SHOW STATUS LIKE 'Rpl_semi_sync_master_no_tx';
+SHOW STATUS LIKE 'Rpl_semi_sync_master_yes_tx';
+# Do not write the FLUSH STATUS to binlog, to make sure we'll get a
+# clean status after this.
+FLUSH NO_WRITE_TO_BINLOG STATUS;
+echo [ Semi-sync master status variables after FLUSH STATUS ];
+SHOW STATUS LIKE 'Rpl_semi_sync_master_no_tx';
+SHOW STATUS LIKE 'Rpl_semi_sync_master_yes_tx';
+
+connection master;
+
+source include/show_master_logs.inc;
+show variables like 'rpl_semi_sync_master_enabled';
+
+echo [ disable semi-sync on the fly ];
+set global rpl_semi_sync_master_enabled=0;
+show variables like 'rpl_semi_sync_master_enabled';
+show status like 'Rpl_semi_sync_master_status';
+
+echo [ enable semi-sync on the fly ];
+set global rpl_semi_sync_master_enabled=1;
+show variables like 'rpl_semi_sync_master_enabled';
+show status like 'Rpl_semi_sync_master_status';
+
+--echo #
+--echo # Test RESET MASTER/SLAVE
+--echo #
+
+connection slave;
+
+source include/start_slave.inc;
+
+connection master;
+
+replace_result $engine_type ENGINE_TYPE;
+eval create table t1 (a int) engine = $engine_type;
+drop table t1;
+
+##show status like 'Rpl_semi_sync_master_status';
+
+sync_slave_with_master;
+--replace_column 2 #
+show status like 'Rpl_relay%';
+
+echo [ test reset master ];
+connection master;
+
+reset master;
+
+show status like 'Rpl_semi_sync_master_status';
+show status like 'Rpl_semi_sync_master_no_tx';
+show status like 'Rpl_semi_sync_master_yes_tx';
+
+connection slave;
+
+source include/stop_slave.inc;
+reset slave;
+
+# Kill the dump thread on master for previous slave connection and
+# wait for it to exit
+connection master;
+let $_tid= `select id from information_schema.processlist where command = 'Binlog Dump' limit 1`;
+if ($_tid)
+{
+ --replace_result $_tid _tid
+ eval kill query $_tid;
+
+ # After dump thread exit, Rpl_semi_sync_master_clients will be 0
+ let $status_var= Rpl_semi_sync_master_clients;
+ let $status_var_value= 0;
+ source include/wait_for_status_var.inc;
+}
+
+connection slave;
+source include/start_slave.inc;
+
+connection master;
+
+# Wait for dump thread to start, Rpl_semi_sync_master_clients will be
+# 1 after dump thread started.
+let $status_var= Rpl_semi_sync_master_clients;
+let $status_var_value= 1;
+source include/wait_for_status_var.inc;
+
+replace_result $engine_type ENGINE_TYPE;
+eval create table t1 (a int) engine = $engine_type;
+insert into t1 values (1);
+insert into t1 values (2), (3);
+
+sync_slave_with_master;
+
+select * from t1;
+
+connection master;
+
+echo [ master semi-sync status should be ON ];
+show status like 'Rpl_semi_sync_master_status';
+show status like 'Rpl_semi_sync_master_no_tx';
+show status like 'Rpl_semi_sync_master_yes_tx';
+
+--echo #
+--echo # Start semi-sync replication without SUPER privilege
+--echo #
+connection slave;
+source include/stop_slave.inc;
+reset slave;
+connection master;
+reset master;
+
+# Kill the dump thread on master for previous slave connection and wait for it to exit
+let $_tid= `select id from information_schema.processlist where command = 'Binlog Dump' limit 1`;
+if ($_tid)
+{
+ --replace_result $_tid _tid
+ eval kill query $_tid;
+
+ # After dump thread exit, Rpl_semi_sync_master_clients will be 0
+ let $status_var= Rpl_semi_sync_master_clients;
+ let $status_var_value= 0;
+ source include/wait_for_status_var.inc;
+}
+
+# Do not binlog the following statement because it will generate
+# different events for ROW and STATEMENT format
+set sql_log_bin=0;
+grant replication slave on *.* to rpl@127.0.0.1 identified by 'rpl_password';
+flush privileges;
+set sql_log_bin=1;
+connection slave;
+grant replication slave on *.* to rpl@127.0.0.1 identified by 'rpl_password';
+flush privileges;
+change master to master_user='rpl',master_password='rpl_password';
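+# (The 'rpl' account has only the REPLICATION SLAVE privilege, not SUPER, so
+# a successful semi-sync handshake here shows that SUPER is not required on
+# the replication user.)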
+source include/start_slave.inc;
+show status like 'Rpl_semi_sync_slave_status';
+connection master;
+
+# Wait for the semi-sync binlog dump thread to start
+let $status_var= Rpl_semi_sync_master_clients;
+let $status_var_value= 1;
+source include/wait_for_status_var.inc;
+echo [ master semi-sync should be ON ];
+show status like 'Rpl_semi_sync_master_clients';
+show status like 'Rpl_semi_sync_master_status';
+show status like 'Rpl_semi_sync_master_no_tx';
+show status like 'Rpl_semi_sync_master_yes_tx';
+insert into t1 values (4);
+insert into t1 values (5);
+echo [ master semi-sync should be ON ];
+show status like 'Rpl_semi_sync_master_clients';
+show status like 'Rpl_semi_sync_master_status';
+show status like 'Rpl_semi_sync_master_no_tx';
+show status like 'Rpl_semi_sync_master_yes_tx';
+
+--echo #
+--echo # Test semi-sync slave connect to non-semi-sync master
+--echo #
+
+# Disable semi-sync on master
+connection slave;
+source include/stop_slave.inc;
+SHOW STATUS LIKE 'Rpl_semi_sync_slave_status';
+
+connection master;
+
+# Kill the dump thread on master for previous slave connection and wait for it to exit
+let $_tid= `select id from information_schema.processlist where command = 'Binlog Dump' limit 1`;
+if ($_tid)
+{
+ --replace_result $_tid _tid
+ eval kill query $_tid;
+
+ # After dump thread exit, Rpl_semi_sync_master_clients will be 0
+ let $status_var= Rpl_semi_sync_master_clients;
+ let $status_var_value= 0;
+ source include/wait_for_status_var.inc;
+}
+
+echo [ Semi-sync status on master should be ON ];
+show status like 'Rpl_semi_sync_master_clients';
+show status like 'Rpl_semi_sync_master_status';
+set global rpl_semi_sync_master_enabled= 0;
+
+connection slave;
+SHOW VARIABLES LIKE 'rpl_semi_sync_slave_enabled';
+source include/start_slave.inc;
+connection master;
+insert into t1 values (8);
+let $status_var= Rpl_semi_sync_master_clients;
+let $status_var_value= 1;
+source include/wait_for_status_var.inc;
+echo [ master semi-sync clients should be 1, status should be OFF ];
+show status like 'Rpl_semi_sync_master_clients';
+show status like 'Rpl_semi_sync_master_status';
+sync_slave_with_master;
+show status like 'Rpl_semi_sync_slave_status';
+
+# Uninstall semi-sync plugin on master
+connection slave;
+source include/stop_slave.inc;
+connection master;
+set global rpl_semi_sync_master_enabled= 0;
+
+connection slave;
+SHOW VARIABLES LIKE 'rpl_semi_sync_slave_enabled';
+source include/start_slave.inc;
+
+connection master;
+insert into t1 values (10);
+sync_slave_with_master;
+
+--echo #
+--echo # Test non-semi-sync slave connect to semi-sync master
+--echo #
+
+connection master;
+set global rpl_semi_sync_master_timeout= 5000; # 5s
+set global rpl_semi_sync_master_enabled= 1;
+
+connection slave;
+source include/stop_slave.inc;
+SHOW STATUS LIKE 'Rpl_semi_sync_slave_status';
+
+echo [ uninstall semi-sync slave plugin ];
+set global rpl_semi_sync_slave_enabled= 0;
+
+echo [ reinstall semi-sync slave plugin and disable semi-sync ];
+SHOW VARIABLES LIKE 'rpl_semi_sync_slave_enabled';
+SHOW STATUS LIKE 'Rpl_semi_sync_slave_status';
+source include/start_slave.inc;
+SHOW STATUS LIKE 'Rpl_semi_sync_slave_status';
+
+--echo #
+--echo # Clean up
+--echo #
+
+connection slave;
+source include/stop_slave.inc;
+set global rpl_semi_sync_slave_enabled= 0;
+
+connection master;
+set global rpl_semi_sync_master_enabled= 0;
+
+connection slave;
+change master to master_user='root',master_password='';
+source include/start_slave.inc;
+
+connection master;
+drop table t1;
+sync_slave_with_master;
+
+connection master;
+drop user rpl@127.0.0.1;
+flush privileges;
+set global rpl_semi_sync_master_timeout= default;
+--source include/rpl_end.inc
diff --git a/mysql-test/extra/rpl_tests/rpl_skip_replication.inc b/mysql-test/extra/rpl_tests/rpl_skip_replication.inc
new file mode 100644
index 00000000000..14e3339ff5e
--- /dev/null
+++ b/mysql-test/extra/rpl_tests/rpl_skip_replication.inc
@@ -0,0 +1,402 @@
+#
+# This include file is used by more than one test suite
+# (currently rpl and binlog_encryption).
+# Please check all dependent tests after modifying it.
+#
+# Usage:
+#
+# --let $use_remote_mysqlbinlog= 1 # optional
+# --source extra/rpl_tests/rpl_skip_replication.inc
+#
+# The script uses MYSQLBINLOG to verify certain results.
+# By default, it uses binary logs directly. If this is undesirable,
+# the behavior can be overridden by setting $use_remote_mysqlbinlog
+# as shown above.
+# The value will be unset after every execution of the script,
+# so if it is needed, it should be set explicitly before each call.
+#
+
+--source include/master-slave.inc
+--source include/have_innodb.inc
+
+connection slave;
+# Test that SUPER is required to change @@replicate_events_marked_for_skip.
+CREATE USER 'nonsuperuser'@'127.0.0.1';
+GRANT ALTER,CREATE,DELETE,DROP,EVENT,INSERT,PROCESS,REPLICATION SLAVE,
+ SELECT,UPDATE ON *.* TO 'nonsuperuser'@'127.0.0.1';
+connect(nonpriv, 127.0.0.1, nonsuperuser,, test, $SLAVE_MYPORT,);
+connection nonpriv;
+--error ER_SPECIFIC_ACCESS_DENIED_ERROR
+SET GLOBAL replicate_events_marked_for_skip=FILTER_ON_MASTER;
+disconnect nonpriv;
+connection slave;
+DROP USER 'nonsuperuser'@'127.0.0.1';
+
+SELECT @@global.replicate_events_marked_for_skip;
+--error ER_SLAVE_MUST_STOP
+SET GLOBAL replicate_events_marked_for_skip=FILTER_ON_SLAVE;
+SELECT @@global.replicate_events_marked_for_skip;
+STOP SLAVE;
+--error ER_GLOBAL_VARIABLE
+SET SESSION replicate_events_marked_for_skip=FILTER_ON_MASTER;
+SELECT @@global.replicate_events_marked_for_skip;
+SET GLOBAL replicate_events_marked_for_skip=FILTER_ON_MASTER;
+SELECT @@global.replicate_events_marked_for_skip;
+START SLAVE;
+
+connection master;
+SELECT @@skip_replication;
+--error ER_LOCAL_VARIABLE
+SET GLOBAL skip_replication=1;
+SELECT @@skip_replication;
+
+CREATE TABLE t1 (a INT PRIMARY KEY, b INT) ENGINE=myisam;
+CREATE TABLE t2 (a INT PRIMARY KEY, b INT) ENGINE=innodb;
+INSERT INTO t1(a) VALUES (1);
+INSERT INTO t2(a) VALUES (1);
+
+
+# Test that master-side filtering works.
+SET skip_replication=1;
+
+CREATE TABLE t3 (a INT PRIMARY KEY, b INT) ENGINE=myisam;
+INSERT INTO t1(a) VALUES (2);
+INSERT INTO t2(a) VALUES (2);
+
+# Inject a rotate event in the binlog stream sent to slave (otherwise we will
+# fail sync_slave_with_master as the last event on the master is not present
+# on the slave).
+FLUSH NO_WRITE_TO_BINLOG LOGS;
+
+sync_slave_with_master;
+connection slave;
+SHOW TABLES;
+SELECT * FROM t1;
+SELECT * FROM t2;
+
+connection master;
+DROP TABLE t3;
+
+FLUSH NO_WRITE_TO_BINLOG LOGS;
+sync_slave_with_master;
+
+
+# Test that slave-side filtering works.
+connection slave;
+STOP SLAVE;
+SET GLOBAL replicate_events_marked_for_skip=FILTER_ON_SLAVE;
+START SLAVE;
+
+connection master;
+SET skip_replication=1;
+CREATE TABLE t3 (a INT PRIMARY KEY, b INT) ENGINE=myisam;
+INSERT INTO t1(a) VALUES (3);
+INSERT INTO t2(a) VALUES (3);
+
+# Inject a rotate event in the binlog stream sent to slave (otherwise we will
+# fail sync_slave_with_master as the last event on the master is not present
+# on the slave).
+FLUSH NO_WRITE_TO_BINLOG LOGS;
+
+sync_slave_with_master;
+connection slave;
+SHOW TABLES;
+SELECT * FROM t1;
+SELECT * FROM t2;
+
+connection master;
+DROP TABLE t3;
+
+FLUSH NO_WRITE_TO_BINLOG LOGS;
+sync_slave_with_master;
+connection slave;
+STOP SLAVE;
+SET GLOBAL replicate_events_marked_for_skip=REPLICATE;
+START SLAVE;
+
+
+# Test that events with @@skip_replication=1 are not filtered when filtering is
+# not set on slave.
+connection master;
+SET skip_replication=1;
+CREATE TABLE t3 (a INT PRIMARY KEY, b INT) ENGINE=myisam;
+INSERT INTO t3(a) VALUES(2);
+sync_slave_with_master;
+connection slave;
+SELECT * FROM t3;
+connection master;
+DROP TABLE t3;
+
+#
+# Test that the slave will preserve the @@skip_replication flag in its
+# own binlog.
+#
+
+TRUNCATE t1;
+sync_slave_with_master;
+connection slave;
+RESET MASTER;
+
+connection master;
+SET skip_replication=0;
+INSERT INTO t1 VALUES (1,0);
+SET skip_replication=1;
+INSERT INTO t1 VALUES (2,0);
+SET skip_replication=0;
+INSERT INTO t1 VALUES (3,0);
+
+sync_slave_with_master;
+connection slave;
+# Since slave has @@replicate_events_marked_for_skip=REPLICATE, it should have
+# applied all events.
+SELECT * FROM t1 ORDER by a;
+
+STOP SLAVE;
+SET GLOBAL replicate_events_marked_for_skip=FILTER_ON_MASTER;
+let $SLAVE_DATADIR= `select @@datadir`;
+
+connection master;
+TRUNCATE t1;
+
+# Now apply the slave binlog to the master, to check that both the slave
+# and mysqlbinlog will preserve the @@skip_replication flag.
+
+--let $mysqlbinlog_args= $SLAVE_DATADIR/slave-bin.000001
+if ($use_remote_mysqlbinlog)
+{
+ --let $mysqlbinlog_args= --read-from-remote-server --protocol=tcp --host=127.0.0.1 --port=$SLAVE_MYPORT -uroot slave-bin.000001
+ --let $use_remote_mysqlbinlog= 0
+}
+--exec $MYSQL_BINLOG $mysqlbinlog_args > $MYSQLTEST_VARDIR/tmp/rpl_skip_replication.binlog
+--exec $MYSQL test < $MYSQLTEST_VARDIR/tmp/rpl_skip_replication.binlog
+
+# The master should have all three events.
+SELECT * FROM t1 ORDER by a;
+
+# The slave should be missing event 2, which is marked with the
+# @@skip_replication flag.
+
+connection slave;
+START SLAVE;
+
+connection master;
+sync_slave_with_master;
+
+connection slave;
+SELECT * FROM t1 ORDER by a;
+
+#
+# Test that @@sql_slave_skip_counter does not count skipped @@skip_replication
+# events.
+#
+
+connection master;
+TRUNCATE t1;
+
+sync_slave_with_master;
+connection slave;
+STOP SLAVE;
+# We will skip two INSERTs (in addition to any skipped due to
+# @@skip_replication). Since from 5.5 every statement is wrapped in
+# BEGIN ... END, we need to skip 6 events for this.
+SET GLOBAL sql_slave_skip_counter=6;
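+# (Roughly: each statement-format INSERT is logged as BEGIN, the statement
+# itself, and COMMIT - about 3 events - so skipping 2 INSERTs means skipping
+# 6 events.)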
+SET GLOBAL replicate_events_marked_for_skip=FILTER_ON_SLAVE;
+START SLAVE;
+
+connection master;
+# Need to fix @@binlog_format to get consistent event count.
+SET @old_binlog_format= @@binlog_format;
+SET binlog_format= statement;
+SET skip_replication=0;
+INSERT INTO t1 VALUES (1,5);
+SET skip_replication=1;
+INSERT INTO t1 VALUES (2,5);
+SET skip_replication=0;
+INSERT INTO t1 VALUES (3,5);
+INSERT INTO t1 VALUES (4,5);
+SET binlog_format= @old_binlog_format;
+
+sync_slave_with_master;
+connection slave;
+
+# The slave should have skipped the first three inserts (numbers 1 and 3 due
+# to @@sql_slave_skip_counter skipping two statements, number 2 due to
+# @@replicate_events_marked_for_skip=FILTER_ON_SLAVE). So only number 4
+# should be left.
+SELECT * FROM t1;
+
+
+#
+# Check that BINLOG statement preserves the @@skip_replication flag.
+#
+connection slave;
+# Need row @@binlog_format for BINLOG statements containing row events.
+--source include/stop_slave.inc
+SET @old_slave_binlog_format= @@global.binlog_format;
+SET GLOBAL binlog_format= row;
+--source include/start_slave.inc
+
+connection master;
+TRUNCATE t1;
+
+SET @old_binlog_format= @@binlog_format;
+SET binlog_format= row;
+# Format description log event.
+BINLOG 'wlZOTw8BAAAA8QAAAPUAAAAAAAQANS41LjIxLU1hcmlhREItZGVidWctbG9nAAAAAAAAAAAAAAAA
+AAAAAAAAAAAAAAAAAAAAAAAAEzgNAAgAEgAEBAQEEgAA2QAEGggAAAAICAgCAAAAAAAAAAAAAAAA
+AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
+AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
+AAAAAAAAAAAA371saA==';
+# INSERT INTO t1 VALUES (1,8) # with @@skip_replication=1
+BINLOG 'wlZOTxMBAAAAKgAAAGMBAAAAgCkAAAAAAAEABHRlc3QAAnQxAAIDAwAC
+wlZOTxcBAAAAJgAAAIkBAAAAgCkAAAAAAAEAAv/8AQAAAAgAAAA=';
+# INSERT INTO t1 VALUES (2,8) # with @@skip_replication=0
+BINLOG 'wlZOTxMBAAAAKgAAADwCAAAAACkAAAAAAAEABHRlc3QAAnQxAAIDAwAC
+wlZOTxcBAAAAJgAAAGICAAAAACkAAAAAAAEAAv/8AgAAAAgAAAA=';
+SET binlog_format= @old_binlog_format;
+
+SELECT * FROM t1 ORDER BY a;
+sync_slave_with_master;
+connection slave;
+# Slave should have only the second insert, the first should be ignored due to
+# the @@skip_replication flag.
+SELECT * FROM t1 ORDER by a;
+
+--source include/stop_slave.inc
+SET GLOBAL binlog_format= @old_slave_binlog_format;
+--source include/start_slave.inc
+
+
+# Test that it is not possible to change @@skip_replication inside a
+# transaction or statement, thereby replicating only parts of statements
+# or transactions.
+connection master;
+SET skip_replication=0;
+
+BEGIN;
+--error ER_INSIDE_TRANSACTION_PREVENTS_SWITCH_SKIP_REPLICATION
+SET skip_replication=0;
+--error ER_INSIDE_TRANSACTION_PREVENTS_SWITCH_SKIP_REPLICATION
+SET skip_replication=1;
+ROLLBACK;
+SET skip_replication=1;
+BEGIN;
+--error ER_INSIDE_TRANSACTION_PREVENTS_SWITCH_SKIP_REPLICATION
+SET skip_replication=0;
+--error ER_INSIDE_TRANSACTION_PREVENTS_SWITCH_SKIP_REPLICATION
+SET skip_replication=1;
+COMMIT;
+SET autocommit=0;
+INSERT INTO t2(a) VALUES(100);
+--error ER_INSIDE_TRANSACTION_PREVENTS_SWITCH_SKIP_REPLICATION
+SET skip_replication=1;
+ROLLBACK;
+SET autocommit=1;
+
+SET skip_replication=1;
+--delimiter |
+CREATE FUNCTION foo (x INT) RETURNS INT BEGIN SET SESSION skip_replication=x; RETURN x; END|
+CREATE PROCEDURE bar(x INT) BEGIN SET SESSION skip_replication=x; END|
+CREATE FUNCTION baz (x INT) RETURNS INT BEGIN CALL bar(x); RETURN x; END|
+--delimiter ;
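+# Changing @@skip_replication from inside a stored function or procedure that
+# is invoked by another statement would let parts of a single statement
+# replicate differently, so all of the attempts below are expected to fail
+# with ER_STORED_FUNCTION_PREVENTS_SWITCH_SKIP_REPLICATION; a direct CALL, by
+# contrast, is allowed, as the SELECTs on @@skip_replication further down
+# verify.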
+--error ER_STORED_FUNCTION_PREVENTS_SWITCH_SKIP_REPLICATION
+SELECT foo(0);
+--error ER_STORED_FUNCTION_PREVENTS_SWITCH_SKIP_REPLICATION
+SELECT baz(0);
+--error ER_STORED_FUNCTION_PREVENTS_SWITCH_SKIP_REPLICATION
+SET @a= foo(1);
+--error ER_STORED_FUNCTION_PREVENTS_SWITCH_SKIP_REPLICATION
+SET @a= baz(1);
+--error ER_STORED_FUNCTION_PREVENTS_SWITCH_SKIP_REPLICATION
+UPDATE t2 SET b=foo(0);
+--error ER_STORED_FUNCTION_PREVENTS_SWITCH_SKIP_REPLICATION
+UPDATE t2 SET b=baz(0);
+--error ER_STORED_FUNCTION_PREVENTS_SWITCH_SKIP_REPLICATION
+INSERT INTO t1 VALUES (101, foo(1));
+--error ER_STORED_FUNCTION_PREVENTS_SWITCH_SKIP_REPLICATION
+INSERT INTO t1 VALUES (101, baz(0));
+SELECT @@skip_replication;
+CALL bar(0);
+SELECT @@skip_replication;
+CALL bar(1);
+SELECT @@skip_replication;
+DROP FUNCTION foo;
+DROP PROCEDURE bar;
+DROP FUNCTION baz;
+
+
+# Test that master-side filtering happens on the master side, and that
+# slave-side filtering happens on the slave.
+
+# First test that events do not reach the slave when master-side filtering
+# is configured. Do this by replicating first with only the IO thread running
+# and master-side filtering; then change to no filtering and start the SQL
+# thread. This should still skip the events, as master-side filtering
+# means the events never reached the slave.
+connection master;
+SET skip_replication= 0;
+TRUNCATE t1;
+sync_slave_with_master;
+connection slave;
+STOP SLAVE;
+SET GLOBAL replicate_events_marked_for_skip=FILTER_ON_MASTER;
+START SLAVE IO_THREAD;
+connection master;
+SET skip_replication= 1;
+INSERT INTO t1(a) VALUES (1);
+SET skip_replication= 0;
+INSERT INTO t1(a) VALUES (2);
+--source include/save_master_pos.inc
+connection slave;
+--source include/sync_io_with_master.inc
+STOP SLAVE IO_THREAD;
+SET GLOBAL replicate_events_marked_for_skip=REPLICATE;
+START SLAVE;
+connection master;
+sync_slave_with_master;
+connection slave;
+# Now only the second insert of (2) should be visible, as the first was
+# filtered on the master, so even though the SQL thread ran without skipping
+# events, it will never see the event in the first place.
+SELECT * FROM t1;
+
+# Now tests that when slave-side filtering is configured, events _do_ reach
+# the slave.
+connection master;
+SET skip_replication= 0;
+TRUNCATE t1;
+sync_slave_with_master;
+connection slave;
+STOP SLAVE;
+SET GLOBAL replicate_events_marked_for_skip=FILTER_ON_SLAVE;
+START SLAVE IO_THREAD;
+connection master;
+SET skip_replication= 1;
+INSERT INTO t1(a) VALUES (1);
+SET skip_replication= 0;
+INSERT INTO t1(a) VALUES (2);
+--source include/save_master_pos.inc
+connection slave;
+--source include/sync_io_with_master.inc
+STOP SLAVE IO_THREAD;
+SET GLOBAL replicate_events_marked_for_skip=REPLICATE;
+START SLAVE;
+connection master;
+sync_slave_with_master;
+connection slave;
+# Now both inserts should be visible. Since filtering was configured to be
+# slave-side, the event is in the relay log, and when the SQL thread ran we
+# had disabled filtering again.
+SELECT * FROM t1 ORDER BY a;
+
+
+# Clean up.
+connection master;
+SET skip_replication=0;
+DROP TABLE t1,t2;
+connection slave;
+STOP SLAVE;
+SET GLOBAL replicate_events_marked_for_skip=REPLICATE;
+START SLAVE;
+
+--source include/rpl_end.inc
diff --git a/mysql-test/extra/rpl_tests/rpl_special_charset.inc b/mysql-test/extra/rpl_tests/rpl_special_charset.inc
new file mode 100644
index 00000000000..51119b319d7
--- /dev/null
+++ b/mysql-test/extra/rpl_tests/rpl_special_charset.inc
@@ -0,0 +1,32 @@
+#
+# This include file is used by more than one test suite
+# (currently rpl and binlog_encryption).
+# Please check all dependent tests after modifying it
+#
+
+################################################################################
+# Bug#19855907 IO THREAD AUTHENTICATION ISSUE WITH SOME CHARACTER SETS
+# Problem: IO thread fails to connect to master if servers are configured with
+# special character sets like utf16, utf32, ucs2.
+#
+# Analysis: The MySQL server does not support a few special character sets
+# (e.g. utf16, utf32, ucs2) as the client's character set. When the IO thread
+# tries to connect to the master, it sets the server's character set as the
+# client's character set. When the slave server is started with one of these
+# special character sets, the IO thread (a connection to the master) therefore
+# fails for the reason above.
+#
+# Fix: If the server's character set is not supported as a client character
+# set, use the default client character set (latin1) instead.
+###############################################################################
+--source include/master-slave.inc
+call mtr.add_suppression("Cannot use utf16 as character_set_client");
+CREATE TABLE t1(i VARCHAR(20));
+INSERT INTO t1 VALUES (0xFFFF);
+--sync_slave_with_master
+--let diff_tables=master:t1, slave:t1
+--source include/diff_tables.inc
+# Cleanup
+--connection master
+DROP TABLE t1;
+--source include/rpl_end.inc
diff --git a/mysql-test/extra/rpl_tests/rpl_sporadic_master.inc b/mysql-test/extra/rpl_tests/rpl_sporadic_master.inc
new file mode 100644
index 00000000000..ad4c44cbf74
--- /dev/null
+++ b/mysql-test/extra/rpl_tests/rpl_sporadic_master.inc
@@ -0,0 +1,32 @@
+#
+# This include file is used by more than one test suite
+# (currently rpl and binlog_encryption).
+# Please check all dependent tests after modifying it
+#
+
+# test to see if replication can continue when master sporadically fails on
+# COM_BINLOG_DUMP and additionally limits the number of events per dump
+
+source include/master-slave.inc;
+
+create table t2(n int);
+create table t1(n int not null auto_increment primary key);
+insert into t1 values (NULL),(NULL);
+truncate table t1;
+# We have to use 4 in the following to make this test work with all table types
+insert into t1 values (4),(NULL);
+sync_slave_with_master;
+--source include/stop_slave.inc
+--source include/start_slave.inc
+connection master;
+insert into t1 values (NULL),(NULL);
+flush logs;
+truncate table t1;
+insert into t1 values (10),(NULL),(NULL),(NULL),(NULL),(NULL);
+sync_slave_with_master;
+select * from t1 ORDER BY n;
+connection master;
+drop table t1,t2;
+sync_slave_with_master;
+
+--source include/rpl_end.inc
diff --git a/mysql-test/extra/rpl_tests/rpl_ssl.inc b/mysql-test/extra/rpl_tests/rpl_ssl.inc
new file mode 100644
index 00000000000..aff5499c8e5
--- /dev/null
+++ b/mysql-test/extra/rpl_tests/rpl_ssl.inc
@@ -0,0 +1,115 @@
+#
+# This include file is used by more than one test suite
+# (currently rpl and binlog_encryption).
+# Please check all dependent tests after modifying it
+#
+
+source include/have_ssl_communication.inc;
+source include/master-slave.inc;
+
+# create a user for replication that requires ssl encryption
+connection master;
+create user replssl@localhost;
+grant replication slave on *.* to replssl@localhost require ssl;
+create table t1 (t int auto_increment, KEY(t));
+
+sync_slave_with_master;
+
+# Set slave to use SSL for connection to master
+stop slave;
+--replace_result $MYSQL_TEST_DIR MYSQL_TEST_DIR
+eval change master to
+ master_user='replssl',
+ master_password='',
+ master_ssl=1,
+ master_ssl_ca ='$MYSQL_TEST_DIR/std_data/cacert.pem',
+ master_ssl_cert='$MYSQL_TEST_DIR/std_data/client-cert.pem',
+ master_ssl_key='$MYSQL_TEST_DIR/std_data/client-key.pem';
+start slave;
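+# (The replssl account was created with REQUIRE SSL, so replication can only
+# proceed if the slave really connects over SSL with the std_data certificates
+# configured above.)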
+
+# Switch to master and insert one record, then sync it to slave
+connection master;
+insert into t1 values(1);
+sync_slave_with_master;
+
+# The record should now be on slave
+select * from t1;
+
+# The slave is synced and waiting/reading from master
+# SHOW SLAVE STATUS will show "Waiting for master to send event"
+let $status_items= Master_SSL_Allowed, Master_SSL_CA_Path, Master_SSL_CA_File, Master_SSL_Cert, Master_SSL_Key;
+source include/show_slave_status.inc;
+source include/check_slave_is_running.inc;
+
+# Stop the slave, as reported in bug#21871 it would hang
+STOP SLAVE;
+
+select * from t1;
+
+# Do the same thing a number of times
+disable_query_log;
+disable_result_log;
+# 2007-11-27 mats Bug #32756 Starting and stopping the slave in a loop can lose rows
+# After discussions with Engineering, I'm disabling this part of the test to avoid it causing
+# red trees.
+disable_parsing;
+let $i= 100;
+while ($i)
+{
+ start slave;
+ connection master;
+ insert into t1 values (NULL);
+ select * from t1; # Some variance
+ connection slave;
+ select * from t1; # Some variance
+ stop slave;
+ dec $i;
+}
+enable_parsing;
+START SLAVE;
+enable_query_log;
+enable_result_log;
+connection master;
+# INSERT one more record to make sure
+# the sync has something to do
+insert into t1 values (NULL);
+let $master_count= `select count(*) from t1`;
+
+sync_slave_with_master;
+--source include/wait_for_slave_to_start.inc
+source include/show_slave_status.inc;
+source include/check_slave_is_running.inc;
+
+let $slave_count= `select count(*) from t1`;
+
+if ($slave_count != $master_count)
+{
+ echo master and slave differed in number of rows;
+ echo master: $master_count;
+ echo slave: $slave_count;
+
+ connection master;
+  select count(*) from t1;
+ select * from t1;
+ connection slave;
+  select count(*) from t1;
+ select * from t1;
+ query_vertical show slave status;
+}
+
+connection master;
+drop user replssl@localhost;
+drop table t1;
+sync_slave_with_master;
+
+--source include/stop_slave.inc
+CHANGE MASTER TO
+ master_user = 'root',
+ master_ssl = 0,
+ master_ssl_ca = '',
+ master_ssl_cert = '',
+ master_ssl_key = '';
+
+--echo End of 5.0 tests
+--let $rpl_only_running_threads= 1
+--source include/rpl_end.inc
diff --git a/mysql-test/extra/rpl_tests/rpl_stm_relay_ign_space.inc b/mysql-test/extra/rpl_tests/rpl_stm_relay_ign_space.inc
new file mode 100644
index 00000000000..82c4b1881bf
--- /dev/null
+++ b/mysql-test/extra/rpl_tests/rpl_stm_relay_ign_space.inc
@@ -0,0 +1,107 @@
+#
+# This include file is used by more than one test suite
+# (currently rpl and binlog_encryption).
+# Please check all dependent tests after modifying it
+#
+
+#
+# BUG#12400313 / BUG#64503 test case
+#
+#
+# Description
+# -----------
+#
+# This test case starts the slave server with:
+# --relay-log-space-limit=8192 --relay-log-purge --max-relay-log-size=4096
+#
+# Then it issues some queries that will cause the slave to reach
+# relay-log-space-limit. We lock the table so that the SQL thread is
+# not able to purge the log and then we issue some more statements.
+#
+# The purpose is to show that the IO thread honors the limit while the
+# SQL thread is unable to purge the relay logs, which did not happen
+# before this patch. In addition we assert that, while the limit is
+# being ignored (the SQL thread needs to rotate before purging), the
+# IO thread does not exceed it in an uncontrolled manner.
+
+--source include/have_binlog_format_statement.inc
+--source include/master-slave.inc
+--source include/have_innodb.inc
+
+--disable_query_log
+CREATE TABLE t1 (c1 TEXT) engine=InnoDB;
+
+INSERT INTO t1 VALUES ('xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx');
+INSERT INTO t1 VALUES ('xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx');
+INSERT INTO t1 VALUES ('xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx');
+INSERT INTO t1 VALUES ('xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx');
+
+--sync_slave_with_master
+
+# wait for the SQL thread to sleep
+--let $show_statement= SHOW PROCESSLIST
+--let $field= State
+--let $condition= = 'Slave has read all relay log; waiting for the slave I/O thread to update it'
+--source include/wait_show_condition.inc
+
+# Now the IO thread has set rli->ignore_space_limit.
+# Let's lock the table so that once the SQL thread awakes
+# it blocks there and does not reset rli->ignore_space_limit
+# back to zero.
+LOCK TABLE t1 WRITE;
+
+# now issue more statements that will overflow the
+# rli->log_space_limit (in this case ~10K)
+--connection master
+
+INSERT INTO t1 VALUES ('xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx');
+INSERT INTO t1 VALUES ('xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx');
+INSERT INTO t1 VALUES ('xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx');
+INSERT INTO t1 VALUES ('xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx');
+INSERT INTO t1 VALUES ('xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx');
+INSERT INTO t1 VALUES ('xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx');
+INSERT INTO t1 VALUES ('xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx');
+INSERT INTO t1 VALUES ('xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx');
+INSERT INTO t1 VALUES ('xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx');
+INSERT INTO t1 VALUES ('xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx');
+
+--connection slave
+
+# ASSERT that the IO thread waits for the SQL thread to release some
+# space before continuing
+--let $show_statement= SHOW PROCESSLIST
+--let $field= State
+--let $condition= LIKE 'Waiting for %'
+# before the patch (IO would have transferred everything)
+#--let $condition= = 'Waiting for master to send event'
+# after the patch (now it waits for space to be freed)
+#--let $condition= = 'Waiting for the slave SQL thread to free enough relay log space'
+--source include/wait_show_condition.inc
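+# (Illustrative only: a roughly equivalent check could be written against
+#  information_schema.processlist with include/wait_condition.inc, e.g.
+#    --let $wait_condition= SELECT COUNT(*) > 0 FROM information_schema.processlist WHERE state LIKE 'Waiting for %'
+#    --source include/wait_condition.inc )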
+
+# without the patch we can uncomment the following two lines and
+# watch the IO thread synchronize with the master, thus writing
+# relay logs way over the space limit
+#--connection master
+#--source include/sync_slave_io_with_master.inc
+
+## ASSERT that the IO thread has honored the limit plus the few bytes required to be able to purge
+--let $relay_log_space_while_sql_is_executing = query_get_value(SHOW SLAVE STATUS, Relay_Log_Space, 1)
+--let $relay_log_space_limit = query_get_value(SHOW VARIABLES LIKE "relay_log_space_limit", Value, 1)
+--let $assert_text= Assert that relay log space is close to the limit
+--let $assert_cond= $relay_log_space_while_sql_is_executing <= $relay_log_space_limit * 1.15
+--source include/assert.inc
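+# (relay_log_space_limit itself comes from server configuration; for a test
+#  like this it would typically be set in the slave's .opt file, e.g. a
+#  hypothetical --relay-log-space-limit=8192.)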
+
+# unlock the table and let SQL thread continue applying events
+UNLOCK TABLES;
+
+--connection master
+--sync_slave_with_master
+--let $diff_tables=master:test.t1,slave:test.t1
+--source include/diff_tables.inc
+
+--connection master
+DROP TABLE t1;
+--enable_query_log
+--sync_slave_with_master
+
+--source include/rpl_end.inc
diff --git a/mysql-test/extra/rpl_tests/rpl_switch_stm_row_mixed.inc b/mysql-test/extra/rpl_tests/rpl_switch_stm_row_mixed.inc
new file mode 100644
index 00000000000..e74fd6828c6
--- /dev/null
+++ b/mysql-test/extra/rpl_tests/rpl_switch_stm_row_mixed.inc
@@ -0,0 +1,631 @@
+#
+# This include file is used by more than one test suite
+# (currently rpl and binlog_encryption).
+# Please check all dependent tests after modifying it
+#
+
+#
+# The rpl_switch_stm_row_mixed test covers
+#
+# - the master switching explicitly between the STATEMENT, ROW, and MIXED
+#   binlog formats, showing when this is possible and when it is not;
+# - the master switching implicitly from MIXED to row-based logging,
+#   listing the use cases that trigger the switch (e.g. a query that
+#   invokes UUID()), which effectively define the MIXED binlog format;
+# - correctness of execution.
+
+
+-- source include/have_binlog_format_mixed_or_row.inc
+-- source include/master-slave.inc
+
+# Since this test generates row-based events in the binary log, the
+# slave SQL thread cannot run in STATEMENT mode, so we only execute
+# the test with MIXED and ROW as the default value of BINLOG_FORMAT.
+
+connection slave;
+
+connection master;
+--disable_warnings
+drop database if exists mysqltest1;
+create database mysqltest1;
+--enable_warnings
+use mysqltest1;
+
+# Save binlog format
+set @my_binlog_format= @@global.binlog_format;
+
+# play with switching
+set session binlog_format=mixed;
+show session variables like "binlog_format%";
+set session binlog_format=statement;
+show session variables like "binlog_format%";
+set session binlog_format=row;
+show session variables like "binlog_format%";
+
+set global binlog_format=DEFAULT;
+show global variables like "binlog_format%";
+set global binlog_format=MIXED;
+show global variables like "binlog_format%";
+set global binlog_format=STATEMENT;
+show global variables like "binlog_format%";
+set global binlog_format=ROW;
+show global variables like "binlog_format%";
+show session variables like "binlog_format%";
+select @@global.binlog_format, @@session.binlog_format;
+
+CREATE TABLE t1 (a varchar(100));
+
+prepare stmt1 from 'insert into t1 select concat(UUID(),?)';
+set @string="emergency_1_";
+insert into t1 values("work_2_");
+execute stmt1 using @string;
+deallocate prepare stmt1;
+
+prepare stmt1 from 'insert into t1 select ?';
+insert into t1 values(concat(UUID(),"work_3_"));
+execute stmt1 using @string;
+deallocate prepare stmt1;
+
+insert into t1 values(concat("for_4_",UUID()));
+insert into t1 select "yesterday_5_";
+
+# verify that temp tables prevent a switch to SBR
+create temporary table tmp(a char(100));
+insert into tmp values("see_6_");
+--error ER_TEMP_TABLE_PREVENTS_SWITCH_OUT_OF_RBR
+set binlog_format=statement;
+insert into t1 select * from tmp;
+drop temporary table tmp;
+
+# Now we go to SBR
+set binlog_format=statement;
+show global variables like "binlog_format%";
+show session variables like "binlog_format%";
+select @@global.binlog_format, @@session.binlog_format;
+set global binlog_format=statement;
+show global variables like "binlog_format%";
+show session variables like "binlog_format%";
+select @@global.binlog_format, @@session.binlog_format;
+
+prepare stmt1 from 'insert into t1 select ?';
+set @string="emergency_7_";
+insert into t1 values("work_8_");
+execute stmt1 using @string;
+deallocate prepare stmt1;
+
+prepare stmt1 from 'insert into t1 select ?';
+insert into t1 values("work_9_");
+execute stmt1 using @string;
+deallocate prepare stmt1;
+
+insert into t1 values("for_10_");
+insert into t1 select "yesterday_11_";
+
+# test statement mode explicitly (it is not the default after WL#3368)
+set binlog_format=statement;
+select @@global.binlog_format, @@session.binlog_format;
+set global binlog_format=statement;
+select @@global.binlog_format, @@session.binlog_format;
+
+prepare stmt1 from 'insert into t1 select ?';
+set @string="emergency_12_";
+insert into t1 values("work_13_");
+execute stmt1 using @string;
+deallocate prepare stmt1;
+
+prepare stmt1 from 'insert into t1 select ?';
+insert into t1 values("work_14_");
+execute stmt1 using @string;
+deallocate prepare stmt1;
+
+insert into t1 values("for_15_");
+insert into t1 select "yesterday_16_";
+
+# and now the mixed mode
+
+set global binlog_format=mixed;
+select @@global.binlog_format, @@session.binlog_format;
+set binlog_format=default;
+select @@global.binlog_format, @@session.binlog_format;
+
+prepare stmt1 from 'insert into t1 select concat(UUID(),?)';
+set @string="emergency_17_";
+insert into t1 values("work_18_");
+execute stmt1 using @string;
+deallocate prepare stmt1;
+
+prepare stmt1 from 'insert into t1 select ?';
+insert into t1 values(concat(UUID(),"work_19_"));
+execute stmt1 using @string;
+deallocate prepare stmt1;
+
+insert into t1 values(concat("for_20_",UUID()));
+insert into t1 select "yesterday_21_";
+
+prepare stmt1 from 'insert into t1 select ?';
+insert into t1 values(concat(UUID(),"work_22_"));
+execute stmt1 using @string;
+deallocate prepare stmt1;
+
+insert into t1 values(concat("for_23_",UUID()));
+insert into t1 select "yesterday_24_";
+
+# Test of CREATE TABLE SELECT
+
+create table t2 ENGINE=MyISAM select rpad(UUID(),100,' ');
+create table t3 select 1 union select UUID();
+--disable_warnings
+create table t4 select * from t1 where 3 in (select 1 union select 2 union select UUID() union select 3);
+--enable_warnings
+create table t5 select * from t1 where 3 in (select 1 union select 2 union select curdate() union select 3);
+# what if UUID() is first:
+--disable_warnings
+insert into t5 select UUID() from t1 where 3 in (select 1 union select 2 union select 3 union select * from t4);
+--enable_warnings
+
+# inside a stored procedure
+
+delimiter |;
+create procedure foo()
+begin
+insert into t1 values("work_25_");
+insert into t1 values(concat("for_26_",UUID()));
+insert into t1 select "yesterday_27_";
+end|
+create procedure foo2()
+begin
+insert into t1 values(concat("emergency_28_",UUID()));
+insert into t1 values("work_29_");
+insert into t1 values(concat("for_30_",UUID()));
+set session binlog_format=row; # accepted for stored procs
+insert into t1 values("more work_31_");
+set session binlog_format=mixed;
+end|
+create function foo3() returns bigint unsigned
+begin
+ set session binlog_format=row; # rejected for stored funcs
+ insert into t1 values("alarm");
+ return 100;
+end|
+create procedure foo4(x varchar(100))
+begin
+insert into t1 values(concat("work_250_",x));
+insert into t1 select "yesterday_270_";
+end|
+delimiter ;|
+call foo();
+call foo2();
+call foo4("hello");
+call foo4(UUID());
+call foo4("world");
+
+# test that binlog_format can't be SET inside a stored function
+--error ER_STORED_FUNCTION_PREVENTS_SWITCH_BINLOG_FORMAT
+select foo3();
+select * from t1 where a="alarm";
+
+# Tests of stored functions/triggers/views for BUG#20930 "Mixed
+# binlogging mode does not work with stored functions, triggers,
+# views"
+
+# Function which calls procedure
+drop function foo3;
+delimiter |;
+create function foo3() returns bigint unsigned
+begin
+ insert into t1 values("foo3_32_");
+ call foo();
+ return 100;
+end|
+delimiter ;|
+insert into t2 select foo3();
+
+prepare stmt1 from 'insert into t2 select foo3()';
+execute stmt1;
+execute stmt1;
+deallocate prepare stmt1;
+
+# Test if stored function calls stored function which calls procedure
+# which requires row-based.
+
+delimiter |;
+create function foo4() returns bigint unsigned
+begin
+ insert into t2 select foo3();
+ return 100;
+end|
+delimiter ;|
+select foo4();
+
+prepare stmt1 from 'select foo4()';
+execute stmt1;
+execute stmt1;
+deallocate prepare stmt1;
+
+# A simple stored function
+delimiter |;
+create function foo5() returns bigint unsigned
+begin
+ insert into t2 select UUID();
+ return 100;
+end|
+delimiter ;|
+select foo5();
+
+prepare stmt1 from 'select foo5()';
+execute stmt1;
+execute stmt1;
+deallocate prepare stmt1;
+
+# A simple stored function where UUID() is in the argument
+delimiter |;
+create function foo6(x varchar(100)) returns bigint unsigned
+begin
+ insert into t2 select x;
+ return 100;
+end|
+delimiter ;|
+select foo6("foo6_1_");
+select foo6(concat("foo6_2_",UUID()));
+
+prepare stmt1 from 'select foo6(concat("foo6_3_",UUID()))';
+execute stmt1;
+execute stmt1;
+deallocate prepare stmt1;
+
+
+# Test of views using UUID()
+
+create view v1 as select uuid();
+create table t11 (data varchar(255));
+insert into t11 select * from v1;
+# Test of querying INFORMATION_SCHEMA, which parses the view's body, to
+# verify that the statement is still binlogged statement-based (i.e. not
+# affected by the parsing of the view's body).
+insert into t11 select TABLE_NAME from INFORMATION_SCHEMA.TABLES where TABLE_SCHEMA='mysqltest1' and TABLE_NAME IN ('v1','t11');
+prepare stmt1 from "insert into t11 select TABLE_NAME from INFORMATION_SCHEMA.TABLES where TABLE_SCHEMA='mysqltest1' and TABLE_NAME IN ('v1','t11')";
+execute stmt1;
+execute stmt1;
+deallocate prepare stmt1;
+
+# Test of triggers with UUID()
+delimiter |;
+create trigger t11_bi before insert on t11 for each row
+begin
+ set NEW.data = concat(NEW.data,UUID());
+end|
+delimiter ;|
+insert into t11 values("try_560_");
+
+# Test that INSERT DELAYED works in mixed mode (BUG#20649)
+insert delayed into t2 values("delay_1_");
+insert delayed into t2 values(concat("delay_2_",UUID()));
+insert delayed into t2 values("delay_6_");
+
+# Test for BUG#20633 (INSERT DELAYED with RAND()/user variables does not
+# replicate correctly in statement-based mode; we test that it works in
+# mixed mode).
+insert delayed into t2 values(rand());
+set @a=2.345;
+insert delayed into t2 values(@a);
+
+# With INSERT DELAYED, rows are written to the binlog after they are
+# written to the table. Therefore, it is not enough to wait until the
+# rows make it to t2 on the master (the rows may not be in the binlog
+# at that time, and may still not be in the binlog when
+# sync_slave_with_master is later called). Instead, we wait until the
+# rows make it to t2 on the slave. We first call
+# sync_slave_with_master, so that we are sure that t2 has been created
+# on the slave.
+sync_slave_with_master;
+let $wait_condition= SELECT COUNT(*) = 19 FROM mysqltest1.t2;
+--source include/wait_condition.inc
+connection master;
+
+# If you want to do manual testing of the mixed mode regarding UDFs (not
+# testable automatically as it is quite platform- and compiler-dependent),
+# you just need to set the variable below to 1, run "make udf_example.so"
+# in sql/, and copy sql/udf_example.so to MYSQL_TEST_DIR/lib/mysql.
+let $you_want_to_test_UDF=0;
+if ($you_want_to_test_UDF)
+{
+ CREATE FUNCTION metaphon RETURNS STRING SONAME 'udf_example.so';
+ prepare stmt1 from 'insert into t1 select metaphon(?)';
+ set @string="emergency_133_";
+ insert into t1 values("work_134_");
+ execute stmt1 using @string;
+ deallocate prepare stmt1;
+ prepare stmt1 from 'insert into t1 select ?';
+ insert into t1 values(metaphon("work_135_"));
+ execute stmt1 using @string;
+ deallocate prepare stmt1;
+ insert into t1 values(metaphon("for_136_"));
+ insert into t1 select "yesterday_137_";
+ create table t6 select metaphon("for_138_");
+ create table t7 select 1 union select metaphon("for_139_");
+ create table t8 select * from t1 where 3 in (select 1 union select 2 union select metaphon("for_140_") union select 3);
+ create table t9 select * from t1 where 3 in (select 1 union select 2 union select curdate() union select 3);
+}
+
+create table t20 select * from t1; # save for comparing later
+create table t21 select * from t2;
+create table t22 select * from t3;
+drop table t1,t2,t3;
+
+# This tests the fix for
+# BUG#19630 stored function inserting into two auto_increment breaks statement-based binlog
+# We verify that under the mixed binlog mode, a stored function
+# modifying at least two tables with an auto_increment column
+# is binlogged row-based. Indeed, with statement-based binlogging
+# only the auto_increment value generated for the first table
+# is recorded in the binlog; the value generated for the second
+# table is missing.
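+# (With statement-based logging the server writes a single SET INSERT_ID
+#  before the statement, which can only seed the first table's auto_increment
+#  values on the slave; that is why such statements must switch to row format.)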
+
+create table t1 (a int primary key auto_increment, b varchar(100));
+create table t2 (a int primary key auto_increment, b varchar(100));
+create table t3 (b varchar(100));
+delimiter |;
+create function f (x varchar(100)) returns int deterministic
+begin
+ insert into t1 values(null,x);
+ insert into t2 values(null,x);
+ return 1;
+end|
+delimiter ;|
+select f("try_41_");
+# Two operations that cancel each other out, except that their net
+# effect is to advance the auto_increment counter of t2 on the slave:
+sync_slave_with_master;
+use mysqltest1;
+insert into t2 values(2,null),(3,null),(4,null);
+delete from t2 where a>=2;
+
+connection master;
+# this is the call which didn't replicate well
+select f("try_42_");
+sync_slave_with_master;
+
+# now use prepared statement and test again, just to see that the RBB
+# mode isn't set at PREPARE but at EXECUTE.
+
+insert into t2 values(3,null),(4,null);
+delete from t2 where a>=3;
+
+connection master;
+prepare stmt1 from 'select f(?)';
+set @string="try_43_";
+insert into t1 values(null,"try_44_"); # should be SBB
+execute stmt1 using @string; # should be RBB
+deallocate prepare stmt1;
+sync_slave_with_master;
+
+# verify that if only one table has auto_inc, it does not trigger RBB
+# (we'll check in binlog further below)
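+# (When checking by hand, something like
+#    SHOW BINLOG EVENTS IN 'master-bin.000001';
+#  makes the difference visible: statement-based logging produces Query
+#  events, row-based logging produces Table_map/Write_rows events.)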
+
+connection master;
+create table t12 select * from t1; # save for comparing later
+drop table t1;
+create table t1 (a int, b varchar(100), key(a));
+select f("try_45_");
+
+# restore table's key
+create table t13 select * from t1;
+drop table t1;
+create table t1 (a int primary key auto_increment, b varchar(100));
+
+# now test if it's two functions, each of them inserts in one table
+
+drop function f;
+# we need a unique key so that mysqldump orders the rows deterministically
+create table t14 (unique (a)) select * from t2;
+truncate table t2;
+delimiter |;
+create function f1 (x varchar(100)) returns int deterministic
+begin
+ insert into t1 values(null,x);
+ return 1;
+end|
+create function f2 (x varchar(100)) returns int deterministic
+begin
+ insert into t2 values(null,x);
+ return 1;
+end|
+delimiter ;|
+select f1("try_46_"),f2("try_47_");
+
+sync_slave_with_master;
+insert into t2 values(2,null),(3,null),(4,null);
+delete from t2 where a>=2;
+
+connection master;
+# Test with SELECT and INSERT
+select f1("try_48_"),f2("try_49_");
+insert into t3 values(concat("try_50_",f1("try_51_"),f2("try_52_")));
+sync_slave_with_master;
+
+# verify that if f2 only reads from an auto_inc table, this does not
+# switch to RBB
+connection master;
+drop function f2;
+delimiter |;
+create function f2 (x varchar(100)) returns int deterministic
+begin
+ declare y int;
+ insert into t1 values(null,x);
+ set y = (select count(*) from t2);
+ return y;
+end|
+delimiter ;|
+select f1("try_53_"),f2("try_54_");
+sync_slave_with_master;
+
+# And now, a normal statement with a trigger (no stored functions)
+
+connection master;
+drop function f2;
+delimiter |;
+create trigger t1_bi before insert on t1 for each row
+begin
+ insert into t2 values(null,"try_55_");
+end|
+delimiter ;|
+insert into t1 values(null,"try_56_");
+# and now remove one auto_increment and verify SBB
+alter table t1 modify a int, drop primary key;
+insert into t1 values(null,"try_57_");
+sync_slave_with_master;
+
+# Test for BUG#20499 "mixed mode with temporary table breaks binlog"
+# Slave used to have only 2 rows instead of 3.
+connection master;
+CREATE TEMPORARY TABLE t15 SELECT UUID();
+create table t16 like t15;
+INSERT INTO t16 SELECT * FROM t15;
+# we'll verify that this one is done RBB
+insert into t16 values("try_65_");
+drop table t15;
+# we'll verify that this one is done SBB
+insert into t16 values("try_66_");
+sync_slave_with_master;
+
+# and now compare:
+
+connection master;
+
+# first check that data on master is sensible
+select count(*) from t1;
+select count(*) from t2;
+select count(*) from t3;
+select count(*) from t4;
+select count(*) from t5;
+select count(*) from t11;
+select count(*) from t20;
+select count(*) from t21;
+select count(*) from t22;
+select count(*) from t12;
+select count(*) from t13;
+select count(*) from t14;
+select count(*) from t16;
+if ($you_want_to_test_UDF)
+{
+ select count(*) from t6;
+ select count(*) from t7;
+ select count(*) from t8;
+ select count(*) from t9;
+}
+
+sync_slave_with_master;
+
+#
+# Bug#20863 If binlog format is changed between update and unlock of
+# tables, wrong binlog
+#
+
+connection master;
+DROP TABLE IF EXISTS t11;
+SET SESSION BINLOG_FORMAT=STATEMENT;
+CREATE TABLE t11 (song VARCHAR(255));
+LOCK TABLES t11 WRITE;
+SET SESSION BINLOG_FORMAT=ROW;
+INSERT INTO t11 VALUES('Several Species of Small Furry Animals Gathered Together in a Cave and Grooving With a Pict');
+SET SESSION BINLOG_FORMAT=STATEMENT;
+INSERT INTO t11 VALUES('Careful With That Axe, Eugene');
+UNLOCK TABLES;
+
+--query_vertical SELECT * FROM t11
+sync_slave_with_master;
+USE mysqltest1;
+--query_vertical SELECT * FROM t11
+
+connection master;
+DROP TABLE IF EXISTS t12;
+SET SESSION BINLOG_FORMAT=MIXED;
+CREATE TABLE t12 (data LONG);
+LOCK TABLES t12 WRITE;
+INSERT INTO t12 VALUES(UUID());
+UNLOCK TABLES;
+sync_slave_with_master;
+
+#
+# BUG#28086: SBR of USER() becomes corrupted on slave
+#
+
+connection master;
+
+# Just to get something that is non-trivial, albeit still simple, we
+# stuff the result of USER() and CURRENT_USER() into a variable.
+--delimiter $$
+CREATE FUNCTION my_user()
+ RETURNS CHAR(64)
+BEGIN
+ DECLARE user CHAR(64);
+ SELECT USER() INTO user;
+ RETURN user;
+END $$
+--delimiter ;
+
+--delimiter $$
+CREATE FUNCTION my_current_user()
+ RETURNS CHAR(64)
+BEGIN
+ DECLARE user CHAR(64);
+ SELECT CURRENT_USER() INTO user;
+ RETURN user;
+END $$
+--delimiter ;
+
+DROP TABLE IF EXISTS t13;
+CREATE TABLE t13 (data CHAR(64));
+INSERT INTO t13 VALUES (USER());
+INSERT INTO t13 VALUES (my_user());
+INSERT INTO t13 VALUES (CURRENT_USER());
+INSERT INTO t13 VALUES (my_current_user());
+
+sync_slave_with_master;
+
+# as we're using UUID we don't SELECT but use "diff" like in rpl_row_UUID
+--exec $MYSQL_DUMP --compact --order-by-primary --skip-extended-insert --no-create-info mysqltest1 > $MYSQLTEST_VARDIR/tmp/rpl_switch_stm_row_mixed_master.sql
+--exec $MYSQL_DUMP_SLAVE --compact --order-by-primary --skip-extended-insert --no-create-info mysqltest1 > $MYSQLTEST_VARDIR/tmp/rpl_switch_stm_row_mixed_slave.sql
+
+# Let's compare. Note: if the dumps match, the test will pass; if they do
+# not match, the test will show that the diff statement failed and no
+# reject file will be created. You will need to go to the mysql-test dir
+# and diff the files yourself to see what does not match.
+
+diff_files $MYSQLTEST_VARDIR/tmp/rpl_switch_stm_row_mixed_master.sql $MYSQLTEST_VARDIR/tmp/rpl_switch_stm_row_mixed_slave.sql;
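+# (Doing the comparison by hand amounts to running something like
+#    diff var/tmp/rpl_switch_stm_row_mixed_master.sql var/tmp/rpl_switch_stm_row_mixed_slave.sql
+#  from a shell, with "var" being the actual $MYSQLTEST_VARDIR location.)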
+
+connection master;
+
+# Now test that mysqlbinlog works fine on a binlog generated by the
+# mixed mode
+
+# BUG#11312 "DELIMITER is not written to the binary log that causes
+# syntax error" means that mysqlbinlog will fail if we pass it the
+# text of queries; this forces us to use --base64-output here.
+
+# BUG#20929 "BINLOG command causes invalid free plus assertion
+# failure" makes mysqld segfault when receiving --base64-output
+
+# So I can't enable this piece of test
+# SIGH
+
+if ($enable_when_11312_or_20929_fixed)
+{
+--exec $MYSQL_BINLOG --base64-output $MYSQLTEST_VARDIR/log/master-bin.000001 > $MYSQLTEST_VARDIR/tmp/mysqlbinlog_mixed.sql
+drop database mysqltest1;
+--exec $MYSQL < $MYSQLTEST_VARDIR/tmp/mysqlbinlog_mixed.sql
+--exec $MYSQL_DUMP --compact --order-by-primary --skip-extended-insert --no-create-info mysqltest1 > $MYSQLTEST_VARDIR/tmp/rpl_switch_stm_row_mixed_master.sql
+# the mysqldump output taken earlier from the slave reflects what the
+# master contained before the restore, so it serves as the reference.
+diff_files $MYSQLTEST_VARDIR/tmp/rpl_switch_stm_row_mixed_master.sql $MYSQLTEST_VARDIR/tmp/rpl_switch_stm_row_mixed_slave.sql;
+}
+
+drop database mysqltest1;
+sync_slave_with_master;
+
+connection master;
+# Restore binlog format setting
+set global binlog_format =@my_binlog_format;
+--source include/rpl_end.inc
diff --git a/mysql-test/extra/rpl_tests/rpl_sync.inc b/mysql-test/extra/rpl_tests/rpl_sync.inc
new file mode 100644
index 00000000000..ede3c3c515f
--- /dev/null
+++ b/mysql-test/extra/rpl_tests/rpl_sync.inc
@@ -0,0 +1,159 @@
+#
+# This include file is used by more than one test suite
+# (currently rpl and binlog_encryption).
+# Please check all dependent tests after modifying it
+#
+
+########################################################################################
+# This test verifies the options --sync-relay-log-info and --relay-log-recovery by
+# crashing the slave in two different situations:
+# (case-1) - Corrupts the relay log holding changes not yet processed by
+#            the SQL thread, then crashes the slave.
+# (case-2) - Corrupts the master.info with wrong coordinates, then crashes
+#            the slave.
+#
+# Case 1:
+# 1 - Stops the SQL thread.
+# 2 - Inserts new records into the master.
+# 3 - Corrupts the relay-log.bin* which most likely holds those changes.
+# 4 - Crashes the slave.
+# 5 - Verifies that the slave is in sync with the master, which means that the
+#     information loss was circumvented by the recovery process.
+#
+# Case 2:
+# 1 - Stops the SQL/IO threads.
+# 2 - Inserts new records into the master.
+# 3 - Corrupts the master.info with wrong coordinates.
+# 4 - Crashes the slave.
+# 5 - Verifies that the slave is in sync with the master, which means that the
+#     information loss was circumvented by the recovery process.
+########################################################################################
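+# The two options are normally passed to the slave through the test's
+# .opt/.cnf file; an illustrative (not authoritative) configuration is:
+#   --sync-relay-log-info=1 --relay-log-recovery=1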
+
+########################################################################################
+# Configuring the environment
+########################################################################################
+--echo =====Configuring the environment=======;
+--source include/master-slave.inc
+--source include/not_embedded.inc
+--source include/not_valgrind.inc
+--source include/have_debug.inc
+--source include/have_innodb.inc
+--source include/not_crashrep.inc
+
+call mtr.add_suppression('Attempting backtrace');
+call mtr.add_suppression("Recovery from master pos .* and file master-bin.000001");
+# Use innodb so we do not get "table should be repaired" issues.
+ALTER TABLE mysql.gtid_slave_pos ENGINE=InnoDB;
+flush tables;
+CREATE TABLE t1(a INT, PRIMARY KEY(a)) engine=innodb;
+
+insert into t1(a) values(1);
+insert into t1(a) values(2);
+insert into t1(a) values(3);
+
+########################################################################################
+# Case 1: Corrupt a relay-log.bin*
+########################################################################################
+--echo =====Inserting data on the master but without the SQL Thread being running=======;
+sync_slave_with_master;
+
+connection slave;
+let $MYSQLD_SLAVE_DATADIR= `select @@datadir`;
+--replace_result $MYSQLD_SLAVE_DATADIR MYSQLD_SLAVE_DATADIR
+--copy_file $MYSQLD_SLAVE_DATADIR/master.info $MYSQLD_SLAVE_DATADIR/master.backup
+--source include/stop_slave_sql.inc
+
+connection master;
+insert into t1(a) values(4);
+insert into t1(a) values(5);
+insert into t1(a) values(6);
+
+--echo =====Removing relay log files and crashing/recovering the slave=======;
+connection slave;
+--source include/stop_slave_io.inc
+
+let $file= query_get_value("SHOW SLAVE STATUS", Relay_Log_File, 1);
+
+--let FILE_TO_CORRUPT= $MYSQLD_SLAVE_DATADIR/$file
+perl;
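+# Overwrite the relay log file with junk so that it is effectively corrupted;
+# relay-log-recovery is expected to discard it when the slave restarts.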
+$file= $ENV{'FILE_TO_CORRUPT'};
+open(FILE, ">$file") || die "Unable to open $file.";
+truncate(FILE,0);
+print FILE "failure";
+close(FILE);
+EOF
+
+--exec echo "restart" > $MYSQLTEST_VARDIR/tmp/mysqld.2.expect
+SET SESSION debug_dbug="d,crash_before_rotate_relaylog";
+--error 2013
+FLUSH LOGS;
+
+--let $rpl_server_number= 2
+--source include/rpl_reconnect.inc
+
+--echo =====Dumping and comparing tables=======;
+--source include/start_slave.inc
+
+connection master;
+sync_slave_with_master;
+
+let $diff_tables=master:t1,slave:t1;
+source include/diff_tables.inc;
+
+########################################################################################
+# Case 2: Corrupt a master.info
+########################################################################################
+--echo =====Corrupting the master.info=======;
+connection slave;
+--source include/stop_slave.inc
+
+connection master;
+FLUSH LOGS;
+
+insert into t1(a) values(7);
+insert into t1(a) values(8);
+insert into t1(a) values(9);
+
+connection slave;
+let MYSQLD_SLAVE_DATADIR=`select @@datadir`;
+
+--perl
+use strict;
+use warnings;
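+# Copy the previously saved (now stale) master.info over the current one,
+# so the slave restarts with wrong coordinates.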
+my $src= "$ENV{'MYSQLD_SLAVE_DATADIR'}/master.backup";
+my $dst= "$ENV{'MYSQLD_SLAVE_DATADIR'}/master.info";
+open(FILE, "<", $src) or die;
+my @content= <FILE>;
+close FILE;
+open(FILE, ">", $dst) or die;
+binmode FILE;
+print FILE @content;
+close FILE;
+EOF
+
+--exec echo "restart" > $MYSQLTEST_VARDIR/tmp/mysqld.2.expect
+SET SESSION debug_dbug="d,crash_before_rotate_relaylog";
+--error 2013
+FLUSH LOGS;
+
+--let $rpl_server_number= 2
+--source include/rpl_reconnect.inc
+
+--echo =====Dumping and comparing tables=======;
+--source include/start_slave.inc
+
+connection master;
+sync_slave_with_master;
+
+let $diff_tables=master:t1,slave:t1;
+source include/diff_tables.inc;
+
+########################################################################################
+# Clean up
+########################################################################################
+--echo =====Clean up=======;
+connection master;
+drop table t1;
+
+--remove_file $MYSQLD_SLAVE_DATADIR/master.backup
+--source include/rpl_end.inc
+
diff --git a/mysql-test/extra/rpl_tests/rpl_temporal_format_default_to_default.inc b/mysql-test/extra/rpl_tests/rpl_temporal_format_default_to_default.inc
new file mode 100644
index 00000000000..6728ff55d6f
--- /dev/null
+++ b/mysql-test/extra/rpl_tests/rpl_temporal_format_default_to_default.inc
@@ -0,0 +1,82 @@
+#
+# This include file is used by more than one test suite
+# (currently rpl and binlog_encryption).
+# Please check all dependent tests after modifying it
+#
+
+--source include/master-slave.inc
+
+if ($force_master_mysql56_temporal_format)
+{
+ connection master;
+ eval SET @@global.mysql56_temporal_format=$force_master_mysql56_temporal_format;
+}
+
+if ($force_slave_mysql56_temporal_format)
+{
+ connection slave;
+ eval SET @@global.mysql56_temporal_format=$force_slave_mysql56_temporal_format;
+}
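+# A caller would typically set these variables before sourcing this file;
+# a sketch of a hypothetical wrapper test:
+#   --let $force_master_mysql56_temporal_format=true
+#   --let $force_slave_mysql56_temporal_format=false
+#   --source extra/rpl_tests/rpl_temporal_format_default_to_default.inc
+# When a variable is left unset, the corresponding server keeps its current setting.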
+
+connection master;
+SELECT @@global.mysql56_temporal_format AS on_master;
+connection slave;
+SELECT @@global.mysql56_temporal_format AS on_slave;
+connection master;
+
+CREATE TABLE t1
+(
+ c0 TIME(0),
+ c1 TIME(1),
+ c2 TIME(2),
+ c3 TIME(3),
+ c4 TIME(4),
+ c5 TIME(5),
+ c6 TIME(6)
+);
+CREATE TABLE t2
+(
+ c0 TIMESTAMP(0),
+ c1 TIMESTAMP(1),
+ c2 TIMESTAMP(2),
+ c3 TIMESTAMP(3),
+ c4 TIMESTAMP(4),
+ c5 TIMESTAMP(5),
+ c6 TIMESTAMP(6)
+);
+
+CREATE TABLE t3
+(
+ c0 DATETIME(0),
+ c1 DATETIME(1),
+ c2 DATETIME(2),
+ c3 DATETIME(3),
+ c4 DATETIME(4),
+ c5 DATETIME(5),
+ c6 DATETIME(6)
+);
+INSERT INTO t1 VALUES ('01:01:01','01:01:01.1','01:01:01.11','01:01:01.111','01:01:01.1111','01:01:01.11111','01:01:01.111111');
+INSERT INTO t2 VALUES ('2001-01-01 01:01:01','2001-01-01 01:01:01.1','2001-01-01 01:01:01.11','2001-01-01 01:01:01.111','2001-01-01 01:01:01.1111','2001-01-01 01:01:01.11111','2001-01-01 01:01:01.111111');
+INSERT INTO t3 VALUES ('2001-01-01 01:01:01','2001-01-01 01:01:01.1','2001-01-01 01:01:01.11','2001-01-01 01:01:01.111','2001-01-01 01:01:01.1111','2001-01-01 01:01:01.11111','2001-01-01 01:01:01.111111');
+SELECT TABLE_NAME, TABLE_ROWS, AVG_ROW_LENGTH,DATA_LENGTH FROM INFORMATION_SCHEMA.TABLES
+WHERE TABLE_NAME RLIKE 't[1-3]' ORDER BY TABLE_NAME;
+sync_slave_with_master;
+
+connection slave;
+--query_vertical SELECT * FROM t1;
+--query_vertical SELECT * FROM t2;
+--query_vertical SELECT * FROM t3;
+SELECT TABLE_NAME, TABLE_ROWS, AVG_ROW_LENGTH,DATA_LENGTH FROM INFORMATION_SCHEMA.TABLES
+WHERE TABLE_NAME RLIKE 't[1-3]' ORDER BY TABLE_NAME;
+
+connection master;
+DROP TABLE t1;
+DROP TABLE t2;
+DROP TABLE t3;
+
+connection slave;
+SET @@global.mysql56_temporal_format=DEFAULT;
+connection master;
+SET @@global.mysql56_temporal_format=DEFAULT;
+
+--source include/rpl_end.inc
diff --git a/mysql-test/extra/rpl_tests/rpl_typeconv.inc b/mysql-test/extra/rpl_tests/rpl_typeconv.inc
new file mode 100644
index 00000000000..0f078854ec2
--- /dev/null
+++ b/mysql-test/extra/rpl_tests/rpl_typeconv.inc
@@ -0,0 +1,78 @@
+#
+# This include file is used by more than one test suite
+# (currently rpl and binlog_encryption).
+# Please check all dependent tests after modifying it
+#
+
+--source include/have_binlog_format_row.inc
+--source include/master-slave.inc
+
+connection slave;
+set @saved_slave_type_conversions = @@global.slave_type_conversions;
+CREATE TABLE type_conversions (
+ TestNo INT AUTO_INCREMENT PRIMARY KEY,
+ Source TEXT,
+ Target TEXT,
+ Flags TEXT,
+ On_Master TEXT,
+ On_Slave TEXT,
+ Expected TEXT,
+ Compare INT,
+ Error TEXT);
+
+SELECT @@global.slave_type_conversions;
+SET GLOBAL SLAVE_TYPE_CONVERSIONS='';
+SELECT @@global.slave_type_conversions;
+SET GLOBAL SLAVE_TYPE_CONVERSIONS='ALL_NON_LOSSY';
+SELECT @@global.slave_type_conversions;
+SET GLOBAL SLAVE_TYPE_CONVERSIONS='ALL_LOSSY';
+SELECT @@global.slave_type_conversions;
+SET GLOBAL SLAVE_TYPE_CONVERSIONS='ALL_LOSSY,ALL_NON_LOSSY';
+SELECT @@global.slave_type_conversions;
+--error ER_WRONG_VALUE_FOR_VAR
+SET GLOBAL SLAVE_TYPE_CONVERSIONS='ALL_LOSSY,ALL_NON_LOSSY,NONEXISTING_BIT';
+SELECT @@global.slave_type_conversions;
+
+# Checking strict interpretation of type conversions
+connection slave;
+SET GLOBAL SLAVE_TYPE_CONVERSIONS='';
+source extra/rpl_tests/type_conversions.test;
+
+# Checking non-lossy integer type conversions
+connection slave;
+SET GLOBAL SLAVE_TYPE_CONVERSIONS='ALL_NON_LOSSY';
+source extra/rpl_tests/type_conversions.test;
+
+# Checking lossy integer type conversions
+connection slave;
+SET GLOBAL SLAVE_TYPE_CONVERSIONS='ALL_LOSSY';
+source extra/rpl_tests/type_conversions.test;
+
+# Checking all type conversions
+connection slave;
+SET GLOBAL SLAVE_TYPE_CONVERSIONS='ALL_LOSSY,ALL_NON_LOSSY';
+source extra/rpl_tests/type_conversions.test;
+
+connection slave;
+--echo **** Result of conversions ****
+disable_query_log;
+SELECT RPAD(Source, 15, ' ') AS Source_Type,
+ RPAD(Target, 15, ' ') AS Target_Type,
+ RPAD(Flags, 25, ' ') AS All_Type_Conversion_Flags,
+ IF(Compare IS NULL AND Error IS NOT NULL, '<Correct error>',
+ IF(Compare, '<Correct value>',
+ CONCAT("'", On_Slave, "' != '", Expected, "'")))
+ AS Value_On_Slave
+ FROM type_conversions;
+enable_query_log;
+DROP TABLE type_conversions;
+
+call mtr.add_suppression("Slave SQL.*Column 1 of table .test.t1. cannot be converted from type.* error.* 1677");
+
+connection master;
+DROP TABLE t1;
+sync_slave_with_master;
+
+set global slave_type_conversions = @saved_slave_type_conversions;
+
+--source include/rpl_end.inc