author    Michael Widenius <monty@mariadb.org>  2018-03-09 14:05:35 +0200
committer Monty <monty@mariadb.org>             2018-03-29 13:59:44 +0300
commit    a7abddeffa6a760ce948c2dfb007cdf3f1a369d5 (patch)
tree      70eb743fa965a17380bbc0ac88ae79ca1075b896 /mysql-test/r/lock_sync.result
parent    ab1941266c59a19703a74b5593cf3f508a5752d7 (diff)
download  mariadb-git-a7abddeffa6a760ce948c2dfb007cdf3f1a369d5.tar.gz
Create 'main' test directory and move 't' and 'r' there
Diffstat (limited to 'mysql-test/r/lock_sync.result')
-rw-r--r--  mysql-test/r/lock_sync.result  857
1 file changed, 0 insertions, 857 deletions
diff --git a/mysql-test/r/lock_sync.result b/mysql-test/r/lock_sync.result
deleted file mode 100644
index 7b61c5994b6..00000000000
--- a/mysql-test/r/lock_sync.result
+++ /dev/null
@@ -1,857 +0,0 @@
-#
-# Test how we handle locking in various cases when
-# we read data from MyISAM tables.
-#
-# In this test we mostly check that the SQL-layer correctly
-# determines the type of thr_lock.c lock for a table being
-# read.
-# I.e. that it disallows concurrent inserts when the statement
-# is going to be written to the binary log and therefore
-# should be serialized, and allows concurrent inserts when
-# such serialization is not necessary (e.g. when
-# the statement is not written to the binary log).
-#
-# Force concurrent inserts to be performed even if the table
-# has gaps. This allows us to simplify cleanup in the scripts
-# used below (instead of backing up the table being inserted
-# into and then restoring it from backup at the end of the
-# script, we can simply delete the rows which were inserted).
-set @old_concurrent_insert= @@global.concurrent_insert;
-set @@global.concurrent_insert= 2;
-select @@global.concurrent_insert;
-@@global.concurrent_insert
-ALWAYS
-# Prepare playground by creating tables, views,
-# routines and triggers used in tests.
-connect con1, localhost, root,,;
-connect con2, localhost, root,,;
-connection default;
-drop table if exists t0, t1, t2, t3, t4, t5;
-drop view if exists v1, v2;
-drop procedure if exists p1;
-drop procedure if exists p2;
-drop procedure if exists p3;
-drop function if exists f1;
-drop function if exists f2;
-drop function if exists f3;
-drop function if exists f4;
-drop function if exists f5;
-drop function if exists f6;
-drop function if exists f7;
-drop function if exists f8;
-drop function if exists f9;
-drop function if exists f10;
-drop function if exists f11;
-drop function if exists f12;
-drop function if exists f13;
-drop function if exists f14;
-drop function if exists f15;
-drop function if exists f16;
-drop function if exists f17;
-create table t1 (i int primary key);
-insert into t1 values (1), (2), (3), (4), (5);
-create table t2 (j int primary key);
-insert into t2 values (1), (2), (3), (4), (5);
-create table t3 (k int primary key);
-insert into t3 values (1), (2), (3);
-create table t4 (l int primary key);
-insert into t4 values (1);
-create table t5 (l int primary key);
-insert into t5 values (1);
-create view v1 as select i from t1;
-create view v2 as select j from t2 where j in (select i from t1);
-create procedure p1(k int) insert into t2 values (k);
-create function f1() returns int
-begin
-declare j int;
-select i from t1 where i = 1 into j;
-return j;
-end|
-create function f2() returns int
-begin
-declare k int;
-select i from t1 where i = 1 into k;
-insert into t2 values (k + 5);
-return 0;
-end|
-create function f3() returns int
-begin
-return (select i from t1 where i = 3);
-end|
-create function f4() returns int
-begin
-if (select i from t1 where i = 3) then
-return 1;
-else
-return 0;
-end if;
-end|
-create function f5() returns int
-begin
-insert into t2 values ((select i from t1 where i = 1) + 5);
-return 0;
-end|
-create function f6() returns int
-begin
-declare k int;
-select i from v1 where i = 1 into k;
-return k;
-end|
-create function f7() returns int
-begin
-declare k int;
-select j from v2 where j = 1 into k;
-return k;
-end|
-create function f8() returns int
-begin
-declare k int;
-select i from v1 where i = 1 into k;
-insert into t2 values (k+5);
-return k;
-end|
-create function f9() returns int
-begin
-update v2 set j=j+10 where j=1;
-return 1;
-end|
-create function f10() returns int
-begin
-return f1();
-end|
-create function f11() returns int
-begin
-declare k int;
-set k= f1();
-insert into t2 values (k+5);
-return k;
-end|
-create function f12(p int) returns int
-begin
-insert into t2 values (p);
-return p;
-end|
-create function f13(p int) returns int
-begin
-return p;
-end|
-create procedure p2(inout p int)
-begin
-select i from t1 where i = 1 into p;
-end|
-create function f14() returns int
-begin
-declare k int;
-call p2(k);
-insert into t2 values (k+5);
-return k;
-end|
-create function f15() returns int
-begin
-declare k int;
-call p2(k);
-return k;
-end|
-create function f16() returns int
-begin
-create temporary table if not exists temp1 (a int);
-insert into temp1 select * from t1;
-drop temporary table temp1;
-return 1;
-end|
-create function f17() returns int
-begin
-declare j int;
-select i from t1 where i = 1 into j;
-call p3;
-return 1;
-end|
-create procedure p3()
-begin
-create temporary table if not exists temp1 (a int);
-insert into temp1 select * from t1;
-drop temporary table temp1;
-end|
-create trigger t4_bi before insert on t4 for each row
-begin
-declare k int;
-select i from t1 where i=1 into k;
-set new.l= k+1;
-end|
-create trigger t4_bu before update on t4 for each row
-begin
-if (select i from t1 where i=1) then
-set new.l= 2;
-end if;
-end|
-create trigger t4_bd before delete on t4 for each row
-begin
-if !(select i from v1 where i=1) then
-signal sqlstate '45000';
-end if;
-end|
-create trigger t5_bi before insert on t5 for each row
-begin
-set new.l= f1()+1;
-end|
-create trigger t5_bu before update on t5 for each row
-begin
-declare j int;
-call p2(j);
-set new.l= j + 1;
-end|
-#
-# Set common variables to be used by the scripts
-# called below.
-#
-connection con1;
-# Cache all functions used in the tests below so that statements
-# calling them won't need to open and lock the mysql.proc table
-# and we can assume that each statement locks its tables
-# once during its execution.
-show create procedure p1;
-show create procedure p2;
-show create procedure p3;
-show create function f1;
-show create function f2;
-show create function f3;
-show create function f4;
-show create function f5;
-show create function f6;
-show create function f7;
-show create function f8;
-show create function f9;
-show create function f10;
-show create function f11;
-show create function f12;
-show create function f13;
-show create function f14;
-show create function f15;
-show create function f16;
-show create function f17;
-connection default;
-#
-# 1. Statements that read tables and do not use subqueries.
-#
-#
-# 1.1 Simple SELECT statement.
-#
-# No locks are necessary as this statement won't be written
-# to the binary log and, thanks to how MyISAM works, SELECT
-# will see the version of the table prior to the concurrent insert.
-connection default;
-Success: 'select * from t1' allows concurrent inserts into 't1'.
-#
-# 1.2 Multi-UPDATE statement.
-#
-# Has to take shared locks on rows in the table being read as this
-# statement will be written to the binary log and therefore should
-# be serialized with concurrent statements.
-connection default;
-Success: 'update t2, t1 set j= j - 1 where i = j' doesn't allow concurrent inserts into 't1'.
-#
-# 1.3 Multi-DELETE statement.
-#
-# The above is true for this statement as well.
-connection default;
-Success: 'delete t2 from t1, t2 where i = j' doesn't allow concurrent inserts into 't1'.
-#
-# 1.4 DESCRIBE statement.
-#
-# This statement does not really read data from the
-# target table and thus does not take any lock on it.
-# We check this for completeness of coverage.
-lock table t1 write;
-connection con1;
-# This statement should not be blocked.
-describe t1;
-connection default;
-unlock tables;
-#
-# 1.5 SHOW statements.
-#
-# The above is true for SHOW statements as well.
-lock table t1 write;
-connection con1;
-# These statements should not be blocked.
-show keys from t1;
-connection default;
-unlock tables;
-#
-# 2. Statements which read tables through subqueries.
-#
-#
-# 2.1 CALL with a subquery.
-#
-# A strong lock is not necessary as this statement is not
-# written to the binary log as a whole (it is written
-# statement-by-statement).
-connection default;
-Success: 'call p1((select i + 5 from t1 where i = 1))' allows concurrent inserts into 't1'.
-#
-# 2.2 CREATE TABLE with a subquery.
-#
-# Has to take a strong lock on the table being read as
-# this statement is written to the binary log and therefore
-# should be serialized with concurrent statements.
-connection default;
-Success: 'create table t0 select * from t1' doesn't allow concurrent inserts into 't1'.
-drop table t0;
-connection default;
-Success: 'create table t0 select j from t2 where j in (select i from t1)' doesn't allow concurrent inserts into 't1'.
-drop table t0;
-#
-# 2.3 DELETE with a subquery.
-#
-# The above is true for this statement as well.
-connection default;
-Success: 'delete from t2 where j in (select i from t1)' doesn't allow concurrent inserts into 't1'.
-#
-# 2.4 MULTI-DELETE with a subquery.
-#
-# The same is true for this statement as well.
-connection default;
-Success: 'delete t2 from t3, t2 where k = j and j in (select i from t1)' doesn't allow concurrent inserts into 't1'.
-#
-# 2.5 DO with a subquery.
-#
-# A strong lock is not necessary as it is not logged.
-connection default;
-Success: 'do (select i from t1 where i = 1)' allows concurrent inserts into 't1'.
-#
-# 2.6 INSERT with a subquery.
-#
-# Has to take a strong lock on the table being read as
-# this statement is written to the binary log and therefore
-# should be serialized with concurrent inserts.
-connection default;
-Success: 'insert into t2 select i+5 from t1' doesn't allow concurrent inserts into 't1'.
-connection default;
-Success: 'insert into t2 values ((select i+5 from t1 where i = 4))' doesn't allow concurrent inserts into 't1'.
-#
-# 2.7 LOAD DATA with a subquery.
-#
-# The above is true for this statement as well.
-connection default;
-Success: 'load data infile '../../std_data/rpl_loaddata.dat' into table t2 (@a, @b) set j= @b + (select i from t1 where i = 1)' doesn't allow concurrent inserts into 't1'.
-#
-# 2.8 REPLACE with a subquery.
-#
-# The same is true for this statement as well.
-# Suppress warnings for REPLACE ... SELECT
-connection default;
-Success: 'replace into t2 select i+5 from t1' doesn't allow concurrent inserts into 't1'.
-connection default;
-Success: 'replace into t2 values ((select i+5 from t1 where i = 4))' doesn't allow concurrent inserts into 't1'.
-#
-# 2.9 SELECT with a subquery.
-#
-# Strong locks are not necessary as this statement is not written
-# to the binary log and thanks to how MyISAM works this statement
-# sees a version of the table prior to the concurrent insert.
-connection default;
-Success: 'select * from t2 where j in (select i from t1)' allows concurrent inserts into 't1'.
-#
-# 2.10 SET with a subquery.
-#
-# The same is true for this statement as well.
-connection default;
-Success: 'set @a:= (select i from t1 where i = 1)' allows concurrent inserts into 't1'.
-#
-# 2.11 SHOW with a subquery.
-#
-# And for this statement too.
-connection default;
-Success: 'show tables from test where Tables_in_test = 't2' and (select i from t1 where i = 1)' allows concurrent inserts into 't1'.
-connection default;
-Success: 'show columns from t2 where (select i from t1 where i = 1)' allows concurrent inserts into 't1'.
-#
-# 2.12 UPDATE with a subquery.
-#
-# Has to take a strong lock on the table being read as
-# this statement is written to the binary log and therefore
-# should be serialized with concurrent inserts.
-connection default;
-Success: 'update t2 set j= j-10 where j in (select i from t1)' doesn't allow concurrent inserts into 't1'.
-#
-# 2.13 MULTI-UPDATE with a subquery.
-#
-# The same is true for this statement as well.
-connection default;
-Success: 'update t2, t3 set j= j -10 where j=k and j in (select i from t1)' doesn't allow concurrent inserts into 't1'.
-#
-# 3. Statements which read tables through a view.
-#
-#
-# 3.1 SELECT statement which uses some table through a view.
-#
-# Since this statement is not written to the binary log and
-# an old version of the table is accessible thanks to how MyISAM
-# handles concurrent insert, no locking is necessary.
-connection default;
-Success: 'select * from v1' allows concurrent inserts into 't1'.
-connection default;
-Success: 'select * from v2' allows concurrent inserts into 't1'.
-connection default;
-Success: 'select * from t2 where j in (select i from v1)' allows concurrent inserts into 't1'.
-connection default;
-Success: 'select * from t3 where k in (select j from v2)' allows concurrent inserts into 't1'.
-#
-# 3.2 Statements which modify a table and use views.
-#
-# Since such statements are going to be written to the binary
-# log they need to be serialized against concurrent statements
-# and therefore should take strong locks on the data read.
-connection default;
-Success: 'update t2 set j= j-10 where j in (select i from v1)' doesn't allow concurrent inserts into 't1'.
-connection default;
-Success: 'update t3 set k= k-10 where k in (select j from v2)' doesn't allow concurrent inserts into 't1'.
-connection default;
-Success: 'update t2, v1 set j= j-10 where j = i' doesn't allow concurrent inserts into 't1'.
-connection default;
-Success: 'update v2 set j= j-10 where j = 3' doesn't allow concurrent inserts into 't1'.
-#
-# 4. Statements which read tables through stored functions.
-#
-#
-# 4.1 SELECT/SET with a stored function which does not
-# modify data and uses SELECT in its turn.
-#
-# There is no need to take strong locks on the table
-# being selected from in the stored function, as a call to
-# such a function won't get into the binary log.
-connection default;
-Success: 'select f1()' allows concurrent inserts into 't1'.
-connection default;
-Success: 'set @a:= f1()' allows concurrent inserts into 't1'.
-#
-# 4.2 INSERT (or other statement which modifies data) with
-# a stored function which does not modify data and uses
-# SELECT.
-#
-# Since such a statement is written to the binary log, it should
-# be serialized with concurrent statements affecting the data
-# it uses. Therefore it should take a strong lock on the data
-# it reads.
-connection default;
-Success: 'insert into t2 values (f1() + 5)' doesn't allow concurrent inserts into 't1'.
-#
-# 4.3 SELECT/SET with a stored function which
-# reads and modifies data.
-#
-# Since a call to such a function is written to the binary log,
-# it should be serialized with concurrent statements affecting
-# the data it uses. Hence, a strong lock on the data read
-# should be taken.
-connection default;
-Success: 'select f2()' doesn't allow concurrent inserts into 't1'.
-connection default;
-Success: 'set @a:= f2()' doesn't allow concurrent inserts into 't1'.
-#
-# 4.4. SELECT/SET with a stored function which does not
-# modify data and reads a table through subselect
-# in a control construct.
-#
-# A call to this function won't get into the
-# binary log and thus no strong lock is needed.
-connection default;
-Success: 'select f3()' allows concurrent inserts into 't1'.
-connection default;
-Success: 'set @a:= f3()' allows concurrent inserts into 't1'.
-connection default;
-Success: 'select f4()' allows concurrent inserts into 't1'.
-connection default;
-Success: 'set @a:= f4()' allows concurrent inserts into 't1'.
-#
-# 4.5. INSERT (or other statement which modifies data) with
-# a stored function which does not modify data and reads
-# the table through a subselect in one of its control
-# constructs.
-#
-# Since such a statement is written to the binary log, it should
-# be serialized with concurrent statements affecting the data
-# it uses. Therefore it should take a strong lock on the data
-# it reads.
-connection default;
-Success: 'insert into t2 values (f3() + 5)' doesn't allow concurrent inserts into 't1'.
-connection default;
-Success: 'insert into t2 values (f4() + 6)' doesn't allow concurrent inserts into 't1'.
-#
-# 4.6 SELECT/SET which uses a stored function with
-# DML which reads a table via a subquery.
-#
-# Since a call to such a function is written to the binary log,
-# it should be serialized with concurrent statements.
-# Hence reads should take a strong lock.
-connection default;
-Success: 'select f5()' doesn't allow concurrent inserts into 't1'.
-connection default;
-Success: 'set @a:= f5()' doesn't allow concurrent inserts into 't1'.
-#
-# 4.7 SELECT/SET which uses a stored function which
-# doesn't modify data and reads tables through
-# a view.
-#
-# Calls to such functions won't get into
-# the binary log and thus don't need strong
-# locks.
-connection default;
-Success: 'select f6()' allows concurrent inserts into 't1'.
-connection default;
-Success: 'set @a:= f6()' allows concurrent inserts into 't1'.
-connection default;
-Success: 'select f7()' allows concurrent inserts into 't1'.
-connection default;
-Success: 'set @a:= f7()' allows concurrent inserts into 't1'.
-#
-# 4.8 INSERT which uses a stored function which
-# doesn't modify data and reads a table
-# through a view.
-#
-# Such a statement is written to the binary log and
-# should be serialized with concurrent statements affecting
-# the data it uses. Therefore it should take a strong lock on
-# the table it reads.
-connection default;
-Success: 'insert into t3 values (f6() + 5)' doesn't allow concurrent inserts into 't1'.
-connection default;
-Success: 'insert into t3 values (f7() + 5)' doesn't allow concurrent inserts into 't1'.
-#
-# 4.9 SELECT which uses a stored function which
-# modifies data and reads tables through a view.
-#
-# Since a call to such a function is written to the binary log,
-# it should be serialized with concurrent statements.
-# Hence, reads should take strong locks.
-connection default;
-Success: 'select f8()' doesn't allow concurrent inserts into 't1'.
-connection default;
-Success: 'select f9()' doesn't allow concurrent inserts into 't1'.
-#
-# 4.10 SELECT which uses a stored function which doesn't modify
-# data and reads a table indirectly, by calling another
-# function.
-#
-# Calls to such functions won't get into the binary
-# log and thus don't need to acquire strong locks.
-connection default;
-Success: 'select f10()' allows concurrent inserts into 't1'.
-#
-# 4.11 INSERT which uses a stored function which doesn't modify
-# data and reads a table indirectly, by calling another
-# function.
-#
-# Since such a statement is written to the binary log, it should
-# be serialized with concurrent statements affecting the data it
-# uses. Therefore it should take strong locks on the data it reads.
-connection default;
-Success: 'insert into t2 values (f10() + 5)' doesn't allow concurrent inserts into 't1'.
-#
-# 4.12 SELECT which uses a stored function which modifies
-# data and reads a table indirectly, by calling another
-# function.
-#
-# Since a call to such a function is written to the binary log,
-# it should be serialized with concurrent statements.
-# Hence, reads should take a strong lock.
-connection default;
-Success: 'select f11()' doesn't allow concurrent inserts into 't1'.
-#
-# 4.13 SELECT that reads a table through a subquery passed
-# as a parameter to a stored function which modifies
-# data.
-#
-# Even though a call to this function is written to the
-# binary log, the values of its parameters are written as
-# literals, so there is no need to acquire strong locks for
-# the tables used in the subquery.
-connection default;
-Success: 'select f12((select i+10 from t1 where i=1))' allows concurrent inserts into 't1'.
-#
-# 4.14 INSERT that reads a table via a subquery passed
-# as a parameter to a stored function which doesn't
-# modify data.
-#
-# Since this statement is written to the binary log it should
-# be serialized with concurrent statements affecting the data it
-# uses. Therefore it should take strong locks on the data it reads.
-connection default;
-Success: 'insert into t2 values (f13((select i+10 from t1 where i=1)))' doesn't allow concurrent inserts into 't1'.
-#
-# 4.15 SELECT/SET with a stored function which
-# inserts data into a temporary table using
-# SELECT on t1.
-#
-# Since this statement is written to the binary log it should
-# be serialized with concurrent statements affecting the data it
-# uses. Therefore it should take strong locks on the data it reads.
-connection default;
-Success: 'select f16()' doesn't allow concurrent inserts into 't1'.
-connection default;
-Success: 'set @a:= f16()' doesn't allow concurrent inserts into 't1'.
-#
-# 4.16 SELECT/SET with a stored function which calls a procedure
-# which inserts data into a temporary table using
-# SELECT on t1.
-#
-# Since this statement is written to the binary log it should
-# be serialized with concurrent statements affecting the data it
-# uses. Therefore it should take strong locks on the data it reads.
-connection default;
-Success: 'select f17()' doesn't allow concurrent inserts into 't1'.
-connection default;
-Success: 'set @a:= f17()' doesn't allow concurrent inserts into 't1'.
-#
-# 5. Statements that read tables through stored procedures.
-#
-#
-# 5.1 CALL statement which reads a table via SELECT.
-#
-# Since neither this statement nor its components are
-# written to the binary log, there is no need to take
-# strong locks on the data it reads.
-connection default;
-Success: 'call p2(@a)' allows concurrent inserts into 't1'.
-#
-# 5.2 Function that modifies data and uses a CALL statement
-# which reads a table through SELECT.
-#
-# Since a call to such a function is written to the binary
-# log, it should be serialized with concurrent statements.
-# Hence, in this case reads should take strong locks on the data.
-connection default;
-Success: 'select f14()' doesn't allow concurrent inserts into 't1'.
-#
-# 5.3 SELECT that calls a function that doesn't modify data and
-# uses a CALL statement that reads a table via SELECT.
-#
-# Calls to such functions won't get into the binary
-# log and thus don't need to acquire strong locks.
-connection default;
-Success: 'select f15()' allows concurrent inserts into 't1'.
-#
-# 5.4 INSERT which calls a function which doesn't modify data and
-# uses a CALL statement which reads a table through SELECT.
-#
-# Since such a statement is written to the binary log, it should
-# be serialized with concurrent statements affecting the data
-# it uses. Therefore it should take strong locks on the data it reads.
-connection default;
-Success: 'insert into t2 values (f15()+5)' doesn't allow concurrent inserts into 't1'.
-#
-# 6. Statements that use triggers.
-#
-#
-# 6.1 Statement invoking a trigger that reads a table via SELECT.
-#
-# Since this statement is written to the binary log it should
-# be serialized with concurrent statements affecting the data
-# it uses. Therefore, it should take strong locks on the data
-# it reads.
-connection default;
-Success: 'insert into t4 values (2)' doesn't allow concurrent inserts into 't1'.
-#
-# 6.2 Statement invoking a trigger that reads a table through
-# a subquery in a control construct.
-#
-# The above is true for this statement as well.
-connection default;
-Success: 'update t4 set l= 2 where l = 1' doesn't allow concurrent inserts into 't1'.
-#
-# 6.3 Statement invoking a trigger that reads a table through
-# a view.
-#
-# And for this statement.
-connection default;
-Success: 'delete from t4 where l = 1' doesn't allow concurrent inserts into 't1'.
-#
-# 6.4 Statement invoking a trigger that reads a table through
-# a stored function.
-#
-# And for this statement.
-connection default;
-Success: 'insert into t5 values (2)' doesn't allow concurrent inserts into 't1'.
-#
-# 6.5 Statement invoking a trigger that reads a table through
-# a stored procedure.
-#
-# And for this statement.
-connection default;
-Success: 'update t5 set l= 2 where l = 1' doesn't allow concurrent inserts into 't1'.
-# Clean-up.
-drop function f1;
-drop function f2;
-drop function f3;
-drop function f4;
-drop function f5;
-drop function f6;
-drop function f7;
-drop function f8;
-drop function f9;
-drop function f10;
-drop function f11;
-drop function f12;
-drop function f13;
-drop function f14;
-drop function f15;
-drop function f16;
-drop function f17;
-drop view v1, v2;
-drop procedure p1;
-drop procedure p2;
-drop procedure p3;
-drop table t1, t2, t3, t4, t5;
-disconnect con1;
-disconnect con2;
-set @@global.concurrent_insert= @old_concurrent_insert;
-#
-# Bug#50821 Deadlock between LOCK TABLES and ALTER TABLE
-#
-DROP TABLE IF EXISTS t1, t2;
-CREATE TABLE t1(id INT);
-CREATE TABLE t2(id INT);
-connect con2, localhost, root;
-START TRANSACTION;
-SELECT * FROM t1;
-id
-connection default;
-# Sending:
-ALTER TABLE t1 ADD COLUMN j INT;
-connection con2;
-# This used to cause a deadlock.
-INSERT INTO t2 SELECT * FROM t1;
-COMMIT;
-connection default;
-# Reaping ALTER TABLE t1 ADD COLUMN j INT
-DROP TABLE t1, t2;
-disconnect con2;
-#
-# Bug#51391 Deadlock involving events during rqg_info_schema test
-#
-CREATE EVENT e1 ON SCHEDULE EVERY 5 HOUR DO SELECT 1;
-CREATE EVENT e2 ON SCHEDULE EVERY 5 HOUR DO SELECT 2;
-connect con1, localhost, root;
-SET DEBUG_SYNC="before_lock_tables_takes_lock SIGNAL drop WAIT_FOR query";
-# Sending:
-DROP EVENT e1;;
-connection default;
-SET DEBUG_SYNC="now WAIT_FOR drop";
-SELECT name FROM mysql.event, INFORMATION_SCHEMA.GLOBAL_VARIABLES
-WHERE definer = VARIABLE_VALUE;
-name
-SET DEBUG_SYNC="now SIGNAL query";
-connection con1;
-# Reaping: DROP EVENT e1
-disconnect con1;
-connection default;
-DROP EVENT e2;
-SET DEBUG_SYNC="RESET";
-#
-# Bug#57130 crash in Item_field::print during SHOW CREATE TABLE or VIEW
-#
-DROP TABLE IF EXISTS t1;
-DROP VIEW IF EXISTS v1;
-DROP FUNCTION IF EXISTS f1;
-CREATE TABLE t1(a INT);
-CREATE FUNCTION f1() RETURNS INTEGER RETURN 1;
-CREATE VIEW v1 AS SELECT * FROM t1 WHERE f1() = 1;
-DROP FUNCTION f1;
-connect con2, localhost, root;
-connect con1, localhost, root;
-SET DEBUG_SYNC= 'open_tables_after_open_and_process_table SIGNAL opened WAIT_FOR dropped EXECUTE 2';
-# Sending:
-SHOW CREATE VIEW v1;
-connection con2;
-SET DEBUG_SYNC= 'now WAIT_FOR opened';
-SET DEBUG_SYNC= 'now SIGNAL dropped';
-SET DEBUG_SYNC= 'now WAIT_FOR opened';
-# Sending:
-FLUSH TABLES;
-connection default;
-# Waiting for FLUSH TABLES to be blocked.
-SET DEBUG_SYNC= 'now SIGNAL dropped';
-connection con1;
-# Reaping: SHOW CREATE VIEW v1
-View Create View character_set_client collation_connection
-v1 CREATE ALGORITHM=UNDEFINED DEFINER=`root`@`localhost` SQL SECURITY DEFINER VIEW `v1` AS select `t1`.`a` AS `a` from `t1` where `f1`() = 1 latin1 latin1_swedish_ci
-Warnings:
-Warning 1356 View 'test.v1' references invalid table(s) or column(s) or function(s) or definer/invoker of view lack rights to use them
-connection con2;
-# Reaping: FLUSH TABLES
-connection default;
-SET DEBUG_SYNC= 'RESET';
-DROP VIEW v1;
-DROP TABLE t1;
-disconnect con1;
-disconnect con2;
-#
-# Bug#28587 SELECT is blocked by INSERT waiting on read lock, even with low_priority_updates
-#
-set low_priority_updates=1;
-drop table if exists t1;
-drop table if exists t2;
-set debug_sync='RESET';
-create table t1 (a int, b int, unique key t1$a (a));
-create table t2 (j int, k int);
-set debug_sync='after_lock_tables_takes_lock SIGNAL parked WAIT_FOR go';
-# Sending:
-insert into t2 select * from t1;;
-connect update,localhost,root,,;
-connection update;
-set debug_sync='now WAIT_FOR parked';
-set low_priority_updates=1;
-show variables like 'low_priority_updates';
-Variable_name Value
-low_priority_updates ON
-insert into t1 values (1, 2) ON DUPLICATE KEY UPDATE b = 2;;
-connect select,localhost,root,,;
-select * from t1;
-a b
-set debug_sync='now SIGNAL go';
-connection default;
-disconnect update;
-disconnect select;
-# Reaping INSERT SELECT
-drop tables t1, t2;
-set low_priority_updates=default;
-set debug_sync='RESET';
-#
-# Additional test coverage for LOCK TABLES ... READ LOCAL
-# for InnoDB tables.
-#
-# Check that we correctly handle deadlocks which can occur
-# during metadata lock upgrade which happens when one tries
-# to use LOCK TABLES ... READ LOCAL for InnoDB tables.
-CREATE TABLE t1 (i INT) ENGINE=InnoDB;
-CREATE TABLE t2 (j INT) ENGINE=InnoDB;
-# Execute LOCK TABLE ... READ LOCAL which will pause after acquiring
-# the SR metadata lock and before upgrading it to an SRO lock.
-SET DEBUG_SYNC="after_open_table_mdl_shared SIGNAL locked WAIT_FOR go";
-# Sending:
-LOCK TABLE t1 READ LOCAL;
-connect con1, localhost, root;
-SET DEBUG_SYNC="now WAIT_FOR locked";
-# Execute RENAME TABLE which will try to acquire X lock.
-# Sending:
-RENAME TABLE t1 TO t3, t2 TO t1, t3 TO t2;
-connect con2, localhost, root;
-# Wait until RENAME TABLE is blocked.
-# Resume the LOCK TABLE statement. It should try to
-# upgrade the SR lock to an SRO lock, which will create a
-# deadlock due to the presence of the pending X lock.
-# The deadlock should be detected and LOCK TABLES should
-# release its MDL and retry opening the tables.
-SET DEBUG_SYNC="now SIGNAL go";
-connection con1;
-# RENAME TABLE should be able to complete. Reap it.
-connection default;
-# Reap LOCK TABLES.
-# Check that we see the new version of the table.
-SELECT * FROM t1;
-j
-UNLOCK TABLES;
-# Clean-up.
-SET DEBUG_SYNC="RESET";
-disconnect con1;
-disconnect con2;
-DROP TABLES t1, t2;