diff options
author | Konstantin Osipov <kostja@sun.com> | 2009-12-22 19:09:15 +0300 |
---|---|---|
committer | Konstantin Osipov <kostja@sun.com> | 2009-12-22 19:09:15 +0300 |
commit | 39a1a50dfb52578224758ee1b240f1a89f95cc73 (patch) | |
tree | ec3224333b636cc2021f77ab290f43bb14031a09 | |
parent | f032795ccc86509e48b7782b0e40130869ce47b6 (diff) | |
download | mariadb-git-39a1a50dfb52578224758ee1b240f1a89f95cc73.tar.gz |
A prerequisite patch for the fix for Bug#46224
"HANDLER statements within a transaction might lead to deadlocks".
Introduce a notion of a sentinel to MDL_context. A sentinel
is a ticket that separates all tickets in the context into two
groups: before and after it. Currently we can have (and need) only
one designated sentinel -- it separates all locks taken by LOCK
TABLE or HANDLER statement, which must survive COMMIT and ROLLBACK
and all other locks, which must be released at COMMIT or ROLLBACK.
The tricky part is keeping the sentinel up to date when
someone releases its corresponding ticket. This can happen, e.g.
if someone issues DROP TABLE under LOCK TABLES (generally,
see all calls to release_all_locks_for_name()).
MDL_context::release_ticket() is modified to take care of it.
******
A fix and a test case for Bug#46224 "HANDLER statements within a
transaction might lead to deadlocks".
An attempt to mix HANDLER SQL statements, which are transaction-
agnostic, an open multi-statement transaction,
and DDL against the involved tables (in a concurrent connection)
could lead to a deadlock. The deadlock would occur when
HANDLER OPEN or HANDLER READ would have to wait on a conflicting
metadata lock. If the connection that issued HANDLER statement
also had other metadata locks (say, acquired in scope of a
transaction), a classical deadlock situation of mutual wait
could occur.
Incompatible change: entering LOCK TABLES mode automatically
closes all open HANDLERs in the current connection.
Incompatible change: previously an attempt to wait on a lock
in a connection that has an open HANDLER statement could wait
indefinitely/deadlock. After this patch, an error ER_LOCK_DEADLOCK
is produced.
The idea of the fix is to merge thd->handler_mdl_context
with the main mdl_context of the connection, used for transactional
locks. This makes deadlock detection possible, since all waits
for locks are "visible" and available to analysis in a single
MDL context of the connection.
Since HANDLER locks and transactional locks have a different life
cycle -- HANDLERs are explicitly open and closed, and so
are HANDLER locks, explicitly acquired and released, whereas
transactional locks "accumulate" till the end of a transaction
and are released only with COMMIT, ROLLBACK and ROLLBACK TO SAVEPOINT,
a concept of "sentinel" was introduced to MDL_context.
All locks, HANDLER and others, reside in the same linked list.
However, a selected element of the list separates locks with
different life cycle. HANDLER locks always reside at the
end of the list, after the sentinel. Transactional locks are
prepended to the beginning of the list, before the sentinel.
Thus, ROLLBACK, COMMIT or ROLLBACK TO SAVEPOINT, only
release those locks that reside before the sentinel. HANDLER locks
must be released explicitly as part of HANDLER CLOSE statement,
or an implicit close.
The same approach with sentinel
is also employed for LOCK TABLES locks. Since the HANDLER and LOCK TABLES
statements have never worked together, the implementation is
made simple and only maintains one sentinel, which is used either
for HANDLER locks, or for LOCK TABLES locks.
-rw-r--r-- | mysql-test/include/handler.inc | 617 | ||||
-rw-r--r-- | mysql-test/r/handler_innodb.result | 586 | ||||
-rw-r--r-- | mysql-test/r/handler_myisam.result | 585 | ||||
-rw-r--r-- | sql/lock.cc | 17 | ||||
-rw-r--r-- | sql/log_event.cc | 3 | ||||
-rw-r--r-- | sql/mdl.cc | 308 | ||||
-rw-r--r-- | sql/mdl.h | 46 | ||||
-rw-r--r-- | sql/mysql_priv.h | 1 | ||||
-rw-r--r-- | sql/rpl_injector.cc | 3 | ||||
-rw-r--r-- | sql/rpl_rli.cc | 4 | ||||
-rw-r--r-- | sql/set_var.cc | 3 | ||||
-rw-r--r-- | sql/slave.cc | 3 | ||||
-rw-r--r-- | sql/sql_acl.cc | 6 | ||||
-rw-r--r-- | sql/sql_base.cc | 162 | ||||
-rw-r--r-- | sql/sql_class.cc | 17 | ||||
-rw-r--r-- | sql/sql_class.h | 21 | ||||
-rw-r--r-- | sql/sql_handler.cc | 205 | ||||
-rw-r--r-- | sql/sql_insert.cc | 3 | ||||
-rw-r--r-- | sql/sql_parse.cc | 47 | ||||
-rw-r--r-- | sql/sql_plist.h | 40 | ||||
-rw-r--r-- | sql/sql_prepare.cc | 13 | ||||
-rw-r--r-- | sql/sql_servers.cc | 3 | ||||
-rw-r--r-- | sql/sql_table.cc | 13 | ||||
-rw-r--r-- | sql/transaction.cc | 12 |
24 files changed, 2331 insertions, 387 deletions
diff --git a/mysql-test/include/handler.inc b/mysql-test/include/handler.inc index 8ff38c7e7a1..a9965f97926 100644 --- a/mysql-test/include/handler.inc +++ b/mysql-test/include/handler.inc @@ -561,14 +561,29 @@ let $wait_condition= --source include/wait_condition.inc connection default; --echo connection: default +--echo # +--echo # RENAME placed two pending locks and waits. +--echo # When HANDLER t2 OPEN does open_tables(), it calls +--echo # mysql_ha_flush(), which in turn closes the open HANDLER for t1. +--echo # RENAME TABLE gets unblocked. If it gets scheduled quickly +--echo # and manages to complete before open_tables() +--echo # of HANDLER t2 OPEN, open_tables() and therefore the whole +--echo # HANDLER t2 OPEN succeeds. Otherwise open_tables() +--echo # notices a pending or active exclusive metadata lock on t2 +--echo # and the whole HANDLER t2 OPEN fails with ER_LOCK_DEADLOCK +--echo # error. +--echo # +--error 0, ER_LOCK_DEADLOCK handler t2 open; -handler t2 read first; ---error ER_NO_SUCH_TABLE -handler t1 read next; -handler t1 close; +--error 0, ER_UNKNOWN_TABLE handler t2 close; +--echo connection: flush connection flush; reap; +--error ER_UNKNOWN_TABLE +handler t1 read next; +--error ER_UNKNOWN_TABLE +handler t1 close; connection default; drop table t2; connection flush; @@ -748,3 +763,597 @@ USE information_schema; --error ER_WRONG_USAGE HANDLER COLUMNS OPEN; USE test; + +--echo # +--echo # Add test coverage for HANDLER and LOCK TABLES, HANDLER and DDL. +--echo # +--disable_warnings +drop table if exists t1, t2, t3; +--enable_warnings +create table t1 (a int, key a (a)); +insert into t1 (a) values (1), (2), (3), (4), (5); +create table t2 (a int, key a (a)) select * from t1; +create temporary table t3 (a int, key a (a)) select * from t2; +handler t1 open; +handler t2 open; +handler t3 open; +--echo # +--echo # LOCK TABLES implicitly closes all handlers. 
+--echo # +lock table t3 read; +--echo # +--echo # No HANDLER sql is available under lock tables anyway. +--echo # +--error ER_LOCK_OR_ACTIVE_TRANSACTION +handler t1 open; +--error ER_LOCK_OR_ACTIVE_TRANSACTION +handler t1 read next; +--error ER_LOCK_OR_ACTIVE_TRANSACTION +handler t2 close; +--error ER_LOCK_OR_ACTIVE_TRANSACTION +handler t3 open; +--echo # After UNLOCK TABLES no handlers are around, they were +--echo # implicitly closed. +unlock tables; +drop temporary table t3; +--error ER_UNKNOWN_TABLE +handler t1 read next; +--error ER_UNKNOWN_TABLE +handler t2 close; +--error ER_UNKNOWN_TABLE +handler t3 read next; +--echo # +--echo # Other operations also implicitly close handler: +--echo # +--echo # TRUNCATE +--echo # +handler t1 open; +truncate table t1; +--error ER_UNKNOWN_TABLE +handler t1 read next; +handler t1 open; +--echo # +--echo # CREATE TRIGGER +--echo # +create trigger t1_ai after insert on t1 for each row set @a=1; +--error ER_UNKNOWN_TABLE +handler t1 read next; +--echo # +--echo # DROP TRIGGER +--echo # +handler t1 open; +drop trigger t1_ai; +--error ER_UNKNOWN_TABLE +handler t1 read next; +--echo # +--echo # ALTER TABLE +--echo # +handler t1 open; +alter table t1 add column b int; +--error ER_UNKNOWN_TABLE +handler t1 read next; +--echo # +--echo # ANALYZE TABLE +--echo # +handler t1 open; +analyze table t1; +--error ER_UNKNOWN_TABLE +handler t1 read next; +--echo # +--echo # OPTIMIZE TABLE +--echo # +handler t1 open; +optimize table t1; +--error ER_UNKNOWN_TABLE +handler t1 read next; +--echo # +--echo # REPAIR TABLE +--echo # +handler t1 open; +repair table t1; +--error ER_UNKNOWN_TABLE +handler t1 read next; +--echo # +--echo # DROP TABLE, naturally. 
+--echo # +handler t1 open; +drop table t1; +--error ER_UNKNOWN_TABLE +handler t1 read next; +create table t1 (a int, b int, key a (a)) select a from t2; +--echo # +--echo # RENAME TABLE, naturally +--echo # +handler t1 open; +rename table t1 to t3; +--error ER_UNKNOWN_TABLE +handler t1 read next; +--echo # +--echo # CREATE TABLE (even with IF NOT EXISTS clause, +--echo # and the table exists). +--echo # +handler t2 open; +create table if not exists t2 (a int); +--error ER_UNKNOWN_TABLE +handler t2 read next; +rename table t3 to t1; +drop table t2; +--echo # +--echo # FLUSH TABLE doesn't close the table but loses the position +--echo # +handler t1 open; +handler t1 read a prev; +flush table t1; +handler t1 read a prev; +handler t1 close; +--echo # +--echo # FLUSH TABLES WITH READ LOCK behaves like FLUSH TABLE. +--echo # +handler t1 open; +handler t1 read a prev; +flush tables with read lock; +handler t1 read a prev; +handler t1 close; +unlock tables; +--echo # +--echo # Explore the effect of HANDLER locks on concurrent DDL +--echo # +handler t1 open; +--echo # Establishing auxiliary connections con1, con2, con3 +connect(con1, localhost, root,,); +connect(con2, localhost, root,,); +connect(con3, localhost, root,,); +--echo # --> connection con1; +connection con1; +--echo # Sending: +--send drop table t1 +--echo # We can't use connection 'default' as wait_condition will +--echo # autoclose handlers. +--echo # --> connection con2 +connection con2; +--echo # Waitng for 'drop table t1' to get blocked... +let $wait_condition=select count(*)=1 from information_schema.processlist where state='Waiting for table' and info='drop table t1'; +--source include/wait_condition.inc +--echo # --> connection default +connection default; +handler t1 read a prev; +handler t1 read a prev; +handler t1 close; +--echo # --> connection con1 +connection con1; +--echo # Reaping 'drop table t1'... 
+--reap +--echo # --> connection default +connection default; +--echo # +--echo # Explore the effect of HANDLER locks in parallel with SELECT +--echo # +create table t1 (a int, key a (a)); +insert into t1 (a) values (1), (2), (3), (4), (5); +begin; +select * from t1; +handler t1 open; +handler t1 read a prev; +handler t1 read a prev; +handler t1 close; +--echo # --> connection con1; +connection con1; +--echo # Sending: +--send drop table t1 +--echo # --> connection con2 +connection con2; +--echo # Waiting for 'drop table t1' to get blocked... +let $wait_condition=select count(*)=1 from information_schema.processlist where state='Waiting for table' and info='drop table t1'; +--source include/wait_condition.inc +--echo # --> connection default +connection default; +--echo # We can still use the table, it's part of the transaction +select * from t1; +--echo # Such are the circumstances that t1 is a part of transaction, +--echo # thus we can reopen it in the handler +handler t1 open; +--echo # We can commit the transaction, it doesn't close the handler +--echo # and doesn't let DROP to proceed. +commit; +handler t1 read a prev; +handler t1 read a prev; +handler t1 read a prev; +handler t1 close; +--echo # --> connection con1 +connection con1; +--echo # Now drop can proceed +--echo # Reaping 'drop table t1'... +--reap +--echo # --> connection default +connection default; +--echo # +--echo # Demonstrate that HANDLER locks and transaction locks +--echo # reside in the same context, and we don't back-off +--echo # when have transaction or handler locks. 
+--echo # +create table t1 (a int, key a (a)); +insert into t1 (a) values (1), (2), (3), (4), (5); +create table t2 (a int, key a (a)); +insert into t2 (a) values (1), (2), (3), (4), (5); +begin; +select * from t1; +--echo # --> connection con1 +connection con1; +lock table t2 read; +--echo # --> connection con2 +connection con2; +--echo # Sending: +--send drop table t2 +--echo # --> connection con1 +connection con1; +--echo # Waiting for 'drop table t2' to get blocked... +let $wait_condition=select count(*)=1 from information_schema.processlist where state='Waiting for table' and info='drop table t2'; +--source include/wait_condition.inc +--echo # --> connection default +connection default; +--error ER_LOCK_DEADLOCK +handler t2 open; +--error ER_LOCK_DEADLOCK +select * from t2; +handler t1 open; +commit; +--error ER_LOCK_DEADLOCK +handler t2 open; +handler t1 close; +--echo # --> connection con1 +connection con1; +unlock tables; +--echo # --> connection con2 +connection con2; +--echo # Reaping 'drop table t2'... +--reap +--echo # --> connection default +connection default; +handler t1 open; +handler t1 read a prev; +handler t1 close; +--echo # +--echo # Likewise, this doesn't require a multi-statement transaction. +--echo # ER_LOCK_DEADLOCK is also produced when we have an open +--echo # HANDLER and try to acquire locks for a single statement. +--echo # +create table t2 (a int, key a (a)); +handler t1 open; +--echo # --> connection con1 +connection con1; +lock tables t2 read; +--echo # --> connection con2 +connection con2; +--echo # Sending 'drop table t2'... +--send drop table t2 +--echo # --> connection con1 +connection con1; +--echo # Waiting for 'drop table t2' to get blocked... 
+let $wait_condition=select count(*)=1 from information_schema.processlist where state='Waiting for table' and info='drop table t2'; +--source include/wait_condition.inc +--echo # --> connection default +connection default; +--error ER_LOCK_DEADLOCK +select * from t2; +--echo # --> connection con1 +connection con1; +unlock tables; +--echo # --> connection con2 +connection con2; +--echo # Reaping 'drop table t2'... +--reap +--echo # --> connection default +connection default; +handler t1 close; + +--echo # +--echo # ROLLBACK TO SAVEPOINT releases transactional locks, +--echo # but has no effect on open HANDLERs +--echo # +create table t2 like t1; +create table t3 like t1; +begin; +--echo # Have something before the savepoint +select * from t3; +savepoint sv; +handler t1 open; +handler t1 read a first; +handler t1 read a next; +select * from t2; +--echo # --> connection con1 +connection con1; +--echo # Sending: +--send drop table t1 +--echo # --> connection con2 +connection con2; +--echo # Sending: +--send drop table t2 +--echo # --> connection default +connection default; +--echo # Let DROP TABLE statements sync in. We must use +--echo # a separate connection for that, because otherwise SELECT +--echo # will auto-close the HANDLERs, becaues there are pending +--echo # exclusive locks against them. +--echo # --> connection con3 +connection con3; +--echo # Waiting for 'drop table t1' to get blocked... +let $wait_condition=select count(*)=1 from information_schema.processlist where state='Waiting for table' and info='drop table t1'; +--source include/wait_condition.inc +--echo # Waiting for 'drop table t2' to get blocked... 
+let $wait_condition=select count(*)=1 from information_schema.processlist where state='Waiting for table' and info='drop table t2'; +--source include/wait_condition.inc +--echo # Demonstrate that t2 lock was released and t2 was dropped +--echo # after ROLLBACK TO SAVEPOINT +--echo # --> connection default +connection default; +rollback to savepoint sv; +--echo # --> connection con2 +connection con2; +--echo # Reaping 'drop table t2'... +--reap +--echo # Demonstrate that ROLLBACK TO SAVEPOINT didn't release the handler +--echo # lock. +--echo # --> connection default +connection default; +handler t1 read a next; +handler t1 read a next; +--echo # Demonstrate that the drop will go through as soon as we close the +--echo # HANDLER +handler t1 close; +--echo # connection con1 +connection con1; +--echo # Reaping 'drop table t1'... +--reap +--echo # --> connection default +connection default; +commit; +drop table t3; +--echo # +--echo # A few special cases when using SAVEPOINT/ROLLBACK TO +--echo # SAVEPOINT and HANDLER. +--echo # +--echo # Show that rollback to the savepoint taken in the beginning +--echo # of the transaction doesn't release mdl lock on +--echo # the HANDLER that was opened later. +--echo # +create table t1 (a int, key a(a)); +insert into t1 (a) values (1), (2), (3), (4), (5); +create table t2 like t1; +begin; +savepoint sv; +handler t1 open; +handler t1 read a first; +handler t1 read a next; +select * from t2; +--echo # --> connection con1 +connection con1; +--echo # Sending: +--send drop table t1 +--echo # --> connection con2 +connection con2; +--echo # Sending: +--send drop table t2 +--echo # --> connection default +connection default; +--echo # Let DROP TABLE statements sync in. We must use +--echo # a separate connection for that, because otherwise SELECT +--echo # will auto-close the HANDLERs, becaues there are pending +--echo # exclusive locks against them. 
+--echo # --> connection con3 +connection con3; +--echo # Waiting for 'drop table t1' to get blocked... +let $wait_condition=select count(*)=1 from information_schema.processlist where state='Waiting for table' and info='drop table t1'; +--source include/wait_condition.inc +--echo # Waiting for 'drop table t2' to get blocked... +let $wait_condition=select count(*)=1 from information_schema.processlist where state='Waiting for table' and info='drop table t2'; +--source include/wait_condition.inc +--echo # Demonstrate that t2 lock was released and t2 was dropped +--echo # after ROLLBACK TO SAVEPOINT +--echo # --> connection default +connection default; +rollback to savepoint sv; +--echo # --> connection con2 +connection con2; +--echo # Reaping 'drop table t2'... +--reap +--echo # Demonstrate that ROLLBACK TO SAVEPOINT didn't release the handler +--echo # lock. +--echo # --> connection default +connection default; +handler t1 read a next; +handler t1 read a next; +--echo # Demonstrate that the drop will go through as soon as we close the +--echo # HANDLER +handler t1 close; +--echo # connection con1 +connection con1; +--echo # Reaping 'drop table t1'... +--reap +--echo # --> connection default +connection default; +commit; +--echo # +--echo # Show that rollback to the savepoint taken in the beginning +--echo # of the transaction works properly (no valgrind warnins, etc), +--echo # even though it's done after the HANDLER mdl lock that was there +--echo # at the beginning is released and added again. 
+--echo # +create table t1 (a int, key a(a)); +insert into t1 (a) values (1), (2), (3), (4), (5); +create table t2 like t1; +create table t3 like t1; +insert into t3 (a) select a from t1; +begin; +handler t1 open; +savepoint sv; +handler t1 read a first; +select * from t2; +handler t1 close; +handler t3 open; +handler t3 read a first; +rollback to savepoint sv; +--echo # --> connection con1 +connection con1; +drop table t1, t2; +--echo # Sending: +--send drop table t3 +--echo # Let DROP TABLE statement sync in. +--echo # --> connection con2 +connection con2; +--echo # Waiting for 'drop table t3' to get blocked... +let $wait_condition=select count(*)=1 from information_schema.processlist where state='Waiting for table' and info='drop table t3'; +--source include/wait_condition.inc +--echo # Demonstrate that ROLLBACK TO SAVEPOINT didn't release the handler +--echo # lock. +--echo # --> connection default +connection default; +handler t3 read a next; +--echo # Demonstrate that the drop will go through as soon as we close the +--echo # HANDLER +handler t3 close; +--echo # connection con1 +connection con1; +--echo # Reaping 'drop table t3'... +--reap +--echo # --> connection default +connection default; +commit; + +--echo # +--echo # If we have to wait on an exclusive locks while having +--echo # an open HANDLER, ER_LOCK_DEADLOCK is reported. 
+--echo # +create table t1 (a int, key a(a)); +create table t2 like t1; +handler t1 open; +--echo # --> connection con1 +connection con1; +lock table t2 read; +--echo # --> connection default +connection default; +--error ER_LOCK_DEADLOCK +drop table t2; +--error ER_LOCK_DEADLOCK +rename table t2 to t3; +--echo # Demonstrate that there is no deadlock with FLUSH TABLE, +--echo # even though it is waiting for the other table to go away +--echo # Sending: +--send flush table t2 +--echo # --> connection con2 +connection con2; +drop table t1; +--echo # --> connection con1 +connection con1; +unlock tables; +--echo # --> connection default +connection default; +--echo # Reaping 'flush table t2'... +--reap +drop table t2; + +--echo # +--echo # Bug #46224 HANDLER statements within a transaction might +--echo # lead to deadlocks +--echo # +create table t1 (a int, key a(a)); + +--echo # --> connection default +connection default; +begin; +select * from t1; +handler t1 open; + +--echo # --> connection con1 +connection con1; +lock tables t1 write; + +--echo # --> connection default +connection default; +--echo # Sending: +--send handler t1 read a next + +--echo # --> connection con1 +connection con1; +--echo # Waiting for 'handler t1 read a next' to get blocked... +let $wait_condition= + select count(*) = 1 from information_schema.processlist + where state = "Table lock" and info = "handler t1 read a next"; +--source include/wait_condition.inc +--echo # Sending: +--send drop table t1 + +--echo # --> connection con2 +connection con2; +--echo # Waiting for 'drop table t1' to get blocked... +let $wait_condition= + select count(*) = 1 from information_schema.processlist + where state = "Waiting for table" and info = "drop table t1"; +--source include/wait_condition.inc + +--echo # --> connection default +connection default; +--echo # Reaping 'handler t1 read a next'... 
+--error ER_LOCK_DEADLOCK +--reap +handler t1 close; +commit; + +--echo # --> connection con1 +connection con1; +--echo # Reaping 'drop table t1'... +--reap + +--echo # --> connection con1 +connection con1; +disconnect con1; +--source include/wait_until_disconnected.inc +--echo # --> connection con2 +connection con2; +disconnect con2; +--source include/wait_until_disconnected.inc +--echo # --> connection con3 +connection con3; +disconnect con3; +--source include/wait_until_disconnected.inc +connection default; + +--echo # +--echo # A temporary table test. +--echo # Check that we don't loose positions of HANDLER opened +--echo # against a temporary table. +--echo # +create table t1 (a int, b int, key a (a)); +insert into t1 (a) values (1), (2), (3), (4), (5); +create temporary table t2 (a int, b int, key a (a)); +insert into t2 (a) select a from t1; +handler t1 open; +handler t1 read a next; +handler t2 open; +handler t2 read a next; +flush table t1; +handler t2 read a next; +--echo # Sic: the position is lost +handler t1 read a next; +select * from t1; +--echo # Sic: the position is not lost +handler t2 read a next; +--error ER_CANT_REOPEN_TABLE +select * from t2; +handler t2 read a next; +drop table t1; +drop temporary table t2; + +--echo # +--echo # A test for lock_table_names()/unlock_table_names() function. +--echo # It should work properly in presence of open HANDLER. 
+--echo # +create table t1 (a int, b int, key a (a)); +create table t2 like t1; +create table t3 like t1; +create table t4 like t1; +handler t1 open; +handler t2 open; +rename table t4 to t5, t3 to t4, t5 to t3; +handler t1 read first; +handler t2 read first; +drop table t1, t2, t3, t4; diff --git a/mysql-test/r/handler_innodb.result b/mysql-test/r/handler_innodb.result index 5990b19062b..df948f3d0b6 100644 --- a/mysql-test/r/handler_innodb.result +++ b/mysql-test/r/handler_innodb.result @@ -570,13 +570,25 @@ connection: flush rename table t1 to t2;; connection: waiter connection: default +# +# RENAME placed two pending locks and waits. +# When HANDLER t2 OPEN does open_tables(), it calls +# mysql_ha_flush(), which in turn closes the open HANDLER for t1. +# RENAME TABLE gets unblocked. If it gets scheduled quickly +# and manages to complete before open_tables() +# of HANDLER t2 OPEN, open_tables() and therefore the whole +# HANDLER t2 OPEN succeeds. Otherwise open_tables() +# notices a pending or active exclusive metadata lock on t2 +# and the whole HANDLER t2 OPEN fails with ER_LOCK_DEADLOCK +# error. +# handler t2 open; -handler t2 read first; -c1 +handler t2 close; +connection: flush handler t1 read next; -ERROR 42S02: Table 'test.t1' doesn't exist +ERROR 42S02: Unknown table 't1' in HANDLER handler t1 close; -handler t2 close; +ERROR 42S02: Unknown table 't1' in HANDLER drop table t2; drop table if exists t1; create temporary table t1 (a int, b char(1), key a(a), key b(a,b)); @@ -745,3 +757,569 @@ USE information_schema; HANDLER COLUMNS OPEN; ERROR HY000: Incorrect usage of HANDLER OPEN and information_schema USE test; +# +# Add test coverage for HANDLER and LOCK TABLES, HANDLER and DDL. 
+# +drop table if exists t1, t2, t3; +create table t1 (a int, key a (a)); +insert into t1 (a) values (1), (2), (3), (4), (5); +create table t2 (a int, key a (a)) select * from t1; +create temporary table t3 (a int, key a (a)) select * from t2; +handler t1 open; +handler t2 open; +handler t3 open; +# +# LOCK TABLES implicitly closes all handlers. +# +lock table t3 read; +# +# No HANDLER sql is available under lock tables anyway. +# +handler t1 open; +ERROR HY000: Can't execute the given command because you have active locked tables or an active transaction +handler t1 read next; +ERROR HY000: Can't execute the given command because you have active locked tables or an active transaction +handler t2 close; +ERROR HY000: Can't execute the given command because you have active locked tables or an active transaction +handler t3 open; +ERROR HY000: Can't execute the given command because you have active locked tables or an active transaction +# After UNLOCK TABLES no handlers are around, they were +# implicitly closed. 
+unlock tables; +drop temporary table t3; +handler t1 read next; +ERROR 42S02: Unknown table 't1' in HANDLER +handler t2 close; +ERROR 42S02: Unknown table 't2' in HANDLER +handler t3 read next; +ERROR 42S02: Unknown table 't3' in HANDLER +# +# Other operations also implicitly close handler: +# +# TRUNCATE +# +handler t1 open; +truncate table t1; +handler t1 read next; +ERROR 42S02: Unknown table 't1' in HANDLER +handler t1 open; +# +# CREATE TRIGGER +# +create trigger t1_ai after insert on t1 for each row set @a=1; +handler t1 read next; +ERROR 42S02: Unknown table 't1' in HANDLER +# +# DROP TRIGGER +# +handler t1 open; +drop trigger t1_ai; +handler t1 read next; +ERROR 42S02: Unknown table 't1' in HANDLER +# +# ALTER TABLE +# +handler t1 open; +alter table t1 add column b int; +handler t1 read next; +ERROR 42S02: Unknown table 't1' in HANDLER +# +# ANALYZE TABLE +# +handler t1 open; +analyze table t1; +Table Op Msg_type Msg_text +test.t1 analyze status OK +handler t1 read next; +ERROR 42S02: Unknown table 't1' in HANDLER +# +# OPTIMIZE TABLE +# +handler t1 open; +optimize table t1; +Table Op Msg_type Msg_text +test.t1 optimize note Table does not support optimize, doing recreate + analyze instead +test.t1 optimize status OK +handler t1 read next; +ERROR 42S02: Unknown table 't1' in HANDLER +# +# REPAIR TABLE +# +handler t1 open; +repair table t1; +Table Op Msg_type Msg_text +test.t1 repair note The storage engine for the table doesn't support repair +handler t1 read next; +ERROR 42S02: Unknown table 't1' in HANDLER +# +# DROP TABLE, naturally. +# +handler t1 open; +drop table t1; +handler t1 read next; +ERROR 42S02: Unknown table 't1' in HANDLER +create table t1 (a int, b int, key a (a)) select a from t2; +# +# RENAME TABLE, naturally +# +handler t1 open; +rename table t1 to t3; +handler t1 read next; +ERROR 42S02: Unknown table 't1' in HANDLER +# +# CREATE TABLE (even with IF NOT EXISTS clause, +# and the table exists). 
+# +handler t2 open; +create table if not exists t2 (a int); +Warnings: +Note 1050 Table 't2' already exists +handler t2 read next; +ERROR 42S02: Unknown table 't2' in HANDLER +rename table t3 to t1; +drop table t2; +# +# FLUSH TABLE doesn't close the table but loses the position +# +handler t1 open; +handler t1 read a prev; +b a +NULL 5 +flush table t1; +handler t1 read a prev; +b a +NULL 5 +handler t1 close; +# +# FLUSH TABLES WITH READ LOCK behaves like FLUSH TABLE. +# +handler t1 open; +handler t1 read a prev; +b a +NULL 5 +flush tables with read lock; +handler t1 read a prev; +b a +NULL 5 +handler t1 close; +unlock tables; +# +# Explore the effect of HANDLER locks on concurrent DDL +# +handler t1 open; +# Establishing auxiliary connections con1, con2, con3 +# --> connection con1; +# Sending: +drop table t1 ; +# We can't use connection 'default' as wait_condition will +# autoclose handlers. +# --> connection con2 +# Waitng for 'drop table t1' to get blocked... +# --> connection default +handler t1 read a prev; +b a +NULL 5 +handler t1 read a prev; +b a +NULL 4 +handler t1 close; +# --> connection con1 +# Reaping 'drop table t1'... +# --> connection default +# +# Explore the effect of HANDLER locks in parallel with SELECT +# +create table t1 (a int, key a (a)); +insert into t1 (a) values (1), (2), (3), (4), (5); +begin; +select * from t1; +a +1 +2 +3 +4 +5 +handler t1 open; +handler t1 read a prev; +a +5 +handler t1 read a prev; +a +4 +handler t1 close; +# --> connection con1; +# Sending: +drop table t1 ; +# --> connection con2 +# Waiting for 'drop table t1' to get blocked... +# --> connection default +# We can still use the table, it's part of the transaction +select * from t1; +a +1 +2 +3 +4 +5 +# Such are the circumstances that t1 is a part of transaction, +# thus we can reopen it in the handler +handler t1 open; +# We can commit the transaction, it doesn't close the handler +# and doesn't let DROP to proceed. 
+commit; +handler t1 read a prev; +a +5 +handler t1 read a prev; +a +4 +handler t1 read a prev; +a +3 +handler t1 close; +# --> connection con1 +# Now drop can proceed +# Reaping 'drop table t1'... +# --> connection default +# +# Demonstrate that HANDLER locks and transaction locks +# reside in the same context, and we don't back-off +# when have transaction or handler locks. +# +create table t1 (a int, key a (a)); +insert into t1 (a) values (1), (2), (3), (4), (5); +create table t2 (a int, key a (a)); +insert into t2 (a) values (1), (2), (3), (4), (5); +begin; +select * from t1; +a +1 +2 +3 +4 +5 +# --> connection con1 +lock table t2 read; +# --> connection con2 +# Sending: +drop table t2; +# --> connection con1 +# Waiting for 'drop table t2' to get blocked... +# --> connection default +handler t2 open; +ERROR 40001: Deadlock found when trying to get lock; try restarting transaction +select * from t2; +ERROR 40001: Deadlock found when trying to get lock; try restarting transaction +handler t1 open; +commit; +handler t2 open; +ERROR 40001: Deadlock found when trying to get lock; try restarting transaction +handler t1 close; +# --> connection con1 +unlock tables; +# --> connection con2 +# Reaping 'drop table t2'... +# --> connection default +handler t1 open; +handler t1 read a prev; +a +5 +handler t1 close; +# +# Likewise, this doesn't require a multi-statement transaction. +# ER_LOCK_DEADLOCK is also produced when we have an open +# HANDLER and try to acquire locks for a single statement. +# +create table t2 (a int, key a (a)); +handler t1 open; +# --> connection con1 +lock tables t2 read; +# --> connection con2 +# Sending 'drop table t2'... +drop table t2; +# --> connection con1 +# Waiting for 'drop table t2' to get blocked... +# --> connection default +select * from t2; +ERROR 40001: Deadlock found when trying to get lock; try restarting transaction +# --> connection con1 +unlock tables; +# --> connection con2 +# Reaping 'drop table t2'... 
+# --> connection default +handler t1 close; +# +# ROLLBACK TO SAVEPOINT releases transactional locks, +# but has no effect on open HANDLERs +# +create table t2 like t1; +create table t3 like t1; +begin; +# Have something before the savepoint +select * from t3; +a +savepoint sv; +handler t1 open; +handler t1 read a first; +a +1 +handler t1 read a next; +a +2 +select * from t2; +a +# --> connection con1 +# Sending: +drop table t1; +# --> connection con2 +# Sending: +drop table t2; +# --> connection default +# Let DROP TABLE statements sync in. We must use +# a separate connection for that, because otherwise SELECT +# will auto-close the HANDLERs, becaues there are pending +# exclusive locks against them. +# --> connection con3 +# Waiting for 'drop table t1' to get blocked... +# Waiting for 'drop table t2' to get blocked... +# Demonstrate that t2 lock was released and t2 was dropped +# after ROLLBACK TO SAVEPOINT +# --> connection default +rollback to savepoint sv; +# --> connection con2 +# Reaping 'drop table t2'... +# Demonstrate that ROLLBACK TO SAVEPOINT didn't release the handler +# lock. +# --> connection default +handler t1 read a next; +a +3 +handler t1 read a next; +a +4 +# Demonstrate that the drop will go through as soon as we close the +# HANDLER +handler t1 close; +# connection con1 +# Reaping 'drop table t1'... +# --> connection default +commit; +drop table t3; +# +# A few special cases when using SAVEPOINT/ROLLBACK TO +# SAVEPOINT and HANDLER. +# +# Show that rollback to the savepoint taken in the beginning +# of the transaction doesn't release mdl lock on +# the HANDLER that was opened later. 
+# +create table t1 (a int, key a(a)); +insert into t1 (a) values (1), (2), (3), (4), (5); +create table t2 like t1; +begin; +savepoint sv; +handler t1 open; +handler t1 read a first; +a +1 +handler t1 read a next; +a +2 +select * from t2; +a +# --> connection con1 +# Sending: +drop table t1; +# --> connection con2 +# Sending: +drop table t2; +# --> connection default +# Let DROP TABLE statements sync in. We must use +# a separate connection for that, because otherwise SELECT +# will auto-close the HANDLERs, becaues there are pending +# exclusive locks against them. +# --> connection con3 +# Waiting for 'drop table t1' to get blocked... +# Waiting for 'drop table t2' to get blocked... +# Demonstrate that t2 lock was released and t2 was dropped +# after ROLLBACK TO SAVEPOINT +# --> connection default +rollback to savepoint sv; +# --> connection con2 +# Reaping 'drop table t2'... +# Demonstrate that ROLLBACK TO SAVEPOINT didn't release the handler +# lock. +# --> connection default +handler t1 read a next; +a +3 +handler t1 read a next; +a +4 +# Demonstrate that the drop will go through as soon as we close the +# HANDLER +handler t1 close; +# connection con1 +# Reaping 'drop table t1'... +# --> connection default +commit; +# +# Show that rollback to the savepoint taken in the beginning +# of the transaction works properly (no valgrind warnins, etc), +# even though it's done after the HANDLER mdl lock that was there +# at the beginning is released and added again. +# +create table t1 (a int, key a(a)); +insert into t1 (a) values (1), (2), (3), (4), (5); +create table t2 like t1; +create table t3 like t1; +insert into t3 (a) select a from t1; +begin; +handler t1 open; +savepoint sv; +handler t1 read a first; +a +1 +select * from t2; +a +handler t1 close; +handler t3 open; +handler t3 read a first; +a +1 +rollback to savepoint sv; +# --> connection con1 +drop table t1, t2; +# Sending: +drop table t3; +# Let DROP TABLE statement sync in. 
+# --> connection con2 +# Waiting for 'drop table t3' to get blocked... +# Demonstrate that ROLLBACK TO SAVEPOINT didn't release the handler +# lock. +# --> connection default +handler t3 read a next; +a +2 +# Demonstrate that the drop will go through as soon as we close the +# HANDLER +handler t3 close; +# connection con1 +# Reaping 'drop table t3'... +# --> connection default +commit; +# +# If we have to wait on an exclusive locks while having +# an open HANDLER, ER_LOCK_DEADLOCK is reported. +# +create table t1 (a int, key a(a)); +create table t2 like t1; +handler t1 open; +# --> connection con1 +lock table t2 read; +# --> connection default +drop table t2; +ERROR 40001: Deadlock found when trying to get lock; try restarting transaction +rename table t2 to t3; +ERROR 40001: Deadlock found when trying to get lock; try restarting transaction +# Demonstrate that there is no deadlock with FLUSH TABLE, +# even though it is waiting for the other table to go away +# Sending: +flush table t2; +# --> connection con2 +drop table t1; +# --> connection con1 +unlock tables; +# --> connection default +# Reaping 'flush table t2'... +drop table t2; +# +# Bug #46224 HANDLER statements within a transaction might +# lead to deadlocks +# +create table t1 (a int, key a(a)); +# --> connection default +begin; +select * from t1; +a +handler t1 open; +# --> connection con1 +lock tables t1 write; +# --> connection default +# Sending: +handler t1 read a next; +# --> connection con1 +# Waiting for 'handler t1 read a next' to get blocked... +# Sending: +drop table t1; +# --> connection con2 +# Waiting for 'drop table t1' to get blocked... +# --> connection default +# Reaping 'handler t1 read a next'... +ERROR 40001: Deadlock found when trying to get lock; try restarting transaction +handler t1 close; +commit; +# --> connection con1 +# Reaping 'drop table t1'... +# --> connection con1 +# --> connection con2 +# --> connection con3 +# +# A temporary table test. 
+# Check that we don't loose positions of HANDLER opened +# against a temporary table. +# +create table t1 (a int, b int, key a (a)); +insert into t1 (a) values (1), (2), (3), (4), (5); +create temporary table t2 (a int, b int, key a (a)); +insert into t2 (a) select a from t1; +handler t1 open; +handler t1 read a next; +a b +1 NULL +handler t2 open; +handler t2 read a next; +a b +1 NULL +flush table t1; +handler t2 read a next; +a b +2 NULL +# Sic: the position is lost +handler t1 read a next; +a b +1 NULL +select * from t1; +a b +1 NULL +2 NULL +3 NULL +4 NULL +5 NULL +# Sic: the position is not lost +handler t2 read a next; +a b +3 NULL +select * from t2; +ERROR HY000: Can't reopen table: 't2' +handler t2 read a next; +a b +4 NULL +drop table t1; +drop temporary table t2; +# +# A test for lock_table_names()/unlock_table_names() function. +# It should work properly in presence of open HANDLER. +# +create table t1 (a int, b int, key a (a)); +create table t2 like t1; +create table t3 like t1; +create table t4 like t1; +handler t1 open; +handler t2 open; +rename table t4 to t5, t3 to t4, t5 to t3; +handler t1 read first; +a b +handler t2 read first; +a b +drop table t1, t2, t3, t4; diff --git a/mysql-test/r/handler_myisam.result b/mysql-test/r/handler_myisam.result index f7b0ff2e04e..4b287e6560b 100644 --- a/mysql-test/r/handler_myisam.result +++ b/mysql-test/r/handler_myisam.result @@ -569,13 +569,25 @@ connection: flush rename table t1 to t2;; connection: waiter connection: default +# +# RENAME placed two pending locks and waits. +# When HANDLER t2 OPEN does open_tables(), it calls +# mysql_ha_flush(), which in turn closes the open HANDLER for t1. +# RENAME TABLE gets unblocked. If it gets scheduled quickly +# and manages to complete before open_tables() +# of HANDLER t2 OPEN, open_tables() and therefore the whole +# HANDLER t2 OPEN succeeds. 
Otherwise open_tables() +# notices a pending or active exclusive metadata lock on t2 +# and the whole HANDLER t2 OPEN fails with ER_LOCK_DEADLOCK +# error. +# handler t2 open; -handler t2 read first; -c1 +handler t2 close; +connection: flush handler t1 read next; -ERROR 42S02: Table 'test.t1' doesn't exist +ERROR 42S02: Unknown table 't1' in HANDLER handler t1 close; -handler t2 close; +ERROR 42S02: Unknown table 't1' in HANDLER drop table t2; drop table if exists t1; create temporary table t1 (a int, b char(1), key a(a), key b(a,b)); @@ -744,6 +756,571 @@ HANDLER COLUMNS OPEN; ERROR HY000: Incorrect usage of HANDLER OPEN and information_schema USE test; # +# Add test coverage for HANDLER and LOCK TABLES, HANDLER and DDL. +# +drop table if exists t1, t2, t3; +create table t1 (a int, key a (a)); +insert into t1 (a) values (1), (2), (3), (4), (5); +create table t2 (a int, key a (a)) select * from t1; +create temporary table t3 (a int, key a (a)) select * from t2; +handler t1 open; +handler t2 open; +handler t3 open; +# +# LOCK TABLES implicitly closes all handlers. +# +lock table t3 read; +# +# No HANDLER sql is available under lock tables anyway. +# +handler t1 open; +ERROR HY000: Can't execute the given command because you have active locked tables or an active transaction +handler t1 read next; +ERROR HY000: Can't execute the given command because you have active locked tables or an active transaction +handler t2 close; +ERROR HY000: Can't execute the given command because you have active locked tables or an active transaction +handler t3 open; +ERROR HY000: Can't execute the given command because you have active locked tables or an active transaction +# After UNLOCK TABLES no handlers are around, they were +# implicitly closed. 
+unlock tables; +drop temporary table t3; +handler t1 read next; +ERROR 42S02: Unknown table 't1' in HANDLER +handler t2 close; +ERROR 42S02: Unknown table 't2' in HANDLER +handler t3 read next; +ERROR 42S02: Unknown table 't3' in HANDLER +# +# Other operations also implicitly close handler: +# +# TRUNCATE +# +handler t1 open; +truncate table t1; +handler t1 read next; +ERROR 42S02: Unknown table 't1' in HANDLER +handler t1 open; +# +# CREATE TRIGGER +# +create trigger t1_ai after insert on t1 for each row set @a=1; +handler t1 read next; +ERROR 42S02: Unknown table 't1' in HANDLER +# +# DROP TRIGGER +# +handler t1 open; +drop trigger t1_ai; +handler t1 read next; +ERROR 42S02: Unknown table 't1' in HANDLER +# +# ALTER TABLE +# +handler t1 open; +alter table t1 add column b int; +handler t1 read next; +ERROR 42S02: Unknown table 't1' in HANDLER +# +# ANALYZE TABLE +# +handler t1 open; +analyze table t1; +Table Op Msg_type Msg_text +test.t1 analyze status Table is already up to date +handler t1 read next; +ERROR 42S02: Unknown table 't1' in HANDLER +# +# OPTIMIZE TABLE +# +handler t1 open; +optimize table t1; +Table Op Msg_type Msg_text +test.t1 optimize status OK +handler t1 read next; +ERROR 42S02: Unknown table 't1' in HANDLER +# +# REPAIR TABLE +# +handler t1 open; +repair table t1; +Table Op Msg_type Msg_text +test.t1 repair status OK +handler t1 read next; +ERROR 42S02: Unknown table 't1' in HANDLER +# +# DROP TABLE, naturally. +# +handler t1 open; +drop table t1; +handler t1 read next; +ERROR 42S02: Unknown table 't1' in HANDLER +create table t1 (a int, b int, key a (a)) select a from t2; +# +# RENAME TABLE, naturally +# +handler t1 open; +rename table t1 to t3; +handler t1 read next; +ERROR 42S02: Unknown table 't1' in HANDLER +# +# CREATE TABLE (even with IF NOT EXISTS clause, +# and the table exists). 
+# +handler t2 open; +create table if not exists t2 (a int); +Warnings: +Note 1050 Table 't2' already exists +handler t2 read next; +ERROR 42S02: Unknown table 't2' in HANDLER +rename table t3 to t1; +drop table t2; +# +# FLUSH TABLE doesn't close the table but loses the position +# +handler t1 open; +handler t1 read a prev; +b a +NULL 5 +flush table t1; +handler t1 read a prev; +b a +NULL 5 +handler t1 close; +# +# FLUSH TABLES WITH READ LOCK behaves like FLUSH TABLE. +# +handler t1 open; +handler t1 read a prev; +b a +NULL 5 +flush tables with read lock; +handler t1 read a prev; +b a +NULL 5 +handler t1 close; +unlock tables; +# +# Explore the effect of HANDLER locks on concurrent DDL +# +handler t1 open; +# Establishing auxiliary connections con1, con2, con3 +# --> connection con1; +# Sending: +drop table t1 ; +# We can't use connection 'default' as wait_condition will +# autoclose handlers. +# --> connection con2 +# Waitng for 'drop table t1' to get blocked... +# --> connection default +handler t1 read a prev; +b a +NULL 5 +handler t1 read a prev; +b a +NULL 4 +handler t1 close; +# --> connection con1 +# Reaping 'drop table t1'... +# --> connection default +# +# Explore the effect of HANDLER locks in parallel with SELECT +# +create table t1 (a int, key a (a)); +insert into t1 (a) values (1), (2), (3), (4), (5); +begin; +select * from t1; +a +1 +2 +3 +4 +5 +handler t1 open; +handler t1 read a prev; +a +5 +handler t1 read a prev; +a +4 +handler t1 close; +# --> connection con1; +# Sending: +drop table t1 ; +# --> connection con2 +# Waiting for 'drop table t1' to get blocked... +# --> connection default +# We can still use the table, it's part of the transaction +select * from t1; +a +1 +2 +3 +4 +5 +# Such are the circumstances that t1 is a part of transaction, +# thus we can reopen it in the handler +handler t1 open; +# We can commit the transaction, it doesn't close the handler +# and doesn't let DROP to proceed. 
+commit; +handler t1 read a prev; +a +5 +handler t1 read a prev; +a +4 +handler t1 read a prev; +a +3 +handler t1 close; +# --> connection con1 +# Now drop can proceed +# Reaping 'drop table t1'... +# --> connection default +# +# Demonstrate that HANDLER locks and transaction locks +# reside in the same context, and we don't back-off +# when have transaction or handler locks. +# +create table t1 (a int, key a (a)); +insert into t1 (a) values (1), (2), (3), (4), (5); +create table t2 (a int, key a (a)); +insert into t2 (a) values (1), (2), (3), (4), (5); +begin; +select * from t1; +a +1 +2 +3 +4 +5 +# --> connection con1 +lock table t2 read; +# --> connection con2 +# Sending: +drop table t2; +# --> connection con1 +# Waiting for 'drop table t2' to get blocked... +# --> connection default +handler t2 open; +ERROR 40001: Deadlock found when trying to get lock; try restarting transaction +select * from t2; +ERROR 40001: Deadlock found when trying to get lock; try restarting transaction +handler t1 open; +commit; +handler t2 open; +ERROR 40001: Deadlock found when trying to get lock; try restarting transaction +handler t1 close; +# --> connection con1 +unlock tables; +# --> connection con2 +# Reaping 'drop table t2'... +# --> connection default +handler t1 open; +handler t1 read a prev; +a +5 +handler t1 close; +# +# Likewise, this doesn't require a multi-statement transaction. +# ER_LOCK_DEADLOCK is also produced when we have an open +# HANDLER and try to acquire locks for a single statement. +# +create table t2 (a int, key a (a)); +handler t1 open; +# --> connection con1 +lock tables t2 read; +# --> connection con2 +# Sending 'drop table t2'... +drop table t2; +# --> connection con1 +# Waiting for 'drop table t2' to get blocked... +# --> connection default +select * from t2; +ERROR 40001: Deadlock found when trying to get lock; try restarting transaction +# --> connection con1 +unlock tables; +# --> connection con2 +# Reaping 'drop table t2'... 
+# --> connection default +handler t1 close; +# +# ROLLBACK TO SAVEPOINT releases transactional locks, +# but has no effect on open HANDLERs +# +create table t2 like t1; +create table t3 like t1; +begin; +# Have something before the savepoint +select * from t3; +a +savepoint sv; +handler t1 open; +handler t1 read a first; +a +1 +handler t1 read a next; +a +2 +select * from t2; +a +# --> connection con1 +# Sending: +drop table t1; +# --> connection con2 +# Sending: +drop table t2; +# --> connection default +# Let DROP TABLE statements sync in. We must use +# a separate connection for that, because otherwise SELECT +# will auto-close the HANDLERs, becaues there are pending +# exclusive locks against them. +# --> connection con3 +# Waiting for 'drop table t1' to get blocked... +# Waiting for 'drop table t2' to get blocked... +# Demonstrate that t2 lock was released and t2 was dropped +# after ROLLBACK TO SAVEPOINT +# --> connection default +rollback to savepoint sv; +# --> connection con2 +# Reaping 'drop table t2'... +# Demonstrate that ROLLBACK TO SAVEPOINT didn't release the handler +# lock. +# --> connection default +handler t1 read a next; +a +3 +handler t1 read a next; +a +4 +# Demonstrate that the drop will go through as soon as we close the +# HANDLER +handler t1 close; +# connection con1 +# Reaping 'drop table t1'... +# --> connection default +commit; +drop table t3; +# +# A few special cases when using SAVEPOINT/ROLLBACK TO +# SAVEPOINT and HANDLER. +# +# Show that rollback to the savepoint taken in the beginning +# of the transaction doesn't release mdl lock on +# the HANDLER that was opened later. 
+# +create table t1 (a int, key a(a)); +insert into t1 (a) values (1), (2), (3), (4), (5); +create table t2 like t1; +begin; +savepoint sv; +handler t1 open; +handler t1 read a first; +a +1 +handler t1 read a next; +a +2 +select * from t2; +a +# --> connection con1 +# Sending: +drop table t1; +# --> connection con2 +# Sending: +drop table t2; +# --> connection default +# Let DROP TABLE statements sync in. We must use +# a separate connection for that, because otherwise SELECT +# will auto-close the HANDLERs, becaues there are pending +# exclusive locks against them. +# --> connection con3 +# Waiting for 'drop table t1' to get blocked... +# Waiting for 'drop table t2' to get blocked... +# Demonstrate that t2 lock was released and t2 was dropped +# after ROLLBACK TO SAVEPOINT +# --> connection default +rollback to savepoint sv; +# --> connection con2 +# Reaping 'drop table t2'... +# Demonstrate that ROLLBACK TO SAVEPOINT didn't release the handler +# lock. +# --> connection default +handler t1 read a next; +a +3 +handler t1 read a next; +a +4 +# Demonstrate that the drop will go through as soon as we close the +# HANDLER +handler t1 close; +# connection con1 +# Reaping 'drop table t1'... +# --> connection default +commit; +# +# Show that rollback to the savepoint taken in the beginning +# of the transaction works properly (no valgrind warnins, etc), +# even though it's done after the HANDLER mdl lock that was there +# at the beginning is released and added again. +# +create table t1 (a int, key a(a)); +insert into t1 (a) values (1), (2), (3), (4), (5); +create table t2 like t1; +create table t3 like t1; +insert into t3 (a) select a from t1; +begin; +handler t1 open; +savepoint sv; +handler t1 read a first; +a +1 +select * from t2; +a +handler t1 close; +handler t3 open; +handler t3 read a first; +a +1 +rollback to savepoint sv; +# --> connection con1 +drop table t1, t2; +# Sending: +drop table t3; +# Let DROP TABLE statement sync in. 
+# --> connection con2 +# Waiting for 'drop table t3' to get blocked... +# Demonstrate that ROLLBACK TO SAVEPOINT didn't release the handler +# lock. +# --> connection default +handler t3 read a next; +a +2 +# Demonstrate that the drop will go through as soon as we close the +# HANDLER +handler t3 close; +# connection con1 +# Reaping 'drop table t3'... +# --> connection default +commit; +# +# If we have to wait on an exclusive locks while having +# an open HANDLER, ER_LOCK_DEADLOCK is reported. +# +create table t1 (a int, key a(a)); +create table t2 like t1; +handler t1 open; +# --> connection con1 +lock table t2 read; +# --> connection default +drop table t2; +ERROR 40001: Deadlock found when trying to get lock; try restarting transaction +rename table t2 to t3; +ERROR 40001: Deadlock found when trying to get lock; try restarting transaction +# Demonstrate that there is no deadlock with FLUSH TABLE, +# even though it is waiting for the other table to go away +# Sending: +flush table t2; +# --> connection con2 +drop table t1; +# --> connection con1 +unlock tables; +# --> connection default +# Reaping 'flush table t2'... +drop table t2; +# +# Bug #46224 HANDLER statements within a transaction might +# lead to deadlocks +# +create table t1 (a int, key a(a)); +# --> connection default +begin; +select * from t1; +a +handler t1 open; +# --> connection con1 +lock tables t1 write; +# --> connection default +# Sending: +handler t1 read a next; +# --> connection con1 +# Waiting for 'handler t1 read a next' to get blocked... +# Sending: +drop table t1; +# --> connection con2 +# Waiting for 'drop table t1' to get blocked... +# --> connection default +# Reaping 'handler t1 read a next'... +ERROR 40001: Deadlock found when trying to get lock; try restarting transaction +handler t1 close; +commit; +# --> connection con1 +# Reaping 'drop table t1'... +# --> connection con1 +# --> connection con2 +# --> connection con3 +# +# A temporary table test. 
+# Check that we don't loose positions of HANDLER opened +# against a temporary table. +# +create table t1 (a int, b int, key a (a)); +insert into t1 (a) values (1), (2), (3), (4), (5); +create temporary table t2 (a int, b int, key a (a)); +insert into t2 (a) select a from t1; +handler t1 open; +handler t1 read a next; +a b +1 NULL +handler t2 open; +handler t2 read a next; +a b +1 NULL +flush table t1; +handler t2 read a next; +a b +2 NULL +# Sic: the position is lost +handler t1 read a next; +a b +1 NULL +select * from t1; +a b +1 NULL +2 NULL +3 NULL +4 NULL +5 NULL +# Sic: the position is not lost +handler t2 read a next; +a b +3 NULL +select * from t2; +ERROR HY000: Can't reopen table: 't2' +handler t2 read a next; +a b +4 NULL +drop table t1; +drop temporary table t2; +# +# A test for lock_table_names()/unlock_table_names() function. +# It should work properly in presence of open HANDLER. +# +create table t1 (a int, b int, key a (a)); +create table t2 like t1; +create table t3 like t1; +create table t4 like t1; +handler t1 open; +handler t2 open; +rename table t4 to t5, t3 to t4, t5 to t3; +handler t1 read first; +a b +handler t2 read first; +a b +drop table t1, t2, t3, t4; +# # BUG #46456: HANDLER OPEN + TRUNCATE + DROP (temporary) TABLE, crash # CREATE TABLE t1 AS SELECT 1 AS f1; diff --git a/sql/lock.cc b/sql/lock.cc index d414d7d6ae2..31773585bff 100644 --- a/sql/lock.cc +++ b/sql/lock.cc @@ -335,23 +335,12 @@ MYSQL_LOCK *mysql_lock_tables(THD *thd, TABLE **tables, uint count, preserved. */ reset_lock_data(sql_lock); - thd->some_tables_deleted=1; // Try again sql_lock->lock_count= 0; // Locks are already freed // Fall through: unlock, reset lock data, free and retry } - else if (!thd->some_tables_deleted || (flags & MYSQL_LOCK_IGNORE_FLUSH)) - { - /* - Success and nobody set thd->some_tables_deleted to force reopen - or we were called with MYSQL_LOCK_IGNORE_FLUSH so such attempts - should be ignored. 
- */ - break; - } - else if (!thd->open_tables) + else { - // Only using temporary tables, no need to unlock - thd->some_tables_deleted=0; + /* Success */ break; } thd_proc_info(thd, 0); @@ -986,7 +975,7 @@ bool lock_table_names(THD *thd, TABLE_LIST *table_list) void unlock_table_names(THD *thd) { DBUG_ENTER("unlock_table_names"); - thd->mdl_context.release_all_locks(); + thd->mdl_context.release_transactional_locks(); DBUG_VOID_RETURN; } diff --git a/sql/log_event.cc b/sql/log_event.cc index 46d016b2c15..8afd243da63 100644 --- a/sql/log_event.cc +++ b/sql/log_event.cc @@ -5321,8 +5321,7 @@ int Xid_log_event::do_apply_event(Relay_log_info const *rli) if (!(res= trans_commit(thd))) { close_thread_tables(thd); - if (!thd->locked_tables_mode) - thd->mdl_context.release_all_locks(); + thd->mdl_context.release_transactional_locks(); } return res; } diff --git a/sql/mdl.cc b/sql/mdl.cc index 6187d4515a3..40074879e21 100644 --- a/sql/mdl.cc +++ b/sql/mdl.cc @@ -187,6 +187,7 @@ void MDL_context::init(THD *thd_arg) { m_has_global_shared_lock= FALSE; m_thd= thd_arg; + m_lt_or_ha_sentinel= NULL; /* FIXME: In reset_n_backup_open_tables_state, we abuse "init" as a reset, i.e. call it on an already @@ -218,76 +219,6 @@ void MDL_context::destroy() /** - Backup and reset state of meta-data locking context. - - mdl_context_backup_and_reset(), mdl_context_restore() and - mdl_context_merge() are used by HANDLER implementation which - needs to open table for new HANDLER independently of already - open HANDLERs and add this table/metadata lock to the set of - tables open/metadata locks for HANDLERs afterwards. 
-*/ - -void MDL_context::backup_and_reset(MDL_context *backup) -{ - DBUG_ASSERT(backup->m_tickets.is_empty()); - - m_tickets.swap(backup->m_tickets); - - backup->m_has_global_shared_lock= m_has_global_shared_lock; - /* - When the main context is swapped out, one can not take - the global shared lock, and one can not rely on it: - the functionality in this mode is reduced, since it exists as - a temporary hack to support ad-hoc opening of system tables. - */ - m_has_global_shared_lock= FALSE; -} - - -/** - Restore state of meta-data locking context from backup. -*/ - -void MDL_context::restore_from_backup(MDL_context *backup) -{ - DBUG_ASSERT(m_tickets.is_empty()); - DBUG_ASSERT(m_has_global_shared_lock == FALSE); - - m_tickets.swap(backup->m_tickets); - m_has_global_shared_lock= backup->m_has_global_shared_lock; -} - - -/** - Merge meta-data locks from one context into another. -*/ - -void MDL_context::merge(MDL_context *src) -{ - MDL_ticket *ticket; - - DBUG_ASSERT(m_thd == src->m_thd); - - if (!src->m_tickets.is_empty()) - { - Ticket_iterator it(src->m_tickets); - while ((ticket= it++)) - { - DBUG_ASSERT(ticket->m_ctx); - ticket->m_ctx= this; - m_tickets.push_front(ticket); - } - src->m_tickets.empty(); - } - /* - MDL_context::merge() is a hack used in one place only: to open - an SQL handler. We never acquire the global shared lock there. - */ - DBUG_ASSERT(! src->m_has_global_shared_lock); -} - - -/** Initialize a lock request. This is to be used for every lock request. 
@@ -606,7 +537,7 @@ MDL_lock::can_grant_lock(const MDL_context *requestor_ctx, enum_mdl_type type_ar if (waiting.is_empty() || type_arg == MDL_SHARED_HIGH_PRIO) can_grant= TRUE; } - else if (granted.head()->get_ctx() == requestor_ctx) + else if (granted.front()->get_ctx() == requestor_ctx) { /* When exclusive lock comes from the same context we can satisfy our @@ -659,20 +590,31 @@ MDL_lock::can_grant_lock(const MDL_context *requestor_ctx, enum_mdl_type type_ar /** Check whether the context already holds a compatible lock ticket on an object. + Start searching the transactional locks. If not + found in the list of transactional locks, look at LOCK TABLES + and HANDLER locks. @param mdl_request Lock request object for lock to be acquired + @param[out] is_lt_or_ha Did we pass beyond m_lt_or_ha_sentinel while + searching for ticket? @return A pointer to the lock ticket for the object or NULL otherwise. */ MDL_ticket * -MDL_context::find_ticket(MDL_request *mdl_request) +MDL_context::find_ticket(MDL_request *mdl_request, + bool *is_lt_or_ha) { MDL_ticket *ticket; Ticket_iterator it(m_tickets); + *is_lt_or_ha= FALSE; + while ((ticket= it++)) { + if (ticket == m_lt_or_ha_sentinel) + *is_lt_or_ha= TRUE; + if (mdl_request->type == ticket->m_type && mdl_request->key.is_equal(&ticket->m_lock->key)) break; @@ -709,6 +651,7 @@ MDL_context::try_acquire_shared_lock(MDL_request *mdl_request) MDL_lock *lock; MDL_key *key= &mdl_request->key; MDL_ticket *ticket; + bool is_lt_or_ha; DBUG_ASSERT(mdl_request->is_shared() && mdl_request->ticket == NULL); @@ -727,12 +670,35 @@ MDL_context::try_acquire_shared_lock(MDL_request *mdl_request) Check whether the context already holds a shared lock on the object, and if so, grant the request. */ - if ((ticket= find_ticket(mdl_request))) + if ((ticket= find_ticket(mdl_request, &is_lt_or_ha))) { DBUG_ASSERT(ticket->m_state == MDL_ACQUIRED); /* Only shared locks can be recursive. 
*/ DBUG_ASSERT(ticket->is_shared()); + /* + If the request is for a transactional lock, and we found + a transactional lock, just reuse the found ticket. + + It's possible that we found a transactional lock, + but the request is for a HANDLER lock. In that case HANDLER + code will clone the ticket (see below why it's needed). + + If the request is for a transactional lock, and we found + a HANDLER lock, create a copy, to make sure that when user + does HANDLER CLOSE, the transactional lock is not released. + + If the request is for a handler lock, and we found a + HANDLER lock, also do the clone. HANDLER CLOSE for one alias + should not release the lock on the table HANDLER opened through + a different alias. + */ mdl_request->ticket= ticket; + if (is_lt_or_ha && clone_ticket(mdl_request)) + { + /* Clone failed. */ + mdl_request->ticket= NULL; + return TRUE; + } return FALSE; } @@ -787,6 +753,46 @@ MDL_context::try_acquire_shared_lock(MDL_request *mdl_request) /** + Create a copy of a granted ticket. + This is used to make sure that HANDLER ticket + is never shared with a ticket that belongs to + a transaction, so that when we HANDLER CLOSE, + we don't release a transactional ticket, and + vice versa -- when we COMMIT, we don't mistakenly + release a ticket for an open HANDLER. + + @retval TRUE Out of memory. + @retval FALSE Success. +*/ + +bool +MDL_context::clone_ticket(MDL_request *mdl_request) +{ + MDL_ticket *ticket; + + safe_mutex_assert_not_owner(&LOCK_open); + /* Only used for HANDLER. 
*/ + DBUG_ASSERT(mdl_request->ticket && mdl_request->ticket->is_shared()); + + if (!(ticket= MDL_ticket::create(this, mdl_request->type))) + return TRUE; + + ticket->m_state= MDL_ACQUIRED; + ticket->m_lock= mdl_request->ticket->m_lock; + mdl_request->ticket= ticket; + + pthread_mutex_lock(&LOCK_mdl); + ticket->m_lock->granted.push_front(ticket); + if (mdl_request->type == MDL_SHARED_UPGRADABLE) + global_lock.active_intention_exclusive++; + pthread_mutex_unlock(&LOCK_mdl); + + m_tickets.push_front(ticket); + + return FALSE; +} + +/** Notify a thread holding a shared metadata lock which conflicts with a pending exclusive lock. @@ -850,7 +856,9 @@ bool MDL_context::acquire_exclusive_locks(MDL_request_list *mdl_requests) safe_mutex_assert_not_owner(&LOCK_open); /* Exclusive locks must always be acquired first, all at once. */ - DBUG_ASSERT(! has_locks()); + DBUG_ASSERT(! has_locks() || + (m_lt_or_ha_sentinel && + m_tickets.front() == m_lt_or_ha_sentinel)); if (m_has_global_shared_lock) { @@ -924,6 +932,17 @@ bool MDL_context::acquire_exclusive_locks(MDL_request_list *mdl_requests) if (!mdl_request) break; + if (m_lt_or_ha_sentinel) + { + /* + We're about to start waiting. Don't do it if we have + HANDLER locks (we can't have any other locks here). + Waiting with locks may lead to a deadlock. + */ + my_error(ER_LOCK_DEADLOCK, MYF(0)); + goto err; + } + /* There is a shared or exclusive lock on the object. */ DEBUG_SYNC(m_thd, "mdl_acquire_exclusive_locks_wait"); @@ -1041,6 +1060,30 @@ MDL_ticket::upgrade_shared_lock_to_exclusive() if (m_lock->can_grant_lock(m_ctx, MDL_EXCLUSIVE, TRUE)) break; + /* + If m_ctx->lt_or_ha_sentinel(), and this sentinel is for HANDLER, + we can deadlock. However, HANDLER is not allowed under + LOCK TABLES, and apart from LOCK TABLES there are only + two cases of lock upgrade: ALTER TABLE and CREATE/DROP + TRIGGER (*). 
This leaves us with the following scenario + for deadlock: + + connection 1 connection 2 + handler t1 open; handler t2 open; + alter table t2 ... alter table t1 ... + + This scenario is quite remote, since ALTER + (and CREATE/DROP TRIGGER) performs mysql_ha_flush() in + the beginning, and thus closes open HANDLERS against which + there is a pending lock upgrade. Still, two ALTER statements + can interleave and not notice each other's pending lock + (e.g. if both upgrade their locks at the same time). + This, however, is quite unlikely, so we do nothing to + address it. + + (*) There is no requirement to upgrade lock in + CREATE/DROP TRIGGER, it's used there just for convenience. + */ bool signalled= FALSE; MDL_ticket *conflicting_ticket; MDL_lock::Ticket_iterator it(m_lock->granted); @@ -1280,6 +1323,9 @@ void MDL_context::release_ticket(MDL_ticket *ticket) safe_mutex_assert_owner(&LOCK_mdl); + if (ticket == m_lt_or_ha_sentinel) + m_lt_or_ha_sentinel= ++Ticket_list::Iterator(m_tickets, ticket); + m_tickets.remove(ticket); switch (ticket->m_type) @@ -1317,18 +1363,27 @@ void MDL_context::release_ticket(MDL_ticket *ticket) /** - Release all locks associated with the context. - - This function is used to back off in case of a lock conflict. - It is also used to release shared locks in the end of an SQL - statement. + Release all locks associated with the context. If the sentinel + is not NULL, do not release locks stored in the list after and + including the sentinel. + + Transactional locks are added to the beginning of the list, i.e. + stored in reverse temporal order. This allows to employ this + function to: + - back off in case of a lock conflict. + - release all locks in the end of a transaction + - rollback to a savepoint. + + The sentinel semantics is used to support LOCK TABLES + mode and HANDLER statements: locks taken by these statements + survive COMMIT, ROLLBACK, ROLLBACK TO SAVEPOINT. 
*/ -void MDL_context::release_all_locks() +void MDL_context::release_locks_stored_before(MDL_ticket *sentinel) { MDL_ticket *ticket; Ticket_iterator it(m_tickets); - DBUG_ENTER("MDL_context::release_all_locks"); + DBUG_ENTER("MDL_context::release_locks_stored_before"); safe_mutex_assert_not_owner(&LOCK_open); @@ -1336,7 +1391,7 @@ void MDL_context::release_all_locks() DBUG_VOID_RETURN; pthread_mutex_lock(&LOCK_mdl); - while ((ticket= it++)) + while ((ticket= it++) && ticket != sentinel) { DBUG_PRINT("info", ("found lock to release ticket=%p", ticket)); release_ticket(ticket); @@ -1345,8 +1400,6 @@ void MDL_context::release_all_locks() pthread_cond_broadcast(&COND_mdl); pthread_mutex_unlock(&LOCK_mdl); - m_tickets.empty(); - DBUG_VOID_RETURN; } @@ -1452,8 +1505,9 @@ MDL_context::is_exclusive_lock_owner(MDL_key::enum_mdl_namespace mdl_namespace, const char *db, const char *name) { MDL_request mdl_request; + bool is_lt_or_ha_unused; mdl_request.init(mdl_namespace, db, name, MDL_EXCLUSIVE); - MDL_ticket *ticket= find_ticket(&mdl_request); + MDL_ticket *ticket= find_ticket(&mdl_request, &is_lt_or_ha_unused); DBUG_ASSERT(ticket == NULL || ticket->m_state == MDL_ACQUIRED); @@ -1593,19 +1647,87 @@ void *MDL_ticket::get_cached_object() void MDL_context::rollback_to_savepoint(MDL_ticket *mdl_savepoint) { - MDL_ticket *ticket; - Ticket_iterator it(m_tickets); DBUG_ENTER("MDL_context::rollback_to_savepoint"); - while ((ticket= it++)) + /* If savepoint is NULL, it is from the start of the transaction. */ + release_locks_stored_before(mdl_savepoint ? + mdl_savepoint : m_lt_or_ha_sentinel); + + DBUG_VOID_RETURN; +} + + +/** + Release locks acquired by normal statements (SELECT, UPDATE, + DELETE, etc) in the course of a transaction. Do not release + HANDLER locks, if there are any. + + This method is used at the end of a transaction, in + implementation of COMMIT (implicit or explicit) and ROLLBACK. 
+*/ + +void MDL_context::release_transactional_locks() +{ + DBUG_ENTER("MDL_context::release_transactional_locks"); + release_locks_stored_before(m_lt_or_ha_sentinel); + DBUG_VOID_RETURN; +} + + +/** + Does this savepoint have this lock? + + @retval TRUE The ticket is older than the savepoint and + is not LT or HA ticket. Thus it belongs to + the savepoint. + @retval FALSE The ticket is newer than the savepoint + or is an LT or HA ticket. +*/ + +bool MDL_context::has_lock(MDL_ticket *mdl_savepoint, + MDL_ticket *mdl_ticket) +{ + MDL_ticket *ticket; + MDL_context::Ticket_iterator it(m_tickets); + bool found_savepoint= FALSE; + + while ((ticket= it++) && ticket != m_lt_or_ha_sentinel) { - /* Stop when lock was acquired before this savepoint. */ + /* + First met the savepoint. The ticket must be + somewhere after it. + */ if (ticket == mdl_savepoint) - break; - release_lock(ticket); + found_savepoint= TRUE; + /* + Met the ticket. If we haven't yet met the savepoint, + the ticket is newer than the savepoint. + */ + if (ticket == mdl_ticket) + return found_savepoint; } - - DBUG_VOID_RETURN; + /* Reached m_lt_or_ha_sentinel. The ticket must be an LT or HA ticket. */ + return FALSE; } +/** + Rearrange the ticket to reside in the part of the list that's + beyond m_lt_or_ha_sentinel. This effectively changes the ticket + life cycle, from automatic to manual: i.e. the ticket is no + longer released by MDL_context::release_transactional_locks() or + MDL_context::rollback_to_savepoint(), it must be released manually. +*/ + +void MDL_context::move_ticket_after_lt_or_ha_sentinel(MDL_ticket *mdl_ticket) +{ + m_tickets.remove(mdl_ticket); + if (m_lt_or_ha_sentinel == NULL) + { + m_lt_or_ha_sentinel= mdl_ticket; + /* sic: linear from the number of transactional tickets acquired so-far! 
*/ + m_tickets.push_back(mdl_ticket); + } + else + m_tickets.insert_after(m_lt_or_ha_sentinel, mdl_ticket); +} diff --git a/sql/mdl.h b/sql/mdl.h index 2758bd3a8e6..e85f1232ff9 100644 --- a/sql/mdl.h +++ b/sql/mdl.h @@ -327,19 +327,15 @@ public: void init(THD *thd); void destroy(); - void backup_and_reset(MDL_context *backup); - void restore_from_backup(MDL_context *backup); - void merge(MDL_context *source); - bool try_acquire_shared_lock(MDL_request *mdl_request); bool acquire_exclusive_lock(MDL_request *mdl_request); bool acquire_exclusive_locks(MDL_request_list *requests); bool try_acquire_exclusive_lock(MDL_request *mdl_request); bool acquire_global_shared_lock(); + bool clone_ticket(MDL_request *mdl_request); bool wait_for_locks(MDL_request_list *requests); - void release_all_locks(); void release_all_locks_for_name(MDL_ticket *ticket); void release_lock(MDL_ticket *ticket); void release_global_shared_lock(); @@ -350,26 +346,60 @@ public: bool is_lock_owner(MDL_key::enum_mdl_namespace mdl_namespace, const char *db, const char *name); + + bool has_lock(MDL_ticket *mdl_savepoint, MDL_ticket *mdl_ticket); + inline bool has_locks() const { return !m_tickets.is_empty(); } - inline MDL_ticket *mdl_savepoint() + MDL_ticket *mdl_savepoint() + { + /* + NULL savepoint represents the start of the transaction. + Checking for m_lt_or_ha_sentinel also makes sure we never + return a pointer to HANDLER ticket as a savepoint. + */ + return m_tickets.front() == m_lt_or_ha_sentinel ? 
NULL : m_tickets.front(); + } + + void set_lt_or_ha_sentinel() { - return m_tickets.head(); + DBUG_ASSERT(m_lt_or_ha_sentinel == NULL); + m_lt_or_ha_sentinel= mdl_savepoint(); } + MDL_ticket *lt_or_ha_sentinel() const { return m_lt_or_ha_sentinel; } + void clear_lt_or_ha_sentinel() + { + m_lt_or_ha_sentinel= NULL; + } + void move_ticket_after_lt_or_ha_sentinel(MDL_ticket *mdl_ticket); + + void release_transactional_locks(); void rollback_to_savepoint(MDL_ticket *mdl_savepoint); inline THD *get_thd() const { return m_thd; } private: Ticket_list m_tickets; bool m_has_global_shared_lock; + /** + This member has two uses: + 1) When entering LOCK TABLES mode, remember the last taken + metadata lock. COMMIT/ROLLBACK must preserve these metadata + locks. + 2) When we have an open HANDLER tables, store the position + in the list beyond which we keep locks for HANDLER tables. + COMMIT/ROLLBACK must, again, preserve HANDLER metadata locks. + */ + MDL_ticket *m_lt_or_ha_sentinel; THD *m_thd; private: void release_ticket(MDL_ticket *ticket); - MDL_ticket *find_ticket(MDL_request *mdl_req); + MDL_ticket *find_ticket(MDL_request *mdl_req, + bool *is_lt_or_ha); + void release_locks_stored_before(MDL_ticket *sentinel); }; diff --git a/sql/mysql_priv.h b/sql/mysql_priv.h index 1a171705dae..caf3130c517 100644 --- a/sql/mysql_priv.h +++ b/sql/mysql_priv.h @@ -1534,6 +1534,7 @@ TABLE *find_temporary_table(THD *thd, TABLE_LIST *table_list); int drop_temporary_table(THD *thd, TABLE_LIST *table_list); void close_temporary_table(THD *thd, TABLE *table, bool free_share, bool delete_table); +void mark_tmp_table_for_reuse(TABLE *table); void close_temporary(TABLE *table, bool free_share, bool delete_table); bool rename_temporary_table(THD* thd, TABLE *table, const char *new_db, const char *table_name); diff --git a/sql/rpl_injector.cc b/sql/rpl_injector.cc index 9d82307d2e7..d47c49ed515 100644 --- a/sql/rpl_injector.cc +++ b/sql/rpl_injector.cc @@ -86,8 +86,7 @@ int 
injector::transaction::commit() if (!trans_commit(m_thd)) { close_thread_tables(m_thd); - if (!m_thd->locked_tables_mode) - m_thd->mdl_context.release_all_locks(); + m_thd->mdl_context.release_transactional_locks(); } DBUG_RETURN(0); } diff --git a/sql/rpl_rli.cc b/sql/rpl_rli.cc index b4554bb4b6c..bd03afb8dd8 100644 --- a/sql/rpl_rli.cc +++ b/sql/rpl_rli.cc @@ -1189,8 +1189,8 @@ void Relay_log_info::cleanup_context(THD *thd, bool error) } m_table_map.clear_tables(); slave_close_thread_tables(thd); - if (error && !thd->locked_tables_mode) - thd->mdl_context.release_all_locks(); + if (error) + thd->mdl_context.release_transactional_locks(); clear_flag(IN_STMT); /* Cleanup for the flags that have been set at do_apply_event. diff --git a/sql/set_var.cc b/sql/set_var.cc index dd009541274..ce7cfcc81a8 100644 --- a/sql/set_var.cc +++ b/sql/set_var.cc @@ -3196,8 +3196,7 @@ static bool set_option_autocommit(THD *thd, set_var *var) return TRUE; close_thread_tables(thd); - if (!thd->locked_tables_mode) - thd->mdl_context.release_all_locks(); + thd->mdl_context.release_transactional_locks(); } if (var->save_result.ulong_value != 0) diff --git a/sql/slave.cc b/sql/slave.cc index 62ddc8aaf21..ca72aaea69a 100644 --- a/sql/slave.cc +++ b/sql/slave.cc @@ -2432,8 +2432,7 @@ static int exec_relay_log_event(THD* thd, Relay_log_info* rli) exec_res= 0; trans_rollback(thd); close_thread_tables(thd); - if (!thd->locked_tables_mode) - thd->mdl_context.release_all_locks(); + thd->mdl_context.release_transactional_locks(); /* chance for concurrent connection to get more locks */ safe_sleep(thd, min(rli->trans_retries, MAX_SLAVE_RETRY_PAUSE), (CHECK_KILLED_FUNC)sql_slave_killed, (void*)rli); diff --git a/sql/sql_acl.cc b/sql/sql_acl.cc index 4b9cee98211..451b2293109 100644 --- a/sql/sql_acl.cc +++ b/sql/sql_acl.cc @@ -730,8 +730,7 @@ my_bool acl_reload(THD *thd) end: trans_commit_implicit(thd); close_thread_tables(thd); - if (!thd->locked_tables_mode) - thd->mdl_context.release_all_locks(); 
+ thd->mdl_context.release_transactional_locks(); DBUG_RETURN(return_val); } @@ -3895,8 +3894,7 @@ my_bool grant_reload(THD *thd) rw_unlock(&LOCK_grant); trans_commit_implicit(thd); close_thread_tables(thd); - if (!thd->locked_tables_mode) - thd->mdl_context.release_all_locks(); + thd->mdl_context.release_transactional_locks(); /* It is OK failing to load procs_priv table because we may be diff --git a/sql/sql_base.cc b/sql/sql_base.cc index 61028f692b3..459ca646d8c 100644 --- a/sql/sql_base.cc +++ b/sql/sql_base.cc @@ -1169,37 +1169,51 @@ static void mark_temp_tables_as_free_for_reuse(THD *thd) for (TABLE *table= thd->temporary_tables ; table ; table= table->next) { if ((table->query_id == thd->query_id) && ! table->open_by_handler) - { - table->query_id= 0; - table->file->ha_reset(); + mark_tmp_table_for_reuse(table); + } +} - /* Detach temporary MERGE children from temporary parent. */ - DBUG_ASSERT(table->file); - table->file->extra(HA_EXTRA_DETACH_CHILDREN); - /* - Reset temporary table lock type to it's default value (TL_WRITE). - - Statements such as INSERT INTO .. SELECT FROM tmp, CREATE TABLE - .. SELECT FROM tmp and UPDATE may under some circumstances modify - the lock type of the tables participating in the statement. This - isn't a problem for non-temporary tables since their lock type is - reset at every open, but the same does not occur for temporary - tables for historical reasons. - - Furthermore, the lock type of temporary tables is not really that - important because they can only be used by one query at a time and - not even twice in a query -- a temporary table is represented by - only one TABLE object. Nonetheless, it's safer from a maintenance - point of view to reset the lock type of this singleton TABLE object - as to not cause problems when the table is reused. - - Even under LOCK TABLES mode its okay to reset the lock type as - LOCK TABLES is allowed (but ignored) for a temporary table. 
- */ - table->reginfo.lock_type= TL_WRITE; - } - } +/** + Reset a single temporary table. + Effectively this "closes" one temporary table, + in a session. + + @param table Temporary table. +*/ + +void mark_tmp_table_for_reuse(TABLE *table) +{ + DBUG_ASSERT(table->s->tmp_table); + + table->query_id= 0; + table->file->ha_reset(); + + /* Detach temporary MERGE children from temporary parent. */ + DBUG_ASSERT(table->file); + table->file->extra(HA_EXTRA_DETACH_CHILDREN); + + /* + Reset temporary table lock type to it's default value (TL_WRITE). + + Statements such as INSERT INTO .. SELECT FROM tmp, CREATE TABLE + .. SELECT FROM tmp and UPDATE may under some circumstances modify + the lock type of the tables participating in the statement. This + isn't a problem for non-temporary tables since their lock type is + reset at every open, but the same does not occur for temporary + tables for historical reasons. + + Furthermore, the lock type of temporary tables is not really that + important because they can only be used by one query at a time and + not even twice in a query -- a temporary table is represented by + only one TABLE object. Nonetheless, it's safer from a maintenance + point of view to reset the lock type of this singleton TABLE object + as to not cause problems when the table is reused. + + Even under LOCK TABLES mode its okay to reset the lock type as + LOCK TABLES is allowed (but ignored) for a temporary table. 
+ */ + table->reginfo.lock_type= TL_WRITE; } @@ -1261,7 +1275,6 @@ static void close_open_tables(THD *thd) while (thd->open_tables) found_old_table|= close_thread_table(thd, &thd->open_tables); - thd->some_tables_deleted= 0; /* Free tables to hold down open files */ while (table_cache_count > table_cache_size && unused_tables) @@ -1475,7 +1488,7 @@ void close_thread_tables(THD *thd) if (thd->locked_tables_mode == LTM_LOCK_TABLES) DBUG_VOID_RETURN; - thd->locked_tables_mode= LTM_NONE; + thd->leave_locked_tables_mode(); /* Fallthrough */ } @@ -1505,16 +1518,27 @@ void close_thread_tables(THD *thd) if (thd->open_tables) close_open_tables(thd); - /* - Defer the release of metadata locks until the current transaction - is either committed or rolled back. This prevents other statements - from modifying the table for the entire duration of this transaction. - This provides commitment ordering for guaranteeing serializability - across multiple transactions. - */ - if (!thd->in_multi_stmt_transaction() || - (thd->state_flags & Open_tables_state::BACKUPS_AVAIL)) - thd->mdl_context.release_all_locks(); + if (thd->state_flags & Open_tables_state::BACKUPS_AVAIL) + { + /* We can't have an open HANDLER in the backup open tables state. */ + DBUG_ASSERT(thd->mdl_context.lt_or_ha_sentinel() == NULL); + /* + Due to the above assert, this is guaranteed to release *all* locks + in the context. + */ + thd->mdl_context.release_transactional_locks(); + } + else if (! thd->in_multi_stmt_transaction()) + { + /* + Defer the release of metadata locks until the current transaction + is either committed or rolled back. This prevents other statements + from modifying the table for the entire duration of this transaction. + This provides commitment ordering for guaranteeing serializability + across multiple transactions. 
+ */ + thd->mdl_context.release_transactional_locks(); + } DBUG_VOID_RETURN; } @@ -2337,7 +2361,8 @@ open_table_get_mdl_lock(THD *thd, TABLE_LIST *table_list, enforced by asserts in metadata locking subsystem. */ mdl_request->set_type(MDL_EXCLUSIVE); - DBUG_ASSERT(! thd->mdl_context.has_locks()); + DBUG_ASSERT(! thd->mdl_context.has_locks() || + thd->handler_tables_hash.records); if (thd->mdl_context.acquire_exclusive_lock(mdl_request)) return 1; @@ -2791,7 +2816,7 @@ bool open_table(THD *thd, TABLE_LIST *table_list, MEM_ROOT *mem_root, if (!share->free_tables.is_empty()) { - table= share->free_tables.head(); + table= share->free_tables.front(); table_def_use_table(thd, table); /* We need to release share as we have EXTRA reference to it in our hands. */ release_table_share(share); @@ -3082,7 +3107,7 @@ Locked_tables_list::init_locked_tables(THD *thd) return TRUE; } } - thd->locked_tables_mode= LTM_LOCK_TABLES; + thd->enter_locked_tables_mode(LTM_LOCK_TABLES); return FALSE; } @@ -3121,7 +3146,7 @@ Locked_tables_list::unlock_locked_tables(THD *thd) */ table_list->table->pos_in_locked_tables= NULL; } - thd->locked_tables_mode= LTM_NONE; + thd->leave_locked_tables_mode(); close_thread_tables(thd); } @@ -3636,7 +3661,8 @@ end_with_lock_open: Open_table_context::Open_table_context(THD *thd) :m_action(OT_NO_ACTION), - m_can_deadlock(thd->in_multi_stmt_transaction() && + m_can_deadlock((thd->in_multi_stmt_transaction() || + thd->mdl_context.lt_or_ha_sentinel())&& thd->mdl_context.has_locks()) {} @@ -4136,7 +4162,7 @@ bool open_tables(THD *thd, TABLE_LIST **start, uint *counter, uint flags, even if they don't create problems for current thread (i.e. to avoid having DDL blocked by HANDLERs opened for long time). 
*/ - if (thd->handler_tables) + if (thd->handler_tables_hash.records) mysql_ha_flush(thd); /* @@ -4642,13 +4668,14 @@ retry: while ((error= open_table(thd, table_list, thd->mem_root, &ot_ctx, 0)) && ot_ctx.can_recover_from_failed_open_table()) { + /* We can't back off with an open HANDLER, we don't wait with locks. */ + DBUG_ASSERT(thd->mdl_context.lt_or_ha_sentinel() == NULL); /* Even though we have failed to open table we still need to - call release_all_locks() to release metadata locks which + call release_transactional_locks() to release metadata locks which might have been acquired successfully. */ - if (! thd->locked_tables_mode) - thd->mdl_context.release_all_locks(); + thd->mdl_context.release_transactional_locks(); table_list->mdl_request.ticket= 0; if (ot_ctx.recover_from_failed_open_table_attempt(thd, table_list)) break; @@ -4699,8 +4726,12 @@ retry: close_thread_tables(thd); table_list->table= NULL; table_list->mdl_request.ticket= NULL; - if (! thd->locked_tables_mode) - thd->mdl_context.release_all_locks(); + /* + We can't back off with an open HANDLER, + we don't wait with locks. 
+ */ + DBUG_ASSERT(thd->mdl_context.lt_or_ha_sentinel() == NULL); + thd->mdl_context.release_transactional_locks(); goto retry; } } @@ -4769,7 +4800,8 @@ bool open_and_lock_tables_derived(THD *thd, TABLE_LIST *tables, break; if (!need_reopen) DBUG_RETURN(TRUE); - if (thd->in_multi_stmt_transaction() && has_locks) + if ((thd->in_multi_stmt_transaction() || + thd->mdl_context.lt_or_ha_sentinel()) && has_locks) { my_error(ER_LOCK_DEADLOCK, MYF(0)); DBUG_RETURN(TRUE); @@ -5124,7 +5156,7 @@ bool lock_tables(THD *thd, TABLE_LIST *tables, uint count, */ mark_real_tables_as_free_for_reuse(first_not_own); DBUG_PRINT("info",("locked_tables_mode= LTM_PRELOCKED")); - thd->locked_tables_mode= LTM_PRELOCKED; + thd->enter_locked_tables_mode(LTM_PRELOCKED); } } else @@ -5226,8 +5258,13 @@ void close_tables_for_reopen(THD *thd, TABLE_LIST **tables) for (tmp= first_not_own_table; tmp; tmp= tmp->next_global) tmp->mdl_request.ticket= NULL; close_thread_tables(thd); - if (!thd->locked_tables_mode) - thd->mdl_context.release_all_locks(); + /* We can't back off with an open HANDLERs, we must not wait with locks. */ + DBUG_ASSERT(thd->mdl_context.lt_or_ha_sentinel() == NULL); + /* + Due to the above assert, this effectively releases *all* locks + of this session, so that we can safely wait on tables. + */ + thd->mdl_context.release_transactional_locks(); } @@ -8202,6 +8239,15 @@ bool mysql_notify_thread_having_shared_lock(THD *thd, THD *in_use) if (!thd_table->needs_reopen()) signalled|= mysql_lock_abort_for_thread(thd, thd_table); } + /* + Wake up threads waiting in tdc_wait_for_old_versions(). + Normally such threads would already get blocked + in MDL subsystem, when trying to acquire a shared lock. + But in case a thread has an open HANDLER statement, + (and thus already grabbed a metadata lock), it gets + blocked only too late -- at the table cache level. 
+ */ + broadcast_refresh(); pthread_mutex_unlock(&LOCK_open); return signalled; } @@ -8721,7 +8767,9 @@ void close_performance_schema_table(THD *thd, Open_tables_state *backup) pthread_mutex_unlock(&LOCK_open); - thd->mdl_context.release_all_locks(); + /* We can't have an open HANDLER in the backup context. */ + DBUG_ASSERT(thd->mdl_context.lt_or_ha_sentinel() == NULL); + thd->mdl_context.release_transactional_locks(); thd->restore_backup_open_tables_state(backup); } diff --git a/sql/sql_class.cc b/sql/sql_class.cc index aa647aada60..95c985b2c10 100644 --- a/sql/sql_class.cc +++ b/sql/sql_class.cc @@ -482,7 +482,7 @@ THD::THD() catalog= (char*)"std"; // the only catalog we have for now main_security_ctx.init(); security_ctx= &main_security_ctx; - some_tables_deleted=no_errors=password= 0; + no_errors=password= 0; query_start_used= 0; count_cuted_fields= CHECK_FIELD_IGNORE; killed= NOT_KILLED; @@ -997,6 +997,7 @@ void THD::cleanup(void) } locked_tables_list.unlock_locked_tables(this); + mysql_ha_cleanup(this); /* If the thread was in the middle of an ongoing transaction (rolled @@ -1005,14 +1006,19 @@ void THD::cleanup(void) metadata locks. Release them. */ DBUG_ASSERT(open_tables == NULL); - mdl_context.release_all_locks(); + /* All HANDLERs must have been closed by now. */ + DBUG_ASSERT(mdl_context.lt_or_ha_sentinel() == NULL); + /* + Due to the above assert, this is guaranteed to release *all* in + this session. + */ + mdl_context.release_transactional_locks(); #if defined(ENABLED_DEBUG_SYNC) /* End the Debug Sync Facility. See debug_sync.cc. 
*/ debug_sync_end_thread(this); #endif /* defined(ENABLED_DEBUG_SYNC) */ - mysql_ha_cleanup(this); delete_dynamic(&user_var_events); my_hash_free(&user_vars); close_temporary_tables(this); @@ -1061,8 +1067,6 @@ THD::~THD() cleanup(); mdl_context.destroy(); - handler_mdl_context.destroy(); - ha_close_connection(this); plugin_thdvar_cleanup(this); @@ -3038,12 +3042,11 @@ void THD::restore_backup_open_tables_state(Open_tables_state *backup) to be sure that it was properly cleaned up. */ DBUG_ASSERT(open_tables == 0 && temporary_tables == 0 && - handler_tables == 0 && derived_tables == 0 && + derived_tables == 0 && lock == 0 && locked_tables_mode == LTM_NONE && m_reprepare_observer == NULL); mdl_context.destroy(); - handler_mdl_context.destroy(); set_open_tables_state(backup); DBUG_VOID_RETURN; diff --git a/sql/sql_class.h b/sql/sql_class.h index ebea2041715..11e0010d85b 100644 --- a/sql/sql_class.h +++ b/sql/sql_class.h @@ -914,11 +914,6 @@ public: XXX Why are internal temporary tables added to this list? */ TABLE *temporary_tables; - /** - List of tables that were opened with HANDLER OPEN and are - still in use by this thread. 
- */ - TABLE *handler_tables; TABLE *derived_tables; /* During a MySQL session, one can lock tables in two modes: automatic @@ -985,7 +980,6 @@ public: uint state_flags; MDL_context mdl_context; - MDL_context handler_mdl_context; /** This constructor initializes Open_tables_state instance which can only @@ -1011,13 +1005,23 @@ public: void reset_open_tables_state(THD *thd) { - open_tables= temporary_tables= handler_tables= derived_tables= 0; + open_tables= temporary_tables= derived_tables= 0; extra_lock= lock= 0; locked_tables_mode= LTM_NONE; state_flags= 0U; m_reprepare_observer= NULL; mdl_context.init(thd); - handler_mdl_context.init(thd); + } + void enter_locked_tables_mode(enum_locked_tables_mode mode_arg) + { + DBUG_ASSERT(locked_tables_mode == LTM_NONE); + mdl_context.set_lt_or_ha_sentinel(); + locked_tables_mode= mode_arg; + } + void leave_locked_tables_mode() + { + locked_tables_mode= LTM_NONE; + mdl_context.clear_lt_or_ha_sentinel(); } }; @@ -1902,7 +1906,6 @@ public: bool slave_thread, one_shot_set; /* tells if current statement should binlog row-based(1) or stmt-based(0) */ bool current_stmt_binlog_row_based; - bool some_tables_deleted; bool last_cuted_field; bool no_errors, password; /** diff --git a/sql/sql_handler.cc b/sql/sql_handler.cc index 94f5e84fb10..ccfe21d1af5 100644 --- a/sql/sql_handler.cc +++ b/sql/sql_handler.cc @@ -33,27 +33,21 @@ */ /* - There are two containers holding information about open handler tables. - The first is 'thd->handler_tables'. It is a linked list of TABLE objects. - It is used like 'thd->open_tables' in the table cache. The trick is to - exchange these two lists during open and lock of tables. Thus the normal - table cache code can be used. - The second container is a HASH. It holds objects of the type TABLE_LIST. - Despite its name, no lists of tables but only single structs are hashed - (the 'next' pointer is always NULL). 
The reason for theis second container - is, that we want handler tables to survive FLUSH TABLE commands. A table - affected by FLUSH TABLE must be closed so that other threads are not - blocked by handler tables still in use. Since we use the normal table cache - functions with 'thd->handler_tables', the closed tables are removed from - this list. Hence we need the original open information for the handler - table in the case that it is used again. This information is handed over - to mysql_ha_open() as a TABLE_LIST. So we store this information in the - second container, where it is not affected by FLUSH TABLE. The second - container is implemented as a hash for performance reasons. Consequently, - we use it not only for re-opening a handler table, but also for the - HANDLER ... READ commands. For this purpose, we store a pointer to the - TABLE structure (in the first container) in the TBALE_LIST object in the - second container. When the table is flushed, the pointer is cleared. + The information about open HANDLER objects is stored in a HASH. + It holds objects of type TABLE_LIST, which are indexed by table + name/alias, and allows us to quickly find a HANDLER table for any + operation at hand - be it HANDLER READ or HANDLER CLOSE. + + It also allows us to maintain an "open" HANDLER even in cases + when there is no physically open cursor. E.g. a FLUSH TABLE + statement in this or some other connection demands that all open + HANDLERs against the flushed table are closed. In order to + preserve the information about an open HANDLER, we don't perform + a complete HANDLER CLOSE, but only close the TABLE object. The + corresponding TABLE_LIST is kept in the cache with 'table' + pointer set to NULL. The table will be reopened on next access + (this, however, leads to loss of cursor position, unless the + cursor points at the first record). 
*/ #include "mysql_priv.h" @@ -124,32 +118,19 @@ static void mysql_ha_hash_free(TABLE_LIST *tables) static void mysql_ha_close_table(THD *thd, TABLE_LIST *tables) { - TABLE **table_ptr; - MDL_ticket *mdl_ticket; - /* - Though we could take the table pointer from hash_tables->table, - we must follow the thd->handler_tables chain anyway, as we need the - address of the 'next' pointer referencing this table - for close_thread_table(). - */ - for (table_ptr= &(thd->handler_tables); - *table_ptr && (*table_ptr != tables->table); - table_ptr= &(*table_ptr)->next) - ; - - if (*table_ptr) + if (tables->table && !tables->table->s->tmp_table) { - (*table_ptr)->file->ha_index_or_rnd_end(); - mdl_ticket= (*table_ptr)->mdl_ticket; + /* Non temporary table. */ + tables->table->file->ha_index_or_rnd_end(); pthread_mutex_lock(&LOCK_open); - if (close_thread_table(thd, table_ptr)) + if (close_thread_table(thd, &tables->table)) { /* Tell threads waiting for refresh that something has happened */ broadcast_refresh(); } pthread_mutex_unlock(&LOCK_open); - thd->handler_mdl_context.release_lock(mdl_ticket); + thd->mdl_context.release_lock(tables->mdl_request.ticket); } else if (tables->table) { @@ -158,6 +139,7 @@ static void mysql_ha_close_table(THD *thd, TABLE_LIST *tables) table->file->ha_index_or_rnd_end(); table->query_id= thd->query_id; table->open_by_handler= 0; + mark_tmp_table_for_reuse(table); } /* Mark table as closed, ready for re-open if necessary. 
*/ @@ -195,7 +177,7 @@ bool mysql_ha_open(THD *thd, TABLE_LIST *tables, bool reopen) uint dblen, namelen, aliaslen, counter; bool error; TABLE *backup_open_tables; - MDL_context backup_mdl_context; + MDL_ticket *mdl_savepoint; DBUG_ENTER("mysql_ha_open"); DBUG_PRINT("enter",("'%s'.'%s' as '%s' reopen: %d", tables->db, tables->table_name, tables->alias, @@ -265,6 +247,8 @@ bool mysql_ha_open(THD *thd, TABLE_LIST *tables, bool reopen) memcpy(hash_tables->table_name, tables->table_name, namelen); memcpy(hash_tables->alias, tables->alias, aliaslen); hash_tables->mdl_request.init(MDL_key::TABLE, db, name, MDL_SHARED); + /* for now HANDLER can be used only for real TABLES */ + hash_tables->required_type= FRMTYPE_TABLE; /* add to hash */ if (my_hash_insert(&thd->handler_tables_hash, (uchar*) hash_tables)) @@ -283,16 +267,11 @@ bool mysql_ha_open(THD *thd, TABLE_LIST *tables, bool reopen) from open_tables(), thd->open_tables will contain only the opened table. - The thd->handler_tables list is kept as-is to avoid deadlocks if - open_table(), called by open_tables(), needs to back-off because - of a pending exclusive metadata lock or flush for the table being - opened. - See open_table() back-off comments for more details. */ backup_open_tables= thd->open_tables; thd->open_tables= NULL; - thd->mdl_context.backup_and_reset(&backup_mdl_context); + mdl_savepoint= thd->mdl_context.mdl_savepoint(); /* open_tables() will set 'hash_tables->table' if successful. @@ -300,53 +279,47 @@ bool mysql_ha_open(THD *thd, TABLE_LIST *tables, bool reopen) */ DBUG_ASSERT(! hash_tables->table); - /* for now HANDLER can be used only for real TABLES */ - hash_tables->required_type= FRMTYPE_TABLE; /* We use open_tables() here, rather than, say, open_ltable() or open_table() because we would like to be able to open a temporary table. */ error= open_tables(thd, &hash_tables, &counter, 0); - if (thd->open_tables) + + if (! error && + ! 
(hash_tables->table->file->ha_table_flags() & HA_CAN_SQL_HANDLER)) { - if (thd->open_tables->next) - { - /* - We opened something that is more than a single table. - This happens with MERGE engine. Don't try to link - this mess into thd->handler_tables list, close it - and report an error. We must do it right away - because mysql_ha_close_table(), called down the road, - can close a single table only. - */ - close_thread_tables(thd); - thd->mdl_context.release_all_locks(); - my_error(ER_ILLEGAL_HA, MYF(0), hash_tables->alias); - error= TRUE; - } - else - { - /* Merge the opened table into handler_tables list. */ - thd->open_tables->next= thd->handler_tables; - thd->handler_tables= thd->open_tables; - } + my_error(ER_ILLEGAL_HA, MYF(0), tables->alias); + error= TRUE; + } + if (!error && + hash_tables->mdl_request.ticket && + thd->mdl_context.has_lock(mdl_savepoint, + hash_tables->mdl_request.ticket)) + { + /* The ticket returned is within a savepoint. Make a copy. */ + error= thd->mdl_context.clone_ticket(&hash_tables->mdl_request); + hash_tables->table->mdl_ticket= hash_tables->mdl_request.ticket; } - thd->handler_mdl_context.merge(&thd->mdl_context); - - thd->open_tables= backup_open_tables; - thd->mdl_context.restore_from_backup(&backup_mdl_context); - if (error) - goto err; - - /* There can be only one table in '*tables'. */ - if (! (hash_tables->table->file->ha_table_flags() & HA_CAN_SQL_HANDLER)) { - my_error(ER_ILLEGAL_HA, MYF(0), tables->alias); - goto err; + close_thread_tables(thd); + thd->open_tables= backup_open_tables; + thd->mdl_context.rollback_to_savepoint(mdl_savepoint); + if (!reopen) + my_hash_delete(&thd->handler_tables_hash, (uchar*) hash_tables); + else + hash_tables->table= NULL; + DBUG_PRINT("exit",("ERROR")); + DBUG_RETURN(TRUE); } + thd->open_tables= backup_open_tables; + if (hash_tables->mdl_request.ticket) + thd->mdl_context. 
+ move_ticket_after_lt_or_ha_sentinel(hash_tables->mdl_request.ticket); + /* Assert that the above check prevent opening of views and merge tables. */ + DBUG_ASSERT(hash_tables->table->next == NULL); /* If it's a temp table, don't reset table->query_id as the table is being used by this handler. Otherwise, no meaning at all. @@ -357,14 +330,6 @@ bool mysql_ha_open(THD *thd, TABLE_LIST *tables, bool reopen) my_ok(thd); DBUG_PRINT("exit",("OK")); DBUG_RETURN(FALSE); - -err: - if (hash_tables->table) - mysql_ha_close_table(thd, hash_tables); - if (!reopen) - my_hash_delete(&thd->handler_tables_hash, (uchar*) hash_tables); - DBUG_PRINT("exit",("ERROR")); - DBUG_RETURN(TRUE); } @@ -496,59 +461,41 @@ retry: hash_tables->db, hash_tables->table_name, hash_tables->alias, table)); } - table->pos_in_table_list= tables; -#if MYSQL_VERSION_ID < 40100 - if (*tables->db && strcmp(table->table_cache_key, tables->db)) - { - DBUG_PRINT("info",("wrong db")); - table= NULL; - } -#endif } else table= NULL; if (!table) { -#if MYSQL_VERSION_ID < 40100 - char buff[MAX_DBKEY_LENGTH]; - if (*tables->db) - strxnmov(buff, sizeof(buff)-1, tables->db, ".", tables->table_name, - NullS); - else - strncpy(buff, tables->alias, sizeof(buff)); - my_error(ER_UNKNOWN_TABLE, MYF(0), buff, "HANDLER"); -#else my_error(ER_UNKNOWN_TABLE, MYF(0), tables->alias, "HANDLER"); -#endif goto err0; } - tables->table=table; /* save open_tables state */ backup_open_tables= thd->open_tables; + /* Always a one-element list, see mysql_ha_open(). */ + DBUG_ASSERT(hash_tables->table->next == NULL); /* mysql_lock_tables() needs thd->open_tables to be set correctly to - be able to handle aborts properly. When the abort happens, it's - safe to not protect thd->handler_tables because it won't close any - tables. + be able to handle aborts properly. 
*/ - thd->open_tables= thd->handler_tables; + thd->open_tables= hash_tables->table; + - lock= mysql_lock_tables(thd, &tables->table, 1, 0, &need_reopen); + lock= mysql_lock_tables(thd, &thd->open_tables, 1, 0, &need_reopen); - /* restore previous context */ + /* + In 5.1 and earlier, mysql_lock_tables() could replace the TABLE + object with another one (reopen it). This is no longer the case + with new MDL. + */ + DBUG_ASSERT(hash_tables->table == thd->open_tables); + /* Restore previous context. */ thd->open_tables= backup_open_tables; if (need_reopen) { mysql_ha_close_table(thd, hash_tables); - /* - The lock might have been aborted, we need to manually reset - thd->some_tables_deleted because handler's tables are closed - in a non-standard way. Otherwise we might loop indefinitely. - */ - thd->some_tables_deleted= 0; goto retry; } @@ -556,7 +503,8 @@ retry: goto err0; // mysql_lock_tables() printed error message already // Always read all columns - tables->table->read_set= &tables->table->s->all_set; + hash_tables->table->read_set= &hash_tables->table->s->all_set; + tables->table= hash_tables->table; if (cond) { @@ -811,6 +759,14 @@ void mysql_ha_flush(THD *thd) safe_mutex_assert_not_owner(&LOCK_open); + /* + Don't try to flush open HANDLERs when we're working with + system tables. The main MDL context is backed up and we can't + properly release HANDLER locks stored there. + */ + if (thd->state_flags & Open_tables_state::BACKUPS_AVAIL) + DBUG_VOID_RETURN; + for (uint i= 0; i < thd->handler_tables_hash.records; i++) { hash_tables= (TABLE_LIST*) my_hash_element(&thd->handler_tables_hash, i); @@ -818,9 +774,10 @@ void mysql_ha_flush(THD *thd) TABLE::mdl_ticket is 0 for temporary tables so we need extra check. 
*/ if (hash_tables->table && - (hash_tables->table->mdl_ticket && - hash_tables->table->mdl_ticket->has_pending_conflicting_lock() || - hash_tables->table->s->needs_reopen())) + ((hash_tables->table->mdl_ticket && + hash_tables->table->mdl_ticket->has_pending_conflicting_lock()) || + (!hash_tables->table->s->tmp_table && + hash_tables->table->s->needs_reopen()))) mysql_ha_close_table(thd, hash_tables); } diff --git a/sql/sql_insert.cc b/sql/sql_insert.cc index 9f8af87f1e2..40ef55423a9 100644 --- a/sql/sql_insert.cc +++ b/sql/sql_insert.cc @@ -3610,8 +3610,7 @@ static TABLE *create_table_from_items(THD *thd, HA_CREATE_INFO *create_info, /* mysql_lock_tables() below should never fail with request to reopen table since it won't wait for the table lock (we have exclusive metadata lock on - the table) and thus can't get aborted and since it ignores other threads - setting THD::some_tables_deleted thanks to MYSQL_LOCK_IGNORE_FLUSH. + the table) and thus can't get aborted. */ if (! ((*lock)= mysql_lock_tables(thd, &table, 1, MYSQL_LOCK_IGNORE_FLUSH, ¬_used)) || diff --git a/sql/sql_parse.cc b/sql/sql_parse.cc index 7fbf04c751f..79f10120268 100644 --- a/sql/sql_parse.cc +++ b/sql/sql_parse.cc @@ -1180,8 +1180,7 @@ bool dispatch_command(enum enum_server_command command, THD *thd, if (trans_commit_implicit(thd)) break; close_thread_tables(thd); - if (!thd->locked_tables_mode) - thd->mdl_context.release_all_locks(); + thd->mdl_context.release_transactional_locks(); if (check_global_access(thd,RELOAD_ACL)) break; general_log_print(thd, command, NullS); @@ -1211,8 +1210,7 @@ bool dispatch_command(enum enum_server_command command, THD *thd, if (trans_commit_implicit(thd)) break; close_thread_tables(thd); - if (!thd->locked_tables_mode) - thd->mdl_context.release_all_locks(); + thd->mdl_context.release_transactional_locks(); my_ok(thd); break; } @@ -1967,8 +1965,7 @@ mysql_execute_command(THD *thd) goto error; /* Close tables and release metadata locks. 
*/ close_thread_tables(thd); - if (!thd->locked_tables_mode) - thd->mdl_context.release_all_locks(); + thd->mdl_context.release_transactional_locks(); } /* @@ -3302,7 +3299,7 @@ end_with_restore_list: if (thd->options & OPTION_TABLE_LOCK) { trans_commit_implicit(thd); - thd->mdl_context.release_all_locks(); + thd->mdl_context.release_transactional_locks(); thd->options&= ~(OPTION_TABLE_LOCK); } if (thd->global_read_lock) @@ -3311,11 +3308,19 @@ end_with_restore_list: break; case SQLCOM_LOCK_TABLES: thd->locked_tables_list.unlock_locked_tables(thd); + /* + As of 5.5, entering LOCK TABLES mode implicitly closes all + open HANDLERs. Both HANDLER code and LOCK TABLES mode use + the sentinel mechanism in MDL subsystem and thus could not be + used at the same time. All HANDLER operations are prohibited + under LOCK TABLES anyway. + */ + mysql_ha_cleanup(thd); /* we must end the trasaction first, regardless of anything */ if (trans_commit_implicit(thd)) goto error; /* release transactional metadata locks. */ - thd->mdl_context.release_all_locks(); + thd->mdl_context.release_transactional_locks(); if (check_table_access(thd, LOCK_TABLES_ACL | SELECT_ACL, all_tables, FALSE, UINT_MAX, FALSE)) goto error; @@ -3354,7 +3359,7 @@ end_with_restore_list: */ close_thread_tables(thd); DBUG_ASSERT(!thd->locked_tables_mode); - thd->mdl_context.release_all_locks(); + thd->mdl_context.release_transactional_locks(); thd->options&= ~(OPTION_TABLE_LOCK); } else @@ -3845,8 +3850,7 @@ end_with_restore_list: thd->locked_tables_mode == LTM_LOCK_TABLES); if (trans_commit(thd)) goto error; - if (!thd->locked_tables_mode) - thd->mdl_context.release_all_locks(); + thd->mdl_context.release_transactional_locks(); /* Begin transaction with the same isolation level. 
*/ if (lex->tx_chain && trans_begin(thd)) goto error; @@ -3860,8 +3864,7 @@ end_with_restore_list: thd->locked_tables_mode == LTM_LOCK_TABLES); if (trans_rollback(thd)) goto error; - if (!thd->locked_tables_mode) - thd->mdl_context.release_all_locks(); + thd->mdl_context.release_transactional_locks(); /* Begin transaction with the same isolation level. */ if (lex->tx_chain && trans_begin(thd)) goto error; @@ -4212,8 +4215,7 @@ create_sp_error: close_thread_tables(thd); - if (!thd->locked_tables_mode) - thd->mdl_context.release_all_locks(); + thd->mdl_context.release_transactional_locks(); #ifndef NO_EMBEDDED_ACCESS_CHECKS if (sp_automatic_privileges && !opt_noacl && @@ -4394,15 +4396,13 @@ create_sp_error: case SQLCOM_XA_COMMIT: if (trans_xa_commit(thd)) goto error; - if (!thd->locked_tables_mode) - thd->mdl_context.release_all_locks(); + thd->mdl_context.release_transactional_locks(); my_ok(thd); break; case SQLCOM_XA_ROLLBACK: if (trans_xa_rollback(thd)) goto error; - if (!thd->locked_tables_mode) - thd->mdl_context.release_all_locks(); + thd->mdl_context.release_transactional_locks(); my_ok(thd); break; case SQLCOM_XA_RECOVER: @@ -4555,11 +4555,10 @@ finish: thd->is_error() ? trans_rollback_stmt(thd) : trans_commit_stmt(thd); /* Commit the normal transaction if one is active. */ trans_commit_implicit(thd); + thd->stmt_da->can_overwrite_status= FALSE; /* Close tables and release metadata locks. 
*/ close_thread_tables(thd); - if (!thd->locked_tables_mode) - thd->mdl_context.release_all_locks(); - thd->stmt_da->can_overwrite_status= FALSE; + thd->mdl_context.release_transactional_locks(); } DBUG_RETURN(res || thd->is_error()); @@ -6539,7 +6538,9 @@ bool reload_acl_and_cache(THD *thd, ulong options, TABLE_LIST *tables, } #endif /*HAVE_QUERY_CACHE*/ - DBUG_ASSERT(!thd || thd->locked_tables_mode || !thd->mdl_context.has_locks()); + DBUG_ASSERT(!thd || thd->locked_tables_mode || + !thd->mdl_context.has_locks() || + thd->handler_tables_hash.records); /* Note that if REFRESH_READ_LOCK bit is set then REFRESH_TABLES is set too diff --git a/sql/sql_plist.h b/sql/sql_plist.h index b0a0bb016d0..94e437362a9 100644 --- a/sql/sql_plist.h +++ b/sql/sql_plist.h @@ -71,6 +71,36 @@ public: first= a; *B::prev_ptr(a)= &first; } + inline void push_back(T *a) + { + insert_after(back(), a); + } + inline T *back() + { + T *t= front(); + if (t) + { + while (*B::next_ptr(t)) + t= *B::next_ptr(t); + } + return t; + } + inline void insert_after(T *pos, T *a) + { + if (pos == NULL) + push_front(a); + else + { + *B::next_ptr(a)= *B::next_ptr(pos); + *B::prev_ptr(a)= B::next_ptr(pos); + *B::next_ptr(pos)= a; + if (*B::next_ptr(a)) + { + T *old_next= *B::next_ptr(a); + *B::prev_ptr(old_next)= B::next_ptr(a); + } + } + } inline void remove(T *a) { T *next= *B::next_ptr(a); @@ -78,8 +108,8 @@ public: *B::prev_ptr(next)= *B::prev_ptr(a); **B::prev_ptr(a)= next; } - inline T* head() { return first; } - inline const T *head() const { return first; } + inline T* front() { return first; } + inline const T *front() const { return first; } void swap(I_P_List<T,B> &rhs) { swap_variables(T *, first, rhs.first); @@ -106,6 +136,7 @@ class I_P_List_iterator T *current; public: I_P_List_iterator(I_P_List<T, B> &a) : list(&a), current(a.first) {} + I_P_List_iterator(I_P_List<T, B> &a, T* current_arg) : list(&a), current(current_arg) {} inline void init(I_P_List<T, B> &a) { list= &a; @@ -118,6 +149,11 @@ 
public: current= *B::next_ptr(current); return result; } + inline T* operator++() + { + current= *B::next_ptr(current); + return current; + } inline void rewind() { current= list->first; diff --git a/sql/sql_prepare.cc b/sql/sql_prepare.cc index 27fdd1e2a8d..700017f2b3e 100644 --- a/sql/sql_prepare.cc +++ b/sql/sql_prepare.cc @@ -3193,8 +3193,7 @@ bool Prepared_statement::prepare(const char *packet, uint packet_len) Marker used to release metadata locks acquired while the prepared statement is being checked. */ - if (thd->in_multi_stmt_transaction()) - mdl_savepoint= thd->mdl_context.mdl_savepoint(); + mdl_savepoint= thd->mdl_context.mdl_savepoint(); /* The only case where we should have items in the thd->free_list is @@ -3220,13 +3219,11 @@ bool Prepared_statement::prepare(const char *packet, uint packet_len) lex_end(lex); cleanup_stmt(); /* - If not inside a multi-statement transaction, the metadata locks have - already been released and the rollback_to_savepoint is a nop. - Otherwise, release acquired locks -- a NULL mdl_savepoint means that - all locks are going to be released or that the transaction didn't - own any locks. + If not inside a multi-statement transaction, the metadata + locks have already been released and our savepoint points + to ticket which has been released as well. 
*/ - if (!thd->locked_tables_mode) + if (thd->in_multi_stmt_transaction()) thd->mdl_context.rollback_to_savepoint(mdl_savepoint); thd->restore_backup_statement(this, &stmt_backup); thd->stmt_arena= old_stmt_arena; diff --git a/sql/sql_servers.cc b/sql/sql_servers.cc index 464a70e4175..4d5a4f41849 100644 --- a/sql/sql_servers.cc +++ b/sql/sql_servers.cc @@ -247,8 +247,7 @@ bool servers_reload(THD *thd) end: trans_commit_implicit(thd); close_thread_tables(thd); - if (!thd->locked_tables_mode) - thd->mdl_context.release_all_locks(); + thd->mdl_context.release_transactional_locks(); DBUG_PRINT("info", ("unlocking servers_cache")); rw_unlock(&THR_LOCK_servers); DBUG_RETURN(return_val); diff --git a/sql/sql_table.cc b/sql/sql_table.cc index 941352cb963..3b87a4dd6e8 100644 --- a/sql/sql_table.cc +++ b/sql/sql_table.cc @@ -4714,8 +4714,7 @@ static bool mysql_admin_table(THD* thd, TABLE_LIST* tables, trans_commit_stmt(thd); trans_commit(thd); close_thread_tables(thd); - if (!thd->locked_tables_mode) - thd->mdl_context.release_all_locks(); + thd->mdl_context.release_transactional_locks(); lex->reset_query_tables_list(FALSE); table->table=0; // For query cache if (protocol->write()) @@ -4766,8 +4765,7 @@ static bool mysql_admin_table(THD* thd, TABLE_LIST* tables, trans_rollback_stmt(thd); trans_rollback(thd); close_thread_tables(thd); - if (!thd->locked_tables_mode) - thd->mdl_context.release_all_locks(); + thd->mdl_context.release_transactional_locks(); tmp_disable_binlog(thd); // binlogging is done by caller if wanted result_code= mysql_recreate_table(thd, table); reenable_binlog(thd); @@ -4883,8 +4881,7 @@ send_result_message: trans_commit_stmt(thd); trans_commit(thd); close_thread_tables(thd); - if (!thd->locked_tables_mode) - thd->mdl_context.release_all_locks(); + thd->mdl_context.release_transactional_locks(); DEBUG_SYNC(thd, "ha_admin_try_alter"); protocol->store(STRING_WITH_LEN("note"), system_charset_info); protocol->store(STRING_WITH_LEN( @@ -4910,8 +4907,7 @@ 
send_result_message: trans_commit_stmt(thd); trans_commit(thd); close_thread_tables(thd); - if (!thd->locked_tables_mode) - thd->mdl_context.release_all_locks(); + thd->mdl_context.release_transactional_locks(); if (!result_code) // recreation went ok { /* Clear the ticket released in close_thread_tables(). */ @@ -7429,7 +7425,6 @@ end_temporary: (ulong) (copied + deleted), (ulong) deleted, (ulong) thd->warning_info->statement_warn_count()); my_ok(thd, copied + deleted, 0L, tmp_name); - thd->some_tables_deleted=0; DBUG_RETURN(FALSE); err_new_table_cleanup: diff --git a/sql/transaction.cc b/sql/transaction.cc index d1c7244ba83..1ca455028f0 100644 --- a/sql/transaction.cc +++ b/sql/transaction.cc @@ -103,7 +103,7 @@ bool trans_begin(THD *thd, uint flags) Release transactional metadata locks only after the transaction has been committed. */ - thd->mdl_context.release_all_locks(); + thd->mdl_context.release_transactional_locks(); thd->options|= OPTION_BEGIN; thd->server_status|= SERVER_STATUS_IN_TRANS; @@ -341,6 +341,10 @@ bool trans_savepoint(THD *thd, LEX_STRING name) Remember the last acquired lock before the savepoint was set. This is used as a marker to only release locks acquired after the setting of this savepoint. + Note: this works just fine if we're under LOCK TABLES, + since mdl_savepoint() is guaranteed to be beyond + the last locked table. This allows to release some + locks acquired during LOCK TABLES. */ newsv->mdl_savepoint = thd->mdl_context.mdl_savepoint(); @@ -388,8 +392,10 @@ bool trans_rollback_to_savepoint(THD *thd, LEX_STRING name) thd->transaction.savepoints= sv; - /* Release metadata locks that were acquired during this savepoint unit. */ - if (!res && !thd->locked_tables_mode) + /* + Release metadata locks that were acquired during this savepoint unit. + */ + if (!res) thd->mdl_context.rollback_to_savepoint(sv->mdl_savepoint); DBUG_RETURN(test(res)); |