author     Marko Mäkelä <marko.makela@mariadb.com>    2021-08-31 13:54:44 +0300
committer  Marko Mäkelä <marko.makela@mariadb.com>    2021-08-31 13:54:44 +0300
commit     c5fd9aa562fb15e8d6ededceccbec0c9792a3243 (patch)
tree       cbfeec6848de679f26aac4b7d5a686004f692459
parent     094de71742fbed4d49d38017d56f13100050c747 (diff)
download   mariadb-git-c5fd9aa562fb15e8d6ededceccbec0c9792a3243.tar.gz
MDEV-25919: Lock tables before acquiring dict_sys.latch
In commit 1bd681c8b3c5213ce1f7976940a7dc38b48a0d39 (MDEV-25506 part 3)
we introduced a "fake instant timeout" when a transaction would wait
for a table or record lock while holding dict_sys.latch. This
prevented a deadlock of the server but could cause bogus errors for
operations on the InnoDB persistent statistics tables.

A better fix is to ensure that whenever a transaction is being
executed in the InnoDB internal SQL parser (which will for now
require dict_sys.latch to be held), it will already have acquired all
locks that could be required for the execution. So, we will acquire
the following locks upfront, before acquiring dict_sys.latch:

(1) MDL on the affected user table (acquired by the SQL layer)
(2) If applicable (not for RENAME TABLE): InnoDB table lock
(3) If persistent statistics are going to be modified:
(3.a) MDL_SHARED on mysql.innodb_table_stats, mysql.innodb_index_stats
(3.b) exclusive table locks on the statistics tables
(4) Exclusive table locks on the InnoDB data dictionary tables
(not needed in ANALYZE TABLE and the like)

Note: Acquiring exclusive locks on the statistics tables may cause
more locking conflicts between concurrent DDL operations. Notably,
RENAME TABLE will lock the statistics tables even if no persistent
statistics are enabled for the table. DROP DATABASE will only acquire
locks on statistics tables if persistent statistics are enabled for
the tables on which the SQL layer is invoking
ha_innobase::delete_table(). For any "garbage collection" in
innodb_drop_database(), a timeout while acquiring locks on the
statistics tables will result in any statistics not being deleted for
any tables that the SQL layer did not know about.

If innodb_defragment=ON, information may be written to the statistics
tables even for tables for which InnoDB persistent statistics are
disabled. But, DROP TABLE will no longer attempt to delete that
information if persistent statistics are not enabled for the table.

This change should also fix the hangs related to InnoDB persistent
statistics and STATS_AUTO_RECALC (MDEV-15020), as well as a bug where
running ALTER TABLE on the statistics tables concurrently with ALTER
TABLE on other InnoDB tables could cause trouble.

lock_rec_enqueue_waiting(), lock_table_enqueue_waiting(): Do not
issue a fake instant timeout error when the transaction is holding
dict_sys.latch. Instead, assert that the dict_sys.latch is never
being held here.

lock_sys_tables(): A new function to acquire exclusive locks on all
dictionary tables, in case DROP TABLE or a similar operation is being
executed. Locking non-hard-coded tables is optional in order to avoid
a crash in row_merge_drop_temp_indexes(). The SYS_VIRTUAL table was
introduced in MySQL 5.7 and MariaDB Server 10.2. Normally, we require
all these dictionary tables to exist before executing any DDL, but
the function row_merge_drop_temp_indexes() is an exception. When
upgrading from MariaDB Server 10.1 or MySQL 5.6 or earlier, the table
SYS_VIRTUAL would not exist at this point.

ha_innobase::commit_inplace_alter_table(): Invoke log_write_up_to()
while not holding dict_sys.latch.

dict_sys_t::remove(), dict_table_close(): No longer try to drop index
stubs that were left behind by aborted online ADD INDEX. Such indexes
should be dropped from the InnoDB data dictionary by
row_merge_drop_indexes() as part of the failed DDL operation. Stubs
for aborted indexes may only be left behind in the data dictionary
cache.

dict_stats_fetch_from_ps(): Use a normal read-only transaction.
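The acquisition order in points (1)-(4) above can be summarized in a
schematic sketch. This is not code from the patch: ddl_lock_upfront()
is a hypothetical helper name and the exact signature of
lock_sys_tables() is assumed; the individual calls
(trx_start_for_ddl(), lock_table_for_trx(),
row_mysql_lock_data_dictionary()) are the ones this change uses.

/* Hedged sketch (assumed signatures): the locks that a DROP TABLE-like
   DDL operation now acquires before taking the exclusive dict_sys.latch. */
static dberr_t ddl_lock_upfront(trx_t *trx, dict_table_t *user_table,
                                dict_table_t *table_stats,
                                dict_table_t *index_stats)
{
  /* (1) MDL on the user table was already acquired by the SQL layer. */
  trx_start_for_ddl(trx);
  /* (2) InnoDB table lock on the affected user table. */
  dberr_t err= lock_table_for_trx(user_table, trx, LOCK_X);
  /* (3.b) Exclusive locks on the persistent statistics tables;
     the MDL_SHARED tickets of (3.a) were acquired beforehand. */
  if (err == DB_SUCCESS)
    err= lock_table_for_trx(table_stats, trx, LOCK_X);
  if (err == DB_SUCCESS)
    err= lock_table_for_trx(index_stats, trx, LOCK_X);
  /* (4) Exclusive locks on SYS_TABLES, SYS_COLUMNS, SYS_INDEXES, ... */
  if (err == DB_SUCCESS)
    err= lock_sys_tables(trx);
  /* Only after all lock waits have completed is dict_sys.latch taken. */
  if (err == DB_SUCCESS)
    row_mysql_lock_data_dictionary(trx);
  return err;
}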
ha_innobase::delete_table(), ha_innobase::truncate(), fts_lock_table():
While waiting for purge to stop using the table, do not hold
dict_sys.latch.

ha_innobase::delete_table(): Implement a work-around for the rollback
of ALTER TABLE...ADD PARTITION. MDL_EXCLUSIVE would not be held if
ALTER TABLE hits lock_wait_timeout while trying to upgrade the MDL
due to a conflicting LOCK TABLES, such as in the first ALTER TABLE in
the test case of Bug#53676 in parts.partition_special_innodb.
Therefore, we must explicitly stop purge, because it would not be
stopped by MDL.

dict_stats_func(), btr_defragment_chunk(): Allocate a THD so that we
can acquire MDL on the InnoDB persistent statistics tables.

mysqltest_embedded: Invoke ha_pre_shutdown() before free_used_memory()
in order to avoid ASAN heap-use-after-free related to acquire_thd().

trx_t::dict_operation_lock_mode: Changed the type to bool.

row_mysql_lock_data_dictionary(), row_mysql_unlock_data_dictionary():
Implemented as macros.

rollback_inplace_alter_table(): Apply an infinite timeout to lock
waits.

innodb_thd_increment_pending_ops(): Wrapper for
thd_increment_pending_ops(). Never attempt async operation for InnoDB
background threads, such as the trx_t::commit() in
dict_stats_process_entry_from_recalc_pool().

lock_sys_t::cancel(trx_t*): Make dictionary transactions immune to
KILL.

lock_wait(): Make dictionary transactions immune to KILL, and to lock
wait timeout when waiting for locks on dictionary tables.

parts.partition_special_innodb: Use lock_wait_timeout=0 to instantly
get ER_LOCK_WAIT_TIMEOUT.

main.mdl: Filter out MDL on the InnoDB persistent statistics tables.

Reviewed by: Thirunarayanan Balathandayuthapani
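The dict_stats_func()/btr_defragment_chunk() change above follows a
pattern that is visible in the diff below; the sketch here only
strings those calls together, and background_stats_task() is a
hypothetical name used for illustration.

static void background_stats_task()
{
  /* Allocate a THD for the lifetime of the background task so that
     MDL on mysql.innodb_table_stats and mysql.innodb_index_stats can
     be acquired on its behalf. */
  THD *thd= innobase_create_background_thd("InnoDB statistics");
  set_current_thd(thd);

  /* ... recalculate or save statistics; any MDL or table locks on
     the statistics tables are taken against this THD ... */

  set_current_thd(nullptr);
  innobase_destroy_background_thd(thd);
}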
-rw-r--r--  client/mysqltest.cc | 10
-rw-r--r--  mysql-test/main/mdl.result | 21
-rw-r--r--  mysql-test/main/mdl.test | 21
-rw-r--r--  mysql-test/suite/innodb/r/alter_partitioned.result | 4
-rw-r--r--  mysql-test/suite/innodb/r/innodb_defrag_stats.result | 1
-rw-r--r--  mysql-test/suite/innodb/r/innodb_stats_drop_locked.result | 1
-rw-r--r--  mysql-test/suite/innodb/t/alter_partitioned.test | 4
-rw-r--r--  mysql-test/suite/innodb/t/innodb_defrag_stats.test | 2
-rw-r--r--  mysql-test/suite/innodb/t/innodb_stats_drop_locked.test | 2
-rw-r--r--  mysql-test/suite/parts/r/partition_special_innodb.result | 60
-rw-r--r--  mysql-test/suite/parts/t/partition_special_innodb-master.opt | 1
-rw-r--r--  mysql-test/suite/parts/t/partition_special_innodb.test | 67
-rw-r--r--  sql/sql_class.cc | 6
-rw-r--r--  sql/sql_class.h | 2
-rw-r--r--  storage/innobase/btr/btr0defragment.cc | 15
-rw-r--r--  storage/innobase/dict/dict0crea.cc | 16
-rw-r--r--  storage/innobase/dict/dict0defrag_bg.cc | 241
-rw-r--r--  storage/innobase/dict/dict0dict.cc | 214
-rw-r--r--  storage/innobase/dict/dict0stats.cc | 103
-rw-r--r--  storage/innobase/dict/dict0stats_bg.cc | 10
-rw-r--r--  storage/innobase/dict/drop.cc | 6
-rw-r--r--  storage/innobase/fts/fts0fts.cc | 12
-rw-r--r--  storage/innobase/fts/fts0opt.cc | 2
-rw-r--r--  storage/innobase/handler/ha_innodb.cc | 368
-rw-r--r--  storage/innobase/handler/handler0alter.cc | 277
-rw-r--r--  storage/innobase/handler/i_s.cc | 14
-rw-r--r--  storage/innobase/include/dict0defrag_bg.h | 7
-rw-r--r--  storage/innobase/include/dict0dict.h | 28
-rw-r--r--  storage/innobase/include/dict0mem.h | 2
-rw-r--r--  storage/innobase/include/dict0stats.h | 17
-rw-r--r--  storage/innobase/include/ha_prototypes.h | 10
-rw-r--r--  storage/innobase/include/lock0lock.h | 17
-rw-r--r--  storage/innobase/include/row0merge.h | 11
-rw-r--r--  storage/innobase/include/row0mysql.h | 40
-rw-r--r--  storage/innobase/include/row0purge.h | 2
-rw-r--r--  storage/innobase/include/trx0trx.h | 15
-rw-r--r--  storage/innobase/lock/lock0lock.cc | 103
-rw-r--r--  storage/innobase/pars/pars0pars.cc | 2
-rw-r--r--  storage/innobase/row/row0ftsort.cc | 2
-rw-r--r--  storage/innobase/row/row0import.cc | 85
-rw-r--r--  storage/innobase/row/row0ins.cc | 2
-rw-r--r--  storage/innobase/row/row0merge.cc | 27
-rw-r--r--  storage/innobase/row/row0mysql.cc | 56
-rw-r--r--  storage/innobase/row/row0purge.cc | 4
-rw-r--r--  storage/innobase/row/row0uins.cc | 19
-rw-r--r--  storage/innobase/row/row0umod.cc | 7
-rw-r--r--  storage/innobase/row/row0upd.cc | 4
-rw-r--r--  storage/innobase/trx/trx0trx.cc | 24
48 files changed, 1111 insertions, 853 deletions
diff --git a/client/mysqltest.cc b/client/mysqltest.cc
index 5008a048148..1979214de17 100644
--- a/client/mysqltest.cc
+++ b/client/mysqltest.cc
@@ -1464,8 +1464,18 @@ void free_used_memory()
}
+#ifdef EMBEDDED_LIBRARY
+void ha_pre_shutdown();
+#endif
+
+
ATTRIBUTE_NORETURN static void cleanup_and_exit(int exit_code)
{
+#ifdef EMBEDDED_LIBRARY
+ if (server_initialized)
+ ha_pre_shutdown();
+#endif
+
free_used_memory();
/* Only call mysql_server_end if mysql_server_init has been called */
diff --git a/mysql-test/main/mdl.result b/mysql-test/main/mdl.result
index fbf80312ac0..343895803b2 100644
--- a/mysql-test/main/mdl.result
+++ b/mysql-test/main/mdl.result
@@ -8,20 +8,23 @@
CREATE TABLE t1(a INT) ENGINE=InnoDB;
CREATE TABLE t3(a INT) ENGINE=myisam;
LOCK TABLES t1 WRITE CONCURRENT, t1 AS t2 READ;
-SELECT LOCK_MODE, LOCK_TYPE, TABLE_SCHEMA, TABLE_NAME FROM information_schema.metadata_lock_info;
+SELECT LOCK_MODE, LOCK_TYPE, TABLE_SCHEMA, TABLE_NAME FROM information_schema.metadata_lock_info
+WHERE TABLE_NAME NOT LIKE 'innodb_%_stats';
LOCK_MODE LOCK_TYPE TABLE_SCHEMA TABLE_NAME
MDL_BACKUP_TRANS_DML Backup lock
MDL_SHARED_NO_READ_WRITE Table metadata lock test t1
UNLOCK TABLES;
LOCK TABLES t1 AS t2 READ, t1 WRITE CONCURRENT;
-SELECT LOCK_MODE, LOCK_TYPE, TABLE_SCHEMA, TABLE_NAME FROM information_schema.metadata_lock_info;
+SELECT LOCK_MODE, LOCK_TYPE, TABLE_SCHEMA, TABLE_NAME FROM information_schema.metadata_lock_info
+WHERE TABLE_NAME NOT LIKE 'innodb_%_stats';
LOCK_MODE LOCK_TYPE TABLE_SCHEMA TABLE_NAME
MDL_BACKUP_TRANS_DML Backup lock
MDL_SHARED_WRITE Table metadata lock test t1
MDL_SHARED_READ_ONLY Table metadata lock test t1
UNLOCK TABLES;
LOCK TABLES t1 WRITE CONCURRENT, t3 WRITE;
-SELECT LOCK_MODE, LOCK_TYPE, TABLE_SCHEMA, TABLE_NAME FROM information_schema.metadata_lock_info;
+SELECT LOCK_MODE, LOCK_TYPE, TABLE_SCHEMA, TABLE_NAME FROM information_schema.metadata_lock_info
+WHERE TABLE_NAME NOT LIKE 'innodb_%_stats';
LOCK_MODE LOCK_TYPE TABLE_SCHEMA TABLE_NAME
MDL_BACKUP_DDL Backup lock
MDL_BACKUP_DML Backup lock
@@ -30,7 +33,8 @@ MDL_SHARED_NO_READ_WRITE Table metadata lock test t3
MDL_INTENTION_EXCLUSIVE Schema metadata lock test
UNLOCK TABLES;
LOCK TABLES t3 WRITE, t1 WRITE CONCURRENT;
-SELECT LOCK_MODE, LOCK_TYPE, TABLE_SCHEMA, TABLE_NAME FROM information_schema.metadata_lock_info;
+SELECT LOCK_MODE, LOCK_TYPE, TABLE_SCHEMA, TABLE_NAME FROM information_schema.metadata_lock_info
+WHERE TABLE_NAME NOT LIKE 'innodb_%_stats';
LOCK_MODE LOCK_TYPE TABLE_SCHEMA TABLE_NAME
MDL_BACKUP_DDL Backup lock
MDL_BACKUP_DML Backup lock
@@ -39,7 +43,8 @@ MDL_SHARED_NO_READ_WRITE Table metadata lock test t3
MDL_INTENTION_EXCLUSIVE Schema metadata lock test
UNLOCK TABLES;
LOCK TABLES t1 WRITE, mysql.user WRITE;
-SELECT LOCK_MODE, LOCK_TYPE, TABLE_SCHEMA, TABLE_NAME FROM information_schema.metadata_lock_info;
+SELECT LOCK_MODE, LOCK_TYPE, TABLE_SCHEMA, TABLE_NAME FROM information_schema.metadata_lock_info
+WHERE TABLE_NAME NOT LIKE 'innodb_%_stats';
LOCK_MODE LOCK_TYPE TABLE_SCHEMA TABLE_NAME
MDL_BACKUP_DDL Backup lock
MDL_SHARED_NO_READ_WRITE Table metadata lock mysql user
@@ -67,7 +72,8 @@ connection locker;
insert into t1 values (1);
connection default;
connection default;
-SELECT LOCK_MODE, LOCK_TYPE, TABLE_SCHEMA, TABLE_NAME FROM information_schema.metadata_lock_info;
+SELECT LOCK_MODE, LOCK_TYPE, TABLE_SCHEMA, TABLE_NAME FROM information_schema.metadata_lock_info
+WHERE TABLE_NAME NOT LIKE 'innodb_%_stats';
LOCK_MODE LOCK_TYPE TABLE_SCHEMA TABLE_NAME
MDL_BACKUP_FTWRL2 Backup lock
MDL_SHARED_WRITE Table metadata lock test t1
@@ -80,7 +86,8 @@ connection locker;
insert into t3 values (2);
connection default;
connection default;
-SELECT LOCK_MODE, LOCK_TYPE, TABLE_SCHEMA, TABLE_NAME FROM information_schema.metadata_lock_info;
+SELECT LOCK_MODE, LOCK_TYPE, TABLE_SCHEMA, TABLE_NAME FROM information_schema.metadata_lock_info
+WHERE TABLE_NAME NOT LIKE 'innodb_%_stats';
LOCK_MODE LOCK_TYPE TABLE_SCHEMA TABLE_NAME
MDL_BACKUP_FTWRL2 Backup lock
MDL_SHARED_WRITE Table metadata lock test t3
diff --git a/mysql-test/main/mdl.test b/mysql-test/main/mdl.test
index 0c1b7a13a0c..f7fac0a062d 100644
--- a/mysql-test/main/mdl.test
+++ b/mysql-test/main/mdl.test
@@ -12,19 +12,24 @@
CREATE TABLE t1(a INT) ENGINE=InnoDB;
CREATE TABLE t3(a INT) ENGINE=myisam;
LOCK TABLES t1 WRITE CONCURRENT, t1 AS t2 READ;
-SELECT LOCK_MODE, LOCK_TYPE, TABLE_SCHEMA, TABLE_NAME FROM information_schema.metadata_lock_info;
+SELECT LOCK_MODE, LOCK_TYPE, TABLE_SCHEMA, TABLE_NAME FROM information_schema.metadata_lock_info
+WHERE TABLE_NAME NOT LIKE 'innodb_%_stats';
UNLOCK TABLES;
LOCK TABLES t1 AS t2 READ, t1 WRITE CONCURRENT;
-SELECT LOCK_MODE, LOCK_TYPE, TABLE_SCHEMA, TABLE_NAME FROM information_schema.metadata_lock_info;
+SELECT LOCK_MODE, LOCK_TYPE, TABLE_SCHEMA, TABLE_NAME FROM information_schema.metadata_lock_info
+WHERE TABLE_NAME NOT LIKE 'innodb_%_stats';
UNLOCK TABLES;
LOCK TABLES t1 WRITE CONCURRENT, t3 WRITE;
-SELECT LOCK_MODE, LOCK_TYPE, TABLE_SCHEMA, TABLE_NAME FROM information_schema.metadata_lock_info;
+SELECT LOCK_MODE, LOCK_TYPE, TABLE_SCHEMA, TABLE_NAME FROM information_schema.metadata_lock_info
+WHERE TABLE_NAME NOT LIKE 'innodb_%_stats';
UNLOCK TABLES;
LOCK TABLES t3 WRITE, t1 WRITE CONCURRENT;
-SELECT LOCK_MODE, LOCK_TYPE, TABLE_SCHEMA, TABLE_NAME FROM information_schema.metadata_lock_info;
+SELECT LOCK_MODE, LOCK_TYPE, TABLE_SCHEMA, TABLE_NAME FROM information_schema.metadata_lock_info
+WHERE TABLE_NAME NOT LIKE 'innodb_%_stats';
UNLOCK TABLES;
LOCK TABLES t1 WRITE, mysql.user WRITE;
-SELECT LOCK_MODE, LOCK_TYPE, TABLE_SCHEMA, TABLE_NAME FROM information_schema.metadata_lock_info;
+SELECT LOCK_MODE, LOCK_TYPE, TABLE_SCHEMA, TABLE_NAME FROM information_schema.metadata_lock_info
+WHERE TABLE_NAME NOT LIKE 'innodb_%_stats';
UNLOCK TABLES;
--error ER_CANT_LOCK_LOG_TABLE
LOCK TABLES mysql.general_log WRITE;
@@ -55,7 +60,8 @@ let $wait_condition=
where state = "Waiting for backup lock";
--source include/wait_condition.inc
connection default;
-SELECT LOCK_MODE, LOCK_TYPE, TABLE_SCHEMA, TABLE_NAME FROM information_schema.metadata_lock_info;
+SELECT LOCK_MODE, LOCK_TYPE, TABLE_SCHEMA, TABLE_NAME FROM information_schema.metadata_lock_info
+WHERE TABLE_NAME NOT LIKE 'innodb_%_stats';
unlock tables;
connection locker;
--reap
@@ -72,7 +78,8 @@ let $wait_condition=
where state = "Waiting for backup lock";
--source include/wait_condition.inc
connection default;
-SELECT LOCK_MODE, LOCK_TYPE, TABLE_SCHEMA, TABLE_NAME FROM information_schema.metadata_lock_info;
+SELECT LOCK_MODE, LOCK_TYPE, TABLE_SCHEMA, TABLE_NAME FROM information_schema.metadata_lock_info
+WHERE TABLE_NAME NOT LIKE 'innodb_%_stats';
unlock tables;
connection locker;
--reap
diff --git a/mysql-test/suite/innodb/r/alter_partitioned.result b/mysql-test/suite/innodb/r/alter_partitioned.result
index 6c32142db9a..e46fa66e43b 100644
--- a/mysql-test/suite/innodb/r/alter_partitioned.result
+++ b/mysql-test/suite/innodb/r/alter_partitioned.result
@@ -11,6 +11,8 @@ SAVEPOINT sp;
INSERT INTO t1 (pk) VALUES (1);
ROLLBACK TO SAVEPOINT sp;
connection default;
+SET @save_timeout=@@lock_wait_timeout;
+SET @save_innodb_timeout=@@innodb_lock_wait_timeout;
SET lock_wait_timeout=0;
SET innodb_lock_wait_timeout=0;
ALTER TABLE t1 PARTITION BY HASH(pk);
@@ -24,6 +26,8 @@ t1 CREATE TABLE `t1` (
connection con1;
COMMIT;
connection default;
+SET lock_wait_timeout=@save_timeout;
+SET innodb_lock_wait_timeout=@save_innodb_timeout;
ALTER TABLE t2 PARTITION BY HASH(pk);
disconnect con1;
connection default;
diff --git a/mysql-test/suite/innodb/r/innodb_defrag_stats.result b/mysql-test/suite/innodb/r/innodb_defrag_stats.result
index c2fd378cb4b..2dae62ebbda 100644
--- a/mysql-test/suite/innodb/r/innodb_defrag_stats.result
+++ b/mysql-test/suite/innodb/r/innodb_defrag_stats.result
@@ -116,6 +116,7 @@ select count(stat_value) > 0 from mysql.innodb_index_stats where table_name = 't
count(stat_value) > 0
1
# Clean up
+ALTER TABLE t2 STATS_PERSISTENT=1;
DROP TABLE t2;
select * from mysql.innodb_index_stats where table_name = 't2';
database_name table_name index_name last_update stat_name stat_value sample_size stat_description
diff --git a/mysql-test/suite/innodb/r/innodb_stats_drop_locked.result b/mysql-test/suite/innodb/r/innodb_stats_drop_locked.result
index 7a6d5c46aab..13f21463390 100644
--- a/mysql-test/suite/innodb/r/innodb_stats_drop_locked.result
+++ b/mysql-test/suite/innodb/r/innodb_stats_drop_locked.result
@@ -26,6 +26,7 @@ innodb_stats_drop_locked
innodb_stats_drop_locked
innodb_stats_drop_locked
connect con1,localhost,root,,;
+SET innodb_lock_wait_timeout=1;
ALTER TABLE innodb_stats_drop_locked DROP INDEX c_key;
ERROR HY000: Lock wait timeout exceeded; try restarting transaction
SHOW CREATE TABLE innodb_stats_drop_locked;
diff --git a/mysql-test/suite/innodb/t/alter_partitioned.test b/mysql-test/suite/innodb/t/alter_partitioned.test
index 4990c265b5e..aaada25dcbe 100644
--- a/mysql-test/suite/innodb/t/alter_partitioned.test
+++ b/mysql-test/suite/innodb/t/alter_partitioned.test
@@ -18,6 +18,8 @@ INSERT INTO t1 (pk) VALUES (1);
ROLLBACK TO SAVEPOINT sp;
--connection default
+SET @save_timeout=@@lock_wait_timeout;
+SET @save_innodb_timeout=@@innodb_lock_wait_timeout;
SET lock_wait_timeout=0;
SET innodb_lock_wait_timeout=0;
--error ER_LOCK_WAIT_TIMEOUT
@@ -27,6 +29,8 @@ SHOW CREATE TABLE t1;
--connection con1
COMMIT;
--connection default
+SET lock_wait_timeout=@save_timeout;
+SET innodb_lock_wait_timeout=@save_innodb_timeout;
ALTER TABLE t2 PARTITION BY HASH(pk);
# Cleanup
--disconnect con1
diff --git a/mysql-test/suite/innodb/t/innodb_defrag_stats.test b/mysql-test/suite/innodb/t/innodb_defrag_stats.test
index 248fd24f0c8..2dc5e653091 100644
--- a/mysql-test/suite/innodb/t/innodb_defrag_stats.test
+++ b/mysql-test/suite/innodb/t/innodb_defrag_stats.test
@@ -82,6 +82,8 @@ select count(stat_value) > 0 from mysql.innodb_index_stats where table_name = 't
select count(stat_value) > 0 from mysql.innodb_index_stats where table_name = 't2' and stat_name in ('n_leaf_pages_defrag');
--echo # Clean up
+# DROP TABLE will not touch persistent statistics if the table has none!
+ALTER TABLE t2 STATS_PERSISTENT=1;
DROP TABLE t2;
select * from mysql.innodb_index_stats where table_name = 't2';
diff --git a/mysql-test/suite/innodb/t/innodb_stats_drop_locked.test b/mysql-test/suite/innodb/t/innodb_stats_drop_locked.test
index ab4cc78b337..6532816bb37 100644
--- a/mysql-test/suite/innodb/t/innodb_stats_drop_locked.test
+++ b/mysql-test/suite/innodb/t/innodb_stats_drop_locked.test
@@ -23,6 +23,8 @@ WHERE table_name='innodb_stats_drop_locked'
FOR UPDATE;
-- connect (con1,localhost,root,,)
+SET innodb_lock_wait_timeout=1;
+
--error ER_LOCK_WAIT_TIMEOUT
ALTER TABLE innodb_stats_drop_locked DROP INDEX c_key;
diff --git a/mysql-test/suite/parts/r/partition_special_innodb.result b/mysql-test/suite/parts/r/partition_special_innodb.result
index 37191eae502..c36eda2345a 100644
--- a/mysql-test/suite/parts/r/partition_special_innodb.result
+++ b/mysql-test/suite/parts/r/partition_special_innodb.result
@@ -216,14 +216,12 @@ ENGINE = InnoDB
PARTITION BY HASH (a)
PARTITIONS 2;
connect con1, localhost, root,,;
-connect con2, localhost, root,,;
-connection con1;
SET autocommit=OFF;
START TRANSACTION;
INSERT INTO t1 VALUES (NULL, 'first row t2');
-connection con2;
+connect con2, localhost, root,,;
SET autocommit=OFF;
-SET SESSION lock_wait_timeout= 1;
+SET SESSION lock_wait_timeout= 0;
ALTER TABLE t1 AUTO_INCREMENT = 10;
ERROR HY000: Lock wait timeout exceeded; try restarting transaction
disconnect con2;
@@ -233,17 +231,12 @@ DROP TABLE t1;
#
# Bug#53676: Unexpected errors and possible table corruption on
# ADD PARTITION and LOCK TABLE
-connect con1,localhost,root,,;
CREATE TABLE t1 ( i INT NOT NULL AUTO_INCREMENT PRIMARY KEY, f INT )
ENGINE = InnoDB PARTITION BY HASH(i) PARTITIONS 2;
INSERT INTO t1 VALUES (2, 2), (3, 3), (4, 4), (5, 5);
-connect con2,localhost,root,,;
-SET lock_wait_timeout = 2;
-connection con1;
-#Connection 1 locks the table
LOCK TABLE t1 READ;
-connection con2;
-# Connection 2 tries to add partitions:
+connect con2,localhost,root,,;
+SET lock_wait_timeout = 0;
# First attempt: lock wait timeout (as expected)
ALTER TABLE t1 ADD PARTITION PARTITIONS 2;
ERROR HY000: Lock wait timeout exceeded; try restarting transaction
@@ -263,12 +256,10 @@ i f
3 3
4 4
5 5
-connection con1;
-# Connection 1 unlocks the table and locks it again:
+connection default;
UNLOCK TABLES;
LOCK TABLE t1 READ;
connection con2;
-# Connection 2 tries again to add partitions:
# Third attempt: says that the table does not exist
ALTER TABLE t1 ADD PARTITION PARTITIONS 2;
ERROR HY000: Lock wait timeout exceeded; try restarting transaction
@@ -276,22 +267,21 @@ ERROR HY000: Lock wait timeout exceeded; try restarting transaction
CHECK TABLE t1;
Table Op Msg_type Msg_text
test.t1 check status OK
-connection con1;
+connection default;
UNLOCK TABLES;
connection con2;
DROP TABLE t1;
-connection con1;
+connection default;
CREATE TABLE t2 ( i INT NOT NULL AUTO_INCREMENT PRIMARY KEY, f INT )
ENGINE = InnoDB PARTITION BY HASH(i) PARTITIONS 2;
-connection con2;
-SET lock_wait_timeout = 2;
-connection con1;
LOCK TABLE t2 READ;
connection con2;
+SET lock_wait_timeout = 0;
ALTER TABLE t2 ADD PARTITION PARTITIONS 2;
ERROR HY000: Lock wait timeout exceeded; try restarting transaction
+SET lock_wait_timeout = 2;
ALTER TABLE t2 ADD PARTITION PARTITIONS 2;
-connection con1;
+connection default;
UNLOCK TABLES;
connection con2;
connect con3,localhost,root,,;
@@ -301,31 +291,23 @@ test.t2 check status OK
SELECT * FROM t2;
i f
DROP TABLE t2;
-connection con1;
+connection default;
CREATE TABLE t3 ( i INT NOT NULL AUTO_INCREMENT PRIMARY KEY, f INT )
ENGINE = InnoDB PARTITION BY HASH(i) PARTITIONS 2;
-connection con2;
-SET lock_wait_timeout = 2;
-connection con1;
-# Connection 1 locks the table
LOCK TABLE t3 READ;
connection con2;
-# Connection 2 tries to add partitions (timeout):
+SET lock_wait_timeout = 0;
ALTER TABLE t3 ADD PARTITION PARTITIONS 2;
ERROR HY000: Lock wait timeout exceeded; try restarting transaction
connection con3;
-SET lock_wait_timeout = 2;
-# Connection 3 tries to add partitions (partition already exists):
+SET lock_wait_timeout = 0;
ALTER TABLE t3 ADD PARTITION PARTITIONS 2;
ERROR HY000: Lock wait timeout exceeded; try restarting transaction
connect con4,localhost,root,,;
-# Connection 4 tries to rename the table:
RENAME TABLE t3 TO t4;
-connection con1;
-# Connection 1 unlocks the table:
+connection default;
UNLOCK TABLES;
connection con4;
-# Connection 4 gets error on rename:
connect con5,localhost,root,,;
# SHOW TABLES returns the table (not renamed):
SHOW TABLES;
@@ -339,7 +321,6 @@ disconnect con5;
disconnect con4;
disconnect con3;
disconnect con2;
-disconnect con1;
connection default;
CREATE TABLE t1(
f1 INT, f2 VARCHAR(10) CHARSET ascii,
@@ -350,17 +331,12 @@ PARTITION p2 VALUES LESS THAN (100));
ALTER TABLE t1 convert to charset ascii collate ascii_bin, ALGORITHM=INSTANT;
DROP TABLE t1;
# Test WRITE LOCK.
-connect con1,localhost,root,,;
CREATE TABLE t1 ( i INT NOT NULL AUTO_INCREMENT PRIMARY KEY, f INT )
ENGINE = InnoDB PARTITION BY HASH(i) PARTITIONS 2;
INSERT INTO t1 VALUES (3, 3), (4, 4);
-connect con2,localhost,root,,;
-SET lock_wait_timeout = 2;
-connection con1;
-#Connection 1 locks the table
LOCK TABLE t1 WRITE;
-connection con2;
-# Check that we still can SELECT, but not insert/update/delete.
+connect con2,localhost,root,,;
+SET lock_wait_timeout = 0;
# Check that we only can select, not insert/update/delete.
INSERT INTO t1 VALUES (NULL, 1), (NULL, 2), (10, 10), (11, 11);
ERROR HY000: Lock wait timeout exceeded; try restarting transaction
@@ -370,9 +346,9 @@ DELETE FROM t1 WHERE i = 10;
ERROR HY000: Lock wait timeout exceeded; try restarting transaction
SELECT * FROM t1;
ERROR HY000: Lock wait timeout exceeded; try restarting transaction
-connection con1;
+connection default;
UNLOCK TABLES;
connection con2;
DROP TABLE t1;
-disconnect con1;
+disconnect con2;
connection default;
diff --git a/mysql-test/suite/parts/t/partition_special_innodb-master.opt b/mysql-test/suite/parts/t/partition_special_innodb-master.opt
deleted file mode 100644
index 79851bac7b2..00000000000
--- a/mysql-test/suite/parts/t/partition_special_innodb-master.opt
+++ /dev/null
@@ -1 +0,0 @@
---loose-innodb-lock-wait-timeout=2 --loose-innodb-file-per-table
diff --git a/mysql-test/suite/parts/t/partition_special_innodb.test b/mysql-test/suite/parts/t/partition_special_innodb.test
index a25feed08ef..6273cc5cb2a 100644
--- a/mysql-test/suite/parts/t/partition_special_innodb.test
+++ b/mysql-test/suite/parts/t/partition_special_innodb.test
@@ -59,16 +59,13 @@ PARTITION BY HASH (a)
PARTITIONS 2;
connect (con1, localhost, root,,);
-connect (con2, localhost, root,,);
-
---connection con1
SET autocommit=OFF;
START TRANSACTION;
INSERT INTO t1 VALUES (NULL, 'first row t2');
---connection con2
+connect (con2, localhost, root,,);
SET autocommit=OFF;
-SET SESSION lock_wait_timeout= 1;
+SET SESSION lock_wait_timeout= 0;
--error ER_LOCK_WAIT_TIMEOUT
ALTER TABLE t1 AUTO_INCREMENT = 10;
@@ -80,20 +77,14 @@ DROP TABLE t1;
--echo #
--echo # Bug#53676: Unexpected errors and possible table corruption on
--echo # ADD PARTITION and LOCK TABLE
---connect (con1,localhost,root,,)
CREATE TABLE t1 ( i INT NOT NULL AUTO_INCREMENT PRIMARY KEY, f INT )
ENGINE = InnoDB PARTITION BY HASH(i) PARTITIONS 2;
INSERT INTO t1 VALUES (2, 2), (3, 3), (4, 4), (5, 5);
---connect (con2,localhost,root,,)
-SET lock_wait_timeout = 2;
-
---connection con1
---echo #Connection 1 locks the table
LOCK TABLE t1 READ;
---connection con2
---echo # Connection 2 tries to add partitions:
+--connect (con2,localhost,root,,)
+SET lock_wait_timeout = 0;
--echo # First attempt: lock wait timeout (as expected)
--error ER_LOCK_WAIT_TIMEOUT
ALTER TABLE t1 ADD PARTITION PARTITIONS 2;
@@ -110,21 +101,18 @@ DELETE FROM t1 WHERE i = 10;
--sorted_result
SELECT * FROM t1;
---connection con1
---echo # Connection 1 unlocks the table and locks it again:
+--connection default
UNLOCK TABLES;
---real_sleep 1
LOCK TABLE t1 READ;
--connection con2
---echo # Connection 2 tries again to add partitions:
--echo # Third attempt: says that the table does not exist
--error ER_LOCK_WAIT_TIMEOUT
ALTER TABLE t1 ADD PARTITION PARTITIONS 2;
--echo # Check table returns the same (not after fixing bug#56172!)
CHECK TABLE t1;
---connection con1
+--connection default
UNLOCK TABLES;
--connection con2
@@ -134,23 +122,21 @@ DROP TABLE t1;
# Test2
---connection con1
+--connection default
CREATE TABLE t2 ( i INT NOT NULL AUTO_INCREMENT PRIMARY KEY, f INT )
ENGINE = InnoDB PARTITION BY HASH(i) PARTITIONS 2;
---connection con2
-SET lock_wait_timeout = 2;
-
---connection con1
LOCK TABLE t2 READ;
--connection con2
+SET lock_wait_timeout = 0;
--error ER_LOCK_WAIT_TIMEOUT
ALTER TABLE t2 ADD PARTITION PARTITIONS 2;
+SET lock_wait_timeout = 2;
send ALTER TABLE t2 ADD PARTITION PARTITIONS 2;
---connection con1
+--connection default
UNLOCK TABLES;
--connection con2
@@ -164,40 +150,30 @@ DROP TABLE t2;
# End of Test2
# Test #3
---connection con1
+--connection default
CREATE TABLE t3 ( i INT NOT NULL AUTO_INCREMENT PRIMARY KEY, f INT )
ENGINE = InnoDB PARTITION BY HASH(i) PARTITIONS 2;
---connection con2
-SET lock_wait_timeout = 2;
-
---connection con1
---echo # Connection 1 locks the table
LOCK TABLE t3 READ;
--connection con2
---echo # Connection 2 tries to add partitions (timeout):
+SET lock_wait_timeout = 0;
--error ER_LOCK_WAIT_TIMEOUT
ALTER TABLE t3 ADD PARTITION PARTITIONS 2;
--connection con3
-SET lock_wait_timeout = 2;
---echo # Connection 3 tries to add partitions (partition already exists):
+SET lock_wait_timeout = 0;
--error ER_LOCK_WAIT_TIMEOUT
ALTER TABLE t3 ADD PARTITION PARTITIONS 2;
--connect (con4,localhost,root,,)
---echo # Connection 4 tries to rename the table:
send RENAME TABLE t3 TO t4;
---connection con1
---real_sleep 1
---echo # Connection 1 unlocks the table:
+--connection default
UNLOCK TABLES;
--connection con4
---echo # Connection 4 gets error on rename:
--reap
--connect (con5,localhost,root,,)
@@ -212,7 +188,6 @@ DROP TABLE t4;
--disconnect con4
--disconnect con3
--disconnect con2
---disconnect con1
--connection default
# End of Test #3
@@ -227,20 +202,14 @@ ALTER TABLE t1 convert to charset ascii collate ascii_bin, ALGORITHM=INSTANT;
DROP TABLE t1;
--echo # Test WRITE LOCK.
---connect (con1,localhost,root,,)
CREATE TABLE t1 ( i INT NOT NULL AUTO_INCREMENT PRIMARY KEY, f INT )
ENGINE = InnoDB PARTITION BY HASH(i) PARTITIONS 2;
INSERT INTO t1 VALUES (3, 3), (4, 4);
---connect (con2,localhost,root,,)
-SET lock_wait_timeout = 2;
-
---connection con1
---echo #Connection 1 locks the table
LOCK TABLE t1 WRITE;
---connection con2
---echo # Check that we still can SELECT, but not insert/update/delete.
+--connect (con2,localhost,root,,)
+SET lock_wait_timeout = 0;
--echo # Check that we only can select, not insert/update/delete.
--error ER_LOCK_WAIT_TIMEOUT
INSERT INTO t1 VALUES (NULL, 1), (NULL, 2), (10, 10), (11, 11);
@@ -251,11 +220,11 @@ DELETE FROM t1 WHERE i = 10;
--error ER_LOCK_WAIT_TIMEOUT
SELECT * FROM t1;
---connection con1
+--connection default
UNLOCK TABLES;
--connection con2
DROP TABLE t1;
---disconnect con1
+--disconnect con2
--connection default
diff --git a/sql/sql_class.cc b/sql/sql_class.cc
index c26d8906cbd..a758196feea 100644
--- a/sql/sql_class.cc
+++ b/sql/sql_class.cc
@@ -5018,13 +5018,9 @@ void reset_thd(MYSQL_THD thd)
guarantees, in other words, server can't send OK packet
before modified data is durable in redo log.
*/
-extern "C" MYSQL_THD thd_increment_pending_ops(void)
+extern "C" void thd_increment_pending_ops(MYSQL_THD thd)
{
- THD *thd = current_thd;
- if (!thd)
- return NULL;
thd->async_state.inc_pending_ops();
- return thd;
}
/**
diff --git a/sql/sql_class.h b/sql/sql_class.h
index 2fecf60c086..b5874d2435a 100644
--- a/sql/sql_class.h
+++ b/sql/sql_class.h
@@ -2531,7 +2531,7 @@ struct thd_async_state
}
};
-extern "C" MYSQL_THD thd_increment_pending_ops(void);
+extern "C" void thd_increment_pending_ops(MYSQL_THD);
extern "C" void thd_decrement_pending_ops(MYSQL_THD);
diff --git a/storage/innobase/btr/btr0defragment.cc b/storage/innobase/btr/btr0defragment.cc
index fa0ad06d7ab..0e21acb4fda 100644
--- a/storage/innobase/btr/btr0defragment.cc
+++ b/storage/innobase/btr/btr0defragment.cc
@@ -36,6 +36,7 @@ Modified 30/07/2014 Jan Lindström jan.lindstrom@mariadb.com
#include "ibuf0ibuf.h"
#include "lock0lock.h"
#include "srv0start.h"
+#include "mysqld.h"
#include <list>
@@ -614,6 +615,9 @@ The state (current item) is stored in function parameter.
*/
static void btr_defragment_chunk(void*)
{
+ THD *thd = innobase_create_background_thd("InnoDB defragment");
+ set_current_thd(thd);
+
btr_defragment_item_t* item = nullptr;
mtr_t mtr;
@@ -622,7 +626,11 @@ static void btr_defragment_chunk(void*)
while (srv_shutdown_state == SRV_SHUTDOWN_NONE) {
if (!item) {
if (btr_defragment_wq.empty()) {
+release_and_exit:
mysql_mutex_unlock(&btr_defragment_mutex);
+func_exit:
+ set_current_thd(nullptr);
+ innobase_destroy_background_thd(thd);
return;
}
item = *btr_defragment_wq.begin();
@@ -651,7 +659,7 @@ processed:
int sleep_ms = (int)((srv_defragment_interval - elapsed) / 1000 / 1000);
if (sleep_ms) {
btr_defragment_timer->set_time(sleep_ms, 0);
- return;
+ goto func_exit;
}
}
log_free_check();
@@ -693,7 +701,8 @@ processed:
<< " index " << index->name()
<< " failed with error " << err;
} else {
- err = dict_stats_save_defrag_summary(index);
+ err = dict_stats_save_defrag_summary(index,
+ thd);
if (err != DB_SUCCESS) {
ib::error() << "Saving defragmentation summary for table "
@@ -711,5 +720,5 @@ processed:
}
}
- mysql_mutex_unlock(&btr_defragment_mutex);
+ goto release_and_exit;
}
diff --git a/storage/innobase/dict/dict0crea.cc b/storage/innobase/dict/dict0crea.cc
index eafc9bf7814..667a64f907a 100644
--- a/storage/innobase/dict/dict0crea.cc
+++ b/storage/innobase/dict/dict0crea.cc
@@ -33,6 +33,7 @@ Created 1/8/1996 Heikki Tuuri
#include "mach0data.h"
#include "dict0boot.h"
#include "dict0dict.h"
+#include "lock0lock.h"
#include "que0que.h"
#include "row0ins.h"
#include "row0mysql.h"
@@ -653,7 +654,7 @@ dict_build_index_def_step(
index = node->index;
table = dict_table_open_on_name(
- node->table_name, TRUE, FALSE, DICT_ERR_IGNORE_DROP);
+ node->table_name, true, DICT_ERR_IGNORE_DROP);
if (!table) {
return DB_TABLE_NOT_FOUND;
@@ -1377,7 +1378,18 @@ dberr_t dict_sys_t::create_or_check_sys_tables()
return DB_SUCCESS;
trx_t *trx= trx_create();
- trx->dict_operation= true;
+ trx_start_for_ddl(trx);
+
+ {
+ LockMutexGuard g{SRW_LOCK_CALL};
+ trx->mutex_lock();
+ lock_table_create(dict_sys.sys_tables, LOCK_X, trx);
+ lock_table_create(dict_sys.sys_columns, LOCK_X, trx);
+ lock_table_create(dict_sys.sys_indexes, LOCK_X, trx);
+ lock_table_create(dict_sys.sys_fields, LOCK_X, trx);
+ trx->mutex_unlock();
+ }
+
row_mysql_lock_data_dictionary(trx);
/* NOTE: when designing InnoDB's foreign key support in 2001, Heikki Tuuri
diff --git a/storage/innobase/dict/dict0defrag_bg.cc b/storage/innobase/dict/dict0defrag_bg.cc
index 44cc4289a67..8dd1a8601aa 100644
--- a/storage/innobase/dict/dict0defrag_bg.cc
+++ b/storage/innobase/dict/dict0defrag_bg.cc
@@ -29,6 +29,9 @@ Created 25/08/2016 Jan Lindström
#include "dict0defrag_bg.h"
#include "btr0btr.h"
#include "srv0start.h"
+#include "trx0trx.h"
+#include "lock0lock.h"
+#include "row0mysql.h"
static mysql_mutex_t defrag_pool_mutex;
@@ -200,44 +203,84 @@ static void dict_stats_process_entry_from_defrag_pool()
}
}
-/*****************************************************************//**
+/**
Get the first index that has been added for updating persistent defrag
stats and eventually save its stats. */
-void
-dict_defrag_process_entries_from_defrag_pool()
-/*==========================================*/
+void dict_defrag_process_entries_from_defrag_pool()
{
- while (defrag_pool.size()) {
- dict_stats_process_entry_from_defrag_pool();
- }
+ while (!defrag_pool.empty())
+ dict_stats_process_entry_from_defrag_pool();
}
/*********************************************************************//**
Save defragmentation result.
@return DB_SUCCESS or error code */
-dberr_t
-dict_stats_save_defrag_summary(
-/*============================*/
- dict_index_t* index) /*!< in: index */
+dberr_t dict_stats_save_defrag_summary(dict_index_t *index, THD *thd)
{
- dberr_t ret=DB_SUCCESS;
-
- if (dict_index_is_ibuf(index)) {
- return DB_SUCCESS;
- }
-
- dict_sys.lock(SRW_LOCK_CALL);
+ if (index->is_ibuf())
+ return DB_SUCCESS;
- ret = dict_stats_save_index_stat(index, time(NULL), "n_pages_freed",
- index->stat_defrag_n_pages_freed,
- NULL,
- "Number of pages freed during"
- " last defragmentation run.",
- NULL);
+ MDL_ticket *mdl_table= nullptr, *mdl_index= nullptr;
+ dict_table_t *table_stats= dict_table_open_on_name(TABLE_STATS_NAME, false,
+ DICT_ERR_IGNORE_NONE);
+ if (table_stats)
+ {
+ dict_sys.freeze(SRW_LOCK_CALL);
+ table_stats= dict_acquire_mdl_shared<false>(table_stats, thd, &mdl_table);
+ dict_sys.unfreeze();
+ }
+ if (!table_stats || strcmp(table_stats->name.m_name, TABLE_STATS_NAME))
+ {
+release_and_exit:
+ if (table_stats)
+ dict_table_close(table_stats, false, thd, mdl_table);
+ return DB_STATS_DO_NOT_EXIST;
+ }
- dict_sys.unlock();
+ dict_table_t *index_stats= dict_table_open_on_name(INDEX_STATS_NAME, false,
+ DICT_ERR_IGNORE_NONE);
+ if (index_stats)
+ {
+ dict_sys.freeze(SRW_LOCK_CALL);
+ index_stats= dict_acquire_mdl_shared<false>(index_stats, thd, &mdl_index);
+ dict_sys.unfreeze();
+ }
+ if (!index_stats)
+ goto release_and_exit;
+ if (strcmp(index_stats->name.m_name, INDEX_STATS_NAME))
+ {
+ dict_table_close(index_stats, false, thd, mdl_index);
+ goto release_and_exit;
+ }
- return (ret);
+ trx_t *trx= trx_create();
+ trx->mysql_thd= thd;
+ trx_start_internal(trx);
+ dberr_t ret= lock_table_for_trx(table_stats, trx, LOCK_X);
+ if (ret == DB_SUCCESS)
+ ret= lock_table_for_trx(index_stats, trx, LOCK_X);
+ row_mysql_lock_data_dictionary(trx);
+ if (ret == DB_SUCCESS)
+ ret= dict_stats_save_index_stat(index, time(nullptr), "n_pages_freed",
+ index->stat_defrag_n_pages_freed,
+ nullptr,
+ "Number of pages freed during"
+ " last defragmentation run.",
+ trx);
+ if (ret == DB_SUCCESS)
+ trx->commit();
+ else
+ trx->rollback();
+
+ if (table_stats)
+ dict_table_close(table_stats, true, thd, mdl_table);
+ if (index_stats)
+ dict_table_close(index_stats, true, thd, mdl_index);
+
+ row_mysql_unlock_data_dictionary(trx);
+ trx->free();
+
+ return ret;
}
/**************************************************************//**
@@ -293,63 +336,97 @@ dict_stats_save_defrag_stats(
/*============================*/
dict_index_t* index) /*!< in: index */
{
- dberr_t ret;
-
- if (dict_index_is_ibuf(index)) {
- return DB_SUCCESS;
- }
-
- if (!index->is_readable()) {
- return dict_stats_report_error(index->table, true);
- }
-
- const time_t now = time(NULL);
- mtr_t mtr;
- ulint n_leaf_pages;
- ulint n_leaf_reserved;
- mtr.start();
- mtr_s_lock_index(index, &mtr);
- n_leaf_reserved = btr_get_size_and_reserved(index, BTR_N_LEAF_PAGES,
- &n_leaf_pages, &mtr);
- mtr.commit();
-
- if (n_leaf_reserved == ULINT_UNDEFINED) {
- // The index name is different during fast index creation,
- // so the stats won't be associated with the right index
- // for later use. We just return without saving.
- return DB_SUCCESS;
- }
+ if (index->is_ibuf())
+ return DB_SUCCESS;
+ if (!index->is_readable())
+ return dict_stats_report_error(index->table, true);
+
+ const time_t now= time(nullptr);
+ mtr_t mtr;
+ ulint n_leaf_pages;
+ mtr.start();
+ mtr_s_lock_index(index, &mtr);
+ ulint n_leaf_reserved= btr_get_size_and_reserved(index, BTR_N_LEAF_PAGES,
+ &n_leaf_pages, &mtr);
+ mtr.commit();
+
+ if (n_leaf_reserved == ULINT_UNDEFINED)
+ return DB_SUCCESS;
+
+ THD *thd= current_thd;
+ MDL_ticket *mdl_table= nullptr, *mdl_index= nullptr;
+ dict_table_t* table_stats= dict_table_open_on_name(TABLE_STATS_NAME, false,
+ DICT_ERR_IGNORE_NONE);
+ if (table_stats)
+ {
+ dict_sys.freeze(SRW_LOCK_CALL);
+ table_stats= dict_acquire_mdl_shared<false>(table_stats, thd, &mdl_table);
+ dict_sys.unfreeze();
+ }
+ if (!table_stats || strcmp(table_stats->name.m_name, TABLE_STATS_NAME))
+ {
+release_and_exit:
+ if (table_stats)
+ dict_table_close(table_stats, false, thd, mdl_table);
+ return DB_STATS_DO_NOT_EXIST;
+ }
- dict_sys.lock(SRW_LOCK_CALL);
- ret = dict_stats_save_index_stat(index, now, "n_page_split",
- index->stat_defrag_n_page_split,
- NULL,
- "Number of new page splits on leaves"
- " since last defragmentation.",
- NULL);
- if (ret != DB_SUCCESS) {
- goto end;
- }
+ dict_table_t *index_stats= dict_table_open_on_name(INDEX_STATS_NAME, false,
+ DICT_ERR_IGNORE_NONE);
+ if (index_stats)
+ {
+ dict_sys.freeze(SRW_LOCK_CALL);
+ index_stats= dict_acquire_mdl_shared<false>(index_stats, thd, &mdl_index);
+ dict_sys.unfreeze();
+ }
+ if (!index_stats)
+ goto release_and_exit;
- ret = dict_stats_save_index_stat(
- index, now, "n_leaf_pages_defrag",
- n_leaf_pages,
- NULL,
- "Number of leaf pages when this stat is saved to disk",
- NULL);
- if (ret != DB_SUCCESS) {
- goto end;
- }
+ if (strcmp(index_stats->name.m_name, INDEX_STATS_NAME))
+ {
+ dict_table_close(index_stats, false, thd, mdl_index);
+ goto release_and_exit;
+ }
- ret = dict_stats_save_index_stat(
- index, now, "n_leaf_pages_reserved",
- n_leaf_reserved,
- NULL,
- "Number of pages reserved for this index leaves when this stat "
- "is saved to disk",
- NULL);
-
-end:
- dict_sys.unlock();
- return ret;
+ trx_t *trx= trx_create();
+ trx->mysql_thd= thd;
+ trx_start_internal(trx);
+ dberr_t ret= lock_table_for_trx(table_stats, trx, LOCK_X);
+ if (ret == DB_SUCCESS)
+ ret= lock_table_for_trx(index_stats, trx, LOCK_X);
+
+ row_mysql_lock_data_dictionary(trx);
+
+ if (ret == DB_SUCCESS)
+ ret= dict_stats_save_index_stat(index, now, "n_page_split",
+ index->stat_defrag_n_page_split, nullptr,
+ "Number of new page splits on leaves"
+ " since last defragmentation.", trx);
+
+ if (ret == DB_SUCCESS)
+ ret= dict_stats_save_index_stat(index, now, "n_leaf_pages_defrag",
+ n_leaf_pages, nullptr,
+ "Number of leaf pages when"
+ " this stat is saved to disk", trx);
+
+ if (ret == DB_SUCCESS)
+ ret= dict_stats_save_index_stat(index, now, "n_leaf_pages_reserved",
+ n_leaf_reserved, nullptr,
+ "Number of pages reserved for"
+ " this index leaves"
+ " when this stat is saved to disk", trx);
+
+ if (ret == DB_SUCCESS)
+ trx->commit();
+ else
+ trx->rollback();
+
+ if (table_stats)
+ dict_table_close(table_stats, true, thd, mdl_table);
+ if (index_stats)
+ dict_table_close(index_stats, true, thd, mdl_index);
+ row_mysql_unlock_data_dictionary(trx);
+ trx->free();
+
+ return ret;
}
diff --git a/storage/innobase/dict/dict0dict.cc b/storage/innobase/dict/dict0dict.cc
index 13b71c9a8b6..447ff7929f0 100644
--- a/storage/innobase/dict/dict0dict.cc
+++ b/storage/innobase/dict/dict0dict.cc
@@ -209,110 +209,26 @@ dict_remove_db_name(
return(s + 1);
}
-/** Open a persistent table.
-@param[in] table_id persistent table identifier
-@param[in] ignore_err errors to ignore
-@param[in] cached_only whether to skip loading
-@return persistent table
-@retval NULL if not found */
-static dict_table_t* dict_table_open_on_id_low(
- table_id_t table_id,
- dict_err_ignore_t ignore_err,
- bool cached_only)
-{
- dict_table_t* table = dict_sys.find_table(table_id);
-
- if (!table && !cached_only) {
- table = dict_load_table_on_id(table_id, ignore_err);
- }
-
- return table;
-}
-
-/**********************************************************************//**
-Try to drop any indexes after an aborted index creation.
-This can also be after a server kill during DROP INDEX. */
-static
-void
-dict_table_try_drop_aborted(
-/*========================*/
- dict_table_t* table, /*!< in: table, or NULL if it
- needs to be looked up again */
- table_id_t table_id, /*!< in: table identifier */
- uint32_t ref_count) /*!< in: expected table->n_ref_count */
-{
- trx_t* trx;
-
- trx = trx_create();
- trx->op_info = "try to drop any indexes after an aborted index creation";
- row_mysql_lock_data_dictionary(trx);
- trx->dict_operation = true;
-
- if (table == NULL) {
- table = dict_table_open_on_id_low(
- table_id, DICT_ERR_IGNORE_FK_NOKEY, FALSE);
- } else {
- ut_ad(table->id == table_id);
- }
-
- if (table && table->get_ref_count() == ref_count && table->drop_aborted
- && !UT_LIST_GET_FIRST(table->locks)) {
- /* Silence a debug assertion in row_merge_drop_indexes(). */
- ut_d(table->acquire());
- row_merge_drop_indexes(trx, table, true);
- ut_d(table->release());
- ut_ad(table->get_ref_count() == ref_count);
- trx_commit_for_mysql(trx);
- }
-
- row_mysql_unlock_data_dictionary(trx);
- trx->free();
-}
-
-/**********************************************************************//**
-When opening a table,
-try to drop any indexes after an aborted index creation.
-Invoke dict_sys.unlock(). */
-static
-void
-dict_table_try_drop_aborted_and_unlock(
- dict_table_t* table, /*!< in: table (may be NULL) */
- ibool try_drop) /*!< in: FALSE if should try to
- drop indexes whose online creation
- was aborted */
-{
- if (try_drop
- && table != NULL
- && table->drop_aborted
- && table->get_ref_count() == 1
- && dict_table_get_first_index(table)) {
-
- /* Attempt to drop the indexes whose online creation
- was aborted. */
- table_id_t table_id = table->id;
-
- dict_sys.unlock();
-
- dict_table_try_drop_aborted(table, table_id, 1);
- } else {
- dict_sys.unlock();
- }
-}
-
/** Decrement the count of open handles */
void dict_table_close(dict_table_t *table)
{
- if (dict_stats_is_persistent_enabled(table) &&
+ if (table->get_ref_count() == 1 &&
+ dict_stats_is_persistent_enabled(table) &&
strchr(table->name.m_name, '/'))
{
- dict_sys.freeze(SRW_LOCK_CALL);
+ /* It looks like we are closing the last handle. The user could
+ have executed FLUSH TABLES in order to have the statistics reloaded
+ from the InnoDB persistent statistics tables. We must acquire
+ exclusive dict_sys.latch to prevent a race condition with another
+ thread concurrently acquiring a handle on the table. */
+ dict_sys.lock(SRW_LOCK_CALL);
if (table->release())
{
table->stats_mutex_lock();
dict_stats_deinit(table);
table->stats_mutex_unlock();
}
- dict_sys.unfreeze();
+ dict_sys.unlock();
}
else
table->release();
@@ -320,9 +236,7 @@ void dict_table_close(dict_table_t *table)
/** Decrements the count of open handles of a table.
@param[in,out] table table
-@param[in] dict_locked data dictionary locked
-@param[in] try_drop try to drop any orphan indexes after
- an aborted online index creation
+@param[in] dict_locked whether dict_sys.latch is being held
@param[in] thd thread to release MDL
@param[in] mdl metadata lock or NULL if the thread
is a foreground one. */
@@ -330,57 +244,32 @@ void
dict_table_close(
dict_table_t* table,
bool dict_locked,
- bool try_drop,
THD* thd,
MDL_ticket* mdl)
{
- if (!dict_locked) {
- dict_sys.lock(SRW_LOCK_CALL);
- }
-
- ut_ad(dict_sys.locked());
- ut_a(table->get_ref_count() > 0);
-
- const bool last_handle = table->release();
-
- /* Force persistent stats re-read upon next open of the table
- so that FLUSH TABLE can be used to forcibly fetch stats from disk
- if they have been manually modified. We reset table->stat_initialized
- only if table reference count is 0 because we do not want too frequent
- stats re-reads (e.g. in other cases than FLUSH TABLE). */
- if (last_handle
- && dict_stats_is_persistent_enabled(table)
- && strchr(table->name.m_name, '/')) {
-
- table->stats_mutex_lock();
- dict_stats_deinit(table);
- table->stats_mutex_unlock();
- }
-
- ut_ad(dict_lru_validate());
- ut_ad(dict_sys.find(table));
-
- if (!dict_locked) {
- table_id_t table_id = table->id;
- const bool drop_aborted = last_handle && try_drop
- && table->drop_aborted
- && dict_table_get_first_index(table);
-
- dict_sys.unlock();
+ if (!dict_locked)
+ dict_table_close(table);
+ else
+ {
+ if (table->release() && dict_stats_is_persistent_enabled(table) &&
+ strchr(table->name.m_name, '/'))
+ {
+ /* Force persistent stats re-read upon next open of the table so
+ that FLUSH TABLE can be used to forcibly fetch stats from disk if
+ they have been manually modified. */
+ table->stats_mutex_lock();
+ dict_stats_deinit(table);
+ table->stats_mutex_unlock();
+ }
- /* dict_table_try_drop_aborted() can generate undo logs.
- So it should be avoided after shutdown of background
- threads */
- if (drop_aborted && !srv_undo_sources) {
- dict_table_try_drop_aborted(NULL, table_id, 0);
- }
- }
+ ut_ad(dict_lru_validate());
+ ut_ad(dict_sys.find(table));
+ }
- if (!thd || !mdl) {
- } else if (MDL_context *mdl_context= static_cast<MDL_context*>(
- thd_mdl_context(thd))) {
- mdl_context->release_lock(mdl);
- }
+ if (!thd || !mdl);
+ else if (MDL_context *mdl_context= static_cast<MDL_context*>
+ (thd_mdl_context(thd)))
+ mdl_context->release_lock(mdl);
}
/** Check if the table has a given (non_virtual) column.
@@ -1117,22 +1006,20 @@ ATTRIBUTE_NOINLINE void dict_sys_t::unfreeze()
#endif /* UNIV_PFS_RWLOCK */
/**********************************************************************//**
-Returns a table object and increment its open handle count.
+Returns a table object and increments its open handle count.
NOTE! This is a high-level function to be used mainly from outside the
-'dict' module. Inside this directory dict_table_get_low
+'dict' directory. Inside this directory dict_table_get_low
is usually the appropriate function.
-@return table, NULL if does not exist */
+@param[in] table_name Table name
+@param[in] dict_locked whether dict_sys.latch is being held exclusively
+@param[in] ignore_err error to be ignored when loading the table
+@return table
+@retval nullptr if does not exist */
dict_table_t*
dict_table_open_on_name(
-/*====================*/
- const char* table_name, /*!< in: table name */
- ibool dict_locked, /*!< in: TRUE=data dictionary locked */
- ibool try_drop, /*!< in: TRUE=try to drop any orphan
- indexes after an aborted online
- index creation */
- dict_err_ignore_t
- ignore_err) /*!< in: error to be ignored when
- loading a table definition */
+ const char* table_name,
+ bool dict_locked,
+ dict_err_ignore_t ignore_err)
{
dict_table_t *table;
DBUG_ENTER("dict_table_open_on_name");
@@ -1183,7 +1070,7 @@ dict_table_open_on_name(
ut_ad(dict_lru_validate());
if (!dict_locked)
- dict_table_try_drop_aborted_and_unlock(table, try_drop);
+ dict_sys.unlock();
DBUG_RETURN(table);
}
@@ -1979,23 +1866,6 @@ void dict_sys_t::remove(dict_table_t* table, bool lru, bool keep)
UT_LIST_REMOVE(table_non_LRU, table);
}
- if (lru && table->drop_aborted) {
- /* When evicting the table definition,
- drop the orphan indexes from the data dictionary
- and free the index pages. */
- trx_t* trx = trx_create();
-
- ut_ad(dict_sys.locked());
- /* Mimic row_mysql_lock_data_dictionary(). */
- trx->dict_operation_lock_mode = RW_X_LATCH;
-
- trx->dict_operation = true;
- row_merge_drop_indexes_dict(trx, table->id);
- trx_commit_for_mysql(trx);
- trx->dict_operation_lock_mode = 0;
- trx->free();
- }
-
/* Free virtual column template if any */
if (table->vc_templ != NULL) {
dict_free_vc_templ(table->vc_templ);
diff --git a/storage/innobase/dict/dict0stats.cc b/storage/innobase/dict/dict0stats.cc
index 9c4d87f3fea..2eced56c5e2 100644
--- a/storage/innobase/dict/dict0stats.cc
+++ b/storage/innobase/dict/dict0stats.cc
@@ -25,11 +25,10 @@ Created Jan 06, 2010 Vasil Dimov
*******************************************************/
#include "dict0stats.h"
-#include "ut0ut.h"
-#include "ut0rnd.h"
#include "dyn0buf.h"
#include "row0sel.h"
#include "trx0trx.h"
+#include "lock0lock.h"
#include "pars0pars.h"
#include <mysql_com.h>
#include "log.h"
@@ -142,7 +141,7 @@ typedef ut_allocator<std::pair<const char* const, dict_index_t*> >
typedef std::map<const char*, dict_index_t*, ut_strcmp_functor,
index_map_t_allocator> index_map_t;
-inline bool dict_table_t::is_stats_table() const
+bool dict_table_t::is_stats_table() const
{
return !strcmp(name.m_name, TABLE_STATS_NAME) ||
!strcmp(name.m_name, INDEX_STATS_NAME);
@@ -517,9 +516,7 @@ This function will free the pinfo object.
@param[in,out] pinfo pinfo to pass to que_eval_sql() must already
have any literals bound to it
@param[in] sql SQL string to execute
-@param[in,out] trx in case of NULL the function will allocate and
-free the trx object. If it is not NULL then it will be rolled back
-only in the case of error, but not freed.
+@param[in,out] trx transaction
@return DB_SUCCESS or error code */
static
dberr_t dict_stats_exec_sql(pars_info_t *pinfo, const char* sql, trx_t *trx)
@@ -532,22 +529,7 @@ dberr_t dict_stats_exec_sql(pars_info_t *pinfo, const char* sql, trx_t *trx)
return DB_STATS_DO_NOT_EXIST;
}
- if (trx)
- return que_eval_sql(pinfo, sql, trx);
-
- trx= trx_create();
- trx_start_internal(trx);
-
- trx->dict_operation_lock_mode= RW_X_LATCH;
- dberr_t err= que_eval_sql(pinfo, sql, trx);
-
- if (err == DB_SUCCESS)
- trx->commit();
- else
- trx->rollback();
- trx->dict_operation_lock_mode= 0;
- trx->free();
- return err;
+ return que_eval_sql(pinfo, sql, trx);
}
/*********************************************************************//**
@@ -2557,9 +2539,7 @@ storage.
@param[in] stat_value value of the stat
@param[in] sample_size n pages sampled or NULL
@param[in] stat_description description of the stat
-@param[in,out] trx in case of NULL the function will
-allocate and free the trx object. If it is not NULL then it will be
-rolled back only in the case of error, but not freed.
+@param[in,out] trx transaction
@return DB_SUCCESS or error code */
dberr_t
dict_stats_save_index_stat(
@@ -2690,8 +2670,6 @@ dict_stats_save(
const index_id_t* only_for_index)
{
pars_info_t* pinfo;
- dberr_t ret;
- dict_table_t* table;
char db_utf8[MAX_DB_UTF8_LEN];
char table_utf8[MAX_TABLE_UTF8_LEN];
@@ -2703,16 +2681,59 @@ dict_stats_save(
return (dict_stats_report_error(table_orig));
}
- table = dict_stats_snapshot_create(table_orig);
+ THD* thd = current_thd;
+ MDL_ticket *mdl_table = nullptr, *mdl_index = nullptr;
+ dict_table_t* table_stats = dict_table_open_on_name(
+ TABLE_STATS_NAME, false, DICT_ERR_IGNORE_NONE);
+ if (table_stats) {
+ dict_sys.freeze(SRW_LOCK_CALL);
+ table_stats = dict_acquire_mdl_shared<false>(table_stats, thd,
+ &mdl_table);
+ dict_sys.unfreeze();
+ }
+ if (!table_stats
+ || strcmp(table_stats->name.m_name, TABLE_STATS_NAME)) {
+release_and_exit:
+ if (table_stats) {
+ dict_table_close(table_stats, false, thd, mdl_table);
+ }
+ return DB_STATS_DO_NOT_EXIST;
+ }
+
+ dict_table_t* index_stats = dict_table_open_on_name(
+ INDEX_STATS_NAME, false, DICT_ERR_IGNORE_NONE);
+ if (index_stats) {
+ dict_sys.freeze(SRW_LOCK_CALL);
+ index_stats = dict_acquire_mdl_shared<false>(index_stats, thd,
+ &mdl_index);
+ dict_sys.unfreeze();
+ }
+ if (!index_stats) {
+ goto release_and_exit;
+ }
+ if (strcmp(index_stats->name.m_name, INDEX_STATS_NAME)) {
+ dict_table_close(index_stats, false, thd, mdl_index);
+ goto release_and_exit;
+ }
+
+ dict_table_t* table = dict_stats_snapshot_create(table_orig);
dict_fs2utf8(table->name.m_name, db_utf8, sizeof(db_utf8),
table_utf8, sizeof(table_utf8));
-
const time_t now = time(NULL);
trx_t* trx = trx_create();
+ trx->mysql_thd = thd;
trx_start_internal(trx);
- trx->dict_operation_lock_mode = RW_X_LATCH;
- dict_sys.lock(SRW_LOCK_CALL);
+ dberr_t ret = trx->read_only
+ ? DB_READ_ONLY
+ : lock_table_for_trx(table_stats, trx, LOCK_X);
+ if (ret == DB_SUCCESS) {
+ ret = lock_table_for_trx(index_stats, trx, LOCK_X);
+ }
+ if (ret != DB_SUCCESS) {
+ trx->commit();
+ goto unlocked_free_and_exit;
+ }
pinfo = pars_info_create();
@@ -2725,6 +2746,9 @@ dict_stats_save(
pars_info_add_ull_literal(pinfo, "sum_of_other_index_sizes",
table->stat_sum_of_other_index_sizes);
+ dict_sys.lock(SRW_LOCK_CALL);
+ trx->dict_operation_lock_mode = true;
+
ret = dict_stats_exec_sql(
pinfo,
"PROCEDURE TABLE_STATS_SAVE () IS\n"
@@ -2753,10 +2777,13 @@ dict_stats_save(
rollback_and_exit:
trx->rollback();
free_and_exit:
- trx->dict_operation_lock_mode = 0;
+ trx->dict_operation_lock_mode = false;
dict_sys.unlock();
+unlocked_free_and_exit:
trx->free();
dict_stats_snapshot_free(table);
+ dict_table_close(table_stats, false, thd, mdl_table);
+ dict_table_close(index_stats, false, thd, mdl_index);
return ret;
}
@@ -3227,13 +3254,7 @@ dict_stats_fetch_from_ps(
trx = trx_create();
- /* Use 'read-uncommitted' so that the SELECTs we execute
- do not get blocked in case some user has locked the rows we
- are SELECTing */
-
- trx->isolation_level = TRX_ISO_READ_UNCOMMITTED;
-
- trx_start_internal(trx);
+ trx_start_internal_read_only(trx);
dict_fs2utf8(table->name.m_name, db_utf8, sizeof(db_utf8),
table_utf8, sizeof(table_utf8));
@@ -3644,7 +3665,7 @@ dberr_t dict_stats_delete_from_table_stats(const char *database_name,
/** Execute DELETE FROM mysql.innodb_index_stats
@param database_name database name
@param table_name table name
-@param trx transaction (nullptr=start and commit a new one)
+@param trx transaction
@return DB_SUCCESS or error code */
dberr_t dict_stats_delete_from_index_stats(const char *database_name,
const char *table_name, trx_t *trx)
@@ -3672,7 +3693,7 @@ dberr_t dict_stats_delete_from_index_stats(const char *database_name,
@param database_name database name
@param table_name table name
@param index_name name of the index
-@param trx transaction (nullptr=start and commit a new one)
+@param trx transaction
@return DB_SUCCESS or error code */
dberr_t dict_stats_delete_from_index_stats(const char *database_name,
const char *table_name,
diff --git a/storage/innobase/dict/dict0stats_bg.cc b/storage/innobase/dict/dict0stats_bg.cc
index 23b00bb8e23..7d50174c306 100644
--- a/storage/innobase/dict/dict0stats_bg.cc
+++ b/storage/innobase/dict/dict0stats_bg.cc
@@ -383,7 +383,7 @@ no_table:
dict_sys.lock(SRW_LOCK_CALL);
table->stats_bg_flag = BG_STAT_NONE;
- dict_table_close(table, TRUE, FALSE);
+ dict_table_close(table, true);
dict_sys.unlock();
return ret;
@@ -409,8 +409,12 @@ static std::mutex dict_stats_mutex;
static void dict_stats_func(void*)
{
- while (dict_stats_process_entry_from_recalc_pool()) {}
- dict_defrag_process_entries_from_defrag_pool();
+ THD *thd= innobase_create_background_thd("InnoDB statistics");
+ set_current_thd(thd);
+ while (dict_stats_process_entry_from_recalc_pool()) {}
+ dict_defrag_process_entries_from_defrag_pool();
+ set_current_thd(nullptr);
+ innobase_destroy_background_thd(thd);
}
diff --git a/storage/innobase/dict/drop.cc b/storage/innobase/dict/drop.cc
index f837798ab8f..20ee4ff545b 100644
--- a/storage/innobase/dict/drop.cc
+++ b/storage/innobase/dict/drop.cc
@@ -81,7 +81,7 @@ dberr_t trx_t::drop_table_foreign(const table_name_t &name)
ut_ad(dict_sys.locked());
ut_ad(state == TRX_STATE_ACTIVE);
ut_ad(dict_operation);
- ut_ad(dict_operation_lock_mode == RW_X_LATCH);
+ ut_ad(dict_operation_lock_mode);
if (!dict_sys.sys_foreign || !dict_sys.sys_foreign_cols)
return DB_SUCCESS;
@@ -117,7 +117,7 @@ dberr_t trx_t::drop_table_foreign(const table_name_t &name)
dberr_t trx_t::drop_table_statistics(const table_name_t &name)
{
ut_ad(dict_sys.locked());
- ut_ad(dict_operation_lock_mode == RW_X_LATCH);
+ ut_ad(dict_operation_lock_mode);
if (strstr(name.m_name, "/" TEMP_FILE_PREFIX_INNODB) ||
!strcmp(name.m_name, TABLE_STATS_NAME) ||
@@ -146,7 +146,7 @@ dberr_t trx_t::drop_table(const dict_table_t &table)
ut_ad(dict_sys.locked());
ut_ad(state == TRX_STATE_ACTIVE);
ut_ad(dict_operation);
- ut_ad(dict_operation_lock_mode == RW_X_LATCH);
+ ut_ad(dict_operation_lock_mode);
ut_ad(!table.is_temporary());
ut_ad(!(table.stats_bg_flag & BG_STAT_IN_PROGRESS));
/* The table must be exclusively locked by this transaction. */
diff --git a/storage/innobase/fts/fts0fts.cc b/storage/innobase/fts/fts0fts.cc
index 1d0d9b71489..5d75bcb37bd 100644
--- a/storage/innobase/fts/fts0fts.cc
+++ b/storage/innobase/fts/fts0fts.cc
@@ -1371,7 +1371,7 @@ fts_cache_add_doc(
@retval DB_FAIL if the table did not exist */
static dberr_t fts_drop_table(trx_t *trx, const char *table_name, bool rename)
{
- if (dict_table_t *table= dict_table_open_on_name(table_name, TRUE, FALSE,
+ if (dict_table_t *table= dict_table_open_on_name(table_name, true,
DICT_ERR_IGNORE_DROP))
{
table->release();
@@ -1503,24 +1503,20 @@ static dberr_t fts_lock_table(trx_t *trx, const char *table_name)
{
ut_ad(purge_sys.must_wait_FTS());
- if (dict_table_t *table= dict_table_open_on_name(table_name, false, false,
+ if (dict_table_t *table= dict_table_open_on_name(table_name, false,
DICT_ERR_IGNORE_DROP))
{
dberr_t err= lock_table_for_trx(table, trx, LOCK_X);
/* Wait for purge threads to stop using the table. */
- dict_sys.freeze(SRW_LOCK_CALL);
for (uint n= 15; table->get_ref_count() > 1; )
{
- dict_sys.unfreeze();
if (!--n)
{
err= DB_LOCK_WAIT_TIMEOUT;
goto fail;
}
std::this_thread::sleep_for(std::chrono::milliseconds(50));
- dict_sys.freeze(SRW_LOCK_CALL);
}
- dict_sys.unfreeze();
fail:
table->release();
return err;
@@ -4077,7 +4073,7 @@ fts_sync_commit(
}
/* Avoid assertion in trx_t::free(). */
- trx->dict_operation_lock_mode = 0;
+ trx->dict_operation_lock_mode = false;
trx->free();
return(error);
@@ -4127,7 +4123,7 @@ fts_sync_rollback(
fts_sql_rollback(trx);
/* Avoid assertion in trx_t::free(). */
- trx->dict_operation_lock_mode = 0;
+ trx->dict_operation_lock_mode = false;
trx->free();
}
diff --git a/storage/innobase/fts/fts0opt.cc b/storage/innobase/fts/fts0opt.cc
index 7f7c23aee8f..da24cd54aec 100644
--- a/storage/innobase/fts/fts0opt.cc
+++ b/storage/innobase/fts/fts0opt.cc
@@ -2798,7 +2798,7 @@ static void fts_optimize_sync_table(dict_table_t *table,
std::this_thread::sleep_for(std::chrono::seconds(6)););
if (mdl_ticket)
- dict_table_close(sync_table, false, false, fts_opt_thd, mdl_ticket);
+ dict_table_close(sync_table, false, fts_opt_thd, mdl_ticket);
}
/**********************************************************************//**
diff --git a/storage/innobase/handler/ha_innodb.cc b/storage/innobase/handler/ha_innodb.cc
index 96a9012e1cb..81a03c1208a 100644
--- a/storage/innobase/handler/ha_innodb.cc
+++ b/storage/innobase/handler/ha_innodb.cc
@@ -1337,7 +1337,8 @@ static void innodb_drop_database(handlerton*, char *path)
innobase_casedn_str(namebuf);
#endif /* _WIN32 */
- trx_t *trx= innobase_trx_allocate(current_thd);
+ THD * const thd= current_thd;
+ trx_t *trx= innobase_trx_allocate(thd);
retry:
dict_sys.lock(SRW_LOCK_CALL);
@@ -1385,21 +1386,56 @@ retry:
}
}
- trx->dict_operation_lock_mode= RW_X_LATCH;
+ dict_sys.unlock();
+
+ dict_table_t *table_stats, *index_stats;
+ MDL_ticket *mdl_table= nullptr, *mdl_index= nullptr;
+ table_stats= dict_table_open_on_name(TABLE_STATS_NAME, false,
+ DICT_ERR_IGNORE_NONE);
+ if (table_stats)
+ {
+ dict_sys.freeze(SRW_LOCK_CALL);
+ table_stats= dict_acquire_mdl_shared<false>(table_stats,
+ thd, &mdl_table);
+ dict_sys.unfreeze();
+ }
+ index_stats= dict_table_open_on_name(INDEX_STATS_NAME, false,
+ DICT_ERR_IGNORE_NONE);
+ if (index_stats)
+ {
+ dict_sys.freeze(SRW_LOCK_CALL);
+ index_stats= dict_acquire_mdl_shared<false>(index_stats,
+ thd, &mdl_index);
+ dict_sys.unfreeze();
+ }
+
trx_start_for_ddl(trx);
+
uint errors= 0;
char db[NAME_LEN + 1];
strconvert(&my_charset_filename, namebuf, len, system_charset_info, db,
sizeof db, &errors);
- if (errors);
- else if (dict_stats_delete(db, trx))
+ if (!errors && table_stats && index_stats &&
+ !strcmp(table_stats->name.m_name, TABLE_STATS_NAME) &&
+ !strcmp(index_stats->name.m_name, INDEX_STATS_NAME) &&
+ lock_table_for_trx(table_stats, trx, LOCK_X) == DB_SUCCESS &&
+ lock_table_for_trx(index_stats, trx, LOCK_X) == DB_SUCCESS)
{
- /* Ignore this error. Leaving garbage statistics behind is a
- lesser evil. Carry on to try to remove any garbage tables. */
- trx->rollback();
- trx_start_for_ddl(trx);
+ row_mysql_lock_data_dictionary(trx);
+ if (dict_stats_delete(db, trx))
+ {
+ /* Ignore this error. Leaving garbage statistics behind is a
+ lesser evil. Carry on to try to remove any garbage tables. */
+ trx->rollback();
+ trx_start_for_ddl(trx);
+ }
+ row_mysql_unlock_data_dictionary(trx);
}
+ if (err == DB_SUCCESS)
+ err= lock_sys_tables(trx);
+ row_mysql_lock_data_dictionary(trx);
+
static const char drop_database[] =
"PROCEDURE DROP_DATABASE_PROC () IS\n"
"fk CHAR;\n"
@@ -1482,8 +1518,12 @@ retry:
ib::error() << "DROP DATABASE " << namebuf << ": " << err;
}
else
- trx_commit_for_mysql(trx);
+ trx->commit();
+ if (table_stats)
+ dict_table_close(table_stats, true, thd, mdl_table);
+ if (index_stats)
+ dict_table_close(index_stats, true, thd, mdl_index);
row_mysql_unlock_data_dictionary(trx);
trx->free();
@@ -1779,6 +1819,15 @@ MYSQL_THD innobase_create_background_thd(const char* name)
return thd;
}
+extern "C" void thd_increment_pending_ops(MYSQL_THD);
+
+THD *innodb_thd_increment_pending_ops(THD *thd)
+{
+ if (!thd || THDVAR(thd, background_thread))
+ return nullptr;
+ thd_increment_pending_ops(thd);
+ return thd;
+}
/** Destroy a background purge thread THD.
@param[in] thd MYSQL_THD to destroy */
@@ -1856,7 +1905,7 @@ thd_has_edited_nontrans_tables(
/******************************************************************//**
Returns the lock wait timeout for the current connection.
@return the lock wait timeout, in seconds */
-uint
+uint&
thd_lock_wait_timeout(
/*==================*/
THD* thd) /*!< in: thread handle, or NULL to query
@@ -1936,7 +1985,7 @@ static int innodb_check_version(handlerton *hton, const char *path,
char norm_path[FN_REFLEN];
normalize_table_name(norm_path, path);
- if (dict_table_t *table= dict_table_open_on_name(norm_path, false, false,
+ if (dict_table_t *table= dict_table_open_on_name(norm_path, false,
DICT_ERR_IGNORE_NONE))
{
const trx_id_t trx_id= table->def_trx_id;
@@ -2013,8 +2062,8 @@ static void drop_garbage_tables_after_restore()
trx_start_for_ddl(trx);
std::vector<pfs_os_file_t> deleted;
- row_mysql_lock_data_dictionary(trx);
dberr_t err= DB_TABLE_NOT_FOUND;
+ row_mysql_lock_data_dictionary(trx);
if (dict_table_t *table= dict_sys.load_table
({reinterpret_cast<const char*>(pcur.old_rec), len},
@@ -2022,15 +2071,17 @@ static void drop_garbage_tables_after_restore()
{
ut_ad(table->stats_bg_flag == BG_STAT_NONE);
table->acquire();
+ row_mysql_unlock_data_dictionary(trx);
err= lock_table_for_trx(table, trx, LOCK_X);
if (err == DB_SUCCESS &&
(table->flags2 & (DICT_TF2_FTS_HAS_DOC_ID | DICT_TF2_FTS)))
{
- dict_sys.unlock();
fts_optimize_remove_table(table);
err= fts_lock_tables(trx, *table);
- dict_sys.lock(SRW_LOCK_CALL);
}
+ if (err == DB_SUCCESS)
+ err= lock_sys_tables(trx);
+ row_mysql_lock_data_dictionary(trx);
table->release();
if (err == DB_SUCCESS)
@@ -3035,7 +3086,7 @@ ha_innobase::update_thd(
trx_t* trx = check_trx_exists(thd);
- ut_ad(trx->dict_operation_lock_mode == 0);
+ ut_ad(!trx->dict_operation_lock_mode);
ut_ad(!trx->dict_operation);
if (m_prebuilt->trx != trx) {
@@ -3187,7 +3238,7 @@ static bool innobase_query_caching_table_check(
const char* norm_name)
{
dict_table_t* table = dict_table_open_on_name(
- norm_name, FALSE, FALSE, DICT_ERR_IGNORE_FK_NOKEY);
+ norm_name, false, DICT_ERR_IGNORE_FK_NOKEY);
if (table == NULL) {
return false;
@@ -4439,7 +4490,7 @@ innobase_commit(
trx_t* trx = check_trx_exists(thd);
- ut_ad(trx->dict_operation_lock_mode == 0);
+ ut_ad(!trx->dict_operation_lock_mode);
ut_ad(!trx->dict_operation);
/* Transaction is deregistered only in a commit or a rollback. If
@@ -4528,7 +4579,7 @@ innobase_rollback(
trx_t* trx = check_trx_exists(thd);
- ut_ad(trx->dict_operation_lock_mode == 0);
+ ut_ad(!trx->dict_operation_lock_mode);
ut_ad(!trx->dict_operation);
/* Reset the number AUTO-INC rows required */
@@ -4893,14 +4944,14 @@ static void innobase_kill_query(handlerton*, THD *thd, enum thd_kill_levels)
if (trx_t* trx= thd_to_trx(thd))
{
ut_ad(trx->mysql_thd == thd);
+ if (!trx->lock.wait_lock);
#ifdef WITH_WSREP
- if (trx->is_wsrep() && wsrep_thd_is_aborting(thd))
+ else if (trx->is_wsrep() && wsrep_thd_is_aborting(thd))
/* if victim has been signaled by BF thread and/or aborting is already
progressing, following query aborting is not necessary any more.
- Also, BF thread should own trx mutex for the victim. */
- DBUG_VOID_RETURN;
+ Also, BF thread should own trx mutex for the victim. */;
#endif /* WITH_WSREP */
- if (trx->lock.wait_lock)
+ else
lock_sys_t::cancel(trx);
}
@@ -6109,8 +6160,9 @@ ha_innobase::open_dict_table(
dict_err_ignore_t ignore_err)
{
DBUG_ENTER("ha_innobase::open_dict_table");
- dict_table_t* ib_table = dict_table_open_on_name(norm_name, FALSE,
- TRUE, ignore_err);
+ /* FIXME: try_drop_aborted */
+ dict_table_t* ib_table = dict_table_open_on_name(norm_name, false,
+ ignore_err);
if (NULL == ib_table && is_partition) {
/* MySQL partition engine hard codes the file name
@@ -6147,9 +6199,9 @@ ha_innobase::open_dict_table(
normalize_table_name_c_low(
par_case_name, table_name, false);
#endif
+ /* FIXME: try_drop_aborted */
ib_table = dict_table_open_on_name(
- par_case_name, FALSE, TRUE,
- ignore_err);
+ par_case_name, false, ignore_err);
}
if (ib_table != NULL) {
@@ -10603,7 +10655,6 @@ create_table_info_t::create_table_def()
table->name.m_name, field->field_name.str);
err_col:
dict_mem_table_free(table);
- ut_ad(trx_state_eq(m_trx, TRX_STATE_NOT_STARTED));
DBUG_RETURN(HA_ERR_GENERIC);
}
@@ -10771,9 +10822,9 @@ err_col:
table->space = fil_system.temp_space;
table->add_to_cache();
} else {
- if (err == DB_SUCCESS) {
- err = row_create_table_for_mysql(table, m_trx);
- }
+ ut_ad(dict_sys.sys_tables_exist());
+
+ err = row_create_table_for_mysql(table, m_trx);
DBUG_EXECUTE_IF("ib_crash_during_create_for_encryption",
DBUG_SUICIDE(););
@@ -12990,7 +13041,7 @@ create_table_info_t::create_table_update_dict()
DBUG_ENTER("create_table_update_dict");
innobase_table = dict_table_open_on_name(
- m_table_name, FALSE, FALSE, DICT_ERR_IGNORE_NONE);
+ m_table_name, false, DICT_ERR_IGNORE_NONE);
DBUG_ASSERT(innobase_table != 0);
if (innobase_table->fts != NULL) {
@@ -13126,18 +13177,27 @@ ha_innobase::create(
}
const bool own_trx = !trx;
+ int error = 0;
if (own_trx) {
info.allocate_trx();
trx = info.trx();
- /* Latch the InnoDB data dictionary exclusively so that no deadlocks
- or lock waits can happen in it during a table create operation.
- Drop table etc. do this latching in row0mysql.cc. */
- row_mysql_lock_data_dictionary(trx);
DBUG_ASSERT(trx_state_eq(trx, TRX_STATE_NOT_STARTED));
}
+ if (own_trx && !(info.flags2() & DICT_TF2_TEMPORARY)) {
+ trx_start_for_ddl(trx);
+ if (dberr_t err = lock_sys_tables(trx)) {
+ error = convert_error_code_to_mysql(err, 0, nullptr);
+ }
+ }
+ if (own_trx) {
+ row_mysql_lock_data_dictionary(trx);
+ }
+
+ if (!error) {
+ error = info.create_table(own_trx);
+ }
- int error = info.create_table(own_trx);
if (error) {
/* Drop the being-created table before rollback,
so that rollback can possibly rename back a table
@@ -13216,13 +13276,18 @@ ha_innobase::discard_or_import_tablespace(
}
trx_start_if_not_started(m_prebuilt->trx, true);
+ m_prebuilt->trx->dict_operation = true;
/* Obtain an exclusive lock on the table. */
dberr_t err = lock_table_for_trx(m_prebuilt->table,
m_prebuilt->trx, LOCK_X);
+ if (err == DB_SUCCESS) {
+ err = lock_sys_tables(m_prebuilt->trx);
+ }
if (err != DB_SUCCESS) {
/* unable to lock the table: do nothing */
+ m_prebuilt->trx->commit();
} else if (discard) {
/* Discarding an already discarded tablespace should be an
@@ -13239,7 +13304,6 @@ ha_innobase::discard_or_import_tablespace(
err = row_discard_tablespace_for_mysql(
m_prebuilt->table, m_prebuilt->trx);
-
} else if (m_prebuilt->table->is_readable()) {
/* Commit the transaction in order to
release the table lock. */
@@ -13268,8 +13332,7 @@ ha_innobase::discard_or_import_tablespace(
}
}
- /* Commit the transaction in order to release the table lock. */
- trx_commit_for_mysql(m_prebuilt->trx);
+ ut_ad(m_prebuilt->trx->state == TRX_STATE_NOT_STARTED);
if (discard || err != DB_SUCCESS) {
DBUG_RETURN(convert_error_code_to_mysql(
@@ -13398,9 +13461,13 @@ int ha_innobase::delete_table(const char *name)
trx_start_for_ddl(trx);
}
+ dict_table_t *table_stats= nullptr, *index_stats= nullptr;
+ MDL_ticket *mdl_table= nullptr, *mdl_index= nullptr;
dberr_t err= lock_table_for_trx(table, trx, LOCK_X);
+
const bool fts= err == DB_SUCCESS &&
(table->flags2 & (DICT_TF2_FTS_HAS_DOC_ID | DICT_TF2_FTS));
+ const enum_sql_command sqlcom= enum_sql_command(thd_sql_command(thd));
if (fts)
{
@@ -13409,16 +13476,73 @@ int ha_innobase::delete_table(const char *name)
err= fts_lock_tables(trx, *table);
}
+#ifdef WITH_PARTITION_STORAGE_ENGINE
+ const bool rollback_add_partition=
+ (sqlcom == SQLCOM_ALTER_TABLE && table->name.part());
+
+ if (rollback_add_partition)
+ {
+ if (!fts)
+ purge_sys.stop_FTS();
+ /* This looks like the rollback of ALTER TABLE...ADD PARTITION
+ that was caused by MDL timeout. We could have written undo log
+ for inserting the data into the new partitions. */
+ if (table->stat_persistent != DICT_STATS_PERSISTENT_OFF)
+ {
+ /* We do not know for certain whether we are holding MDL_EXCLUSIVE.
+ This code handles the case where we are not holding it, but we
+ may in fact hold it. We want to avoid a deadlock with
+ dict_stats_process_entry_from_recalc_pool(). */
+ dict_stats_recalc_pool_del(table->id, true);
+ /* If statistics calculation is still using this table, we will
+ catch it below while waiting for purge to stop using this table. */
+ }
+ }
+#endif
+
+ if (err == DB_SUCCESS && dict_stats_is_persistent_enabled(table) &&
+ !table->is_stats_table())
+ {
+ table_stats= dict_table_open_on_name(TABLE_STATS_NAME, false,
+ DICT_ERR_IGNORE_NONE);
+ if (table_stats)
+ {
+ dict_sys.freeze(SRW_LOCK_CALL);
+ table_stats= dict_acquire_mdl_shared<false>(table_stats,
+ thd, &mdl_table);
+ dict_sys.unfreeze();
+ }
+
+ index_stats= dict_table_open_on_name(INDEX_STATS_NAME, false,
+ DICT_ERR_IGNORE_NONE);
+ if (index_stats)
+ {
+ dict_sys.freeze(SRW_LOCK_CALL);
+ index_stats= dict_acquire_mdl_shared<false>(index_stats,
+ thd, &mdl_index);
+ dict_sys.unfreeze();
+ }
+
+ if (table_stats && index_stats &&
+ !strcmp(table_stats->name.m_name, TABLE_STATS_NAME) &&
+ !strcmp(index_stats->name.m_name, INDEX_STATS_NAME) &&
+ !(err= lock_table_for_trx(table_stats, trx, LOCK_X)))
+ err= lock_table_for_trx(index_stats, trx, LOCK_X);
+ }
+
+ if (err == DB_SUCCESS)
+ err= lock_sys_tables(trx);
+
dict_sys.lock(SRW_LOCK_CALL);
- trx->dict_operation_lock_mode= RW_X_LATCH;
+
if (!table->release() && err == DB_SUCCESS)
{
/* Wait for purge threads to stop using the table. */
for (uint n= 15;;)
{
- row_mysql_unlock_data_dictionary(trx);
+ dict_sys.unlock();
std::this_thread::sleep_for(std::chrono::milliseconds(50));
- row_mysql_lock_data_dictionary(trx);
+ dict_sys.lock(SRW_LOCK_CALL);
if (!--n)
{
@@ -13430,11 +13554,13 @@ int ha_innobase::delete_table(const char *name)
}
}
+ trx->dict_operation_lock_mode= true;
+
if (err != DB_SUCCESS)
{
err_exit:
+ trx->dict_operation_lock_mode= false;
trx->rollback();
- trx->dict_operation_lock_mode= 0;
switch (err) {
case DB_CANNOT_DROP_CONSTRAINT:
case DB_LOCK_WAIT_TIMEOUT:
@@ -13448,6 +13574,14 @@ err_exit:
fts_optimize_add_table(table);
purge_sys.resume_FTS();
}
+#ifdef WITH_PARTITION_STORAGE_ENGINE
+ else if (rollback_add_partition)
+ purge_sys.resume_FTS();
+#endif
+ if (table_stats)
+ dict_table_close(table_stats, true, thd, mdl_table);
+ if (index_stats)
+ dict_table_close(index_stats, true, thd, mdl_index);
dict_sys.unlock();
if (trx != parent_trx)
trx->free();
@@ -13456,7 +13590,7 @@ err_exit:
if (!table->no_rollback() && trx->check_foreigns)
{
- const bool drop_db= thd_sql_command(thd) == SQLCOM_DROP_DB;
+ const bool drop_db= sqlcom == SQLCOM_DROP_DB;
for (auto foreign : table->referenced_set)
{
/* We should allow dropping a referenced table if creating
@@ -13485,7 +13619,7 @@ err_exit:
if (!table->no_rollback())
{
err= trx->drop_table_foreign(table->name);
- if (err == DB_SUCCESS)
+ if (err == DB_SUCCESS && table_stats && index_stats)
err= trx->drop_table_statistics(table->name);
if (err != DB_SUCCESS)
goto err_exit;
@@ -13497,14 +13631,22 @@ err_exit:
std::vector<pfs_os_file_t> deleted;
trx->commit(deleted);
+ if (table_stats)
+ dict_table_close(table_stats, true, thd, mdl_table);
+ if (index_stats)
+ dict_table_close(index_stats, true, thd, mdl_index);
row_mysql_unlock_data_dictionary(trx);
for (pfs_os_file_t d : deleted)
os_file_close(d);
log_write_up_to(trx->commit_lsn, true);
if (trx != parent_trx)
trx->free();
- if (fts)
- purge_sys.resume_FTS();
+ if (!fts)
+#ifdef WITH_PARTITION_STORAGE_ENGINE
+ if (!rollback_add_partition)
+#endif
+ DBUG_RETURN(0);
+ purge_sys.resume_FTS();
DBUG_RETURN(0);
}
@@ -13514,7 +13656,7 @@ err_exit:
@param[in] to new table name
@param[in] use_fk whether to enforce FOREIGN KEY
@return DB_SUCCESS or error code */
-inline dberr_t innobase_rename_table(trx_t *trx, const char *from,
+static dberr_t innobase_rename_table(trx_t *trx, const char *from,
const char *to, bool use_fk)
{
dberr_t error;
@@ -13665,6 +13807,9 @@ int ha_innobase::truncate()
heap, ib_table->name.m_name, ib_table->id);
const char* name = mem_heap_strdup(heap, ib_table->name.m_name);
+ dict_table_t *table_stats = nullptr, *index_stats = nullptr;
+ MDL_ticket *mdl_table = nullptr, *mdl_index = nullptr;
+
dberr_t error = lock_table_for_trx(ib_table, trx, LOCK_X);
const bool fts = error == DB_SUCCESS
&& ib_table->flags2 & (DICT_TF2_FTS_HAS_DOC_ID | DICT_TF2_FTS);
@@ -13675,8 +13820,6 @@ int ha_innobase::truncate()
error = fts_lock_tables(trx, *ib_table);
}
- row_mysql_lock_data_dictionary(trx);
- dict_stats_wait_bg_to_stop_using_table(ib_table);
/* Wait for purge threads to stop using the table. */
for (uint n = 15; ib_table->get_ref_count() > 1; ) {
if (!--n) {
@@ -13684,12 +13827,44 @@ int ha_innobase::truncate()
break;
}
- row_mysql_unlock_data_dictionary(trx);
std::this_thread::sleep_for(std::chrono::milliseconds(50));
- row_mysql_lock_data_dictionary(trx);
+ }
+
+ if (error == DB_SUCCESS && dict_stats_is_persistent_enabled(ib_table)
+ && !ib_table->is_stats_table()) {
+ table_stats= dict_table_open_on_name(TABLE_STATS_NAME, false,
+ DICT_ERR_IGNORE_NONE);
+ if (table_stats) {
+ dict_sys.freeze(SRW_LOCK_CALL);
+ table_stats = dict_acquire_mdl_shared<false>(
+ table_stats, m_user_thd, &mdl_table);
+ dict_sys.unfreeze();
+ }
+ index_stats = dict_table_open_on_name(INDEX_STATS_NAME, false,
+ DICT_ERR_IGNORE_NONE);
+ if (index_stats) {
+ dict_sys.freeze(SRW_LOCK_CALL);
+ index_stats = dict_acquire_mdl_shared<false>(
+ index_stats, m_user_thd, &mdl_index);
+ dict_sys.unfreeze();
+ }
+
+ if (table_stats && index_stats
+ && !strcmp(table_stats->name.m_name, TABLE_STATS_NAME)
+ && !strcmp(index_stats->name.m_name, INDEX_STATS_NAME) &&
+ !(error = lock_table_for_trx(table_stats, trx, LOCK_X))) {
+ error = lock_table_for_trx(index_stats, trx, LOCK_X);
+ }
}
if (error == DB_SUCCESS) {
+ error = lock_sys_tables(trx);
+ }
+
+ row_mysql_lock_data_dictionary(trx);
+ dict_stats_wait_bg_to_stop_using_table(ib_table);
+
+ if (error == DB_SUCCESS) {
error = innobase_rename_table(trx, ib_table->name.m_name,
temp_name, false);
@@ -13724,7 +13899,7 @@ int ha_innobase::truncate()
if (err) {
reload:
m_prebuilt->table = dict_table_open_on_name(
- name, false, false, DICT_ERR_IGNORE_NONE);
+ name, false, DICT_ERR_IGNORE_NONE);
m_prebuilt->table->def_trx_id = def_trx_id;
} else {
row_prebuilt_t* prebuilt = m_prebuilt;
@@ -13756,6 +13931,14 @@ reload:
trx->free();
mem_heap_free(heap);
+
+ if (table_stats) {
+ dict_table_close(table_stats, false, m_user_thd, mdl_table);
+ }
+ if (index_stats) {
+ dict_table_close(index_stats, false, m_user_thd, mdl_index);
+ }
+
DBUG_RETURN(err);
}
@@ -13779,23 +13962,69 @@ ha_innobase::rename_table(
}
trx_t* trx = innobase_trx_allocate(thd);
+ trx_start_for_ddl(trx);
- /* We are doing a DDL operation. */
- trx->will_lock = true;
- trx->dict_operation = true;
- row_mysql_lock_data_dictionary(trx);
+ dict_table_t *table_stats = nullptr, *index_stats = nullptr;
+ MDL_ticket *mdl_table = nullptr, *mdl_index = nullptr;
+ char norm_from[MAX_FULL_NAME_LEN];
+ char norm_to[MAX_FULL_NAME_LEN];
- dberr_t error = innobase_rename_table(trx, from, to, true);
+ normalize_table_name(norm_from, from);
+ normalize_table_name(norm_to, to);
- DEBUG_SYNC(thd, "after_innobase_rename_table");
+ dberr_t error = DB_SUCCESS;
+
+ if (strcmp(norm_from, TABLE_STATS_NAME)
+ && strcmp(norm_from, INDEX_STATS_NAME)
+ && strcmp(norm_to, TABLE_STATS_NAME)
+ && strcmp(norm_to, INDEX_STATS_NAME)) {
+ table_stats = dict_table_open_on_name(TABLE_STATS_NAME, false,
+ DICT_ERR_IGNORE_NONE);
+ if (table_stats) {
+ dict_sys.freeze(SRW_LOCK_CALL);
+ table_stats = dict_acquire_mdl_shared<false>(
+ table_stats, thd, &mdl_table);
+ dict_sys.unfreeze();
+ }
+ index_stats = dict_table_open_on_name(INDEX_STATS_NAME, false,
+ DICT_ERR_IGNORE_NONE);
+ if (index_stats) {
+ dict_sys.freeze(SRW_LOCK_CALL);
+ index_stats = dict_acquire_mdl_shared<false>(
+ index_stats, thd, &mdl_index);
+ dict_sys.unfreeze();
+ }
+
+ if (table_stats && index_stats
+ && !strcmp(table_stats->name.m_name, TABLE_STATS_NAME)
+ && !strcmp(index_stats->name.m_name, INDEX_STATS_NAME) &&
+ !(error = lock_table_for_trx(table_stats, trx, LOCK_X))) {
+ error = lock_table_for_trx(index_stats, trx, LOCK_X);
+ }
+ }
+
+ if (error == DB_SUCCESS) {
+ error = lock_table_for_trx(dict_sys.sys_tables, trx, LOCK_X);
+ if (error == DB_SUCCESS) {
+ error = lock_table_for_trx(dict_sys.sys_foreign, trx,
+ LOCK_X);
+ if (error == DB_SUCCESS) {
+ error = lock_table_for_trx(
+ dict_sys.sys_foreign_cols,
+ trx, LOCK_X);
+ }
+ }
+ }
+
+ row_mysql_lock_data_dictionary(trx);
if (error == DB_SUCCESS) {
- char norm_from[MAX_FULL_NAME_LEN];
- char norm_to[MAX_FULL_NAME_LEN];
+ error = innobase_rename_table(trx, from, to, true);
+ }
- normalize_table_name(norm_from, from);
- normalize_table_name(norm_to, to);
+ DEBUG_SYNC(thd, "after_innobase_rename_table");
+ if (error == DB_SUCCESS && table_stats && index_stats) {
error = dict_stats_rename_table(norm_from, norm_to, trx);
if (error == DB_DUPLICATE_KEY) {
/* The duplicate may also occur in
@@ -13812,6 +14041,12 @@ ha_innobase::rename_table(
trx->rollback();
}
+ if (table_stats) {
+ dict_table_close(table_stats, true, thd, mdl_table);
+ }
+ if (index_stats) {
+ dict_table_close(index_stats, true, thd, mdl_index);
+ }
row_mysql_unlock_data_dictionary(trx);
if (error == DB_SUCCESS) {
log_write_up_to(trx->commit_lsn, true);
@@ -15161,7 +15396,7 @@ get_foreign_key_info(
dict_table_t* ref_table = dict_table_open_on_name(
foreign->referenced_table_name_lookup,
- TRUE, FALSE, DICT_ERR_IGNORE_NONE);
+ true, DICT_ERR_IGNORE_NONE);
if (ref_table == NULL) {
@@ -15174,8 +15409,7 @@ get_foreign_key_info(
<< foreign->foreign_table_name;
}
} else {
-
- dict_table_close(ref_table, TRUE, FALSE);
+ dict_table_close(ref_table, true);
}
}
@@ -17097,7 +17331,7 @@ static int innodb_ft_aux_table_validate(THD *thd, st_mysql_sys_var*,
if (const char* table_name = value->val_str(value, buf, &len)) {
if (dict_table_t* table = dict_table_open_on_name(
- table_name, FALSE, TRUE, DICT_ERR_IGNORE_NONE)) {
+ table_name, false, DICT_ERR_IGNORE_NONE)) {
const table_id_t id = dict_table_has_fts_index(table)
? table->id : 0;
dict_table_close(table);
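
Read together, the ha_innodb.cc hunks above establish one locking order for DROP TABLE, TRUNCATE and RENAME TABLE. The sketch below is assembled from those hunks rather than copied from any single function; error handling, the FTS and partition special cases, and the purge wait loops are omitted, and user_table/thd/mdl_table/mdl_index stand for the variables used in the respective functions.

	trx_start_for_ddl(trx);

	/* X-lock the affected user table first. */
	dberr_t	err = lock_table_for_trx(user_table, trx, LOCK_X);

	/* If persistent statistics apply, reference the statistics tables,
	protect them with MDL, and X-lock them as well. */
	dict_table_t*	table_stats = dict_table_open_on_name(
		TABLE_STATS_NAME, false, DICT_ERR_IGNORE_NONE);
	if (table_stats) {
		dict_sys.freeze(SRW_LOCK_CALL);
		table_stats = dict_acquire_mdl_shared<false>(
			table_stats, thd, &mdl_table);
		dict_sys.unfreeze();
	}
	/* ... the same for index_stats/mdl_index ... */
	if (err == DB_SUCCESS && table_stats && index_stats
	    && !(err = lock_table_for_trx(table_stats, trx, LOCK_X))) {
		err = lock_table_for_trx(index_stats, trx, LOCK_X);
	}

	/* X-lock the InnoDB data dictionary tables. */
	if (err == DB_SUCCESS) {
		err = lock_sys_tables(trx);
	}

	/* Only now latch the dictionary cache and run the operation. */
	row_mysql_lock_data_dictionary(trx);
	/* ... dictionary operation in the InnoDB SQL parser ... */
	trx->commit();			/* releases the table locks */
	if (table_stats) {
		dict_table_close(table_stats, true, thd, mdl_table);
	}
	if (index_stats) {
		dict_table_close(index_stats, true, thd, mdl_index);
	}
	row_mysql_unlock_data_dictionary(trx);
	trx->free();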
diff --git a/storage/innobase/handler/handler0alter.cc b/storage/innobase/handler/handler0alter.cc
index a9801cd35ae..06eaa934834 100644
--- a/storage/innobase/handler/handler0alter.cc
+++ b/storage/innobase/handler/handler0alter.cc
@@ -4067,7 +4067,7 @@ online_retry_drop_indexes_low(
trx_t* trx) /*!< in/out: transaction */
{
ut_ad(dict_sys.locked());
- ut_ad(trx->dict_operation_lock_mode == RW_X_LATCH);
+ ut_ad(trx->dict_operation_lock_mode);
ut_ad(trx->dict_operation);
/* We can have table->n_ref_count > 1, because other threads
@@ -4101,65 +4101,34 @@ static void commit_unlock_and_unlink(trx_t *trx)
unlock_and_close_files(deleted, trx);
}
-/********************************************************************//**
+/**
Drop any indexes that we were not able to free previously due to
-open table handles. */
-static MY_ATTRIBUTE((nonnull))
-void
-online_retry_drop_indexes(
-/*======================*/
- dict_table_t* table, /*!< in/out: table */
- THD* user_thd) /*!< in/out: MySQL connection */
-{
- if (table->drop_aborted) {
- trx_t* trx = innobase_trx_allocate(user_thd);
-
- trx_start_for_ddl(trx);
-
- row_mysql_lock_data_dictionary(trx);
- online_retry_drop_indexes_low(table, trx);
- commit_unlock_and_unlink(trx);
- trx->free();
- }
-
- ut_d(dict_sys.freeze(SRW_LOCK_CALL));
- ut_d(dict_table_check_for_dup_indexes(table, CHECK_ALL_COMPLETE));
- ut_d(dict_sys.unfreeze());
- ut_ad(!table->drop_aborted);
-}
-
-/********************************************************************//**
-Commit a dictionary transaction and drop any indexes that we were not
-able to free previously due to open table handles. */
-static MY_ATTRIBUTE((nonnull))
-void
-online_retry_drop_indexes_with_trx(
-/*===============================*/
- dict_table_t* table, /*!< in/out: table */
- trx_t* trx) /*!< in/out: transaction */
+open table handles.
+@param table InnoDB table
+@param thd connection handle
+*/
+static void online_retry_drop_indexes(dict_table_t *table, THD *thd)
{
- ut_ad(trx_state_eq(trx, TRX_STATE_NOT_STARTED));
-
- ut_ad(trx->dict_operation_lock_mode == RW_X_LATCH);
+ if (table->drop_aborted)
+ {
+ trx_t *trx= innobase_trx_allocate(thd);
- /* Now that the dictionary is being locked, check if we can
- drop any incompletely created indexes that may have been left
- behind in rollback_inplace_alter_table() earlier. */
- if (table->drop_aborted) {
- trx_start_for_ddl(trx);
+ trx_start_for_ddl(trx);
+ if (lock_sys_tables(trx) == DB_SUCCESS)
+ {
+ row_mysql_lock_data_dictionary(trx);
+ online_retry_drop_indexes_low(table, trx);
+ commit_unlock_and_unlink(trx);
+ }
+ else
+ trx->commit();
+ trx->free();
+ }
- online_retry_drop_indexes_low(table, trx);
- std::vector<pfs_os_file_t> deleted;
- trx->commit(deleted);
- /* FIXME: We are holding the data dictionary latch here
- while waiting for the files to be actually deleted.
- However, we should never have any deleted files here,
- because they would be related to ADD FULLTEXT INDEX,
- and that operation is never supported online. */
- for (pfs_os_file_t d : deleted) {
- os_file_close(d);
- }
- }
+ ut_d(dict_sys.freeze(SRW_LOCK_CALL));
+ ut_d(dict_table_check_for_dup_indexes(table, CHECK_ALL_COMPLETE));
+ ut_d(dict_sys.unfreeze());
+ ut_ad(!table->drop_aborted);
}
/** Determines if InnoDB is dropping a foreign key constraint.
@@ -4803,7 +4772,7 @@ innobase_update_gis_column_type(
DBUG_ENTER("innobase_update_gis_column_type");
DBUG_ASSERT(trx->dict_operation);
- ut_ad(trx->dict_operation_lock_mode == RW_X_LATCH);
+ ut_ad(trx->dict_operation_lock_mode);
ut_ad(dict_sys.locked());
info = pars_info_create();
@@ -6297,6 +6266,10 @@ acquire_lock:
}
}
+ if (error == DB_SUCCESS) {
+ error = lock_sys_tables(ctx->trx);
+ }
+
if (error != DB_SUCCESS) {
table_lock_failed = true;
goto error_handling;
@@ -6339,8 +6312,20 @@ new_clustered_failed:
ut_ad(user_table->get_ref_count() == 1);
- online_retry_drop_indexes_with_trx(
- user_table, ctx->trx);
+ if (user_table->drop_aborted) {
+ row_mysql_unlock_data_dictionary(ctx->trx);
+ trx_start_for_ddl(ctx->trx);
+ if (lock_sys_tables(ctx->trx) == DB_SUCCESS) {
+ row_mysql_lock_data_dictionary(
+ ctx->trx);
+ online_retry_drop_indexes_low(
+ user_table, ctx->trx);
+ commit_unlock_and_unlink(ctx->trx);
+ } else {
+ ctx->trx->commit();
+ }
+ row_mysql_lock_data_dictionary(ctx->trx);
+ }
if (ctx->need_rebuild()) {
if (ctx->new_table) {
@@ -6613,8 +6598,8 @@ wrong_column_name:
ha_alter_info, ctx->new_table, ctx->trx);
if (error != DB_SUCCESS) {
ut_ad(error == DB_ERROR);
- error = DB_UNSUPPORTED;
- goto error_handling;
+ my_error(ER_TABLE_CANT_HANDLE_SPKEYS, MYF(0), "SYS_COLUMNS");
+ goto error_handled;
}
}
@@ -7042,7 +7027,7 @@ error_handling_drop_uncached:
if (fts_index) {
ut_ad(ctx->trx->dict_operation);
- ut_ad(ctx->trx->dict_operation_lock_mode == RW_X_LATCH);
+ ut_ad(ctx->trx->dict_operation_lock_mode);
ut_ad(dict_sys.locked());
DICT_TF2_FLAG_SET(ctx->new_table, DICT_TF2_FTS);
@@ -7135,27 +7120,12 @@ error_handling:
case DB_DUPLICATE_KEY:
my_error(ER_DUP_KEY, MYF(0), "SYS_INDEXES");
break;
- case DB_UNSUPPORTED:
- my_error(ER_TABLE_CANT_HANDLE_SPKEYS, MYF(0), "SYS_COLUMNS");
- break;
default:
my_error_innodb(error, table_name, user_table->flags);
}
ctx->trx->rollback();
-error_handled:
-
- ctx->prebuilt->trx->error_info = NULL;
- ctx->trx->error_state = DB_SUCCESS;
-
- if (!dict_locked) {
- row_mysql_lock_data_dictionary(ctx->trx);
- if (table_lock_failed) {
- goto err_exit;
- }
- }
-
if (ctx->need_rebuild()) {
/* Free the log for online table rebuild, if
one was allocated. */
@@ -7175,17 +7145,48 @@ error_handled:
clust_index->lock.x_unlock();
}
+ ctx->prebuilt->trx->error_info = NULL;
+ ctx->trx->error_state = DB_SUCCESS;
+
+ if (false) {
+error_handled:
+ ut_ad(!table_lock_failed);
+ ut_ad(ctx->trx->state == TRX_STATE_ACTIVE);
+ ut_ad(!ctx->trx->undo_no);
+ ut_ad(dict_locked);
+ } else if (table_lock_failed) {
+ if (!dict_locked) {
+ row_mysql_lock_data_dictionary(ctx->trx);
+ }
+ goto err_exit;
+ } else {
+ ut_ad(ctx->trx->state == TRX_STATE_NOT_STARTED);
+ if (new_clustered && !user_table->drop_aborted) {
+ goto err_exit;
+ }
+ if (dict_locked) {
+ row_mysql_unlock_data_dictionary(ctx->trx);
+ }
+ trx_start_for_ddl(ctx->trx);
+ dberr_t err= lock_sys_tables(ctx->trx);
+ row_mysql_lock_data_dictionary(ctx->trx);
+ if (err != DB_SUCCESS) {
+ goto err_exit;
+ }
+ }
+
/* n_ref_count must be 1, because purge cannot
be executing on this very table as we are
- holding dict_sys.latch X-latch. */
+ holding MDL_EXCLUSIVE. */
ut_ad(!stats_wait || ctx->online || user_table->get_ref_count() == 1);
if (new_clustered) {
- online_retry_drop_indexes_with_trx(user_table, ctx->trx);
+ online_retry_drop_indexes_low(user_table, ctx->trx);
+ commit_unlock_and_unlink(ctx->trx);
+ row_mysql_lock_data_dictionary(ctx->trx);
} else {
- trx_start_for_ddl(ctx->trx);
row_merge_drop_indexes(ctx->trx, user_table, true);
- trx_commit_for_mysql(ctx->trx);
+ ctx->trx->commit();
}
ut_d(dict_table_check_for_dup_indexes(user_table, CHECK_ALL_COMPLETE));
@@ -7330,7 +7331,7 @@ rename_index_try(
{
DBUG_ENTER("rename_index_try");
ut_ad(dict_sys.locked());
- ut_ad(trx->dict_operation_lock_mode == RW_X_LATCH);
+ ut_ad(trx->dict_operation_lock_mode);
pars_info_t* pinfo;
dberr_t err;
@@ -7552,7 +7553,7 @@ ha_innobase::prepare_inplace_alter_table(
if (!(ha_alter_info->handler_flags & ~INNOBASE_INPLACE_IGNORE)) {
/* Nothing to do */
- DBUG_ASSERT(m_prebuilt->trx->dict_operation_lock_mode == 0);
+ DBUG_ASSERT(!m_prebuilt->trx->dict_operation_lock_mode);
DBUG_RETURN(false);
}
@@ -7632,7 +7633,7 @@ ha_innobase::prepare_inplace_alter_table(
ha_alter_info->key_info_buffer,
ha_alter_info->key_count)) {
err_exit_no_heap:
- DBUG_ASSERT(m_prebuilt->trx->dict_operation_lock_mode == 0);
+ DBUG_ASSERT(!m_prebuilt->trx->dict_operation_lock_mode);
online_retry_drop_indexes(m_prebuilt->table, m_user_thd);
DBUG_RETURN(true);
}
@@ -8091,7 +8092,7 @@ err_exit:
== ALTER_OPTIONS
&& !alter_options_need_rebuild(ha_alter_info, table))) {
- DBUG_ASSERT(m_prebuilt->trx->dict_operation_lock_mode == 0);
+ DBUG_ASSERT(!m_prebuilt->trx->dict_operation_lock_mode);
online_retry_drop_indexes(m_prebuilt->table, m_user_thd);
if (heap) {
@@ -8709,6 +8710,9 @@ inline bool rollback_inplace_alter_table(Alter_inplace_info *ha_alter_info,
if (index->type & DICT_FTS)
err= fts_lock_index_tables(ctx->trx, *index);
}
+ if (err == DB_SUCCESS)
+ err= lock_sys_tables(ctx->trx);
+
row_mysql_lock_data_dictionary(ctx->trx);
/* Detach ctx->new_table from dict_index_t::online_log. */
innobase_online_rebuild_log_free(ctx->old_table);
@@ -8735,18 +8739,28 @@ inline bool rollback_inplace_alter_table(Alter_inplace_info *ha_alter_info,
{
DBUG_ASSERT(!(ha_alter_info->handler_flags & ALTER_ADD_PK_INDEX));
DBUG_ASSERT(ctx->old_table == prebuilt->table);
+ uint &innodb_lock_wait_timeout=
+ thd_lock_wait_timeout(ctx->trx->mysql_thd);
+ const uint save_timeout= innodb_lock_wait_timeout;
+ innodb_lock_wait_timeout= ~0U; /* infinite */
+
if (fts_exist)
{
for (ulint a= 0; a < ctx->num_to_add_index; a++)
{
const dict_index_t *index = ctx->add_index[a];
- // FIXME: skip fts_drop_index_tables() if we failed to acquire locks
if (index->type & DICT_FTS)
- fts_lock_index_tables(ctx->trx, *index);
+ ut_a(!fts_lock_index_tables(ctx->trx, *index));
}
- // FIXME: skip fts_drop_tables() if we failed to acquire locks
- fts_lock_common_tables(ctx->trx, *ctx->new_table);
+ ut_a(!fts_lock_common_tables(ctx->trx, *ctx->new_table));
+ ut_a(!lock_sys_tables(ctx->trx));
+ }
+ else
+ {
+ ut_a(!lock_table_for_trx(dict_sys.sys_indexes, ctx->trx, LOCK_X));
+ ut_a(!lock_table_for_trx(dict_sys.sys_fields, ctx->trx, LOCK_X));
}
+ innodb_lock_wait_timeout= save_timeout;
row_mysql_lock_data_dictionary(ctx->trx);
ctx->rollback_instant();
innobase_rollback_sec_index(ctx->old_table, table,
@@ -8835,7 +8849,7 @@ innobase_drop_foreign_try(
DBUG_ENTER("innobase_drop_foreign_try");
DBUG_ASSERT(trx->dict_operation);
- ut_ad(trx->dict_operation_lock_mode == RW_X_LATCH);
+ ut_ad(trx->dict_operation_lock_mode);
ut_ad(dict_sys.locked());
/* Drop the constraint from the data dictionary. */
@@ -8891,7 +8905,7 @@ innobase_rename_column_try(
DBUG_ENTER("innobase_rename_column_try");
DBUG_ASSERT(trx->dict_operation);
- ut_ad(trx->dict_operation_lock_mode == RW_X_LATCH);
+ ut_ad(trx->dict_operation_lock_mode);
ut_ad(dict_sys.locked());
if (ctx.need_rebuild()) {
@@ -9205,7 +9219,7 @@ innobase_rename_or_enlarge_column_try(
DBUG_ASSERT(!ctx->need_rebuild());
DBUG_ASSERT(trx->dict_operation);
- ut_ad(trx->dict_operation_lock_mode == RW_X_LATCH);
+ ut_ad(trx->dict_operation_lock_mode);
ut_ad(dict_sys.locked());
ulint n_base;
@@ -9883,7 +9897,7 @@ commit_try_rebuild(
DBUG_ENTER("commit_try_rebuild");
DBUG_ASSERT(ctx->need_rebuild());
- DBUG_ASSERT(trx->dict_operation_lock_mode == RW_X_LATCH);
+ DBUG_ASSERT(trx->dict_operation_lock_mode);
DBUG_ASSERT(!(ha_alter_info->handler_flags
& ALTER_DROP_FOREIGN_KEY)
|| ctx->num_to_drop_fk > 0);
@@ -10150,7 +10164,7 @@ commit_try_norebuild(
{
DBUG_ENTER("commit_try_norebuild");
DBUG_ASSERT(!ctx->need_rebuild());
- DBUG_ASSERT(trx->dict_operation_lock_mode == RW_X_LATCH);
+ DBUG_ASSERT(trx->dict_operation_lock_mode);
DBUG_ASSERT(!(ha_alter_info->handler_flags
& ALTER_DROP_FOREIGN_KEY)
|| ctx->num_to_drop_fk > 0);
@@ -10908,8 +10922,55 @@ lock_fail:
}
}
- /* Latch the InnoDB data dictionary exclusively so that no deadlocks
- or lock waits can happen in it during the data dictionary operation. */
+ dict_table_t *table_stats = nullptr, *index_stats = nullptr;
+ MDL_ticket *mdl_table = nullptr, *mdl_index = nullptr;
+ dberr_t error = DB_SUCCESS;
+ if (!ctx0->old_table->is_stats_table() &&
+ !ctx0->new_table->is_stats_table()) {
+ table_stats = dict_table_open_on_name(
+ TABLE_STATS_NAME, false, DICT_ERR_IGNORE_NONE);
+ if (table_stats) {
+ dict_sys.freeze(SRW_LOCK_CALL);
+ table_stats = dict_acquire_mdl_shared<false>(
+ table_stats, m_user_thd, &mdl_table);
+ dict_sys.unfreeze();
+ }
+ index_stats = dict_table_open_on_name(
+ INDEX_STATS_NAME, false, DICT_ERR_IGNORE_NONE);
+ if (index_stats) {
+ dict_sys.freeze(SRW_LOCK_CALL);
+ index_stats = dict_acquire_mdl_shared<false>(
+ index_stats, m_user_thd, &mdl_index);
+ dict_sys.unfreeze();
+ }
+
+ if (table_stats && index_stats
+ && !strcmp(table_stats->name.m_name, TABLE_STATS_NAME)
+ && !strcmp(index_stats->name.m_name, INDEX_STATS_NAME)
+ && !(error = lock_table_for_trx(table_stats,
+ trx, LOCK_X))) {
+ error = lock_table_for_trx(index_stats, trx, LOCK_X);
+ }
+ }
+ if (error == DB_SUCCESS) {
+ error = lock_sys_tables(trx);
+ }
+ if (error != DB_SUCCESS) {
+ if (table_stats) {
+ dict_table_close(table_stats, false, m_user_thd,
+ mdl_table);
+ }
+ if (index_stats) {
+ dict_table_close(index_stats, false, m_user_thd,
+ mdl_index);
+ }
+ my_error_innodb(error, table_share->table_name.str, 0);
+ if (fts_exist) {
+ purge_sys.resume_FTS();
+ }
+ DBUG_RETURN(true);
+ }
+
row_mysql_lock_data_dictionary(trx);
/* Prevent the background statistics collection from accessing
@@ -10953,6 +11014,14 @@ lock_fail:
fail:
trx->rollback();
ut_ad(!trx->fts_trx);
+ if (table_stats) {
+ dict_table_close(table_stats, true, m_user_thd,
+ mdl_table);
+ }
+ if (index_stats) {
+ dict_table_close(index_stats, true, m_user_thd,
+ mdl_index);
+ }
row_mysql_unlock_data_dictionary(trx);
if (fts_exist) {
purge_sys.resume_FTS();
@@ -11000,6 +11069,13 @@ fail:
#endif
}
+ if (table_stats) {
+ dict_table_close(table_stats, true, m_user_thd, mdl_table);
+ }
+ if (index_stats) {
+ dict_table_close(index_stats, true, m_user_thd, mdl_index);
+ }
+
/* Commit or roll back the changes to the data dictionary. */
DEBUG_SYNC(m_user_thd, "innodb_alter_inplace_before_commit");
@@ -11051,9 +11127,6 @@ fail:
ha_alter_info->inplace_alter_table_committed = purge_sys.resume_SYS;
purge_sys.stop_SYS();
trx->commit(deleted);
- log_write_up_to(trx->commit_lsn, true);
- DBUG_EXECUTE_IF("innodb_alter_commit_crash_after_commit",
- DBUG_SUICIDE(););
/* At this point, the changes to the persistent storage have
been committed or rolled back. What remains to be done is to
@@ -11147,6 +11220,9 @@ foreign_fail:
}
unlock_and_close_files(deleted, trx);
+ log_write_up_to(trx->commit_lsn, true);
+ DBUG_EXECUTE_IF("innodb_alter_commit_crash_after_commit",
+ DBUG_SUICIDE(););
trx->free();
if (fts_exist) {
purge_sys.resume_FTS();
@@ -11199,6 +11275,9 @@ foreign_fail:
}
unlock_and_close_files(deleted, trx);
+ log_write_up_to(trx->commit_lsn, true);
+ DBUG_EXECUTE_IF("innodb_alter_commit_crash_after_commit",
+ DBUG_SUICIDE(););
trx->free();
if (fts_exist) {
purge_sys.resume_FTS();
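
One detail of the commit_inplace_alter_table() hunks above is easy to miss in diff form: the durable redo log write is moved so that it happens only after the dictionary latch has been released. A minimal sketch of the resulting tail of the commit path (names as in the hunks; error paths omitted):

	trx->commit(deleted);			/* still holding dict_sys.latch */
	/* ... */
	unlock_and_close_files(deleted, trx);	/* releases dict_sys.latch */
	log_write_up_to(trx->commit_lsn, true);	/* flush redo without the latch */
	DBUG_EXECUTE_IF("innodb_alter_commit_crash_after_commit",
			DBUG_SUICIDE(););
	trx->free();
	if (fts_exist) {
		purge_sys.resume_FTS();
	}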
diff --git a/storage/innobase/handler/i_s.cc b/storage/innobase/handler/i_s.cc
index 9e84e944381..e4d53b32c7f 100644
--- a/storage/innobase/handler/i_s.cc
+++ b/storage/innobase/handler/i_s.cc
@@ -2410,7 +2410,7 @@ i_s_fts_deleted_generic_fill(
DBUG_RETURN(0);
} else if (!dict_table_has_fts_index(user_table)
|| !user_table->is_readable()) {
- dict_table_close(user_table, false, false, thd, mdl_ticket);
+ dict_table_close(user_table, false, thd, mdl_ticket);
DBUG_RETURN(0);
}
@@ -2425,7 +2425,7 @@ i_s_fts_deleted_generic_fill(
fts_table_fetch_doc_ids(trx, &fts_table, deleted);
- dict_table_close(user_table, false, false, thd, mdl_ticket);
+ dict_table_close(user_table, false, thd, mdl_ticket);
trx->free();
@@ -2782,7 +2782,7 @@ i_s_fts_index_cache_fill(
}
if (!user_table->fts || !user_table->fts->cache) {
- dict_table_close(user_table, false, false, thd, mdl_ticket);
+ dict_table_close(user_table, false, thd, mdl_ticket);
DBUG_RETURN(0);
}
@@ -2807,7 +2807,7 @@ i_s_fts_index_cache_fill(
}
mysql_mutex_unlock(&cache->lock);
- dict_table_close(user_table, false, false, thd, mdl_ticket);
+ dict_table_close(user_table, false, thd, mdl_ticket);
DBUG_RETURN(ret);
}
@@ -3238,7 +3238,7 @@ i_s_fts_index_table_fill(
}
}
- dict_table_close(user_table, false, false, thd, mdl_ticket);
+ dict_table_close(user_table, false, thd, mdl_ticket);
ut_free(conv_str.f_str);
@@ -3374,7 +3374,7 @@ i_s_fts_config_fill(
}
if (!dict_table_has_fts_index(user_table)) {
- dict_table_close(user_table, false, false, thd, mdl_ticket);
+ dict_table_close(user_table, false, thd, mdl_ticket);
DBUG_RETURN(0);
}
@@ -3431,7 +3431,7 @@ i_s_fts_config_fill(
fts_sql_commit(trx);
- dict_table_close(user_table, false, false, thd, mdl_ticket);
+ dict_table_close(user_table, false, thd, mdl_ticket);
trx->free();
diff --git a/storage/innobase/include/dict0defrag_bg.h b/storage/innobase/include/dict0defrag_bg.h
index 3aea41b0bb8..0edc6304788 100644
--- a/storage/innobase/include/dict0defrag_bg.h
+++ b/storage/innobase/include/dict0defrag_bg.h
@@ -90,11 +90,8 @@ dict_defrag_process_entries_from_defrag_pool();
/*********************************************************************//**
Save defragmentation result.
@return DB_SUCCESS or error code */
-dberr_t
-dict_stats_save_defrag_summary(
-/*============================*/
- dict_index_t* index) /*!< in: index */
- MY_ATTRIBUTE((warn_unused_result));
+dberr_t dict_stats_save_defrag_summary(dict_index_t *index, THD *thd)
+ MY_ATTRIBUTE((nonnull, warn_unused_result));
/*********************************************************************//**
Save defragmentation stats for a given index.
diff --git a/storage/innobase/include/dict0dict.h b/storage/innobase/include/dict0dict.h
index a031d2d0e1e..e91ee7349d2 100644
--- a/storage/innobase/include/dict0dict.h
+++ b/storage/innobase/include/dict0dict.h
@@ -157,9 +157,7 @@ void dict_table_close(dict_table_t *table);
/** Decrements the count of open handles of a table.
@param[in,out] table table
-@param[in] dict_locked data dictionary locked
-@param[in] try_drop try to drop any orphan indexes after
- an aborted online index creation
+@param[in] dict_locked whether dict_sys.latch is being held
@param[in] thd thread to release MDL
@param[in] mdl metadata lock or NULL if the thread is a
foreground one. */
@@ -167,7 +165,6 @@ void
dict_table_close(
dict_table_t* table,
bool dict_locked,
- bool try_drop,
THD* thd = NULL,
MDL_ticket* mdl = NULL);
@@ -470,16 +467,14 @@ NOTE! This is a high-level function to be used mainly from outside the
'dict' directory. Inside this directory dict_table_get_low
is usually the appropriate function.
@param[in] table_name Table name
-@param[in] dict_locked TRUE=data dictionary locked
-@param[in] try_drop TRUE=try to drop any orphan indexes after
- an aborted online index creation
+@param[in] dict_locked whether dict_sys.latch is being held exclusively
@param[in] ignore_err error to be ignored when loading the table
-@return table, NULL if does not exist */
+@return table
+@retval nullptr if the table does not exist
dict_table_t*
dict_table_open_on_name(
const char* table_name,
- ibool dict_locked,
- ibool try_drop,
+ bool dict_locked,
dict_err_ignore_t ignore_err)
MY_ATTRIBUTE((warn_unused_result));
@@ -1357,14 +1352,7 @@ class dict_sys_t
/** The my_hrtime_coarse().val of the oldest lock_wait() start, or 0 */
std::atomic<ulonglong> latch_ex_wait_start;
- /** @brief the data dictionary rw-latch protecting dict_sys
-
- Table create, drop, etc. reserve this in X-mode; implicit or
- backround operations that are not fully covered by MDL
- (rollback, foreign key checks) reserve this in S-mode.
-
- This latch also prevents lock waits when accessing the InnoDB
- data dictionary tables. @see trx_t::dict_operation_lock_mode */
+ /** the rw-latch protecting the data dictionary cache */
MY_ALIGNED(CACHE_LINE_SIZE) srw_lock latch;
#ifdef UNIV_DEBUG
/** whether latch is being held in exclusive mode (by any thread) */
@@ -1618,9 +1606,9 @@ public:
/** Estimate the used memory occupied by the data dictionary
table and index objects.
@return number of bytes occupied */
- ulint rough_size() const
+ TPOOL_SUPPRESS_TSAN ulint rough_size() const
{
- /* No mutex; this is a very crude approximation anyway */
+ /* No latch; this is a very crude approximation anyway */
ulint size = UT_LIST_GET_LEN(table_LRU) + UT_LIST_GET_LEN(table_non_LRU);
size *= sizeof(dict_table_t)
+ sizeof(dict_index_t) * 2
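
With the try_drop parameter gone, dict_table_open_on_name() and dict_table_close() are used throughout this patch in a uniform open/MDL/close pattern. A sketch of that pattern, using a statistics table as the example (the same calls appear in the ha_innodb.cc hunks above):

	MDL_ticket*	mdl = nullptr;
	dict_table_t*	t = dict_table_open_on_name(
		TABLE_STATS_NAME, false, DICT_ERR_IGNORE_NONE);
	if (t) {
		/* Upgrade the reference to one protected by MDL_SHARED. */
		dict_sys.freeze(SRW_LOCK_CALL);
		t = dict_acquire_mdl_shared<false>(t, thd, &mdl);
		dict_sys.unfreeze();
	}

	/* ... use the table, e.g. lock_table_for_trx(t, trx, LOCK_X) ... */

	if (t) {
		/* false = the caller is not holding dict_sys.latch */
		dict_table_close(t, false, thd, mdl);
	}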
diff --git a/storage/innobase/include/dict0mem.h b/storage/innobase/include/dict0mem.h
index 45747c25565..8d636e056ee 100644
--- a/storage/innobase/include/dict0mem.h
+++ b/storage/innobase/include/dict0mem.h
@@ -2375,7 +2375,7 @@ public:
/** @return whether the name is
mysql.innodb_index_stats or mysql.innodb_table_stats */
- inline bool is_stats_table() const;
+ bool is_stats_table() const;
/** Create metadata.
@param name table name
diff --git a/storage/innobase/include/dict0stats.h b/storage/innobase/include/dict0stats.h
index 7112238c9b6..cdc5ec1bffb 100644
--- a/storage/innobase/include/dict0stats.h
+++ b/storage/innobase/include/dict0stats.h
@@ -140,19 +140,21 @@ dict_stats_update(
/** Execute DELETE FROM mysql.innodb_table_stats
@param database_name database name
@param table_name table name
-@param trx transaction (nullptr=start and commit a new one)
+@param trx transaction
@return DB_SUCCESS or error code */
dberr_t dict_stats_delete_from_table_stats(const char *database_name,
const char *table_name,
- trx_t *trx= nullptr);
+ trx_t *trx)
+ MY_ATTRIBUTE((nonnull));
/** Execute DELETE FROM mysql.innodb_index_stats
@param database_name database name
@param table_name table name
-@param trx transaction (nullptr=start and commit a new one)
+@param trx transaction
@return DB_SUCCESS or error code */
dberr_t dict_stats_delete_from_index_stats(const char *database_name,
const char *table_name,
- trx_t *trx= nullptr);
+ trx_t *trx)
+ MY_ATTRIBUTE((nonnull));
/** Execute DELETE FROM mysql.innodb_index_stats
@param database_name database name
@param table_name table name
@@ -203,9 +205,7 @@ storage.
@param[in] stat_value value of the stat
@param[in] sample_size n pages sampled or NULL
@param[in] stat_description description of the stat
-@param[in,out] trx in case of NULL the function will
-allocate and free the trx object. If it is not NULL then it will be
-rolled back only in the case of error, but not freed.
+@param[in,out] trx transaction
@return DB_SUCCESS or error code */
dberr_t
dict_stats_save_index_stat(
@@ -215,7 +215,8 @@ dict_stats_save_index_stat(
ib_uint64_t stat_value,
ib_uint64_t* sample_size,
const char* stat_description,
- trx_t* trx);
+ trx_t* trx)
+ MY_ATTRIBUTE((nonnull(1, 3, 6, 7)));
/** Report an error if updating table statistics failed because
.ibd file is missing, table decryption failed or table is corrupted.
diff --git a/storage/innobase/include/ha_prototypes.h b/storage/innobase/include/ha_prototypes.h
index 9a2786f9e69..2dd7c571386 100644
--- a/storage/innobase/include/ha_prototypes.h
+++ b/storage/innobase/include/ha_prototypes.h
@@ -207,6 +207,14 @@ but can be used for comparison.
*/
extern "C" unsigned long long thd_start_utime(const MYSQL_THD thd);
+
+/**
+ Indicate the start of an async operation in a foreground thread.
+@param thd current_thd
+@return thd
+@retval nullptr if this is not a foreground thread */
+THD *innodb_thd_increment_pending_ops(THD *thd);
+
/** Determines the current SQL statement.
Thread unsafe, can only be called from the thread owning the THD.
@param[in] thd MySQL thread handle
@@ -242,7 +250,7 @@ const char *thd_innodb_tmpdir(THD *thd);
/******************************************************************//**
Returns the lock wait timeout for the current connection.
@return the lock wait timeout, in seconds */
-uint
+uint&
thd_lock_wait_timeout(
/*==================*/
THD* thd); /*!< in: thread handle, or NULL to query
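
Because thd_lock_wait_timeout() now returns a reference, a caller can temporarily override the session's innodb_lock_wait_timeout and restore it afterwards. This is how rollback_inplace_alter_table() uses it in the handler0alter.cc hunk above; a reduced sketch:

	uint&		timeout = thd_lock_wait_timeout(trx->mysql_thd);
	const uint	save_timeout = timeout;
	timeout = ~0U;	/* effectively no timeout for the following lock requests */

	/* ... acquire locks that must not fail with DB_LOCK_WAIT_TIMEOUT ... */

	timeout = save_timeout;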
diff --git a/storage/innobase/include/lock0lock.h b/storage/innobase/include/lock0lock.h
index 9b83cf6ee1a..42e8bf4ad22 100644
--- a/storage/innobase/include/lock0lock.h
+++ b/storage/innobase/include/lock0lock.h
@@ -221,6 +221,17 @@ state was stored on the infimum of a page.
whose infimum stored the lock state; lock bits are reset on the infimum */
void lock_rec_restore_from_page_infimum(const buf_block_t &block,
const rec_t *rec, page_id_t donator);
+
+/**
+Create a table lock, without checking for deadlocks or lock compatibility.
+@param table table on which the lock is created
+@param type_mode lock type and mode
+@param trx transaction
+@param c_lock conflicting lock
+@return the created lock object */
+lock_t *lock_table_create(dict_table_t *table, unsigned type_mode, trx_t *trx,
+ lock_t *c_lock= nullptr);
+
/*********************************************************************//**
Checks if locks of other transactions prevent an immediate insert of
a record. If they do, first tests if the query thread should anyway
@@ -393,6 +404,12 @@ lock_table_for_trx(
enum lock_mode mode)
MY_ATTRIBUTE((nonnull, warn_unused_result));
+/** Exclusively lock the data dictionary tables.
+@param trx dictionary transaction
+@return error code
+@retval DB_SUCCESS on success */
+dberr_t lock_sys_tables(trx_t *trx);
+
/*************************************************************//**
Removes a granted record lock of a transaction from the queue and grants
locks to other transactions waiting in the queue if they now are entitled
diff --git a/storage/innobase/include/row0merge.h b/storage/innobase/include/row0merge.h
index c2a474abe4d..e9e250435f0 100644
--- a/storage/innobase/include/row0merge.h
+++ b/storage/innobase/include/row0merge.h
@@ -145,17 +145,6 @@ row_merge_dup_report(
const dfield_t* entry) /*!< in: duplicate index entry */
MY_ATTRIBUTE((nonnull));
-/*********************************************************************//**
-Drop indexes that were created before an error occurred.
-The data dictionary must have been locked exclusively by the caller,
-because the transaction will not be committed. */
-void
-row_merge_drop_indexes_dict(
-/*========================*/
- trx_t* trx, /*!< in/out: dictionary transaction */
- table_id_t table_id)/*!< in: table identifier */
- MY_ATTRIBUTE((nonnull));
-
/** Drop indexes that were created before an error occurred.
The data dictionary must have been locked exclusively by the caller,
because the transaction will not be committed.
diff --git a/storage/innobase/include/row0mysql.h b/storage/innobase/include/row0mysql.h
index ba8ac02dfd1..ed51a3a3d90 100644
--- a/storage/innobase/include/row0mysql.h
+++ b/storage/innobase/include/row0mysql.h
@@ -300,30 +300,24 @@ row_update_cascade_for_mysql(
or set null operation */
dict_table_t* table) /*!< in: table where we do the operation */
MY_ATTRIBUTE((nonnull, warn_unused_result));
-/*********************************************************************//**
-Locks the data dictionary exclusively for performing a table create or other
-data dictionary modification operation. */
-void
-row_mysql_lock_data_dictionary_func(
-/*================================*/
-#ifdef UNIV_PFS_RWLOCK
- const char* file, /*!< in: file name */
- unsigned line, /*!< in: line number */
-#endif
- trx_t* trx); /*!< in/out: transaction */
-#ifdef UNIV_PFS_RWLOCK
-#define row_mysql_lock_data_dictionary(trx) \
- row_mysql_lock_data_dictionary_func(__FILE__, __LINE__, trx)
-#else
-#define row_mysql_lock_data_dictionary row_mysql_lock_data_dictionary_func
-#endif
-/*********************************************************************//**
-Unlocks the data dictionary exclusive lock. */
-void
-row_mysql_unlock_data_dictionary(
-/*=============================*/
- trx_t* trx); /*!< in/out: transaction */
+/** Lock the data dictionary cache exclusively. */
+#define row_mysql_lock_data_dictionary(trx) \
+ do { \
+ ut_ad(!trx->dict_operation_lock_mode); \
+ dict_sys.lock(SRW_LOCK_CALL); \
+ trx->dict_operation_lock_mode = true; \
+ } while (0)
+
+/** Unlock the data dictionary. */
+#define row_mysql_unlock_data_dictionary(trx) \
+ do { \
+ ut_ad(!lock_trx_has_sys_table_locks(trx)); \
+ ut_ad(trx->dict_operation_lock_mode); \
+ trx->dict_operation_lock_mode = false; \
+ dict_sys.unlock(); \
+ } while (0)
+
/*********************************************************************//**
Creates a table for MySQL. On failure the transaction will be rolled back
and the 'table' object will be freed.
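
The row0mysql.h hunk above turns row_mysql_lock_data_dictionary() and row_mysql_unlock_data_dictionary() into macros that only toggle dict_sys.latch and trx->dict_operation_lock_mode. The assertion ut_ad(!lock_trx_has_sys_table_locks(trx)) in the unlock macro encodes an ordering rule that the rest of the patch follows: a dictionary transaction commits, and thereby releases its locks on the SYS_* tables, before it releases dict_sys.latch. A sketch of the expected pairing for a transaction that has already passed lock_sys_tables():

	row_mysql_lock_data_dictionary(trx);	/* sets dict_operation_lock_mode */
	/* ... modify the data dictionary ... */
	trx->commit();				/* releases the SYS_* table locks */
	row_mysql_unlock_data_dictionary(trx);	/* asserts no SYS_* locks remain */
	trx->free();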
diff --git a/storage/innobase/include/row0purge.h b/storage/innobase/include/row0purge.h
index 091d80adec5..34af658cb12 100644
--- a/storage/innobase/include/row0purge.h
+++ b/storage/innobase/include/row0purge.h
@@ -226,7 +226,7 @@ public:
}
innobase_reset_background_thd(purge_thd);
- dict_table_close(table, false, false, purge_thd, mdl_ticket);
+ dict_table_close(table, false, purge_thd, mdl_ticket);
table= nullptr;
mdl_ticket= nullptr;
}
diff --git a/storage/innobase/include/trx0trx.h b/storage/innobase/include/trx0trx.h
index 96289f2aa39..d64fd019b85 100644
--- a/storage/innobase/include/trx0trx.h
+++ b/storage/innobase/include/trx0trx.h
@@ -122,11 +122,18 @@ void trx_start_internal_low(trx_t *trx, bool read_write);
(t)->start_file = __FILE__; \
trx_start_internal_low(t, true); \
} while (false)
+#define trx_start_internal_read_only(t) \
+ do { \
+ (t)->start_line = __LINE__; \
+ (t)->start_file = __FILE__; \
+ trx_start_internal_low(t, false); \
+ } while (false)
#else
#define trx_start_if_not_started(t, rw) \
trx_start_if_not_started_low((t), rw)
#define trx_start_internal(t) trx_start_internal_low(t, true)
+#define trx_start_internal_read_only(t) trx_start_internal_low(t, false)
#define trx_start_if_not_started_xa(t, rw) \
trx_start_if_not_started_xa_low((t), (rw))
@@ -725,11 +732,9 @@ public:
ulint duplicates; /*!< TRX_DUP_IGNORE | TRX_DUP_REPLACE */
bool dict_operation; /**< whether this modifies InnoDB
data dictionary */
- ib_uint32_t dict_operation_lock_mode;
- /*!< 0, RW_S_LATCH, or RW_X_LATCH:
- the latch mode trx currently holds
- on dict_sys.latch. Protected
- by dict_sys.latch. */
+ /** whether dict_sys.latch is held exclusively; protected by
+ dict_sys.latch */
+ bool dict_operation_lock_mode;
/** wall-clock time of the latest transition to TRX_STATE_ACTIVE;
used for diagnostic purposes only */
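
Two reading aids for the trx0trx.h hunks above: dict_operation_lock_mode is now a plain bool meaning "holds dict_sys.latch exclusively", which is why assertions elsewhere in this patch change from comparisons with RW_X_LATCH to simple truth checks; and trx_start_internal_read_only() gives internal transactions a read-only start in place of the earlier READ UNCOMMITTED workaround. The dict_stats_fetch_from_ps() hunk earlier in this patch is the caller shown here; its use is simply:

	trx_t*	trx = trx_create();
	trx_start_internal_read_only(trx);	/* read-only; no isolation tweak */
	/* ... SELECT from mysql.innodb_table_stats / innodb_index_stats ... */
	/* commit and free the transaction as before */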
diff --git a/storage/innobase/lock/lock0lock.cc b/storage/innobase/lock/lock0lock.cc
index f3f5ce42372..41da85dcc98 100644
--- a/storage/innobase/lock/lock0lock.cc
+++ b/storage/innobase/lock/lock0lock.cc
@@ -1264,19 +1264,13 @@ lock_rec_enqueue_waiting(
trx_t* trx = thr_get_trx(thr);
ut_ad(trx->mutex_is_owner());
+ ut_ad(!trx->dict_operation_lock_mode);
- if (UNIV_UNLIKELY(trx->dict_operation_lock_mode == RW_X_LATCH)) {
- ut_ad(!strcmp(index->table->name.m_name, TABLE_STATS_NAME)
- || !strcmp(index->table->name.m_name, INDEX_STATS_NAME));
-instant_timeout:
+ if (trx->mysql_thd && thd_lock_wait_timeout(trx->mysql_thd) == 0) {
trx->error_state = DB_LOCK_WAIT_TIMEOUT;
return DB_LOCK_WAIT_TIMEOUT;
}
- if (trx->mysql_thd && thd_lock_wait_timeout(trx->mysql_thd) == 0) {
- goto instant_timeout;
- }
-
/* Enqueue the lock request that will wait to be granted, note that
we already own the trx mutex. */
lock_t* lock = lock_rec_create_low(
@@ -1491,6 +1485,12 @@ lock_rec_lock(
static_cast<lock_mode>(LOCK_MODE_MASK & mode)))
return DB_SUCCESS;
+ /* During CREATE TABLE, we will write to newly created FTS_*_CONFIG
+ on which no lock has been created yet. */
+ ut_ad(!trx->dict_operation_lock_mode ||
+ (strstr(index->table->name.m_name, "/FTS_") &&
+ strstr(index->table->name.m_name, "_CONFIG") + sizeof("_CONFIG") ==
+ index->table->name.m_name + strlen(index->table->name.m_name) + 1));
MONITOR_ATOMIC_INC(MONITOR_NUM_RECLOCK_REQ);
const page_id_t id{block->page.id()};
LockGuard g{lock_sys.rec_hash, id};
@@ -1701,7 +1701,6 @@ dberr_t lock_wait(que_thr_t *thr)
/* InnoDB system transactions may use the global value of
innodb_lock_wait_timeout, because trx->mysql_thd == NULL. */
const ulong innodb_lock_wait_timeout= trx_lock_wait_timeout_get(trx);
- const bool no_timeout= innodb_lock_wait_timeout >= 100000000;
const my_hrtime_t suspend_time= my_hrtime_coarse();
ut_ad(!trx->dict_operation_lock_mode);
@@ -1757,6 +1756,14 @@ dberr_t lock_wait(que_thr_t *thr)
timespec abstime;
set_timespec_time_nsec(abstime, suspend_time.val * 1000);
abstime.MY_tv_sec+= innodb_lock_wait_timeout;
+ /* Dictionary transactions must be immune to lock wait timeouts
+ for locks on data dictionary tables. Here we check only for
+ SYS_TABLES, SYS_COLUMNS, SYS_INDEXES, SYS_FIELDS. Locks on further
+ tables SYS_FOREIGN, SYS_FOREIGN_COLS, SYS_VIRTUAL will only be
+ acquired while holding an exclusive lock on one of the 4 tables. */
+ const bool no_timeout= innodb_lock_wait_timeout >= 100000000 ||
+ ((type_mode & LOCK_TABLE) &&
+ wait_lock->un_member.tab_lock.table->id <= DICT_FIELDS_ID);
thd_wait_begin(trx->mysql_thd, (type_mode & LOCK_TABLE)
? THD_WAIT_TABLE_LOCK : THD_WAIT_ROW_LOCK);
dberr_t error_state= DB_SUCCESS;
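Editorial note: read as a predicate, the new no_timeout flag is true either when innodb_lock_wait_timeout is set to its "effectively infinite" magic value or when the blocked request is a table lock on one of the four hard-coded dictionary tables. A standalone sketch of that predicate follows; the numeric DICT_*_ID values are an assumption here, and only their ordering (DICT_FIELDS_ID being the last of the four) is relied upon.

#include <cassert>
#include <cstdint>

/* Assumed IDs; only the ordering matters for the check below. */
enum : uint64_t { DICT_TABLES_ID= 1, DICT_COLUMNS_ID, DICT_INDEXES_ID,
                  DICT_FIELDS_ID };

static bool waits_without_timeout(unsigned long timeout, bool table_lock,
                                  uint64_t table_id)
{
  return timeout >= 100000000 ||               /* "infinite" timeout value */
         (table_lock && table_id <= DICT_FIELDS_ID);
}

int main()
{
  assert(waits_without_timeout(50, true, DICT_INDEXES_ID));  /* dictionary */
  assert(!waits_without_timeout(50, true, 1000));            /* user table */
  assert(!waits_without_timeout(50, false, DICT_TABLES_ID)); /* record lock */
  assert(waits_without_timeout(100000000, false, 1000));     /* no timeout */
}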
@@ -1803,7 +1810,10 @@ dberr_t lock_wait(que_thr_t *thr)
break;
default:
ut_ad(error_state != DB_LOCK_WAIT_TIMEOUT);
- if (trx_is_interrupted(trx))
+ /* Dictionary transactions must ignore KILL, because they could
+ be executed as part of a multi-transaction DDL operation,
+ such as rollback_inplace_alter_table() or ha_innobase::delete_table(). */
+ if (!trx->dict_operation && trx_is_interrupted(trx))
/* innobase_kill_query() can only set trx->error_state=DB_INTERRUPTED
for any transaction that is attached to a connection. */
error_state= DB_INTERRUPTED;
@@ -3072,20 +3082,15 @@ void lock_rec_restore_from_page_infimum(const buf_block_t &block,
/*========================= TABLE LOCKS ==============================*/
-/*********************************************************************//**
-Creates a table lock object and adds it as the last in the lock queue
-of the table. Does NOT check for deadlocks or lock compatibility.
-@return own: new lock object */
-UNIV_INLINE
-lock_t*
-lock_table_create(
-/*==============*/
- dict_table_t* table, /*!< in/out: database table
- in dictionary cache */
- unsigned type_mode,/*!< in: lock mode possibly ORed with
- LOCK_WAIT */
- trx_t* trx, /*!< in: trx */
- lock_t* c_lock) /*!< in: conflicting lock */
+/**
+Create a table lock, without checking for deadlocks or lock compatibility.
+@param table table on which the lock is created
+@param type_mode lock type and mode
+@param trx transaction
+@param c_lock conflicting lock
+@return the created lock object */
+lock_t *lock_table_create(dict_table_t *table, unsigned type_mode, trx_t *trx,
+ lock_t *c_lock)
{
lock_t* lock;
@@ -3094,6 +3099,12 @@ lock_table_create(
ut_ad(!trx->is_wsrep() || lock_sys.is_writer());
ut_ad(trx->state == TRX_STATE_ACTIVE || trx->is_recovered);
ut_ad(!trx->is_autocommit_non_locking());
+ /* During CREATE TABLE, we will write to newly created FTS_*_CONFIG
+ on which no lock has been created yet. */
+ ut_ad(!trx->dict_operation_lock_mode
+ || (strstr(table->name.m_name, "/FTS_")
+ && strstr(table->name.m_name, "_CONFIG") + sizeof("_CONFIG")
+ == table->name.m_name + strlen(table->name.m_name) + 1));
switch (LOCK_MODE_MASK & type_mode) {
case LOCK_AUTO_INC:
@@ -3310,13 +3321,7 @@ lock_table_enqueue_waiting(
trx_t* trx = thr_get_trx(thr);
ut_ad(trx->mutex_is_owner());
-
- if (UNIV_UNLIKELY(trx->dict_operation_lock_mode == RW_X_LATCH)) {
- ut_ad(!strcmp(table->name.m_name, TABLE_STATS_NAME)
- || !strcmp(table->name.m_name, INDEX_STATS_NAME));
- trx->error_state = DB_LOCK_WAIT_TIMEOUT;
- return DB_LOCK_WAIT_TIMEOUT;
- }
+ ut_ad(!trx->dict_operation_lock_mode);
#ifdef WITH_WSREP
if (trx->is_wsrep() && trx->lock.was_chosen_as_deadlock_victim) {
@@ -3478,7 +3483,7 @@ void lock_table_resurrect(dict_table_t *table, trx_t *trx, lock_mode mode)
ut_ad(!lock_table_other_has_incompatible(trx, LOCK_WAIT, table, mode));
trx->mutex_lock();
- lock_table_create(table, mode, trx, nullptr);
+ lock_table_create(table, mode, trx);
}
trx->mutex_unlock();
}
@@ -3619,6 +3624,28 @@ run_again:
return(err);
}
+/** Exclusively lock the data dictionary tables.
+@param trx dictionary transaction
+@return error code
+@retval DB_SUCCESS on success */
+dberr_t lock_sys_tables(trx_t *trx)
+{
+ dberr_t err;
+ if (!(err= lock_table_for_trx(dict_sys.sys_tables, trx, LOCK_X)) &&
+ !(err= lock_table_for_trx(dict_sys.sys_columns, trx, LOCK_X)) &&
+ !(err= lock_table_for_trx(dict_sys.sys_indexes, trx, LOCK_X)) &&
+ !(err= lock_table_for_trx(dict_sys.sys_fields, trx, LOCK_X)))
+ {
+ if (dict_sys.sys_foreign)
+ err= lock_table_for_trx(dict_sys.sys_foreign, trx, LOCK_X);
+ if (!err && dict_sys.sys_foreign_cols)
+ err= lock_table_for_trx(dict_sys.sys_foreign_cols, trx, LOCK_X);
+ if (!err && dict_sys.sys_virtual)
+ err= lock_table_for_trx(dict_sys.sys_virtual, trx, LOCK_X);
+ }
+ return err;
+}
+
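Editorial note: the chaining in lock_sys_tables() makes the first failed request short-circuit everything after it, and the three optional tables are only attempted once the four mandatory ones are locked. A self-contained sketch of that control flow, with a stub lock function and without the dict_sys.sys_foreign/sys_foreign_cols/sys_virtual existence checks (nothing below is InnoDB API):

#include <cstdio>

enum dberr_t { DB_SUCCESS= 0, DB_LOCK_WAIT_TIMEOUT };

/* Stub standing in for lock_table_for_trx(..., LOCK_X); always succeeds. */
static dberr_t lock_one(const char *table)
{
  std::printf("LOCK_X %s\n", table);
  return DB_SUCCESS;
}

static dberr_t lock_dictionary()
{
  dberr_t err;
  /* The first failed request short-circuits everything that follows;
     the three optional tables are attempted only on full success. */
  if (!(err= lock_one("SYS_TABLES")) &&
      !(err= lock_one("SYS_COLUMNS")) &&
      !(err= lock_one("SYS_INDEXES")) &&
      !(err= lock_one("SYS_FIELDS")))
  {
    err= lock_one("SYS_FOREIGN");
    if (!err)
      err= lock_one("SYS_FOREIGN_COLS");
    if (!err)
      err= lock_one("SYS_VIRTUAL");
  }
  return err;
}

int main()
{
  return lock_dictionary() == DB_SUCCESS ? 0 : 1;
}

The new caller in row_merge_drop_temp_indexes() further down follows the same discipline: it only evaluates the drop SQL when this locking step returned DB_SUCCESS; a sketch of that call ordering appears after its hunk.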
/*=========================== LOCK RELEASE ==============================*/
/*************************************************************//**
@@ -3785,7 +3812,7 @@ void lock_release(trx_t *trx)
#if defined SAFE_MUTEX && defined UNIV_DEBUG
std::set<table_id_t> to_evict;
if (innodb_evict_tables_on_commit_debug && !trx->is_recovered)
- if (!trx->dict_operation_lock_mode && !trx->dict_operation)
+ if (!trx->dict_operation)
for (const auto& p: trx->mod_tables)
if (!p.first->is_temporary())
to_evict.emplace(p.first->id);
@@ -5531,8 +5558,14 @@ void lock_sys_t::cancel(trx_t *trx)
mysql_mutex_lock(&lock_sys.wait_mutex);
if (lock_t *lock= trx->lock.wait_lock)
{
- trx->error_state= DB_INTERRUPTED;
- cancel(trx, lock, false);
+ /* Dictionary transactions must be immune to KILL, because they
+ may be executed as part of a multi-transaction DDL operation, such
+ as rollback_inplace_alter_table() or ha_innobase::delete_table(). */
+ if (!trx->dict_operation)
+ {
+ trx->error_state= DB_INTERRUPTED;
+ cancel(trx, lock, false);
+ }
}
lock_sys.deadlock_check();
mysql_mutex_unlock(&lock_sys.wait_mutex);
diff --git a/storage/innobase/pars/pars0pars.cc b/storage/innobase/pars/pars0pars.cc
index 1ade99eb1c3..61614007bd4 100644
--- a/storage/innobase/pars/pars0pars.cc
+++ b/storage/innobase/pars/pars0pars.cc
@@ -766,7 +766,7 @@ pars_retrieve_table_def(
sym_node->token_type = SYM_TABLE_REF_COUNTED;
sym_node->table = dict_table_open_on_name(
- sym_node->name, TRUE, FALSE, DICT_ERR_IGNORE_NONE);
+ sym_node->name, true, DICT_ERR_IGNORE_NONE);
ut_a(sym_node->table != NULL);
}
diff --git a/storage/innobase/row/row0ftsort.cc b/storage/innobase/row/row0ftsort.cc
index e22b08346cc..792a9b46e8a 100644
--- a/storage/innobase/row/row0ftsort.cc
+++ b/storage/innobase/row/row0ftsort.cc
@@ -1634,7 +1634,7 @@ row_fts_merge_insert(
/* Get aux index */
fts_get_table_name(&fts_table, aux_table_name);
- aux_table = dict_table_open_on_name(aux_table_name, FALSE, FALSE,
+ aux_table = dict_table_open_on_name(aux_table_name, false,
DICT_ERR_IGNORE_NONE);
ut_ad(aux_table != NULL);
aux_table->release();
diff --git a/storage/innobase/row/row0import.cc b/storage/innobase/row/row0import.cc
index ce235870909..b9245eb4325 100644
--- a/storage/innobase/row/row0import.cc
+++ b/storage/innobase/row/row0import.cc
@@ -40,6 +40,7 @@ Created 2012-02-08 by Sunny Bains.
#include "row0quiesce.h"
#include "fil0pagecompress.h"
#include "trx0undo.h"
+#include "lock0lock.h"
#ifdef HAVE_LZO
#include "lzo/lzo1x.h"
#endif
@@ -2191,11 +2192,8 @@ dberr_t
row_import_cleanup(
/*===============*/
row_prebuilt_t* prebuilt, /*!< in/out: prebuilt from handler */
- trx_t* trx, /*!< in/out: transaction for import */
dberr_t err) /*!< in: error code */
{
- ut_a(prebuilt->trx != trx);
-
if (err != DB_SUCCESS) {
dict_table_t* table = prebuilt->table;
table->file_unreadable = true;
@@ -2218,14 +2216,12 @@ row_import_cleanup(
DBUG_EXECUTE_IF("ib_import_before_commit_crash", DBUG_SUICIDE(););
- trx_commit_for_mysql(trx);
+ prebuilt->trx->commit();
- if (trx->dict_operation_lock_mode) {
- row_mysql_unlock_data_dictionary(trx);
+ if (prebuilt->trx->dict_operation_lock_mode) {
+ row_mysql_unlock_data_dictionary(prebuilt->trx);
}
- trx->free();
-
prebuilt->trx->op_info = "";
DBUG_EXECUTE_IF("ib_import_before_checkpoint_crash", DBUG_SUICIDE(););
@@ -2242,10 +2238,9 @@ dberr_t
row_import_error(
/*=============*/
row_prebuilt_t* prebuilt, /*!< in/out: prebuilt from handler */
- trx_t* trx, /*!< in/out: transaction for import */
dberr_t err) /*!< in: error code */
{
- if (!trx_is_interrupted(trx)) {
+ if (!trx_is_interrupted(prebuilt->trx)) {
char table_name[MAX_FULL_NAME_LEN + 1];
innobase_format_name(
@@ -2253,12 +2248,12 @@ row_import_error(
prebuilt->table->name.m_name);
ib_senderrf(
- trx->mysql_thd, IB_LOG_LEVEL_WARN,
+ prebuilt->trx->mysql_thd, IB_LOG_LEVEL_WARN,
ER_INNODB_IMPORT_ERROR,
table_name, (ulong) err, ut_strerr(err));
}
- return(row_import_cleanup(prebuilt, trx, err));
+ return row_import_cleanup(prebuilt, err);
}
/*****************************************************************//**
@@ -4002,9 +3997,9 @@ row_import_for_mysql(
row_prebuilt_t* prebuilt) /*!< in: prebuilt struct in MySQL */
{
dberr_t err;
- trx_t* trx;
ib_uint64_t autoinc = 0;
char* filepath = NULL;
+ trx_t* trx = prebuilt->trx;
/* The caller assured that this is not read_only_mode and that no
temporary tablespace is being imported. */
@@ -4013,22 +4008,12 @@ row_import_for_mysql(
ut_ad(table->space_id);
ut_ad(table->space_id < SRV_SPACE_ID_UPPER_BOUND);
- ut_ad(prebuilt->trx);
+ ut_ad(trx);
+ ut_ad(trx->state == TRX_STATE_ACTIVE);
ut_ad(!table->is_readable());
ibuf_delete_for_discarded_space(table->space_id);
- trx_start_if_not_started(prebuilt->trx, true);
-
- trx = trx_create();
-
- trx->dict_operation = true;
-
- trx_start_if_not_started(trx, true);
-
- /* So that we can send error messages to the user. */
- trx->mysql_thd = prebuilt->trx->mysql_thd;
-
/* Assign an undo segment for the transaction, so that the
transaction will be recovered after a crash. */
@@ -4043,21 +4028,19 @@ row_import_for_mysql(
DBUG_EXECUTE_IF("ib_import_undo_assign_failure",
err = DB_TOO_MANY_CONCURRENT_TRXS;);
- if (err != DB_SUCCESS) {
-
- return(row_import_cleanup(prebuilt, trx, err));
-
- } else if (trx->rsegs.m_redo.undo == 0) {
-
+ if (err == DB_SUCCESS && !trx->has_logged_persistent()) {
err = DB_TOO_MANY_CONCURRENT_TRXS;
- return(row_import_cleanup(prebuilt, trx, err));
+ }
+ if (err != DB_SUCCESS) {
+ return row_import_cleanup(prebuilt, err);
}
- prebuilt->trx->op_info = "read meta-data file";
+ trx->op_info = "read meta-data file";
row_import cfg;
+ THD* thd = trx->mysql_thd;
- err = row_import_read_cfg(table, trx->mysql_thd, cfg);
+ err = row_import_read_cfg(table, thd, cfg);
/* Check if the table column definitions match the contents
of the config file. */
@@ -4067,7 +4050,7 @@ row_import_for_mysql(
/* We have a schema file, try and match it with our
data dictionary. */
- err = cfg.match_schema(trx->mysql_thd);
+ err = cfg.match_schema(thd);
/* Update index->page and SYS_INDEXES.PAGE_NO to match the
B-tree root page numbers in the tablespace. Use the index
@@ -4091,13 +4074,13 @@ row_import_for_mysql(
cfg.m_zip_size = 0;
if (UT_LIST_GET_LEN(table->indexes) > 1) {
- ib_errf(trx->mysql_thd, IB_LOG_LEVEL_ERROR,
+ ib_errf(thd, IB_LOG_LEVEL_ERROR,
ER_INTERNAL_ERROR,
"Drop all secondary indexes before importing "
"table %s when .cfg file is missing.",
table->name.m_name);
err = DB_ERROR;
- return row_import_error(prebuilt, trx, err);
+ return row_import_error(prebuilt, err);
}
FetchIndexRootPages fetchIndexRootPages(table, trx);
@@ -4121,10 +4104,10 @@ row_import_for_mysql(
}
if (err != DB_SUCCESS) {
- return(row_import_error(prebuilt, trx, err));
+ return row_import_error(prebuilt, err);
}
- prebuilt->trx->op_info = "importing tablespace";
+ trx->op_info = "importing tablespace";
ib::info() << "Phase I - Update all pages";
@@ -4166,13 +4149,13 @@ row_import_for_mysql(
if (err != DB_DECRYPTION_FAILED) {
- ib_errf(trx->mysql_thd, IB_LOG_LEVEL_ERROR,
+ ib_errf(thd, IB_LOG_LEVEL_ERROR,
ER_INTERNAL_ERROR,
"Cannot reset LSNs in table %s : %s",
table_name, ut_strerr(err));
}
- return(row_import_cleanup(prebuilt, trx, err));
+ return row_import_cleanup(prebuilt, err);
}
/* If the table is stored in a remote tablespace, we need to
@@ -4196,7 +4179,7 @@ row_import_for_mysql(
);
if (filepath == NULL) {
- return(row_import_cleanup(prebuilt, trx, DB_OUT_OF_MEMORY));
+ return row_import_cleanup(prebuilt, DB_OUT_OF_MEMORY);
}
/* Open the tablespace so that we can access via the buffer pool.
@@ -4215,7 +4198,7 @@ row_import_for_mysql(
err = DB_TABLESPACE_NOT_FOUND; table->space = NULL;);
if (!table->space) {
- ib_senderrf(trx->mysql_thd, IB_LOG_LEVEL_ERROR,
+ ib_senderrf(thd, IB_LOG_LEVEL_ERROR,
ER_GET_ERRMSG,
err, ut_strerr(err), filepath);
}
@@ -4229,7 +4212,7 @@ row_import_for_mysql(
DBUG_EXECUTE_IF("ib_import_check_bitmap_failure", err = DB_CORRUPTION;);
if (err != DB_SUCCESS) {
- return(row_import_cleanup(prebuilt, trx, err));
+ return row_import_cleanup(prebuilt, err);
}
/* The first index must always be the clustered index. */
@@ -4237,7 +4220,7 @@ row_import_for_mysql(
dict_index_t* index = dict_table_get_first_index(table);
if (!dict_index_is_clust(index)) {
- return(row_import_error(prebuilt, trx, DB_CORRUPTION));
+ return row_import_error(prebuilt, DB_CORRUPTION);
}
/* Update the Btree segment headers for index node and
@@ -4249,7 +4232,7 @@ row_import_for_mysql(
err = DB_CORRUPTION;);
if (err != DB_SUCCESS) {
- return(row_import_error(prebuilt, trx, err));
+ return row_import_error(prebuilt, err);
} else if (cfg.requires_purge(index->name)) {
/* Purge any delete-marked records that couldn't be
@@ -4268,7 +4251,7 @@ row_import_for_mysql(
DBUG_EXECUTE_IF("ib_import_cluster_failure", err = DB_CORRUPTION;);
if (err != DB_SUCCESS) {
- return(row_import_error(prebuilt, trx, err));
+ return row_import_error(prebuilt, err);
}
/* For secondary indexes, purge any records that couldn't be purged
@@ -4281,7 +4264,7 @@ row_import_for_mysql(
err = DB_CORRUPTION;);
if (err != DB_SUCCESS) {
- return(row_import_error(prebuilt, trx, err));
+ return row_import_error(prebuilt, err);
}
/* Ensure that the next available DB_ROW_ID is not smaller than
@@ -4320,13 +4303,13 @@ row_import_for_mysql(
err = row_import_update_index_root(trx, table, false);
if (err != DB_SUCCESS) {
- return(row_import_error(prebuilt, trx, err));
+ return row_import_error(prebuilt, err);
}
err = row_import_update_discarded_flag(trx, table->id, false);
if (err != DB_SUCCESS) {
- return(row_import_error(prebuilt, trx, err));
+ return row_import_error(prebuilt, err);
}
table->file_unreadable = false;
@@ -4342,5 +4325,5 @@ row_import_for_mysql(
btr_write_autoinc(dict_table_get_first_index(table), autoinc);
}
- return(row_import_cleanup(prebuilt, trx, err));
+ return row_import_cleanup(prebuilt, err);
}
diff --git a/storage/innobase/row/row0ins.cc b/storage/innobase/row/row0ins.cc
index 40b51e2e866..6f228142cba 100644
--- a/storage/innobase/row/row0ins.cc
+++ b/storage/innobase/row/row0ins.cc
@@ -1944,7 +1944,7 @@ row_ins_check_foreign_constraints(
ref_table = dict_table_open_on_name(
foreign->referenced_table_name_lookup,
- FALSE, FALSE, DICT_ERR_IGNORE_NONE);
+ false, DICT_ERR_IGNORE_NONE);
}
err = row_ins_check_foreign_constraint(
diff --git a/storage/innobase/row/row0merge.cc b/storage/innobase/row/row0merge.cc
index ab294e65541..53e3016180a 100644
--- a/storage/innobase/row/row0merge.cc
+++ b/storage/innobase/row/row0merge.cc
@@ -3678,7 +3678,7 @@ row_merge_drop_index_dict(
pars_info_t* info;
ut_ad(!srv_read_only_mode);
- ut_ad(trx->dict_operation_lock_mode == RW_X_LATCH);
+ ut_ad(trx->dict_operation_lock_mode);
ut_ad(trx->dict_operation);
ut_ad(dict_sys.locked());
@@ -3704,6 +3704,7 @@ row_merge_drop_index_dict(
Drop indexes that were created before an error occurred.
The data dictionary must have been locked exclusively by the caller,
because the transaction will not be committed. */
+static
void
row_merge_drop_indexes_dict(
/*========================*/
@@ -3740,7 +3741,7 @@ row_merge_drop_indexes_dict(
pars_info_t* info;
ut_ad(!srv_read_only_mode);
- ut_ad(trx->dict_operation_lock_mode == RW_X_LATCH);
+ ut_ad(trx->dict_operation_lock_mode);
ut_ad(trx->dict_operation);
ut_ad(dict_sys.locked());
@@ -3813,7 +3814,7 @@ row_merge_drop_indexes(
dict_index_t* next_index;
ut_ad(!srv_read_only_mode);
- ut_ad(trx->dict_operation_lock_mode == RW_X_LATCH);
+ ut_ad(trx->dict_operation_lock_mode);
ut_ad(trx->dict_operation);
ut_ad(dict_sys.locked());
@@ -3829,18 +3830,10 @@ row_merge_drop_indexes(
handle to the table be waiting for the next statement to execute,
or waiting for a meta-data lock.
- A concurrent purge will be prevented by dict_sys.latch. */
+ A concurrent purge will be prevented by MDL. */
if (!locked && (table->get_ref_count() > 1
|| table->has_lock_other_than(alter_trx))) {
- /* We will have to drop the indexes later, when the
- table is guaranteed to be no longer in use. Mark the
- indexes as incomplete and corrupted, so that other
- threads will stop using them. Let dict_table_close()
- or crash recovery or the next invocation of
- prepare_inplace_alter_table() take care of dropping
- the indexes. */
-
while ((index = dict_table_get_next_index(index)) != NULL) {
ut_ad(!dict_index_is_clust(index));
@@ -4094,7 +4087,10 @@ void row_merge_drop_temp_indexes()
indexes, so that the data dictionary information can be checked
when accessing the tablename.ibd files. */
trx_t* trx = trx_create();
+ trx_start_for_ddl(trx);
trx->op_info = "dropping partially created indexes";
+ dberr_t error = lock_sys_tables(trx);
+
row_mysql_lock_data_dictionary(trx);
/* Ensure that this transaction will be rolled back and locks
will be released, if the server gets killed before the commit
@@ -4105,8 +4101,11 @@ void row_merge_drop_temp_indexes()
pars_info_t* pinfo = pars_info_create();
pars_info_bind_function(pinfo, "drop_fts", row_merge_drop_fts, trx);
+ if (error == DB_SUCCESS) {
+ error = que_eval_sql(pinfo, sql, trx);
+ }
- if (dberr_t error = que_eval_sql(pinfo, sql, trx)) {
+ if (error) {
/* Even though we ensure that DDL transactions are WAIT
and DEADLOCK free, we could encounter other errors e.g.,
DB_TOO_MANY_CONCURRENT_TRXS. */
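Editorial note: for reference, the call ordering that this hunk establishes, reduced to a runnable skeleton; the stubs only print what the real calls would do and are not InnoDB functions.

#include <cstdio>

enum dberr_t { DB_SUCCESS= 0, DB_LOCK_WAIT_TIMEOUT };

/* Stubs that only describe the corresponding real calls. */
static void start_ddl_trx() { std::puts("trx_start_for_ddl"); }
static dberr_t lock_dictionary_tables()
{ std::puts("lock_sys_tables: X-lock SYS_* tables"); return DB_SUCCESS; }
static void lock_dict_sys() { std::puts("row_mysql_lock_data_dictionary"); }
static dberr_t drop_temp_indexes_sql()
{ std::puts("que_eval_sql: drop partially created indexes"); return DB_SUCCESS; }

int main()
{
  start_ddl_trx();
  dberr_t err= lock_dictionary_tables(); /* table locks first ... */
  lock_dict_sys();                       /* ... then dict_sys.latch */
  if (err == DB_SUCCESS)
    err= drop_temp_indexes_sql();        /* SQL only if locking succeeded */
  return err == DB_SUCCESS ? 0 : 1;
}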
@@ -4246,7 +4245,7 @@ row_merge_rename_index_to_add(
"WHERE TABLE_ID = :tableid AND ID = :indexid;\n"
"END;\n";
- ut_a(trx->dict_operation_lock_mode == RW_X_LATCH);
+ ut_ad(trx->dict_operation_lock_mode);
ut_ad(trx->dict_operation);
trx->op_info = "renaming index to add";
diff --git a/storage/innobase/row/row0mysql.cc b/storage/innobase/row/row0mysql.cc
index 17ac86f2300..a0ef110f606 100644
--- a/storage/innobase/row/row0mysql.cc
+++ b/storage/innobase/row/row0mysql.cc
@@ -2133,36 +2133,6 @@ row_update_cascade_for_mysql(
}
/*********************************************************************//**
-Locks the data dictionary exclusively for performing a table create or other
-data dictionary modification operation. */
-void
-row_mysql_lock_data_dictionary_func(
-/*================================*/
-#ifdef UNIV_PFS_RWLOCK
- const char* file, /*!< in: file name */
- unsigned line, /*!< in: line number */
-#endif
- trx_t* trx) /*!< in/out: transaction */
-{
- ut_ad(trx->dict_operation_lock_mode == 0);
- dict_sys.lock(SRW_LOCK_ARGS(file, line));
- trx->dict_operation_lock_mode = RW_X_LATCH;
-}
-
-/*********************************************************************//**
-Unlocks the data dictionary exclusive lock. */
-void
-row_mysql_unlock_data_dictionary(
-/*=============================*/
- trx_t* trx) /*!< in/out: transaction */
-{
- ut_ad(lock_trx_has_sys_table_locks(trx) == NULL);
- ut_ad(trx->dict_operation_lock_mode == RW_X_LATCH);
- trx->dict_operation_lock_mode = 0;
- dict_sys.unlock();
-}
-
-/*********************************************************************//**
Creates a table for MySQL. On failure the transaction will be rolled back
and the 'table' object will be freed.
@return error code or DB_SUCCESS */
@@ -2177,11 +2147,11 @@ row_create_table_for_mysql(
tab_node_t* node;
mem_heap_t* heap;
que_thr_t* thr;
- dberr_t err;
+ ut_ad(trx->state == TRX_STATE_ACTIVE);
ut_ad(dict_sys.sys_tables_exist());
ut_ad(dict_sys.locked());
- ut_ad(trx->dict_operation_lock_mode == RW_X_LATCH);
+ ut_ad(trx->dict_operation_lock_mode);
DEBUG_SYNC_C("create_table");
@@ -2205,7 +2175,7 @@ row_create_table_for_mysql(
que_run_threads(thr);
- err = trx->error_state;
+ dberr_t err = trx->error_state;
if (err != DB_SUCCESS) {
trx->error_state = DB_SUCCESS;
@@ -2271,14 +2241,14 @@ row_create_index_for_mysql(
}
}
- trx->op_info = "creating index";
-
/* For temporary tables we avoid insertion into SYSTEM TABLES to
maintain performance, and so we have a separate path that directly
just updates the dictionary cache. */
if (!table->is_temporary()) {
- trx_start_if_not_started_xa(trx, true);
- trx->dict_operation = true;
+ ut_ad(trx->state == TRX_STATE_ACTIVE);
+ ut_ad(trx->dict_operation);
+ trx->op_info = "creating index";
+
/* Note that the space id where we store the index is
inherited from the table in dict_build_index_def_step()
in dict0crea.cc. */
@@ -2306,6 +2276,8 @@ row_create_index_for_mysql(
if (index && (index->type & DICT_FTS)) {
err = fts_create_index_tables(trx, index, table->id);
}
+
+ trx->op_info = "";
} else {
dict_build_index_def(table, index, trx);
@@ -2327,8 +2299,6 @@ row_create_index_for_mysql(
}
}
- trx->op_info = "";
-
return(err);
}
@@ -2675,7 +2645,7 @@ row_rename_table_for_mysql(
ut_a(old_name != NULL);
ut_a(new_name != NULL);
ut_ad(trx->state == TRX_STATE_ACTIVE);
- ut_ad(trx->dict_operation_lock_mode == RW_X_LATCH);
+ ut_ad(trx->dict_operation_lock_mode);
if (high_level_read_only) {
return(DB_READ_ONLY);
@@ -2686,7 +2656,7 @@ row_rename_table_for_mysql(
old_is_tmp = dict_table_t::is_temporary_name(old_name);
new_is_tmp = dict_table_t::is_temporary_name(new_name);
- table = dict_table_open_on_name(old_name, true, false,
+ table = dict_table_open_on_name(old_name, true,
DICT_ERR_IGNORE_FK_NOKEY);
/* MariaDB partition engine hard codes the file name
@@ -2706,7 +2676,7 @@ row_rename_table_for_mysql(
check the existence of table name without lowering
their case in the system table. */
if (!table && lower_case_table_names == 1
- && strstr(old_name, IF_WIN("#p#", "#P#"))) {
+ && strstr(old_name, table_name_t::part_suffix)) {
char par_case_name[MAX_FULL_NAME_LEN + 1];
#ifndef _WIN32
/* Check for the table using lower
@@ -2724,7 +2694,7 @@ row_rename_table_for_mysql(
normalize_table_name_c_low(
par_case_name, old_name, FALSE);
#endif
- table = dict_table_open_on_name(par_case_name, true, false,
+ table = dict_table_open_on_name(par_case_name, true,
DICT_ERR_IGNORE_FK_NOKEY);
}
diff --git a/storage/innobase/row/row0purge.cc b/storage/innobase/row/row0purge.cc
index 7e10dcdfe9c..4588c1d2b0a 100644
--- a/storage/innobase/row/row0purge.cc
+++ b/storage/innobase/row/row0purge.cc
@@ -141,7 +141,7 @@ removed:
mtr.commit();
close_and_exit:
if (table) {
- dict_table_close(table, true, false);
+ dict_table_close(table, true);
dict_sys.unlock();
}
return success;
@@ -181,7 +181,7 @@ close_and_exit:
mtr.commit();
if (table) {
- dict_table_close(table, true, false);
+ dict_table_close(table, true);
dict_sys.unlock();
table = nullptr;
}
diff --git a/storage/innobase/row/row0uins.cc b/storage/innobase/row/row0uins.cc
index 2c7408c950a..c826d632969 100644
--- a/storage/innobase/row/row0uins.cc
+++ b/storage/innobase/row/row0uins.cc
@@ -73,12 +73,11 @@ row_undo_ins_remove_clust_rec(
dict_index_t* index = node->pcur.btr_cur.index;
bool online;
table_id_t table_id = 0;
- const bool dict_locked = node->trx->dict_operation_lock_mode
- == RW_X_LATCH;
+ const bool dict_locked = node->trx->dict_operation_lock_mode;
restart:
MDL_ticket* mdl_ticket = nullptr;
ut_ad(!table_id || dict_locked
- || node->trx->dict_operation_lock_mode == 0);
+ || !node->trx->dict_operation_lock_mode);
dict_table_t *table = table_id
? dict_table_open_on_id(table_id, dict_locked,
DICT_TABLE_OP_OPEN_ONLY_IF_CACHED,
@@ -147,8 +146,7 @@ restart:
completed. At this point, any corresponding operation
to the metadata record will have been rolled back. */
ut_ad(!online);
- ut_ad(node->trx->dict_operation_lock_mode
- == RW_X_LATCH);
+ ut_ad(node->trx->dict_operation_lock_mode);
ut_ad(node->rec_type == TRX_UNDO_INSERT_REC);
if (rec_get_n_fields_old(rec)
!= DICT_NUM_FIELDS__SYS_COLUMNS
@@ -162,8 +160,7 @@ restart:
break;
case DICT_INDEXES_ID:
ut_ad(!online);
- ut_ad(node->trx->dict_operation_lock_mode
- == RW_X_LATCH);
+ ut_ad(node->trx->dict_operation_lock_mode);
ut_ad(node->rec_type == TRX_UNDO_INSERT_REC);
if (!table_id) {
table_id = mach_read_from_8(rec);
@@ -272,7 +269,7 @@ func_exit:
btr_pcur_commit_specify_mtr(&node->pcur, &mtr);
if (UNIV_LIKELY_NULL(table)) {
- dict_table_close(table, dict_locked, false,
+ dict_table_close(table, dict_locked,
node->trx->mysql_thd, mdl_ticket);
}
@@ -486,7 +483,7 @@ close_table:
would probably be better to just drop all temporary
tables (and temporary undo log records) of the current
connection, instead of doing this rollback. */
- dict_table_close(node->table, dict_locked, FALSE);
+ dict_table_close(node->table, dict_locked);
node->table = NULL;
return false;
} else {
@@ -610,7 +607,7 @@ row_undo_ins(
que_thr_t* thr) /*!< in: query thread */
{
dberr_t err;
- bool dict_locked = node->trx->dict_operation_lock_mode == RW_X_LATCH;
+ const bool dict_locked = node->trx->dict_operation_lock_mode;
if (!row_undo_ins_parse_undo_rec(node, dict_locked)) {
return DB_SUCCESS;
@@ -683,7 +680,7 @@ row_undo_ins(
break;
}
- dict_table_close(node->table, dict_locked, FALSE);
+ dict_table_close(node->table, dict_locked);
node->table = NULL;
diff --git a/storage/innobase/row/row0umod.cc b/storage/innobase/row/row0umod.cc
index 5039df2e1a6..751c4e73401 100644
--- a/storage/innobase/row/row0umod.cc
+++ b/storage/innobase/row/row0umod.cc
@@ -1239,7 +1239,7 @@ close_table:
would probably be better to just drop all temporary
tables (and temporary undo log records) of the current
connection, instead of doing this rollback. */
- dict_table_close(node->table, dict_locked, FALSE);
+ dict_table_close(node->table, dict_locked);
node->table = NULL;
return false;
}
@@ -1327,8 +1327,7 @@ row_undo_mod(
{
dberr_t err;
ut_ad(thr_get_trx(thr) == node->trx);
- const bool dict_locked = node->trx->dict_operation_lock_mode
- == RW_X_LATCH;
+ const bool dict_locked = node->trx->dict_operation_lock_mode;
if (!row_undo_mod_parse_undo_rec(node, dict_locked)) {
return DB_SUCCESS;
@@ -1402,7 +1401,7 @@ rollback_clust:
}
}
- dict_table_close(node->table, dict_locked, FALSE);
+ dict_table_close(node->table, dict_locked);
node->table = NULL;
diff --git a/storage/innobase/row/row0upd.cc b/storage/innobase/row/row0upd.cc
index b343141f84b..9d8a19a8dff 100644
--- a/storage/innobase/row/row0upd.cc
+++ b/storage/innobase/row/row0upd.cc
@@ -249,7 +249,7 @@ row_upd_check_references_constraints(
ref_table = dict_table_open_on_name(
foreign->foreign_table_name_lookup,
- FALSE, FALSE, DICT_ERR_IGNORE_NONE);
+ false, DICT_ERR_IGNORE_NONE);
}
err = row_ins_check_foreign_constraint(
@@ -332,7 +332,7 @@ wsrep_row_upd_check_foreign_constraints(
foreign->referenced_table =
dict_table_open_on_name(
foreign->referenced_table_name_lookup,
- FALSE, FALSE, DICT_ERR_IGNORE_NONE);
+ false, DICT_ERR_IGNORE_NONE);
opened = (foreign->referenced_table) ? TRUE : FALSE;
}
diff --git a/storage/innobase/trx/trx0trx.cc b/storage/innobase/trx/trx0trx.cc
index 246a0363796..bb0b9882419 100644
--- a/storage/innobase/trx/trx0trx.cc
+++ b/storage/innobase/trx/trx0trx.cc
@@ -174,7 +174,7 @@ struct TrxFactory {
trx->rw_trx_hash_pins = 0;
trx_init(trx);
- trx->dict_operation_lock_mode = 0;
+ trx->dict_operation_lock_mode = false;
trx->detailed_error = reinterpret_cast<char*>(
ut_zalloc_nokey(MAX_DETAILED_ERROR_LEN));
@@ -215,7 +215,7 @@ struct TrxFactory {
ut_a(trx->lock.wait_lock == NULL);
ut_a(trx->lock.wait_thr == NULL);
- ut_a(trx->dict_operation_lock_mode == 0);
+ ut_a(!trx->dict_operation_lock_mode);
if (trx->lock.lock_heap != NULL) {
mem_heap_free(trx->lock.lock_heap);
@@ -1114,7 +1114,7 @@ trx_finalize_for_fts(
trx->fts_trx = NULL;
}
-extern "C" MYSQL_THD thd_increment_pending_ops();
+
extern "C" void thd_decrement_pending_ops(MYSQL_THD);
@@ -1123,11 +1123,11 @@ extern "C" void thd_decrement_pending_ops(MYSQL_THD);
/*
If required, initiates write and optionally flush of the log to
disk
- @param[in] lsn - lsn up to which logs are to be flushed.
- @param[in] trx_state - if trx_state is PREPARED, the function will
+ @param lsn LSN up to which logs are to be flushed.
+ @param trx transaction; if trx->state is PREPARED, the function will
also wait for the flush to complete.
*/
-static void trx_flush_log_if_needed_low(lsn_t lsn, trx_state_t trx_state)
+static void trx_flush_log_if_needed_low(lsn_t lsn, const trx_t *trx)
{
if (!srv_flush_log_at_trx_commit)
return;
@@ -1138,25 +1138,23 @@ static void trx_flush_log_if_needed_low(lsn_t lsn, trx_state_t trx_state)
const bool flush= srv_file_flush_method != SRV_NOSYNC &&
(srv_flush_log_at_trx_commit & 1);
- if (trx_state == TRX_STATE_PREPARED)
+ if (trx->state == TRX_STATE_PREPARED)
{
/* XA, which is used with binlog as well.
Be conservative, use synchronous wait.*/
+sync:
log_write_up_to(lsn, flush);
return;
}
completion_callback cb;
- if ((cb.m_param = thd_increment_pending_ops()))
+ if ((cb.m_param = innodb_thd_increment_pending_ops(trx->mysql_thd)))
{
cb.m_callback = (void (*)(void *)) thd_decrement_pending_ops;
log_write_up_to(lsn, flush, false, &cb);
}
else
- {
- /* No THD, synchronous write */
- log_write_up_to(lsn, flush);
- }
+ goto sync;
}
/**********************************************************************//**
@@ -1171,7 +1169,7 @@ trx_flush_log_if_needed(
trx_t* trx) /*!< in/out: transaction */
{
trx->op_info = "flushing log";
- trx_flush_log_if_needed_low(lsn,trx->state);
+ trx_flush_log_if_needed_low(lsn, trx);
trx->op_info = "";
}