author | Monty <monty@mariadb.org> | 2018-10-09 18:55:18 +0300
committer | Monty <monty@mariadb.org> | 2018-12-09 22:12:25 +0200
commit | 163b34fe25919b25ff83860f30f2440b44c8b53b (patch)
tree | 7d21575247bdba318889ad4bc650028e59549781
parent | 306b7a2243eb3c3e8dcc567ef6d4e7e50dca21a4 (diff)
download | mariadb-git-163b34fe25919b25ff83860f30f2440b44c8b53b.tar.gz
Optimize flush tables with read lock (FTWRL) to not wait for SELECTs
Part of MDEV-5336 Implement LOCK FOR BACKUP
The idea is that instead of waiting in close_cached_tables() for all
tables to be closed, we call flush_tables(), which does the following:
- Flush unused objects in the table cache to free memory
- Collect all tables that are open
- Call HA_EXTRA_FLUSH on the collected objects to get them into a "closed" state
- Added HA_EXTRA_FLUSH support to archive and CSV
- Added multi-user protection to HA_EXTRA_FLUSH in MyISAM and Aria
The benefit compared to the old code is:
- FTWRL doesn't have to wait for long-running read operations or
  open HANDLERs (see the sketch below)
35 files changed, 456 insertions, 81 deletions
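In outline, FTWRL still takes the global read lock MDL first, but then calls flush_tables() instead of close_cached_tables(): unused objects are purged from the table cache, every share that still has an open table is pinned, sent HA_EXTRA_FLUSH, and released again, so nothing has to wait for open tables to be closed. The following is a minimal, self-contained model of that sequence only; the Share/Table types and helpers are stand-ins invented for illustration, and the real server code is the flush_tables()/tc_collect_used_shares() change in sql/sql_base.cc further down in this diff.

```cpp
// Simplified model of the new FTWRL flow described above.
// All types and helpers here are illustrative stand-ins, not MariaDB code;
// only the overall sequence (purge unused objects, collect used shares,
// issue HA_EXTRA_FLUSH, release the shares) mirrors the patch.
#include <cstdio>
#include <memory>
#include <vector>

struct Table {
  bool flushed = false;
  void extra_flush() { flushed = true; }   // stands in for HA_EXTRA_FLUSH
};

struct Share {
  int ref_count = 0;                       // > 0 means the table is open somewhere
  std::vector<std::unique_ptr<Table>> open_tables;
};

// Stand-in for purge_tables(): drop cache entries nobody references.
static void purge_unused(std::vector<std::shared_ptr<Share>> &cache) {
  std::erase_if(cache, [](const std::shared_ptr<Share> &s) {
    return s->ref_count == 0;
  });
}

// Stand-in for flush_tables(): instead of waiting for every user to close
// its table, flush the still-open ones in place so FTWRL can proceed.
static bool flush_tables(std::vector<std::shared_ptr<Share>> &cache) {
  purge_unused(cache);

  // Collect shares that still have open tables, pinning each one so it
  // cannot disappear while we flush it (the patch bumps ref_count; the
  // shared_ptr copy plays that role here).
  std::vector<std::shared_ptr<Share>> used;
  for (auto &share : cache)
    if (share->ref_count > 0)
      used.push_back(share);

  // Ask every open table to flush itself; nobody has to close anything.
  for (auto &share : used)
    for (auto &table : share->open_tables)
      table->extra_flush();
  return false;                            // false == success, as in the patch
}

int main() {
  std::vector<std::shared_ptr<Share>> cache;
  auto s = std::make_shared<Share>();
  s->ref_count = 1;                        // e.g. a long-running SELECT or open HANDLER
  s->open_tables.push_back(std::make_unique<Table>());
  cache.push_back(s);

  bool error = flush_tables(cache);
  std::printf("error=%d flushed=%d\n", error, (int) s->open_tables[0]->flushed);
}
```

The design point the model tries to capture is that flushing replaces closing: a long-running reader keeps its TABLE object, FTWRL merely forces the engine to write out its state via HA_EXTRA_FLUSH, which is why the handler tests below now see updated data without the old "Waiting for table flush" synchronization.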
diff --git a/mysql-test/main/flush.result b/mysql-test/main/flush.result index af8e327657b..c5176527a3a 100644 --- a/mysql-test/main/flush.result +++ b/mysql-test/main/flush.result @@ -472,10 +472,7 @@ create table t1 (i int); create table t2 (i int); handler t1 open; connection con1; -# Sending: flush tables with read lock; -connection con2; -# Wait until FTWRL starts waiting for 't1' to be closed. connection default; # The below statement should not cause deadlock. # Sending: @@ -483,8 +480,6 @@ insert into t2 values (1); connection con2; # Wait until INSERT starts to wait for FTWRL to go away. connection con1; -# FTWRL should be able to continue now. -# Reap FTWRL. unlock tables; connection default; # Reap INSERT. diff --git a/mysql-test/main/flush.test b/mysql-test/main/flush.test index 51b5c48c137..d626abf8880 100644 --- a/mysql-test/main/flush.test +++ b/mysql-test/main/flush.test @@ -566,17 +566,7 @@ create table t2 (i int); handler t1 open; connection con1; ---echo # Sending: ---send flush tables with read lock - -connection con2; ---echo # Wait until FTWRL starts waiting for 't1' to be closed. -let $wait_condition= - select count(*) = 1 from information_schema.processlist - where state = "Waiting for table flush" - and info = "flush tables with read lock"; ---source include/wait_condition.inc - +flush tables with read lock; connection default; --echo # The below statement should not cause deadlock. --echo # Sending: @@ -591,9 +581,6 @@ let $wait_condition= --source include/wait_condition.inc connection con1; ---echo # FTWRL should be able to continue now. ---echo # Reap FTWRL. ---reap unlock tables; connection default; diff --git a/mysql-test/main/flush_read_lock.result b/mysql-test/main/flush_read_lock.result index 55c31ae8d12..aa67ccdce55 100644 --- a/mysql-test/main/flush_read_lock.result +++ b/mysql-test/main/flush_read_lock.result @@ -652,6 +652,7 @@ connection default; # 14.2) FLUSH TABLES <list> WITH READ LOCK is not blocked by # active FTWRL. But since the latter keeps tables open # FTWRL is blocked by FLUSH TABLES <list> WITH READ LOCK. +# Fixed by MDEV-5336 flush tables with read lock; # FT <list> WRL is allowed under FTWRL at the moment. # It does not make much sense though. @@ -668,12 +669,9 @@ connection default; flush tables t1_base, t2_base with read lock; connection con1; flush tables with read lock; -connection con2; -# Wait until FTWRL is blocked. connection default; unlock tables; connection con1; -# Reap FTWRL. 
unlock tables; connection default; # @@ -1677,3 +1675,31 @@ disconnect con1; disconnect con2; disconnect con3; set global sql_mode=default; +# +# Deadlock between FTWRL under open handler and DDL/LOCK TABLES +# +CREATE TABLE t1(a INT); +HANDLER t1 OPEN; +# +connect con1,localhost,root,,; +SET DEBUG_SYNC= 'mdl_acquire_lock_wait SIGNAL ready'; +LOCK TABLE t1 WRITE; +# +# we need to do it in a separate connection, +# because SET DEBUG_SYNC call open_tables()/mysql_ha_flush() :( +connect con2,localhost,root,,; +SET DEBUG_SYNC= 'now WAIT_FOR ready'; +disconnect con2; +# +connection default; +FLUSH TABLES WITH READ LOCK; +# +connection con1; +UNLOCK TABLES; +disconnect con1; +# +connection default; +UNLOCK TABLES; +HANDLER t1 CLOSE; +DROP TABLE t1; +SET DEBUG_SYNC= 'RESET'; diff --git a/mysql-test/main/flush_read_lock.test b/mysql-test/main/flush_read_lock.test index 4a9752ae9f1..66525551ced 100644 --- a/mysql-test/main/flush_read_lock.test +++ b/mysql-test/main/flush_read_lock.test @@ -800,6 +800,8 @@ connection default; --echo # 14.2) FLUSH TABLES <list> WITH READ LOCK is not blocked by --echo # active FTWRL. But since the latter keeps tables open --echo # FTWRL is blocked by FLUSH TABLES <list> WITH READ LOCK. +--echo # Fixed by MDEV-5336 + flush tables with read lock; --echo # FT <list> WRL is allowed under FTWRL at the moment. --echo # It does not make much sense though. @@ -815,19 +817,10 @@ unlock tables; connection default; flush tables t1_base, t2_base with read lock; connection $con_aux1; ---send flush tables with read lock -connection $con_aux2; ---echo # Wait until FTWRL is blocked. -let $wait_condition= - select count(*) = 1 from information_schema.processlist - where state = "Waiting for table flush" and - info = "flush tables with read lock"; ---source include/wait_condition.inc +flush tables with read lock; connection default; unlock tables; connection $con_aux1; ---echo # Reap FTWRL. ---reap unlock tables; connection default; @@ -2022,3 +2015,40 @@ set global sql_mode=default; # Check that all connections opened by test cases in this file are really # gone so execution of other tests won't be affected by their presence. 
--source include/wait_until_count_sessions.inc + + +--echo # +--echo # Deadlock between FTWRL under open handler and DDL/LOCK TABLES +--echo # +CREATE TABLE t1(a INT); +HANDLER t1 OPEN; + +--echo # +connect (con1,localhost,root,,); +SET DEBUG_SYNC= 'mdl_acquire_lock_wait SIGNAL ready'; +--send LOCK TABLE t1 WRITE + +--echo # +--echo # we need to do it in a separate connection, +--echo # because SET DEBUG_SYNC call open_tables()/mysql_ha_flush() :( +connect (con2,localhost,root,,); +SET DEBUG_SYNC= 'now WAIT_FOR ready'; +disconnect con2; + +--echo # +connection default; +--send FLUSH TABLES WITH READ LOCK + +--echo # +connection con1; +reap; +UNLOCK TABLES; +disconnect con1; + +--echo # +connection default; +reap; +UNLOCK TABLES; +HANDLER t1 CLOSE; +DROP TABLE t1; +SET DEBUG_SYNC= 'RESET'; diff --git a/mysql-test/main/mdl_sync.result b/mysql-test/main/mdl_sync.result index 3880fc5ef91..bf659a3616f 100644 --- a/mysql-test/main/mdl_sync.result +++ b/mysql-test/main/mdl_sync.result @@ -3055,7 +3055,7 @@ disconnect con3; # CREATE TABLE t1(a INT) ENGINE=InnoDB; SET debug_sync='open_tables_after_open_and_process_table SIGNAL ready WAIT_FOR go'; -SELECT * FROM t1; +INSERT INTO t1 values (1); connect con1,localhost,root,,; SET debug_sync='now WAIT_FOR ready'; SET lock_wait_timeout=1; @@ -3063,7 +3063,21 @@ FLUSH TABLES WITH READ LOCK; ERROR HY000: Lock wait timeout exceeded; try restarting transaction SET debug_sync='now SIGNAL go'; connection default; +# After MDEV-5536, SELECT will not block FLUSH TABLES +SET debug_sync='RESET'; +SET debug_sync='open_tables_after_open_and_process_table SIGNAL ready WAIT_FOR go'; +SELECT * FROM t1; +connection con1; +SET debug_sync='now WAIT_FOR ready'; +SET lock_wait_timeout=1; +FLUSH TABLES WITH READ LOCK; +SET debug_sync='now SIGNAL go'; +connection default; a +1 +connection con1; +unlock tables; +connection default; SET debug_sync='RESET'; DROP TABLE t1; disconnect con1; diff --git a/mysql-test/main/mdl_sync.test b/mysql-test/main/mdl_sync.test index fbecd6bf547..20f850a2744 100644 --- a/mysql-test/main/mdl_sync.test +++ b/mysql-test/main/mdl_sync.test @@ -4079,9 +4079,10 @@ disconnect con3; --echo # MDEV-12620 - set lock_wait_timeout = 1;flush tables with read lock; --echo # lock not released after timeout --echo # + CREATE TABLE t1(a INT) ENGINE=InnoDB; SET debug_sync='open_tables_after_open_and_process_table SIGNAL ready WAIT_FOR go'; -send SELECT * FROM t1; +send INSERT INTO t1 values (1); connect (con1,localhost,root,,); SET debug_sync='now WAIT_FOR ready'; @@ -4093,12 +4094,31 @@ SET debug_sync='now SIGNAL go'; connection default; reap; + +--echo # After MDEV-5536, SELECT will not block FLUSH TABLES + +SET debug_sync='RESET'; +SET debug_sync='open_tables_after_open_and_process_table SIGNAL ready WAIT_FOR go'; +send SELECT * FROM t1; + +connection con1; +SET debug_sync='now WAIT_FOR ready'; +# lock_wait_timeout should be 0 in 10.3, so that we don't have to wait at all +SET lock_wait_timeout=1; +FLUSH TABLES WITH READ LOCK; +SET debug_sync='now SIGNAL go'; + +connection default; +reap; +connection con1; +unlock tables; +connection default; + SET debug_sync='RESET'; DROP TABLE t1; disconnect con1; - # Check that all connections opened by test cases in this file are really # gone so execution of other tests won't be affected by their presence. 
--source include/wait_until_count_sessions.inc diff --git a/mysql-test/suite/archive/flush.result b/mysql-test/suite/archive/flush.result new file mode 100644 index 00000000000..428f32d09f8 --- /dev/null +++ b/mysql-test/suite/archive/flush.result @@ -0,0 +1,18 @@ +CREATE TABLE t1(a INT) ENGINE=archive; +INSERT INTO t1 VALUES(1); +connect con1, localhost, root; +LOCK TABLE t1 READ; +connection default; +FLUSH TABLES WITH READ LOCK; +UNLOCK TABLES; +# Must return 1 row +SELECT * FROM t2; +a +1 +SELECT * FROM t1; +a +1 +connection con1; +UNLOCK TABLES; +connection default; +DROP TABLE t1, t2; diff --git a/mysql-test/suite/archive/flush.test b/mysql-test/suite/archive/flush.test new file mode 100644 index 00000000000..feadef08d7a --- /dev/null +++ b/mysql-test/suite/archive/flush.test @@ -0,0 +1,25 @@ +--source include/have_archive.inc + +let $MYSQLD_DATADIR= `SELECT @@datadir`; +CREATE TABLE t1(a INT) ENGINE=archive; +INSERT INTO t1 VALUES(1); +# Works correct if we uncomment next row +#FLUSH TABLE t1; + +connect(con1, localhost, root); +LOCK TABLE t1 READ; + +connection default; +FLUSH TABLES WITH READ LOCK; +copy_file $MYSQLD_DATADIR/test/t1.frm $MYSQLD_DATADIR/test/t2.frm; +copy_file $MYSQLD_DATADIR/test/t1.ARZ $MYSQLD_DATADIR/test/t2.ARZ; +UNLOCK TABLES; +--echo # Must return 1 row +SELECT * FROM t2; +SELECT * FROM t1; + +connection con1; +UNLOCK TABLES; + +connection default; +DROP TABLE t1, t2; diff --git a/mysql-test/suite/csv/flush.result b/mysql-test/suite/csv/flush.result new file mode 100644 index 00000000000..b0b9b21bd0a --- /dev/null +++ b/mysql-test/suite/csv/flush.result @@ -0,0 +1,25 @@ +CREATE TABLE t1(a INT NOT NULL) ENGINE=csv; +INSERT INTO t1 VALUES(1); +connect con1, localhost, root; +LOCK TABLE t1 READ; +connection default; +FLUSH TABLES WITH READ LOCK; +UNLOCK TABLES; +# Must return 1 row +SELECT * FROM t2; +a +1 +SELECT * FROM t1; +a +1 +connection con1; +UNLOCK TABLES; +connection default; +INSERT INTO t2 VALUES(2); +INSERT INTO t2 VALUES(2); +SELECT * from t1,t2; +a a +1 1 +1 2 +1 2 +DROP TABLE t1, t2; diff --git a/mysql-test/suite/csv/flush.test b/mysql-test/suite/csv/flush.test new file mode 100644 index 00000000000..934ac26f291 --- /dev/null +++ b/mysql-test/suite/csv/flush.test @@ -0,0 +1,30 @@ +--source include/have_csv.inc + +let $MYSQLD_DATADIR= `SELECT @@datadir`; +CREATE TABLE t1(a INT NOT NULL) ENGINE=csv; +INSERT INTO t1 VALUES(1); +# works correct if uncommented +#FLUSH TABLE t1; + +connect(con1, localhost, root); +LOCK TABLE t1 READ; + +connection default; +FLUSH TABLES WITH READ LOCK; +copy_file $MYSQLD_DATADIR/test/t1.frm $MYSQLD_DATADIR/test/t2.frm; +copy_file $MYSQLD_DATADIR/test/t1.CSV $MYSQLD_DATADIR/test/t2.CSV; +copy_file $MYSQLD_DATADIR/test/t1.CSM $MYSQLD_DATADIR/test/t2.CSM; +UNLOCK TABLES; +--echo # Must return 1 row +SELECT * FROM t2; +SELECT * FROM t1; +connection con1; +UNLOCK TABLES; + +connection default; + +INSERT INTO t2 VALUES(2); +INSERT INTO t2 VALUES(2); +SELECT * from t1,t2; + +DROP TABLE t1, t2; diff --git a/mysql-test/suite/handler/aria.result b/mysql-test/suite/handler/aria.result index bd5cda77b5a..1896e30f7d5 100644 --- a/mysql-test/suite/handler/aria.result +++ b/mysql-test/suite/handler/aria.result @@ -1485,7 +1485,7 @@ handler t2 open; flush tables with read lock; handler t1 read next; a b -1 1 +2 1 select a from t3; a 1 diff --git a/mysql-test/suite/handler/heap.result b/mysql-test/suite/handler/heap.result index 24103debc7f..32d06b79604 100644 --- a/mysql-test/suite/handler/heap.result +++ 
b/mysql-test/suite/handler/heap.result @@ -1484,7 +1484,7 @@ handler t2 open; flush tables with read lock; handler t1 read next; a b -1 1 +2 1 select a from t3; a 1 diff --git a/mysql-test/suite/handler/innodb.result b/mysql-test/suite/handler/innodb.result index c6932015927..a1b2b318911 100644 --- a/mysql-test/suite/handler/innodb.result +++ b/mysql-test/suite/handler/innodb.result @@ -1489,7 +1489,7 @@ handler t2 open; flush tables with read lock; handler t1 read next; a b -1 1 +2 1 select a from t3; a 1 diff --git a/mysql-test/suite/handler/myisam.result b/mysql-test/suite/handler/myisam.result index 0acbc8edee0..2c5f8c3bfde 100644 --- a/mysql-test/suite/handler/myisam.result +++ b/mysql-test/suite/handler/myisam.result @@ -1485,7 +1485,7 @@ handler t2 open; flush tables with read lock; handler t1 read next; a b -1 1 +2 1 select a from t3; a 1 diff --git a/sql/lock.cc b/sql/lock.cc index 5420e9f42b5..59563487822 100644 --- a/sql/lock.cc +++ b/sql/lock.cc @@ -77,6 +77,7 @@ #include "sql_base.h" // close_tables_for_reopen #include "sql_parse.h" // is_log_table_write_query #include "sql_acl.h" // SUPER_ACL +#include "sql_handler.h" #include <hash.h> #include "wsrep_mysqld.h" @@ -1015,6 +1016,8 @@ bool Global_read_lock::lock_global_read_lock(THD *thd) { MDL_request mdl_request; + mysql_ha_cleanup_no_free(thd); + DBUG_ASSERT(! thd->mdl_context.is_lock_owner(MDL_key::GLOBAL, "", "", MDL_SHARED)); mdl_request.init(MDL_key::GLOBAL, "", "", MDL_SHARED, MDL_EXPLICIT); diff --git a/sql/sql_base.cc b/sql/sql_base.cc index 8dc68c32a0e..700175f14d6 100644 --- a/sql/sql_base.cc +++ b/sql/sql_base.cc @@ -349,6 +349,34 @@ static my_bool close_cached_tables_callback(TDC_element *element, } +/** + Close all tables that are not in use in table definition cache + + @param purge_flag Argument for tc_purge. true if we should force all + shares to be deleted. false if it's enough to just + evict those that are not in use. +*/ + +void purge_tables(bool purge_flag) +{ + /* + Force close of all open tables. + + Note that code in TABLE_SHARE::wait_for_old_version() assumes that + incrementing of refresh_version is followed by purge of unused table + shares. + */ + kill_delayed_threads(); + /* + Get rid of all unused TABLE and TABLE_SHARE instances. By doing + this we automatically close all tables which were marked as "old". + */ + tc_purge(purge_flag); + /* Free table shares which were not freed implicitly by loop above. */ + tdc_purge(true); +} + + bool close_cached_tables(THD *thd, TABLE_LIST *tables, bool wait_for_refresh, ulong timeout) { @@ -361,23 +389,7 @@ bool close_cached_tables(THD *thd, TABLE_LIST *tables, refresh_version= tdc_increment_refresh_version(); if (!tables) - { - /* - Force close of all open tables. - - Note that code in TABLE_SHARE::wait_for_old_version() assumes that - incrementing of refresh_version is followed by purge of unused table - shares. - */ - kill_delayed_threads(); - /* - Get rid of all unused TABLE and TABLE_SHARE instances. By doing - this we automatically close all tables which were marked as "old". - */ - tc_purge(true); - /* Free table shares which were not freed implicitly by loop above. 
*/ - tdc_purge(true); - } + purge_tables(true); else { bool found=0; @@ -502,6 +514,128 @@ err_with_reopen: /** + Collect all shares that has open tables +*/ + +struct tc_collect_arg +{ + DYNAMIC_ARRAY shares; +}; + +static my_bool tc_collect_used_shares(TDC_element *element, + tc_collect_arg *arg) +{ + my_bool result= FALSE; + + DYNAMIC_ARRAY *shares= &arg->shares; + mysql_mutex_lock(&element->LOCK_table_share); + if (element->ref_count > 0 && !element->share->is_view) + { + DBUG_ASSERT(element->share); + element->ref_count++; // Protect against delete + if (push_dynamic(shares,(uchar*) &element->share)) + result= TRUE; + } + mysql_mutex_unlock(&element->LOCK_table_share); + return result; +} + + +/** + Flush cached table as part of global read lock + + @param thd + @param flag What type of tables should be flushed + + @return 0 ok + @return 1 error + + After we get the list of table shares, we will call flush on all + possible tables, even if some flush fails. +*/ + +bool flush_tables(THD *thd) +{ + bool result= TRUE; + uint open_errors= 0; + tc_collect_arg collect_arg; + TABLE *tmp_table; + DBUG_ENTER("flush_tables"); + + purge_tables(false); /* Flush unused tables and shares */ + + /* + Loop over all shares and collect shares that have open tables + TODO: + Optimize this to only collect shares that have been used for + write after last time all tables was closed. + */ + + if (!(tmp_table= (TABLE*) my_malloc(sizeof(*tmp_table), + MYF(MY_WME | MY_THREAD_SPECIFIC)))) + DBUG_RETURN(1); + + my_init_dynamic_array(&collect_arg.shares, sizeof(TABLE_SHARE*), 100, 100, + MYF(0)); + if (tdc_iterate(thd, (my_hash_walk_action) tc_collect_used_shares, + &collect_arg, true)) + { + /* Release already collected shares */ + for (uint i= 0 ; i < collect_arg.shares.elements ; i++) + { + TABLE_SHARE *share= *dynamic_element(&collect_arg.shares, i, + TABLE_SHARE**); + tdc_release_share(share); + } + goto err; + } + + /* Call HA_EXTRA_FLUSH on all found shares */ + for (uint i= 0 ; i < collect_arg.shares.elements ; i++) + { + TABLE_SHARE *share= *dynamic_element(&collect_arg.shares, i, + TABLE_SHARE**); + TABLE *table= tc_acquire_table(thd, share->tdc); + if (table) + { + (void) table->file->extra(HA_EXTRA_FLUSH); + tc_release_table(table); + } + else + { + /* + HA_OPEN_FOR_ALTER is used to allow us to open the table even if + TABLE_SHARE::incompatible_version is set. + */ + if (!open_table_from_share(thd, share, &empty_clex_str, + HA_OPEN_KEYFILE, 0, + HA_OPEN_FOR_ALTER, + tmp_table, FALSE, + NULL)) + { + (void) tmp_table->file->extra(HA_EXTRA_FLUSH); + /* + We don't put the table into the TDC as the table was not fully + opened (we didn't open triggers) + */ + closefrm(tmp_table); + } + else + open_errors++; + } + tdc_release_share(share); + } + + result= open_errors ? TRUE : FALSE; + DBUG_PRINT("note", ("open_errors: %u", open_errors)); +err: + my_free(tmp_table); + delete_dynamic(&collect_arg.shares); + DBUG_RETURN(result); +} + + +/** Close all tables which match specified connection string or if specified string is NULL, then any table with a connection string. 
*/ diff --git a/sql/sql_base.h b/sql/sql_base.h index bdadb420383..47ca2229af5 100644 --- a/sql/sql_base.h +++ b/sql/sql_base.h @@ -290,6 +290,8 @@ void close_log_table(THD *thd, Open_tables_backup *backup); bool close_cached_tables(THD *thd, TABLE_LIST *tables, bool wait_for_refresh, ulong timeout); +void purge_tables(bool purge_flag); +bool flush_tables(THD *thd); bool close_cached_connection_tables(THD *thd, LEX_CSTRING *connect_string); void close_all_tables_for_name(THD *thd, TABLE_SHARE *share, ha_extra_function extra, diff --git a/sql/sql_handler.cc b/sql/sql_handler.cc index 72df8367dc7..08114b99757 100644 --- a/sql/sql_handler.cc +++ b/sql/sql_handler.cc @@ -1196,10 +1196,10 @@ void mysql_ha_flush(THD *thd) @note Broadcasts refresh if it closed a table with old version. */ -void mysql_ha_cleanup(THD *thd) +void mysql_ha_cleanup_no_free(THD *thd) { SQL_HANDLER *hash_tables; - DBUG_ENTER("mysql_ha_cleanup"); + DBUG_ENTER("mysql_ha_cleanup_no_free"); for (uint i= 0; i < thd->handler_tables_hash.records; i++) { @@ -1207,9 +1207,15 @@ void mysql_ha_cleanup(THD *thd) if (hash_tables->table) mysql_ha_close_table(hash_tables); } + DBUG_VOID_RETURN; +} - my_hash_free(&thd->handler_tables_hash); +void mysql_ha_cleanup(THD *thd) +{ + DBUG_ENTER("mysql_ha_cleanup"); + mysql_ha_cleanup_no_free(thd); + my_hash_free(&thd->handler_tables_hash); DBUG_VOID_RETURN; } diff --git a/sql/sql_handler.h b/sql/sql_handler.h index 4c16f7e5c57..16063bb1f35 100644 --- a/sql/sql_handler.h +++ b/sql/sql_handler.h @@ -73,6 +73,7 @@ bool mysql_ha_read(THD *, TABLE_LIST *,enum enum_ha_read_modes, const char *, void mysql_ha_flush(THD *thd); void mysql_ha_flush_tables(THD *thd, TABLE_LIST *all_tables); void mysql_ha_rm_tables(THD *thd, TABLE_LIST *tables); +void mysql_ha_cleanup_no_free(THD *thd); void mysql_ha_cleanup(THD *thd); void mysql_ha_set_explicit_lock_duration(THD *thd); void mysql_ha_rm_temporary_tables(THD *thd); diff --git a/sql/sql_parse.cc b/sql/sql_parse.cc index 5193524ca05..6a2af72e351 100644 --- a/sql/sql_parse.cc +++ b/sql/sql_parse.cc @@ -2131,6 +2131,8 @@ bool dispatch_command(enum enum_server_command command, THD *thd, DBUG_EXECUTE_IF("simulate_detached_thread_refresh", debug_simulate= TRUE;); if (debug_simulate) { + /* This code doesn't work under FTWRL */ + DBUG_ASSERT(! (options & REFRESH_READ_LOCK)); /* Simulate a reload without a attached thread session. Provides a environment similar to that of when the diff --git a/sql/sql_reload.cc b/sql/sql_reload.cc index abdf9d76d15..961f31eb728 100644 --- a/sql/sql_reload.cc +++ b/sql/sql_reload.cc @@ -231,6 +231,7 @@ bool reload_acl_and_cache(THD *thd, unsigned long long options, { if ((options & REFRESH_READ_LOCK) && thd) { + DBUG_ASSERT(!(options & REFRESH_FAST) && !tables); /* On the first hand we need write lock on the tables to be flushed, on the other hand we must not try to aspire a global read lock @@ -249,9 +250,7 @@ bool reload_acl_and_cache(THD *thd, unsigned long long options, tmp_write_to_binlog= 0; if (thd->global_read_lock.lock_global_read_lock(thd)) return 1; // Killed - if (close_cached_tables(thd, tables, - ((options & REFRESH_FAST) ? FALSE : TRUE), - thd->variables.lock_wait_timeout)) + if (flush_tables(thd)) { /* NOTE: my_error() has been already called by reopen_tables() within @@ -274,11 +273,9 @@ bool reload_acl_and_cache(THD *thd, unsigned long long options, make_global_read_lock_block_commit(thd) above since they could have modified the tables too. 
*/ - if (WSREP(thd) && - close_cached_tables(thd, tables, (options & REFRESH_FAST) ? - FALSE : TRUE, TRUE)) - result= 1; - } + if (WSREP(thd) && flush_tables(thd)) + result= 1; + } else { if (thd && thd->locked_tables_mode) diff --git a/sql/table_cache.cc b/sql/table_cache.cc index 9afe3ca61d4..d997aeff9f8 100644 --- a/sql/table_cache.cc +++ b/sql/table_cache.cc @@ -406,7 +406,7 @@ void tc_add_table(THD *thd, TABLE *table) @return TABLE object, or NULL if no unused objects. */ -static TABLE *tc_acquire_table(THD *thd, TDC_element *element) +TABLE *tc_acquire_table(THD *thd, TDC_element *element) { uint32 n_instances= my_atomic_load32_explicit((int32*) &tc_active_instances, @@ -657,7 +657,7 @@ void tdc_start_shutdown(void) tdc_size= 0; tc_size= 0; /* Free all cached but unused TABLEs and TABLE_SHAREs. */ - close_cached_tables(NULL, NULL, FALSE, LONG_TIMEOUT); + purge_tables(true); } DBUG_VOID_RETURN; } diff --git a/sql/table_cache.h b/sql/table_cache.h index b41665258c9..148edc84223 100644 --- a/sql/table_cache.h +++ b/sql/table_cache.h @@ -88,7 +88,6 @@ extern bool tdc_remove_table(THD *thd, enum_tdc_remove_table_type remove_type, const char *db, const char *table_name, bool kill_delayed_threads); - extern int tdc_wait_for_old_version(THD *thd, const char *db, const char *table_name, ulong wait_timeout, uint deadlock_weight, @@ -102,6 +101,7 @@ extern uint tc_records(void); extern void tc_purge(bool mark_flushed= false); extern void tc_add_table(THD *thd, TABLE *table); extern void tc_release_table(TABLE *table); +extern TABLE *tc_acquire_table(THD *thd, TDC_element *element); /** Create a table cache key for non-temporary table. diff --git a/storage/archive/azio.c b/storage/archive/azio.c index 0f66b999c94..3529d875f72 100644 --- a/storage/archive/azio.c +++ b/storage/archive/azio.c @@ -866,7 +866,10 @@ int azclose (azio_stream *s) if (s->mode == 'w') { if (do_flush(s, Z_FINISH) != Z_OK) - return destroy(s); + { + destroy(s); + return Z_ERRNO; + } putLong(s->file, s->crc); putLong(s->file, (uLong)(s->in & 0xffffffff)); diff --git a/storage/archive/ha_archive.cc b/storage/archive/ha_archive.cc index bb15aa9297d..f97bc52b6a9 100644 --- a/storage/archive/ha_archive.cc +++ b/storage/archive/ha_archive.cc @@ -1737,6 +1737,20 @@ int ha_archive::info(uint flag) } +int ha_archive::extra(enum ha_extra_function operation) +{ + switch (operation) { + case HA_EXTRA_FLUSH: + mysql_mutex_lock(&share->mutex); + share->close_archive_writer(); + mysql_mutex_unlock(&share->mutex); + break; + default: + break; + } + return 0; +} + /* This method tells us that a bulk insert operation is about to occur. We set a flag which will keep write_row from saying that its data is dirty. 
This in diff --git a/storage/archive/ha_archive.h b/storage/archive/ha_archive.h index 1f25fba4eed..17649c9c110 100644 --- a/storage/archive/ha_archive.h +++ b/storage/archive/ha_archive.h @@ -148,6 +148,7 @@ public: int read_data_header(azio_stream *file_to_read); void position(const uchar *record); int info(uint); + int extra(enum ha_extra_function operation); void update_create_info(HA_CREATE_INFO *create_info); int create(const char *name, TABLE *form, HA_CREATE_INFO *create_info); int optimize(THD* thd, HA_CHECK_OPT* check_opt); diff --git a/storage/csv/ha_tina.cc b/storage/csv/ha_tina.cc index d5db465588a..f69dd7989a2 100644 --- a/storage/csv/ha_tina.cc +++ b/storage/csv/ha_tina.cc @@ -1311,12 +1311,28 @@ int ha_tina::info(uint flag) int ha_tina::extra(enum ha_extra_function operation) { DBUG_ENTER("ha_tina::extra"); - if (operation == HA_EXTRA_MARK_AS_LOG_TABLE) - { - mysql_mutex_lock(&share->mutex); - share->is_log_table= TRUE; - mysql_mutex_unlock(&share->mutex); - } + switch (operation) { + case HA_EXTRA_MARK_AS_LOG_TABLE: + { + mysql_mutex_lock(&share->mutex); + share->is_log_table= TRUE; + mysql_mutex_unlock(&share->mutex); + } + break; + case HA_EXTRA_FLUSH: + mysql_mutex_lock(&share->mutex); + if (share->tina_write_opened) + { + (void)write_meta_file(share->meta_file, share->rows_recorded, + share->crashed ? TRUE :FALSE); + mysql_file_close(share->tina_write_filedes, MYF(0)); + share->tina_write_opened= FALSE; + } + mysql_mutex_unlock(&share->mutex); + break; + default: + break; + } DBUG_RETURN(0); } diff --git a/storage/maria/ma_check.c b/storage/maria/ma_check.c index 4eab0e07315..998bb984452 100644 --- a/storage/maria/ma_check.c +++ b/storage/maria/ma_check.c @@ -167,6 +167,9 @@ int maria_chk_status(HA_CHECK *param, MARIA_HA *info) { MARIA_SHARE *share= info->s; + /* Protection for HA_EXTRA_FLUSH */ + mysql_mutex_lock(&share->intern_lock); + if (maria_is_crashed_on_repair(info)) _ma_check_print_warning(param, "Table is marked as crashed and last repair failed"); @@ -189,6 +192,9 @@ int maria_chk_status(HA_CHECK *param, MARIA_HA *info) if (param->testflag & T_UPDATE_STATE) param->warning_printed=save; } + + mysql_mutex_unlock(&share->intern_lock); + if (share->state.create_trid > param->max_trid) { param->wrong_trd_printed= 1; /* Force should run zerofill */ diff --git a/storage/maria/ma_extra.c b/storage/maria/ma_extra.c index 9feead42cf7..9b544aaba59 100644 --- a/storage/maria/ma_extra.c +++ b/storage/maria/ma_extra.c @@ -420,7 +420,11 @@ int maria_extra(MARIA_HA *info, enum ha_extra_function function, error= _ma_flush_table_files(info, MARIA_FLUSH_DATA | MARIA_FLUSH_INDEX, FLUSH_KEEP, FLUSH_KEEP); + mysql_mutex_lock(&share->intern_lock); + /* Tell maria_lock_database() that we locked the intern_lock mutex */ + info->intern_lock_locked= 1; _ma_decrement_open_count(info, 1); + info->intern_lock_locked= 0; if (share->not_flushed) { share->not_flushed= 0; @@ -433,6 +437,7 @@ int maria_extra(MARIA_HA *info, enum ha_extra_function function, _ma_set_fatal_error(share, HA_ERR_CRASHED); } } + mysql_mutex_unlock(&share->intern_lock); break; case HA_EXTRA_NORMAL: /* Theese isn't in use */ info->quick_mode= 0; diff --git a/storage/maria/ma_locking.c b/storage/maria/ma_locking.c index 54f072ede5c..203fd394d26 100644 --- a/storage/maria/ma_locking.c +++ b/storage/maria/ma_locking.c @@ -47,7 +47,8 @@ int maria_lock_database(MARIA_HA *info, int lock_type) } error=0; - mysql_mutex_lock(&share->intern_lock); + if (!info->intern_lock_locked) + mysql_mutex_lock(&share->intern_lock); if 
(share->kfile.file >= 0) /* May only be false on windows */ { switch (lock_type) { @@ -234,7 +235,8 @@ int maria_lock_database(MARIA_HA *info, int lock_type) } } #endif - mysql_mutex_unlock(&share->intern_lock); + if (!info->intern_lock_locked) + mysql_mutex_unlock(&share->intern_lock); DBUG_RETURN(error); } /* maria_lock_database */ diff --git a/storage/maria/maria_def.h b/storage/maria/maria_def.h index 7d051faeac9..1c77beb3c0f 100644 --- a/storage/maria/maria_def.h +++ b/storage/maria/maria_def.h @@ -689,6 +689,7 @@ struct st_maria_handler uint16 last_used_keyseg; /* For MARIAMRG */ uint8 key_del_used; /* != 0 if key_del is used */ my_bool was_locked; /* Was locked in panic */ + my_bool intern_lock_locked; /* locked in ma_extra() */ my_bool append_insert_at_end; /* Set if concurrent insert */ my_bool quick_mode; my_bool in_check_table; /* We are running check tables */ diff --git a/storage/myisam/mi_check.c b/storage/myisam/mi_check.c index f6929438ac0..01078c2a264 100644 --- a/storage/myisam/mi_check.c +++ b/storage/myisam/mi_check.c @@ -102,6 +102,9 @@ int chk_status(HA_CHECK *param, register MI_INFO *info) { MYISAM_SHARE *share=info->s; + /* Protection for HA_EXTRA_FLUSH */ + mysql_mutex_lock(&share->intern_lock); + if (mi_is_crashed_on_repair(info)) mi_check_print_warning(param, "Table is marked as crashed and last repair failed"); @@ -121,6 +124,7 @@ int chk_status(HA_CHECK *param, register MI_INFO *info) if (param->testflag & T_UPDATE_STATE) param->warning_printed=save; } + mysql_mutex_unlock(&share->intern_lock); return 0; } diff --git a/storage/myisam/mi_extra.c b/storage/myisam/mi_extra.c index c10bf61a477..194ad5bf07a 100644 --- a/storage/myisam/mi_extra.c +++ b/storage/myisam/mi_extra.c @@ -332,7 +332,11 @@ int mi_extra(MI_INFO *info, enum ha_extra_function function, void *extra_arg) if (!share->temporary) flush_key_blocks(share->key_cache, share->kfile, &share->dirty_part_map, FLUSH_KEEP); + mysql_mutex_lock(&share->intern_lock); + /* Tell mi_lock_database() that we locked the intern_lock mutex */ + info->intern_lock_locked= 1; _mi_decrement_open_count(info); + info->intern_lock_locked= 0; if (share->not_flushed) { share->not_flushed=0; @@ -349,6 +353,7 @@ int mi_extra(MI_INFO *info, enum ha_extra_function function, void *extra_arg) } if (share->base.blobs) mi_alloc_rec_buff(info, -1, &info->rec_buff); + mysql_mutex_unlock(&share->intern_lock); break; case HA_EXTRA_NORMAL: /* Theese isn't in use */ info->quick_mode=0; diff --git a/storage/myisam/mi_locking.c b/storage/myisam/mi_locking.c index b348429fd3c..f3030148044 100644 --- a/storage/myisam/mi_locking.c +++ b/storage/myisam/mi_locking.c @@ -53,7 +53,8 @@ int mi_lock_database(MI_INFO *info, int lock_type) error= 0; DBUG_EXECUTE_IF ("mi_lock_database_failure", error= EINVAL;); - mysql_mutex_lock(&share->intern_lock); + if (!info->intern_lock_locked) + mysql_mutex_lock(&share->intern_lock); if (share->kfile >= 0) /* May only be false on windows */ { switch (lock_type) { @@ -261,7 +262,8 @@ int mi_lock_database(MI_INFO *info, int lock_type) } } #endif - mysql_mutex_unlock(&share->intern_lock); + if (!info->intern_lock_locked) + mysql_mutex_unlock(&share->intern_lock); if (mark_crashed) mi_mark_crashed(info); DBUG_RETURN(error); diff --git a/storage/myisam/myisamdef.h b/storage/myisam/myisamdef.h index e350626f192..2f8d52863fc 100644 --- a/storage/myisam/myisamdef.h +++ b/storage/myisam/myisamdef.h @@ -296,6 +296,7 @@ struct st_myisam_info uint preload_buff_size; /* When preloading indexes */ myf lock_wait; /* is 0 or 
MY_SHORT_WAIT */ my_bool was_locked; /* Was locked in panic */ + my_bool intern_lock_locked; /* locked in mi_extra() */ my_bool append_insert_at_end; /* Set if concurrent insert */ my_bool quick_mode; /* If info->buff can't be used for rnext */ |