author     Marko Mäkelä <marko.makela@mariadb.com>    2018-06-26 11:34:51 +0300
committer  Marko Mäkelä <marko.makela@mariadb.com>    2018-06-26 11:34:51 +0300
commit     c4eb4bcef648eb2ebdc6edc06905f39f95ef7f6b (patch)
tree       be034e1ec0e803ebafaf8a264195565cc78db274 /storage
parent     c09a8b5b36edb494e2bcc93074c06e26cd9f2b92 (diff)
MDEV-16515 InnoDB: Failing assertion: ++retries < 10000 in file dict0dict.cc

buf_LRU_drop_page_hash_for_tablespace(): Return whether any adaptive hash
index entries existed. If yes, the caller should keep retrying to drop the
adaptive hash index.

row_import_for_mysql(), row_truncate_table_for_mysql(),
row_drop_table_for_mysql(): Ensure that the adaptive hash index was
entirely dropped for the table.
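The heart of the fix is the new caller-side contract: a drop pass reports
whether it removed anything, and callers loop until a pass removes nothing,
checking for interruption between passes (the real code tests both
trx_is_interrupted() and srv_shutdown_state). A minimal standalone sketch of
that drain-and-retry pattern, where drop_page_hash_attempt() and
is_interrupted() are hypothetical stand-ins rather than InnoDB API:

#include <cstdio>

// Stand-in for buf_LRU_drop_page_hash_for_tablespace(): returns true
// if this pass found (and dropped) any adaptive hash index entries.
static bool drop_page_hash_attempt()
{
	static int passes = 3;	// model: entries are gone after a few passes
	return passes-- > 0;
}

// Stand-in for the trx_is_interrupted() / srv_shutdown_state checks.
static bool is_interrupted()
{
	return false;
}

enum db_err { DB_SUCCESS_MODEL, DB_INTERRUPTED_MODEL };

// Drain loop as used by the patched callers: retry until a pass drops
// nothing, but bail out with an error if the operation is interrupted.
static db_err drain_page_hash()
{
	while (drop_page_hash_attempt()) {
		if (is_interrupted()) {
			return DB_INTERRUPTED_MODEL;
		}
	}
	return DB_SUCCESS_MODEL;
}

int main()
{
	std::printf("drained: %d\n", static_cast<int>(drain_page_hash()));
}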
Diffstat (limited to 'storage')
-rw-r--r--  storage/innobase/buf/buf0lru.cc       11
-rw-r--r--  storage/innobase/dict/dict0dict.cc     5
-rw-r--r--  storage/innobase/include/buf0lru.h     8
-rw-r--r--  storage/innobase/row/row0import.cc    28
-rw-r--r--  storage/innobase/row/row0mysql.cc     44
-rw-r--r--  storage/xtradb/buf/buf0lru.cc         11
-rw-r--r--  storage/xtradb/dict/dict0dict.cc       4
-rw-r--r--  storage/xtradb/include/buf0lru.h       8
-rw-r--r--  storage/xtradb/row/row0import.cc      28
-rw-r--r--  storage/xtradb/row/row0mysql.cc       44
10 files changed, 118 insertions, 73 deletions
diff --git a/storage/innobase/buf/buf0lru.cc b/storage/innobase/buf/buf0lru.cc
index 7039ecdf4a6..1f2b6f40529 100644
--- a/storage/innobase/buf/buf0lru.cc
+++ b/storage/innobase/buf/buf0lru.cc
@@ -356,9 +356,10 @@ next_page:
ut_free(page_arr);
}
-/** Drop the adaptive hash index for a tablespace.
-@param[in,out] table table */
-UNIV_INTERN void buf_LRU_drop_page_hash_for_tablespace(dict_table_t* table)
+/** Try to drop the adaptive hash index for a tablespace.
+@param[in,out] table table
+@return whether anything was dropped */
+UNIV_INTERN bool buf_LRU_drop_page_hash_for_tablespace(dict_table_t* table)
{
for (dict_index_t* index = dict_table_get_first_index(table);
index != NULL;
@@ -369,13 +370,15 @@ UNIV_INTERN void buf_LRU_drop_page_hash_for_tablespace(dict_table_t* table)
}
}
- return;
+ return false;
drop_ahi:
ulint id = table->space;
for (ulint i = 0; i < srv_buf_pool_instances; i++) {
buf_LRU_drop_page_hash_for_tablespace(buf_pool_from_array(i),
id);
}
+
+ return true;
}
/******************************************************************//**
diff --git a/storage/innobase/dict/dict0dict.cc b/storage/innobase/dict/dict0dict.cc
index 9609ef96343..b48d51c15a7 100644
--- a/storage/innobase/dict/dict0dict.cc
+++ b/storage/innobase/dict/dict0dict.cc
@@ -2719,12 +2719,11 @@ dict_index_remove_from_cache_low(
zero. See also: dict_table_can_be_evicted() */
do {
- if (!btr_search_info_get_ref_count(info)) {
+ if (!btr_search_info_get_ref_count(info)
+ || !buf_LRU_drop_page_hash_for_tablespace(table)) {
break;
}
- buf_LRU_drop_page_hash_for_tablespace(table);
-
ut_a(++retries < 10000);
} while (srv_shutdown_state == SRV_SHUTDOWN_NONE || !lru_evict);
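Why this hunk stops the assertion: with the old void return, the loop had no
way to tell that a drop pass had accomplished nothing, so a no-op pass still
counted as a retry and ut_a(++retries < 10000) eventually fired. Moving the
call into the break condition ends the loop as soon as a pass drops nothing.
A schematic before/after sketch (the two predicates are hypothetical
stand-ins for btr_search_info_get_ref_count() and
buf_LRU_drop_page_hash_for_tablespace(); the shutdown/eviction condition of
the real loop is omitted):

#include <cassert>

// Model values: after DISCARD TABLESPACE the pages are gone, so a drop
// pass can remove nothing while the AHI reference count stays nonzero.
static bool ahi_ref_count_nonzero() { return true; }
static bool drop_pass_dropped_any() { return false; }

// Before the fix (schematic): the drop result was ignored, so the loop
// kept spinning until the ++retries < 10000 assertion fired.
void before_fix()
{
	unsigned retries = 0;
	while (ahi_ref_count_nonzero()) {
		drop_pass_dropped_any();	// result discarded
		assert(++retries < 10000);	// the MDEV-16515 assertion
	}
}

// After the fix (schematic): a pass that drops nothing breaks the loop
// immediately, so retries only advances while progress is being made.
void after_fix()
{
	unsigned retries = 0;
	while (ahi_ref_count_nonzero() && drop_pass_dropped_any()) {
		assert(++retries < 10000);
	}
}

int main()
{
	after_fix();	// returns at once under this model
	// before_fix();	// would abort on the assertion
}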
diff --git a/storage/innobase/include/buf0lru.h b/storage/innobase/include/buf0lru.h
index 623883433c2..b04086287ce 100644
--- a/storage/innobase/include/buf0lru.h
+++ b/storage/innobase/include/buf0lru.h
@@ -53,9 +53,11 @@ These are low-level functions
/** Minimum LRU list length for which the LRU_old pointer is defined */
#define BUF_LRU_OLD_MIN_LEN 512 /* 8 megabytes of 16k pages */
-/** Drop the adaptive hash index for a tablespace.
-@param[in,out] table table */
-UNIV_INTERN void buf_LRU_drop_page_hash_for_tablespace(dict_table_t* table);
+/** Try to drop the adaptive hash index for a tablespace.
+@param[in,out] table table
+@return whether anything was dropped */
+UNIV_INTERN bool buf_LRU_drop_page_hash_for_tablespace(dict_table_t* table)
+ MY_ATTRIBUTE((warn_unused_result,nonnull));
/** Empty the flush list for all pages belonging to a tablespace.
@param[in] id tablespace identifier
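The header change backs up the new contract: MY_ATTRIBUTE((warn_unused_result,
nonnull)) makes the compiler flag any call site that discards the return
value, so no caller can silently keep the old fire-and-forget behaviour. A
compile-time sketch of the effect, written with the GCC/Clang attribute form
directly and using mock declarations rather than the real header:

struct dict_table_t {};	// mock stand-in for the InnoDB type

// Mock declaration mirroring the patched header.
__attribute__((warn_unused_result, nonnull))
bool buf_LRU_drop_page_hash_for_tablespace(dict_table_t* table);

bool buf_LRU_drop_page_hash_for_tablespace(dict_table_t*)
{
	return false;	// trivial mock body so the sketch links
}

void caller(dict_table_t* t)
{
	// Discarding the result now draws -Wunused-result on GCC/Clang,
	// so a stale fire-and-forget call site is caught at build time:
	buf_LRU_drop_page_hash_for_tablespace(t);

	// Intended use: the result drives the drain loop.
	while (buf_LRU_drop_page_hash_for_tablespace(t)) {
	}
	// The nonnull attribute additionally lets -Wnonnull flag a
	// literal null argument at compile time.
}

int main()
{
	dict_table_t t;
	caller(&t);
}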
diff --git a/storage/innobase/row/row0import.cc b/storage/innobase/row/row0import.cc
index c808a991f25..68acf656034 100644
--- a/storage/innobase/row/row0import.cc
+++ b/storage/innobase/row/row0import.cc
@@ -3983,6 +3983,23 @@ row_import_for_mysql(
DBUG_EXECUTE_IF("ib_import_reset_space_and_lsn_failure",
err = DB_TOO_MANY_CONCURRENT_TRXS;);
+ /* On DISCARD TABLESPACE, we did not drop any adaptive hash
+ index entries. If we replaced the discarded tablespace with a
+ smaller one here, there could still be some adaptive hash
+ index entries that point to cached garbage pages in the buffer
+ pool, because PageConverter::operator() only evicted those
+ pages that were replaced by the imported pages. We must
+ discard all remaining adaptive hash index entries, because the
+ adaptive hash index must be a subset of the table contents;
+ false positives are not tolerated. */
+ while (buf_LRU_drop_page_hash_for_tablespace(table)) {
+ if (trx_is_interrupted(trx)
+ || srv_shutdown_state != SRV_SHUTDOWN_NONE) {
+ err = DB_INTERRUPTED;
+ break;
+ }
+ }
+
if (err != DB_SUCCESS) {
char table_name[MAX_FULL_NAME_LEN + 1];
@@ -4000,17 +4017,6 @@ row_import_for_mysql(
return(row_import_cleanup(prebuilt, trx, err));
}
- /* On DISCARD TABLESPACE, we did not drop any adaptive hash
- index entries. If we replaced the discarded tablespace with a
- smaller one here, there could still be some adaptive hash
- index entries that point to cached garbage pages in the buffer
- pool, because PageConverter::operator() only evicted those
- pages that were replaced by the imported pages. We must
- discard all remaining adaptive hash index entries, because the
- adaptive hash index must be a subset of the table contents;
- false positives are not tolerated. */
- buf_LRU_drop_page_hash_for_tablespace(table);
-
row_mysql_lock_data_dictionary(trx);
/* If the table is stored in a remote tablespace, we need to
diff --git a/storage/innobase/row/row0mysql.cc b/storage/innobase/row/row0mysql.cc
index be24ae885a2..6a971a973f5 100644
--- a/storage/innobase/row/row0mysql.cc
+++ b/storage/innobase/row/row0mysql.cc
@@ -3516,7 +3516,13 @@ row_truncate_table_for_mysql(
fil_space_release(space);
}
- buf_LRU_drop_page_hash_for_tablespace(table);
+ while (buf_LRU_drop_page_hash_for_tablespace(table)) {
+ if (trx_is_interrupted(trx)
+ || srv_shutdown_state != SRV_SHUTDOWN_NONE) {
+ err = DB_INTERRUPTED;
+ goto funct_exit;
+ }
+ }
if (flags != ULINT_UNDEFINED
&& fil_discard_tablespace(space_id) == DB_SUCCESS) {
@@ -4172,6 +4178,27 @@ row_drop_table_for_mysql(
ut_a(!lock_table_has_locks(table));
+ if (table->space != TRX_SYS_SPACE) {
+ /* On DISCARD TABLESPACE, we would not drop the
+ adaptive hash index entries. If the tablespace is
+ missing here, delete-marking the record in SYS_INDEXES
+ would not free any pages in the buffer pool. Thus,
+ dict_index_remove_from_cache() would hang due to
+ adaptive hash index entries existing in the buffer
+ pool. To prevent this hang, and also to guarantee
+ that btr_search_drop_page_hash_when_freed() will avoid
+ calling btr_search_drop_page_hash_index() while we
+ hold the InnoDB dictionary lock, we will drop any
+ adaptive hash index entries upfront. */
+ while (buf_LRU_drop_page_hash_for_tablespace(table)) {
+ if (trx_is_interrupted(trx)
+ || srv_shutdown_state != SRV_SHUTDOWN_NONE) {
+ err = DB_INTERRUPTED;
+ goto funct_exit;
+ }
+ }
+ }
+
switch (trx_get_dict_operation(trx)) {
case TRX_DICT_OP_NONE:
trx_set_dict_operation(trx, TRX_DICT_OP_TABLE);
@@ -4211,21 +4238,6 @@ row_drop_table_for_mysql(
rw_lock_x_unlock(dict_index_get_lock(index));
}
- if (table->space != TRX_SYS_SPACE) {
- /* On DISCARD TABLESPACE, we would not drop the
- adaptive hash index entries. If the tablespace is
- missing here, delete-marking the record in SYS_INDEXES
- would not free any pages in the buffer pool. Thus,
- dict_index_remove_from_cache() would hang due to
- adaptive hash index entries existing in the buffer
- pool. To prevent this hang, and also to guarantee
- that btr_search_drop_page_hash_when_freed() will avoid
- calling btr_search_drop_page_hash_index() while we
- hold the InnoDB dictionary lock, we will drop any
- adaptive hash index entries upfront. */
- buf_LRU_drop_page_hash_for_tablespace(table);
- }
-
/* We use the private SQL parser of Innobase to generate the
query graphs needed in deleting the dictionary data from system
tables in Innobase. Deleting a row from SYS_INDEXES table also
diff --git a/storage/xtradb/buf/buf0lru.cc b/storage/xtradb/buf/buf0lru.cc
index 2c4a4049de6..ec65bfbcce4 100644
--- a/storage/xtradb/buf/buf0lru.cc
+++ b/storage/xtradb/buf/buf0lru.cc
@@ -354,9 +354,10 @@ next_page:
ut_free(page_arr);
}
-/** Drop the adaptive hash index for a tablespace.
-@param[in,out] table table */
-UNIV_INTERN void buf_LRU_drop_page_hash_for_tablespace(dict_table_t* table)
+/** Try to drop the adaptive hash index for a tablespace.
+@param[in,out] table table
+@return whether anything was dropped */
+UNIV_INTERN bool buf_LRU_drop_page_hash_for_tablespace(dict_table_t* table)
{
for (dict_index_t* index = dict_table_get_first_index(table);
index != NULL;
@@ -367,13 +368,15 @@ UNIV_INTERN void buf_LRU_drop_page_hash_for_tablespace(dict_table_t* table)
}
}
- return;
+ return false;
drop_ahi:
ulint id = table->space;
for (ulint i = 0; i < srv_buf_pool_instances; i++) {
buf_LRU_drop_page_hash_for_tablespace(buf_pool_from_array(i),
id);
}
+
+ return true;
}
/******************************************************************//**
diff --git a/storage/xtradb/dict/dict0dict.cc b/storage/xtradb/dict/dict0dict.cc
index 7ade6d79adf..23082e53ec7 100644
--- a/storage/xtradb/dict/dict0dict.cc
+++ b/storage/xtradb/dict/dict0dict.cc
@@ -2729,11 +2729,11 @@ dict_index_remove_from_cache_low(
zero. See also: dict_table_can_be_evicted() */
do {
- if (!btr_search_info_get_ref_count(info, index)) {
+ if (!btr_search_info_get_ref_count(info, index)
+ || !buf_LRU_drop_page_hash_for_tablespace(table)) {
break;
}
- buf_LRU_drop_page_hash_for_tablespace(table);
ut_a(++retries < 10000);
} while (srv_shutdown_state == SRV_SHUTDOWN_NONE || !lru_evict);
diff --git a/storage/xtradb/include/buf0lru.h b/storage/xtradb/include/buf0lru.h
index f0ba1bb227d..ee84d168e28 100644
--- a/storage/xtradb/include/buf0lru.h
+++ b/storage/xtradb/include/buf0lru.h
@@ -55,9 +55,11 @@ These are low-level functions
/** Minimum LRU list length for which the LRU_old pointer is defined */
#define BUF_LRU_OLD_MIN_LEN 512 /* 8 megabytes of 16k pages */
-/** Drop the adaptive hash index for a tablespace.
-@param[in,out] table table */
-UNIV_INTERN void buf_LRU_drop_page_hash_for_tablespace(dict_table_t* table);
+/** Try to drop the adaptive hash index for a tablespace.
+@param[in,out] table table
+@return whether anything was dropped */
+UNIV_INTERN bool buf_LRU_drop_page_hash_for_tablespace(dict_table_t* table)
+ MY_ATTRIBUTE((warn_unused_result,nonnull));
/** Empty the flush list for all pages belonging to a tablespace.
@param[in] id tablespace identifier
diff --git a/storage/xtradb/row/row0import.cc b/storage/xtradb/row/row0import.cc
index 4af40953567..731412c22ec 100644
--- a/storage/xtradb/row/row0import.cc
+++ b/storage/xtradb/row/row0import.cc
@@ -3982,6 +3982,23 @@ row_import_for_mysql(
DBUG_EXECUTE_IF("ib_import_reset_space_and_lsn_failure",
err = DB_TOO_MANY_CONCURRENT_TRXS;);
+ /* On DISCARD TABLESPACE, we did not drop any adaptive hash
+ index entries. If we replaced the discarded tablespace with a
+ smaller one here, there could still be some adaptive hash
+ index entries that point to cached garbage pages in the buffer
+ pool, because PageConverter::operator() only evicted those
+ pages that were replaced by the imported pages. We must
+ discard all remaining adaptive hash index entries, because the
+ adaptive hash index must be a subset of the table contents;
+ false positives are not tolerated. */
+ while (buf_LRU_drop_page_hash_for_tablespace(table)) {
+ if (trx_is_interrupted(trx)
+ || srv_shutdown_state != SRV_SHUTDOWN_NONE) {
+ err = DB_INTERRUPTED;
+ break;
+ }
+ }
+
if (err != DB_SUCCESS) {
char table_name[MAX_FULL_NAME_LEN + 1];
@@ -3999,17 +4016,6 @@ row_import_for_mysql(
return(row_import_cleanup(prebuilt, trx, err));
}
- /* On DISCARD TABLESPACE, we did not drop any adaptive hash
- index entries. If we replaced the discarded tablespace with a
- smaller one here, there could still be some adaptive hash
- index entries that point to cached garbage pages in the buffer
- pool, because PageConverter::operator() only evicted those
- pages that were replaced by the imported pages. We must
- discard all remaining adaptive hash index entries, because the
- adaptive hash index must be a subset of the table contents;
- false positives are not tolerated. */
- buf_LRU_drop_page_hash_for_tablespace(table);
-
row_mysql_lock_data_dictionary(trx);
/* If the table is stored in a remote tablespace, we need to
diff --git a/storage/xtradb/row/row0mysql.cc b/storage/xtradb/row/row0mysql.cc
index 2b6f38ba2af..1347cbdedf8 100644
--- a/storage/xtradb/row/row0mysql.cc
+++ b/storage/xtradb/row/row0mysql.cc
@@ -3540,7 +3540,13 @@ row_truncate_table_for_mysql(
fil_space_release(space);
}
- buf_LRU_drop_page_hash_for_tablespace(table);
+ while (buf_LRU_drop_page_hash_for_tablespace(table)) {
+ if (trx_is_interrupted(trx)
+ || srv_shutdown_state != SRV_SHUTDOWN_NONE) {
+ err = DB_INTERRUPTED;
+ goto funct_exit;
+ }
+ }
if (flags != ULINT_UNDEFINED
&& fil_discard_tablespace(space_id) == DB_SUCCESS) {
@@ -4202,6 +4208,27 @@ row_drop_table_for_mysql(
ut_a(!lock_table_has_locks(table));
+ if (table->space != TRX_SYS_SPACE) {
+ /* On DISCARD TABLESPACE, we would not drop the
+ adaptive hash index entries. If the tablespace is
+ missing here, delete-marking the record in SYS_INDEXES
+ would not free any pages in the buffer pool. Thus,
+ dict_index_remove_from_cache() would hang due to
+ adaptive hash index entries existing in the buffer
+ pool. To prevent this hang, and also to guarantee
+ that btr_search_drop_page_hash_when_freed() will avoid
+ calling btr_search_drop_page_hash_index() while we
+ hold the InnoDB dictionary lock, we will drop any
+ adaptive hash index entries upfront. */
+ while (buf_LRU_drop_page_hash_for_tablespace(table)) {
+ if (trx_is_interrupted(trx)
+ || srv_shutdown_state != SRV_SHUTDOWN_NONE) {
+ err = DB_INTERRUPTED;
+ goto funct_exit;
+ }
+ }
+ }
+
switch (trx_get_dict_operation(trx)) {
case TRX_DICT_OP_NONE:
trx_set_dict_operation(trx, TRX_DICT_OP_TABLE);
@@ -4241,21 +4268,6 @@ row_drop_table_for_mysql(
rw_lock_x_unlock(dict_index_get_lock(index));
}
- if (table->space != TRX_SYS_SPACE) {
- /* On DISCARD TABLESPACE, we would not drop the
- adaptive hash index entries. If the tablespace is
- missing here, delete-marking the record in SYS_INDEXES
- would not free any pages in the buffer pool. Thus,
- dict_index_remove_from_cache() would hang due to
- adaptive hash index entries existing in the buffer
- pool. To prevent this hang, and also to guarantee
- that btr_search_drop_page_hash_when_freed() will avoid
- calling btr_search_drop_page_hash_index() while we
- hold the InnoDB dictionary lock, we will drop any
- adaptive hash index entries upfront. */
- buf_LRU_drop_page_hash_for_tablespace(table);
- }
-
/* We use the private SQL parser of Innobase to generate the
query graphs needed in deleting the dictionary data from system
tables in Innobase. Deleting a row from SYS_INDEXES table also