author     Marko Mäkelä <marko.makela@mariadb.com>  2023-02-16 08:30:20 +0200
committer  Marko Mäkelä <marko.makela@mariadb.com>  2023-02-16 08:30:20 +0200
commit     201cfc33e671705dc0f452fc69a98e3a09de61eb (patch)
tree       68b9c6ea968ea76ebe8949fbc60b13d78a873155
parent     54c0ac72e300acb4405529a827a8c02c4a5e5d15 (diff)
download   mariadb-git-201cfc33e671705dc0f452fc69a98e3a09de61eb.tar.gz
MDEV-30638 Deadlock between INSERT and InnoDB non-persistent statistics update
This is a partial revert of commit 8b6a308e463f937eb8d2498b04967a222c83af90 (MDEV-29883) and a follow-up to the merge commit 394fc71f4fa8f8b1b6d24adfead0ec45121d271e (MDEV-24569).

The latching order related to any operation that accesses the allocation metadata of an InnoDB index tree is as follows:

1. Acquire dict_index_t::lock in non-shared mode.
2. Acquire the index root page latch in non-shared mode.
3. Possibly acquire further index page latches. Unless an exclusive dict_index_t::lock is held, this must follow the root-to-leaf, left-to-right order.
4. Acquire a *non-shared* fil_space_t::latch.
5. Acquire latches on the allocation metadata pages.
6. Possibly allocate and write some pages, or free some pages.

btr_get_size_and_reserved(), dict_stats_update_transient_for_index(), dict_stats_analyze_index(): Acquire an exclusive fil_space_t::latch in order to avoid a deadlock in fseg_n_reserved_pages() in case of concurrent access to multiple indexes sharing the same "inode page".

fseg_page_is_allocated(): Acquire an exclusive fil_space_t::latch in order to avoid deadlocks. All callers are holding latches on a buffer pool page, or an index, or both. Before commit edbde4a11fd0b6437202f8019a79911441b6fb32 (MDEV-24167) a third mode was available that would not conflict with the shared fil_space_t::latch acquired by ha_innobase::info_low(), i_s_sys_tablespaces_fill_table(), or i_s_tablespaces_encryption_fill_table(). Because those calls should be rather rare, it makes sense to use the simple rw_lock with only shared and exclusive modes.

fil_crypt_get_page_throttle(): Avoid invoking fseg_page_is_allocated() on an allocation bitmap page (which can never be freed), to avoid acquiring a shared latch on top of an exclusive one.

mtr_t::s_lock_space(), MTR_MEMO_SPACE_S_LOCK: Remove.
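
As a rough, compilable illustration of the latching order above: the sketch below models the six steps with std::shared_mutex stand-ins. Every identifier in it (TreeLatches, reserve_pages, ...) is hypothetical and not an InnoDB API; std::shared_mutex has no SX mode, so "non-shared" is approximated with a unique_lock. Only the acquisition order mirrors the commit message.

#include <cstddef>
#include <mutex>
#include <shared_mutex>

struct TreeLatches
{
  std::shared_mutex index_lock;  // stands in for dict_index_t::lock
  std::shared_mutex root_latch;  // stands in for the index root page latch
  std::shared_mutex space_latch; // stands in for fil_space_t::latch
  std::shared_mutex inode_page;  // stands in for an allocation "inode page"
};

// Touch the allocation metadata of one index tree in the order
// index lock -> root page -> (further pages) -> tablespace -> inode page.
std::size_t reserve_pages(TreeLatches &t)
{
  std::unique_lock index_guard(t.index_lock);  // 1. dict_index_t::lock, non-shared
  std::unique_lock root_guard(t.root_latch);   // 2. root page latch, non-shared
                                               // 3. further page latches would be
                                               //    acquired here, root to leaf
  std::unique_lock space_guard(t.space_latch); // 4. exclusive fil_space_t::latch
                                               //    (what x_lock_space() provides)
  std::unique_lock inode_guard(t.inode_page);  // 5. allocation metadata page
  return 1;                                    // 6. allocate, free, or count pages
}

int main()
{
  TreeLatches t;
  return reserve_pages(t) ? 0 : 1;
}

Under this ordering, every reader of the allocation metadata takes fil_space_t::latch exclusively before touching an inode page, which is why the diff below replaces s_lock_space() with x_lock_space() at the statistics call sites.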
-rw-r--r--  storage/innobase/dict/dict0defrag_bg.cc |  2
-rw-r--r--  storage/innobase/dict/dict0stats.cc     |  4
-rw-r--r--  storage/innobase/fil/fil0crypt.cc       |  5
-rw-r--r--  storage/innobase/fsp/fsp0fsp.cc         |  2
-rw-r--r--  storage/innobase/include/mtr0mtr.h      | 16
-rw-r--r--  storage/innobase/include/mtr0types.h    |  4
-rw-r--r--  storage/innobase/mtr/mtr0mtr.cc         | 13
7 files changed, 13 insertions(+), 33 deletions(-)
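
One detail of the fil0crypt.cc hunk below: the new guard `offset % (zip_size ? zip_size : srv_page_size)` skips the fseg_page_is_allocated() call for allocation bitmap pages, which recur at every multiple of the physical page size and can never be freed. A standalone sketch of that predicate, with hypothetical names and a hard-coded page size in place of InnoDB's types:

#include <cassert>
#include <cstdint>

// Hypothetical stand-in for the check added to fil_crypt_get_page_throttle():
// allocation bitmap (extent descriptor) pages sit at every multiple of the
// physical page size and are never freed, so the key rotation scanner does
// not need to ask fseg_page_is_allocated() about them.
static bool is_allocation_bitmap_page(uint32_t page_no,
                                      uint32_t zip_size,      // 0 if uncompressed
                                      uint32_t srv_page_size) // e.g. 16384
{
  const uint32_t physical= zip_size ? zip_size : srv_page_size;
  return page_no % physical == 0;
}

int main()
{
  assert(is_allocation_bitmap_page(0, 0, 16384));     // page 0 holds metadata
  assert(is_allocation_bitmap_page(16384, 0, 16384)); // start of the next group
  assert(!is_allocation_bitmap_page(5, 0, 16384));    // ordinary data page
  return 0;
}
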
diff --git a/storage/innobase/dict/dict0defrag_bg.cc b/storage/innobase/dict/dict0defrag_bg.cc
index ea2914e52dc..bec6da8e6af 100644
--- a/storage/innobase/dict/dict0defrag_bg.cc
+++ b/storage/innobase/dict/dict0defrag_bg.cc
@@ -314,7 +314,7 @@ btr_get_size_and_reserved(
return ULINT_UNDEFINED;
}
- mtr->s_lock_space(index->table->space);
+ mtr->x_lock_space(index->table->space);
ulint n = fseg_n_reserved_pages(*root, PAGE_HEADER + PAGE_BTR_SEG_LEAF
+ root->page.frame, used, mtr);
diff --git a/storage/innobase/dict/dict0stats.cc b/storage/innobase/dict/dict0stats.cc
index 0d630583daa..7bdccd899b8 100644
--- a/storage/innobase/dict/dict0stats.cc
+++ b/storage/innobase/dict/dict0stats.cc
@@ -1490,7 +1490,7 @@ invalid:
goto invalid;
}
- mtr.s_lock_space(index->table->space);
+ mtr.x_lock_space(index->table->space);
ulint dummy, size;
index->stat_index_size
@@ -2697,7 +2697,7 @@ empty_index:
}
uint16_t root_level = btr_page_get_level(root->page.frame);
- mtr.s_lock_space(index->table->space);
+ mtr.x_lock_space(index->table->space);
ulint dummy, size;
result.index_size
= fseg_n_reserved_pages(*root, PAGE_HEADER + PAGE_BTR_SEG_LEAF
diff --git a/storage/innobase/fil/fil0crypt.cc b/storage/innobase/fil/fil0crypt.cc
index 7410986c441..fe75b7d58fa 100644
--- a/storage/innobase/fil/fil0crypt.cc
+++ b/storage/innobase/fil/fil0crypt.cc
@@ -1677,8 +1677,9 @@ fil_crypt_get_page_throttle(
return NULL;
}
- if (DB_SUCCESS_LOCKED_REC
- != fseg_page_is_allocated(space, state->offset)) {
+ if (offset % (zip_size ? zip_size : srv_page_size)
+ && DB_SUCCESS_LOCKED_REC
+ != fseg_page_is_allocated(space, offset)) {
/* page is already freed */
return NULL;
}
diff --git a/storage/innobase/fsp/fsp0fsp.cc b/storage/innobase/fsp/fsp0fsp.cc
index e9f3106feb0..514083d35cc 100644
--- a/storage/innobase/fsp/fsp0fsp.cc
+++ b/storage/innobase/fsp/fsp0fsp.cc
@@ -2653,7 +2653,7 @@ dberr_t fseg_page_is_allocated(fil_space_t *space, unsigned page)
mtr.start();
if (!space->is_owner())
- mtr.s_lock_space(space);
+ mtr.x_lock_space(space);
if (page >= space->free_limit || page >= space->size_in_header);
else if (const buf_block_t *b=
diff --git a/storage/innobase/include/mtr0mtr.h b/storage/innobase/include/mtr0mtr.h
index 140cd3dc1b6..f3fe1841b2e 100644
--- a/storage/innobase/include/mtr0mtr.h
+++ b/storage/innobase/include/mtr0mtr.h
@@ -282,17 +282,6 @@ struct mtr_t {
memo_push(lock, MTR_MEMO_SX_LOCK);
}
- /** Acquire a tablespace S-latch.
- @param space tablespace */
- void s_lock_space(fil_space_t *space)
- {
- ut_ad(space->purpose == FIL_TYPE_TEMPORARY ||
- space->purpose == FIL_TYPE_IMPORT ||
- space->purpose == FIL_TYPE_TABLESPACE);
- memo_push(space, MTR_MEMO_SPACE_S_LOCK);
- space->s_lock();
- }
-
/** Acquire an exclusive tablespace latch.
@param space tablespace */
void x_lock_space(fil_space_t *space);
@@ -357,9 +346,8 @@ public:
/** Check if we are holding tablespace latch
@param space tablespace to search for
- @param shared whether to look for shared latch, instead of exclusive
@return whether space.latch is being held */
- bool memo_contains(const fil_space_t& space, bool shared= false) const
+ bool memo_contains(const fil_space_t& space) const
MY_ATTRIBUTE((warn_unused_result));
#ifdef UNIV_DEBUG
/** Check if we are holding an rw-latch in this mini-transaction
@@ -412,7 +400,7 @@ public:
break;
case MTR_MEMO_MODIFY:
case MTR_MEMO_S_LOCK: case MTR_MEMO_X_LOCK: case MTR_MEMO_SX_LOCK:
- case MTR_MEMO_SPACE_X_LOCK: case MTR_MEMO_SPACE_S_LOCK:
+ case MTR_MEMO_SPACE_X_LOCK:
ut_ad("invalid type" == 0);
}
#endif
diff --git a/storage/innobase/include/mtr0types.h b/storage/innobase/include/mtr0types.h
index 7acc255da36..465c20fe7d2 100644
--- a/storage/innobase/include/mtr0types.h
+++ b/storage/innobase/include/mtr0types.h
@@ -337,8 +337,6 @@ enum mtr_memo_type_t {
MTR_MEMO_SX_LOCK = RW_SX_LATCH << 5,
/** wr_lock() on fil_space_t::latch */
- MTR_MEMO_SPACE_X_LOCK = MTR_MEMO_SX_LOCK << 1,
- /** rd_lock() on fil_space_t::latch */
- MTR_MEMO_SPACE_S_LOCK = MTR_MEMO_SX_LOCK << 2
+ MTR_MEMO_SPACE_X_LOCK = MTR_MEMO_SX_LOCK << 1
};
#endif /* !UNIV_INNOCHECKSUM */
diff --git a/storage/innobase/mtr/mtr0mtr.cc b/storage/innobase/mtr/mtr0mtr.cc
index 45a1424e572..2c004cb0aa6 100644
--- a/storage/innobase/mtr/mtr0mtr.cc
+++ b/storage/innobase/mtr/mtr0mtr.cc
@@ -55,9 +55,6 @@ void mtr_memo_slot_t::release() const
static_cast<fil_space_t*>(object)->set_committed_size();
static_cast<fil_space_t*>(object)->x_unlock();
break;
- case MTR_MEMO_SPACE_S_LOCK:
- static_cast<fil_space_t*>(object)->s_unlock();
- break;
default:
buf_page_t *bpage= static_cast<buf_page_t*>(object);
ut_d(const auto s=)
@@ -912,18 +909,14 @@ bool mtr_t::have_u_or_x_latch(const buf_block_t &block) const
/** Check if we are holding exclusive tablespace latch
@param space tablespace to search for
-@param shared whether to look for shared latch, instead of exclusive
@return whether space.latch is being held */
-bool mtr_t::memo_contains(const fil_space_t& space, bool shared) const
+bool mtr_t::memo_contains(const fil_space_t& space) const
{
- const mtr_memo_type_t type= shared
- ? MTR_MEMO_SPACE_S_LOCK : MTR_MEMO_SPACE_X_LOCK;
-
for (const mtr_memo_slot_t &slot : m_memo)
{
- if (slot.object == &space && slot.type == type)
+ if (slot.object == &space && slot.type == MTR_MEMO_SPACE_X_LOCK)
{
- ut_ad(shared || space.is_owner());
+ ut_ad(space.is_owner());
return true;
}
}