| author    | Marko Mäkelä <marko.makela@mariadb.com> | 2020-11-24 15:41:03 +0200 |
| committer | Marko Mäkelä <marko.makela@mariadb.com> | 2020-11-24 15:41:03 +0200 |
| commit    | c561f9e6e857dbae905c3d01db9ffa5b2ae2e6db (patch) | |
| tree      | 508626dbb5380f3b1a31191e17bc710cd392ce7b /storage/innobase | |
| parent    | f87e4b4e4d2ef25a8f98b4ad8ce1ce2407262940 (diff) | |
| download  | mariadb-git-c561f9e6e857dbae905c3d01db9ffa5b2ae2e6db.tar.gz | |
MDEV-24167: Use lightweight srw_lock for btr_search_latch
Many InnoDB rw-locks unnecessarily depend on the complex
InnoDB rw_lock_t implementation, which supports the SX lock mode
as well as recursive acquisition of X or SX locks.
One of them is the set of adaptive hash index search latches,
instrumented as btr_search_latch in PERFORMANCE_SCHEMA.
Let us introduce a simpler lock for those in order to
reduce overhead.
srw_lock: A simple read-write lock that does not support recursion.
On Microsoft Windows, this wraps SRWLOCK, only adding
runtime overhead if PERFORMANCE_SCHEMA is enabled.
On Linux (all architectures), this is implemented with
std::atomic<uint32_t> and the futex system call.
On other platforms, we will wrap mysql_rwlock_t with
zero runtime overhead.
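
For orientation, here is a minimal usage sketch of the new class as the call sites in this patch use it. The member names (init, rd_lock, rd_unlock, wr_lock, wr_unlock, destroy) and btr_search_latch_key come from the patch below; example_latch and example_usage() are invented for illustration, and the snippet assumes an InnoDB build with UNIV_PFS_RWLOCK so that the key is declared.

```cpp
#include "srw_lock.h"

/* Hypothetical stand-alone example; in the patch the actual instances live in
   btr_search_sys_t::partition and protect the AHI hash tables. */
static srw_lock example_latch;

void example_usage()
{
  /* PERFORMANCE_SCHEMA key, as in btr_search_sys_t::partition::init() below */
  example_latch.init(btr_search_latch_key);

  example_latch.rd_lock();      /* shared access; replaces rw_lock_s_lock() */
  /* ... read the data structure protected by the latch ... */
  example_latch.rd_unlock();    /* replaces rw_lock_s_unlock() */

  example_latch.wr_lock();      /* exclusive access; replaces rw_lock_x_lock() */
  /* ... modify the protected data structure ... */
  example_latch.wr_unlock();    /* replaces rw_lock_x_unlock() */

  example_latch.destroy();      /* replaces rw_lock_free() */
}
```

Unlike rw_lock_t, the new latch supports neither recursion nor the SX mode, which is what keeps the lock word down to a single 32-bit atomic on Linux.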
The PERFORMANCE_SCHEMA instrumentation differs
from that of InnoDB rw_lock_t in that we will only invoke
PSI_RWLOCK_CALL(start_rwlock_wrwait) or
PSI_RWLOCK_CALL(start_rwlock_rdwait)
if there is an actual conflict.
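
Concretely, the read-lock path first tries an uncontended acquisition and only falls back to the instrumented, blocking wait when that fails. The following is a simplified, non-verbatim rendering of the non-Windows srw_lock::rd_lock() from include/srw_lock.h below, assuming a UNIV_PFS_RWLOCK build; pfs_psi, read_trylock() and read_lock() are the member names used in that file.

```cpp
void rd_lock()                            /* member of srw_lock; simplified sketch */
{
  uint32_t l;
  if (read_trylock(l))                    /* no conflict: no PSI call at all */
    return;
  if (pfs_psi)                            /* P_S instrumentation attached to this lock */
  {
    PSI_rwlock_locker_state state;
    PSI_rwlock_locker *locker= PSI_RWLOCK_CALL(start_rwlock_rdwait)
      (&state, pfs_psi, PSI_RWLOCK_READLOCK, __FILE__, __LINE__);
    read_lock(l);                         /* blocking wait: the only part that is timed */
    if (locker)
      PSI_RWLOCK_CALL(end_rwlock_rdwait)(locker, 0);
    return;
  }
  read_lock(l);                           /* conflict, but instrumentation disabled */
}
```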
Diffstat (limited to 'storage/innobase')
-rw-r--r-- | storage/innobase/CMakeLists.txt | 6
-rw-r--r-- | storage/innobase/btr/btr0cur.cc | 17
-rw-r--r-- | storage/innobase/btr/btr0sea.cc | 103
-rw-r--r-- | storage/innobase/buf/buf0buf.cc | 9
-rw-r--r-- | storage/innobase/handler/ha_innodb.cc | 4
-rw-r--r-- | storage/innobase/include/btr0cur.h | 8
-rw-r--r-- | storage/innobase/include/btr0pcur.h | 5
-rw-r--r-- | storage/innobase/include/btr0pcur.ic | 5
-rw-r--r-- | storage/innobase/include/btr0sea.h | 49
-rw-r--r-- | storage/innobase/include/btr0sea.ic | 53
-rw-r--r-- | storage/innobase/include/rw_lock.h | 34
-rw-r--r-- | storage/innobase/include/srw_lock.h | 145
-rw-r--r-- | storage/innobase/include/sync0sync.h | 1
-rw-r--r-- | storage/innobase/include/sync0types.h | 3
-rw-r--r-- | storage/innobase/row/row0sel.cc | 10
-rw-r--r-- | storage/innobase/srv/srv0srv.cc | 8
-rw-r--r-- | storage/innobase/sync/srw_lock_futex.cc | 107
-rw-r--r-- | storage/innobase/sync/sync0debug.cc | 4
-rw-r--r-- | storage/innobase/sync/sync0sync.cc | 1
19 files changed, 379 insertions, 193 deletions
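
On Linux, the new sync/srw_lock_futex.cc below parks waiting threads with the futex system call, keyed on the 32-bit lock word. As background for reading that file, here is a minimal, self-contained sketch of the two futex operations it relies on; the futex_wait/futex_wake wrapper names are invented for illustration and do not appear in the patch.

```cpp
#include <atomic>
#include <cstdint>
#include <linux/futex.h>
#include <sys/syscall.h>
#include <unistd.h>

/* Sleep while *word still holds the value `expected`. The kernel checks the
   value atomically, so a wakeup cannot be lost; spurious wakeups are possible,
   which is why srw_lock always re-checks the lock word in a loop. */
static void futex_wait(std::atomic<uint32_t> *word, uint32_t expected)
{
  syscall(SYS_futex, word, FUTEX_WAIT_PRIVATE, expected, nullptr, nullptr, 0);
}

/* Wake up to n threads sleeping on the word: 1 to hand the lock to a single
   waiting writer, INT_MAX (as in wr_unlock below) to release every waiter. */
static void futex_wake(std::atomic<uint32_t> *word, int n)
{
  syscall(SYS_futex, word, FUTEX_WAKE_PRIVATE, n, nullptr, nullptr, 0);
}
```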
diff --git a/storage/innobase/CMakeLists.txt b/storage/innobase/CMakeLists.txt index b7e7fb93bf3..67b5b856e0f 100644 --- a/storage/innobase/CMakeLists.txt +++ b/storage/innobase/CMakeLists.txt @@ -350,6 +350,12 @@ SET(INNOBASE_SOURCES ut/ut0vec.cc ut/ut0wqueue.cc) +IF (UNIX) + IF(CMAKE_SYSTEM_NAME STREQUAL "Linux") + SET(INNOBASE_SOURCES ${INNOBASE_SOURCES} "sync/srw_lock_futex.cc") + ENDIF() +ENDIF() + MYSQL_ADD_PLUGIN(innobase ${INNOBASE_SOURCES} STORAGE_ENGINE MODULE_OUTPUT_NAME ha_innodb DEFAULT RECOMPILE_FOR_EMBEDDED diff --git a/storage/innobase/btr/btr0cur.cc b/storage/innobase/btr/btr0cur.cc index 846d8ecfd7e..aee237888ed 100644 --- a/storage/innobase/btr/btr0cur.cc +++ b/storage/innobase/btr/btr0cur.cc @@ -1245,9 +1245,8 @@ btr_cur_search_to_nth_level_func( btr_cur_t* cursor, /*!< in/out: tree cursor; the cursor page is s- or x-latched, but see also above! */ #ifdef BTR_CUR_HASH_ADAPT - rw_lock_t* ahi_latch, - /*!< in: currently held btr_search_latch - (in RW_S_LATCH mode), or NULL */ + srw_lock* ahi_latch, + /*!< in: currently held AHI rdlock, or NULL */ #endif /* BTR_CUR_HASH_ADAPT */ const char* file, /*!< in: file name */ unsigned line, /*!< in: line where called */ @@ -1449,7 +1448,7 @@ btr_cur_search_to_nth_level_func( #ifdef BTR_CUR_HASH_ADAPT if (ahi_latch) { /* Release possible search latch to obey latching order */ - rw_lock_s_unlock(ahi_latch); + ahi_latch->rd_unlock(); } #endif /* BTR_CUR_HASH_ADAPT */ @@ -2502,7 +2501,7 @@ func_exit: #ifdef BTR_CUR_HASH_ADAPT if (ahi_latch) { - rw_lock_s_lock(ahi_latch); + ahi_latch->rd_lock(); } #endif /* BTR_CUR_HASH_ADAPT */ @@ -3584,7 +3583,7 @@ fail_err: ut_ad(index->is_instant()); ut_ad(flags == BTR_NO_LOCKING_FLAG); } else { - rw_lock_t* ahi_latch = btr_search_sys.get_latch(*index); + srw_lock* ahi_latch = btr_search_sys.get_latch(*index); if (!reorg && cursor->flag == BTR_CUR_HASH) { btr_search_update_hash_node_on_insert( cursor, ahi_latch); @@ -4296,7 +4295,7 @@ btr_cur_update_in_place( #ifdef BTR_CUR_HASH_ADAPT { - rw_lock_t* ahi_latch = block->index + srw_lock* ahi_latch = block->index ? btr_search_sys.get_latch(*index) : NULL; if (ahi_latch) { /* TO DO: Can we skip this if none of the fields @@ -4316,7 +4315,7 @@ btr_cur_update_in_place( btr_search_update_hash_on_delete(cursor); } - rw_lock_x_lock(ahi_latch); + ahi_latch->wr_lock(); } assert_block_ahi_valid(block); @@ -4327,7 +4326,7 @@ btr_cur_update_in_place( #ifdef BTR_CUR_HASH_ADAPT if (ahi_latch) { - rw_lock_x_unlock(ahi_latch); + ahi_latch->wr_unlock(); } } #endif /* BTR_CUR_HASH_ADAPT */ diff --git a/storage/innobase/btr/btr0sea.cc b/storage/innobase/btr/btr0sea.cc index 68d946aa79d..78af9f50d07 100644 --- a/storage/innobase/btr/btr0sea.cc +++ b/storage/innobase/btr/btr0sea.cc @@ -55,6 +55,10 @@ ulint btr_search_n_succ = 0; ulint btr_search_n_hash_fail = 0; #endif /* UNIV_SEARCH_PERF_STAT */ +#ifdef UNIV_PFS_RWLOCK +mysql_pfs_key_t btr_search_latch_key; +#endif /* UNIV_PFS_RWLOCK */ + /** The adaptive hash index */ btr_search_sys_t btr_search_sys; @@ -177,14 +181,14 @@ static void btr_search_check_free_space_in_heap(const dict_index_t *index) buf_block_t *block= buf_block_alloc(); auto part= btr_search_sys.get_part(*index); - rw_lock_x_lock(&part->latch); + part->latch.wr_lock(); if (!btr_search_enabled || part->heap->free_block) buf_block_free(block); else part->heap->free_block= block; - rw_lock_x_unlock(&part->latch); + part->latch.wr_unlock(); } /** Set index->ref_count = 0 on all indexes of a table. 
@@ -298,9 +302,6 @@ btr_search_info_update_hash( dict_index_t* index = cursor->index; int cmp; - ut_ad(!btr_search_own_any(RW_LOCK_S)); - ut_ad(!btr_search_own_any(RW_LOCK_X)); - if (dict_index_is_ibuf(index)) { /* So many deletes are performed on an insert buffer tree that we do not consider a hash index useful on it: */ @@ -405,7 +406,6 @@ static bool btr_search_update_block_hash_info(btr_search_t* info, buf_block_t* block) { - ut_ad(!btr_search_own_any()); ut_ad(rw_lock_own_flagged(&block->lock, RW_LOCK_FLAG_X | RW_LOCK_FLAG_S)); @@ -711,7 +711,7 @@ btr_search_update_hash_ref( ut_ad(index == cursor->index); ut_ad(!dict_index_is_ibuf(index)); auto part = btr_search_sys.get_part(*index); - rw_lock_x_lock(&part->latch); + part->latch.wr_lock(); ut_ad(!block->index || block->index == index); if (block->index @@ -745,7 +745,7 @@ btr_search_update_hash_ref( } func_exit: - rw_lock_x_unlock(&part->latch); + part->latch.wr_unlock(); } /** Checks if a guessed position for a tree cursor is right. Note that if @@ -900,7 +900,6 @@ btr_search_failure(btr_search_t* info, btr_cur_t* cursor) /** Clear the adaptive hash index on all pages in the buffer pool. */ inline void buf_pool_t::clear_hash_index() { - ut_ad(btr_search_own_all(RW_LOCK_X)); ut_ad(!resizing); ut_ad(!btr_search_enabled); @@ -915,7 +914,7 @@ inline void buf_pool_t::clear_hash_index() assert_block_ahi_valid(block); /* We can clear block->index and block->n_pointers when - btr_search_own_all(RW_LOCK_X); see the comments in buf0buf.h */ + holding all AHI latches exclusively; see the comments in buf0buf.h */ if (!index) { @@ -1005,15 +1004,13 @@ btr_search_guess_on_hash( ulint mode, ulint latch_mode, btr_cur_t* cursor, - rw_lock_t* ahi_latch, + srw_lock* ahi_latch, mtr_t* mtr) { ulint fold; index_id_t index_id; ut_ad(mtr->is_active()); - ut_ad(!ahi_latch || rw_lock_own_flagged( - ahi_latch, RW_LOCK_FLAG_X | RW_LOCK_FLAG_S)); if (!btr_search_enabled) { return false; @@ -1022,8 +1019,7 @@ btr_search_guess_on_hash( ut_ad(!index->is_ibuf()); ut_ad(!ahi_latch || ahi_latch == &btr_search_sys.get_part(*index)->latch); - ut_ad((latch_mode == BTR_SEARCH_LEAF) - || (latch_mode == BTR_MODIFY_LEAF)); + ut_ad(latch_mode == BTR_SEARCH_LEAF || latch_mode == BTR_MODIFY_LEAF); compile_time_assert(ulint{BTR_SEARCH_LEAF} == ulint{RW_S_LATCH}); compile_time_assert(ulint{BTR_MODIFY_LEAF} == ulint{RW_X_LATCH}); @@ -1058,14 +1054,13 @@ btr_search_guess_on_hash( const rec_t* rec; if (!ahi_latch) { - rw_lock_s_lock(&part->latch); + part->latch.rd_lock(); if (!btr_search_enabled) { goto fail; } } else { ut_ad(btr_search_enabled); - ut_ad(rw_lock_own(ahi_latch, RW_LOCK_S)); } rec = static_cast<const rec_t*>( @@ -1074,7 +1069,7 @@ btr_search_guess_on_hash( if (!rec) { if (!ahi_latch) { fail: - rw_lock_s_unlock(&part->latch); + part->latch.rd_unlock(); } btr_search_failure(info, cursor); @@ -1127,7 +1122,7 @@ got_no_latch: buf_pool.stat.n_page_gets++; - rw_lock_s_unlock(&part->latch); + part->latch.rd_unlock(); buf_block_dbg_add_level(block, SYNC_TREE_NODE_FROM_HASH); if (UNIV_UNLIKELY(fail)) { @@ -1249,8 +1244,6 @@ retry: /* This debug check uses a dirty read that could theoretically cause false positives while buf_pool.clear_hash_index() is executing. 
*/ assert_block_ahi_valid(block); - ut_ad(!btr_search_own_any(RW_LOCK_S)); - ut_ad(!btr_search_own_any(RW_LOCK_X)); if (!block->index) { return; @@ -1273,11 +1266,11 @@ retry: auto part = btr_search_sys.get_part(index_id, block->page.id().space()); - rw_lock_s_lock(&part->latch); + part->latch.rd_lock(); assert_block_ahi_valid(block); if (!block->index || !btr_search_enabled) { - rw_lock_s_unlock(&part->latch); + part->latch.rd_unlock(); return; } @@ -1297,7 +1290,7 @@ retry: /* NOTE: The AHI fields of block must not be accessed after releasing search latch, as the index page might only be s-latched! */ - rw_lock_s_unlock(&part->latch); + part->latch.rd_unlock(); ut_a(n_fields > 0 || n_bytes > 0); @@ -1348,7 +1341,7 @@ next_rec: mem_heap_free(heap); } - rw_lock_x_lock(&part->latch); + part->latch.wr_lock(); if (UNIV_UNLIKELY(!block->index)) { /* Someone else has meanwhile dropped the hash index */ @@ -1364,7 +1357,7 @@ next_rec: /* Someone else has meanwhile built a new hash index on the page, with different parameters */ - rw_lock_x_unlock(&part->latch); + part->latch.wr_unlock(); ut_free(folds); goto retry; @@ -1391,7 +1384,7 @@ next_rec: cleanup: assert_block_ahi_valid(block); - rw_lock_x_unlock(&part->latch); + part->latch.wr_unlock(); ut_free(folds); } @@ -1454,7 +1447,7 @@ void btr_search_build_page_hash_index( dict_index_t* index, buf_block_t* block, - rw_lock_t* ahi_latch, + srw_lock* ahi_latch, uint16_t n_fields, uint16_t n_bytes, bool left_side) @@ -1489,7 +1482,7 @@ btr_search_build_page_hash_index( RW_LOCK_FLAG_X | RW_LOCK_FLAG_S)); ut_ad(block->page.id().page_no() >= 3); - rw_lock_s_lock(ahi_latch); + ahi_latch->rd_lock(); const bool enabled = btr_search_enabled; const bool rebuild = enabled && block->index @@ -1497,7 +1490,7 @@ btr_search_build_page_hash_index( || block->curr_n_bytes != n_bytes || block->curr_left_side != left_side); - rw_lock_s_unlock(ahi_latch); + ahi_latch->rd_unlock(); if (!enabled) { return; @@ -1603,7 +1596,7 @@ btr_search_build_page_hash_index( btr_search_check_free_space_in_heap(index); - rw_lock_x_lock(ahi_latch); + ahi_latch->wr_lock(); if (!btr_search_enabled) { goto exit_func; @@ -1642,7 +1635,7 @@ btr_search_build_page_hash_index( MONITOR_INC_VALUE(MONITOR_ADAPTIVE_HASH_ROW_ADDED, n_cached); exit_func: assert_block_ahi_valid(block); - rw_lock_x_unlock(ahi_latch); + ahi_latch->wr_unlock(); ut_free(folds); ut_free(recs); @@ -1657,11 +1650,8 @@ exit_func: void btr_search_info_update_slow(btr_search_t* info, btr_cur_t* cursor) { - rw_lock_t* ahi_latch = &btr_search_sys.get_part(*cursor->index) + srw_lock* ahi_latch = &btr_search_sys.get_part(*cursor->index) ->latch; - ut_ad(!rw_lock_own_flagged(ahi_latch, - RW_LOCK_FLAG_X | RW_LOCK_FLAG_S)); - buf_block_t* block = btr_cur_get_block(cursor); /* NOTE that the following two function calls do NOT protect @@ -1727,7 +1717,7 @@ btr_search_move_or_delete_hash_entries( assert_block_ahi_valid(block); assert_block_ahi_valid(new_block); - rw_lock_t* ahi_latch = index + srw_lock* ahi_latch = index ? 
&btr_search_sys.get_part(*index)->latch : nullptr; @@ -1740,7 +1730,7 @@ btr_search_move_or_delete_hash_entries( return; } - rw_lock_s_lock(ahi_latch); + ahi_latch->rd_lock(); if (block->index) { uint16_t n_fields = block->curr_n_fields; @@ -1751,7 +1741,7 @@ btr_search_move_or_delete_hash_entries( new_block->n_bytes = block->curr_n_bytes; new_block->left_side = left_side; - rw_lock_s_unlock(ahi_latch); + ahi_latch->rd_unlock(); ut_a(n_fields > 0 || n_bytes > 0); @@ -1764,7 +1754,7 @@ btr_search_move_or_delete_hash_entries( return; } - rw_lock_s_unlock(ahi_latch); + ahi_latch->rd_unlock(); } /** Updates the page hash index when a single record is deleted from a page. @@ -1817,7 +1807,7 @@ void btr_search_update_hash_on_delete(btr_cur_t* cursor) auto part = btr_search_sys.get_part(*index); - rw_lock_x_lock(&part->latch); + part->latch.wr_lock(); assert_block_ahi_valid(block); if (block->index && btr_search_enabled) { @@ -1833,7 +1823,7 @@ void btr_search_update_hash_on_delete(btr_cur_t* cursor) assert_block_ahi_valid(block); } - rw_lock_x_unlock(&part->latch); + part->latch.wr_unlock(); } /** Updates the page hash index when a single record is inserted on a page. @@ -1841,16 +1831,14 @@ void btr_search_update_hash_on_delete(btr_cur_t* cursor) using btr_cur_search_, and the new record has been inserted next to the cursor. @param[in] ahi_latch the adaptive hash index latch */ -void -btr_search_update_hash_node_on_insert(btr_cur_t* cursor, rw_lock_t* ahi_latch) +void btr_search_update_hash_node_on_insert(btr_cur_t *cursor, + srw_lock *ahi_latch) { buf_block_t* block; dict_index_t* index; rec_t* rec; ut_ad(ahi_latch == &btr_search_sys.get_part(*cursor->index)->latch); - ut_ad(!btr_search_own_any(RW_LOCK_S)); - ut_ad(!btr_search_own_any(RW_LOCK_X)); #ifdef MYSQL_INDEX_DISABLE_AHI if (cursor->index->disable_ahi) return; #endif @@ -1873,7 +1861,7 @@ btr_search_update_hash_node_on_insert(btr_cur_t* cursor, rw_lock_t* ahi_latch) ut_a(cursor->index == index); ut_ad(!dict_index_is_ibuf(index)); - rw_lock_x_lock(ahi_latch); + ahi_latch->wr_lock(); if (!block->index || !btr_search_enabled) { @@ -1896,9 +1884,9 @@ btr_search_update_hash_node_on_insert(btr_cur_t* cursor, rw_lock_t* ahi_latch) func_exit: assert_block_ahi_valid(block); - rw_lock_x_unlock(ahi_latch); + ahi_latch->wr_unlock(); } else { - rw_lock_x_unlock(ahi_latch); + ahi_latch->wr_unlock(); btr_search_update_hash_on_insert(cursor, ahi_latch); } @@ -1910,8 +1898,8 @@ func_exit: and the new record has been inserted next to the cursor @param[in] ahi_latch the adaptive hash index latch */ -void -btr_search_update_hash_on_insert(btr_cur_t* cursor, rw_lock_t* ahi_latch) +void btr_search_update_hash_on_insert(btr_cur_t *cursor, + srw_lock *ahi_latch) { buf_block_t* block; dict_index_t* index; @@ -1930,8 +1918,6 @@ btr_search_update_hash_on_insert(btr_cur_t* cursor, rw_lock_t* ahi_latch) ut_ad(ahi_latch == &btr_search_sys.get_part(*cursor->index)->latch); ut_ad(page_is_leaf(btr_cur_get_page(cursor))); - ut_ad(!btr_search_own_any(RW_LOCK_S)); - ut_ad(!btr_search_own_any(RW_LOCK_X)); #ifdef MYSQL_INDEX_DISABLE_AHI if (cursor->index->disable_ahi) return; #endif @@ -1993,7 +1979,7 @@ btr_search_update_hash_on_insert(btr_cur_t* cursor, rw_lock_t* ahi_latch) } else { if (left_side) { locked = true; - rw_lock_x_lock(ahi_latch); + ahi_latch->wr_lock(); if (!btr_search_enabled || !block->index) { goto function_exit; @@ -2012,7 +1998,7 @@ btr_search_update_hash_on_insert(btr_cur_t* cursor, rw_lock_t* ahi_latch) if (!locked) { locked = true; - 
rw_lock_x_lock(ahi_latch); + ahi_latch->wr_lock(); if (!btr_search_enabled || !block->index) { goto function_exit; @@ -2037,7 +2023,7 @@ check_next_rec: if (!left_side) { if (!locked) { locked = true; - rw_lock_x_lock(ahi_latch); + ahi_latch->wr_lock(); if (!btr_search_enabled || !block->index) { goto function_exit; @@ -2057,7 +2043,7 @@ check_next_rec: if (ins_fold != next_fold) { if (!locked) { locked = true; - rw_lock_x_lock(ahi_latch); + ahi_latch->wr_lock(); if (!btr_search_enabled || !block->index) { goto function_exit; @@ -2081,9 +2067,8 @@ function_exit: mem_heap_free(heap); } if (locked) { - rw_lock_x_unlock(ahi_latch); + ahi_latch->wr_unlock(); } - ut_ad(!rw_lock_own(ahi_latch, RW_LOCK_X)); } #if defined UNIV_AHI_DEBUG || defined UNIV_DEBUG diff --git a/storage/innobase/buf/buf0buf.cc b/storage/innobase/buf/buf0buf.cc index 2ddd9f278b1..28ded2c3f37 100644 --- a/storage/innobase/buf/buf0buf.cc +++ b/storage/innobase/buf/buf0buf.cc @@ -1980,16 +1980,11 @@ inline void buf_pool_t::resize() #ifdef BTR_CUR_HASH_ADAPT /* disable AHI if needed */ - const bool btr_search_disabled = btr_search_enabled; - buf_resize_status("Disabling adaptive hash index."); btr_search_s_lock_all(); - if (btr_search_disabled) { - btr_search_s_unlock_all(); - } else { - btr_search_s_unlock_all(); - } + const bool btr_search_disabled = btr_search_enabled; + btr_search_s_unlock_all(); btr_search_disable(); diff --git a/storage/innobase/handler/ha_innodb.cc b/storage/innobase/handler/ha_innodb.cc index 31e0ac89358..8a8de28877c 100644 --- a/storage/innobase/handler/ha_innodb.cc +++ b/storage/innobase/handler/ha_innodb.cc @@ -561,7 +561,9 @@ static PSI_mutex_info all_innodb_mutexes[] = { performance schema instrumented if "UNIV_PFS_RWLOCK" is defined */ static PSI_rwlock_info all_innodb_rwlocks[] = { - PSI_RWLOCK_KEY(btr_search_latch), +# ifdef BTR_CUR_HASH_ADAPT + { &btr_search_latch_key, "btr_search_latch", 0 }, +# endif PSI_RWLOCK_KEY(dict_operation_lock), PSI_RWLOCK_KEY(fil_space_latch), PSI_RWLOCK_KEY(fts_cache_rw_lock), diff --git a/storage/innobase/include/btr0cur.h b/storage/innobase/include/btr0cur.h index 7136d726db0..46a5bf397d2 100644 --- a/storage/innobase/include/btr0cur.h +++ b/storage/innobase/include/btr0cur.h @@ -33,6 +33,9 @@ Created 10/16/1994 Heikki Tuuri #include "rem0types.h" #include "gis0type.h" #include "my_base.h" +#ifdef BTR_CUR_HASH_ADAPT +# include "srw_lock.h" +#endif /** Mode flags for btr_cur operations; these can be ORed */ enum { @@ -202,9 +205,8 @@ btr_cur_search_to_nth_level_func( btr_cur_t* cursor, /*!< in/out: tree cursor; the cursor page is s- or x-latched, but see also above! */ #ifdef BTR_CUR_HASH_ADAPT - rw_lock_t* ahi_latch, - /*!< in: currently held btr_search_latch - (in RW_S_LATCH mode), or NULL */ + srw_lock* ahi_latch, + /*!< in: currently held AHI rdlock, or NULL */ #endif /* BTR_CUR_HASH_ADAPT */ const char* file, /*!< in: file name */ unsigned line, /*!< in: line where called */ diff --git a/storage/innobase/include/btr0pcur.h b/storage/innobase/include/btr0pcur.h index bc7afbf3b67..a1f6034345d 100644 --- a/storage/innobase/include/btr0pcur.h +++ b/storage/innobase/include/btr0pcur.h @@ -137,9 +137,8 @@ btr_pcur_open_with_no_init_func( that the ahi_latch protects the record! 
*/ btr_pcur_t* cursor, /*!< in: memory buffer for persistent cursor */ #ifdef BTR_CUR_HASH_ADAPT - rw_lock_t* ahi_latch, - /*!< in: adaptive hash index latch held - by the caller, or NULL if none */ + srw_lock* ahi_latch, + /*!< in: currently held AHI rdlock, or NULL */ #endif /* BTR_CUR_HASH_ADAPT */ const char* file, /*!< in: file name */ unsigned line, /*!< in: line where called */ diff --git a/storage/innobase/include/btr0pcur.ic b/storage/innobase/include/btr0pcur.ic index d93da475a1f..c436a110757 100644 --- a/storage/innobase/include/btr0pcur.ic +++ b/storage/innobase/include/btr0pcur.ic @@ -483,9 +483,8 @@ btr_pcur_open_with_no_init_func( that the ahi_latch protects the record! */ btr_pcur_t* cursor, /*!< in: memory buffer for persistent cursor */ #ifdef BTR_CUR_HASH_ADAPT - rw_lock_t* ahi_latch, - /*!< in: adaptive hash index latch held - by the caller, or NULL if none */ + srw_lock* ahi_latch, + /*!< in: currently held AHI rdlock, or NULL */ #endif /* BTR_CUR_HASH_ADAPT */ const char* file, /*!< in: file name */ unsigned line, /*!< in: line where called */ diff --git a/storage/innobase/include/btr0sea.h b/storage/innobase/include/btr0sea.h index 1e6b667c324..c7cfb1f259c 100644 --- a/storage/innobase/include/btr0sea.h +++ b/storage/innobase/include/btr0sea.h @@ -30,7 +30,11 @@ Created 2/17/1996 Heikki Tuuri #include "dict0dict.h" #ifdef BTR_CUR_HASH_ADAPT #include "ha0ha.h" -#include "sync0sync.h" +#include "srw_lock.h" + +#ifdef UNIV_PFS_RWLOCK +extern mysql_pfs_key_t btr_search_latch_key; +#endif /* UNIV_PFS_RWLOCK */ #define btr_search_sys_create() btr_search_sys.create() #define btr_search_sys_free() btr_search_sys.free() @@ -77,7 +81,7 @@ btr_search_guess_on_hash( ulint mode, ulint latch_mode, btr_cur_t* cursor, - rw_lock_t* ahi_latch, + srw_lock* ahi_latch, mtr_t* mtr); /** Move or delete hash entries for moved records, usually in a page split. @@ -109,8 +113,8 @@ void btr_search_drop_page_hash_when_freed(const page_id_t page_id); using btr_cur_search_, and the new record has been inserted next to the cursor. @param[in] ahi_latch the adaptive hash index latch */ -void -btr_search_update_hash_node_on_insert(btr_cur_t* cursor, rw_lock_t* ahi_latch); +void btr_search_update_hash_node_on_insert(btr_cur_t *cursor, + srw_lock *ahi_latch); /** Updates the page hash index when a single record is inserted on a page. @param[in,out] cursor cursor which was positioned to the @@ -118,8 +122,8 @@ btr_search_update_hash_node_on_insert(btr_cur_t* cursor, rw_lock_t* ahi_latch); and the new record has been inserted next to the cursor @param[in] ahi_latch the adaptive hash index latch */ -void -btr_search_update_hash_on_insert(btr_cur_t* cursor, rw_lock_t* ahi_latch); +void btr_search_update_hash_on_insert(btr_cur_t *cursor, + srw_lock *ahi_latch); /** Updates the page hash index when a single record is deleted from a page. @param[in] cursor cursor which was positioned on the record to delete @@ -139,23 +143,6 @@ static inline void btr_search_x_unlock_all(); /** Lock all search latches in shared mode. */ static inline void btr_search_s_lock_all(); -#ifdef UNIV_DEBUG -/** Check if thread owns all the search latches. -@param[in] mode lock mode check -@retval true if owns all of them -@retval false if does not own some of them */ -static inline bool btr_search_own_all(ulint mode); - -/** Check if thread owns any of the search latches. 
-@param[in] mode lock mode check -@retval true if owns any of them -@retval false if owns no search latch */ -static inline bool btr_search_own_any(ulint mode); - -/** @return whether this thread holds any of the search latches */ -static inline bool btr_search_own_any(); -#endif /* UNIV_DEBUG */ - /** Unlock all search latches from shared mode. */ static inline void btr_search_s_unlock_all(); @@ -250,20 +237,20 @@ struct btr_search_sys_t struct partition { /** latches protecting hash_table */ - rw_lock_t latch; + srw_lock latch; /** mapping of dtuple_fold() to rec_t* in buf_block_t::frame */ hash_table_t table; /** memory heap for table */ mem_heap_t *heap; - char pad[(CPU_LEVEL1_DCACHE_LINESIZE - sizeof(rw_lock_t) - + char pad[(CPU_LEVEL1_DCACHE_LINESIZE - sizeof(srw_lock) - sizeof(hash_table_t) - sizeof(mem_heap_t)) & (CPU_LEVEL1_DCACHE_LINESIZE - 1)]; void init() { memset((void*) this, 0, sizeof *this); - rw_lock_create(btr_search_latch_key, &latch, SYNC_SEARCH_SYS); + latch.init(btr_search_latch_key); } void alloc(ulint hash_size) @@ -285,7 +272,7 @@ struct btr_search_sys_t void free() { - rw_lock_free(&latch); + latch.destroy(); if (heap) clear(); } @@ -309,7 +296,7 @@ struct btr_search_sys_t } /** Get the search latch for the adaptive hash index partition */ - rw_lock_t *get_latch(const dict_index_t &index) const + srw_lock *get_latch(const dict_index_t &index) const { return &get_part(index)->latch; } /** Create and initialize at startup */ @@ -354,10 +341,10 @@ inline ulint dict_index_t::n_ahi_pages() const { if (!btr_search_enabled) return 0; - rw_lock_t *latch = &btr_search_sys.get_part(*this)->latch; - rw_lock_s_lock(latch); + srw_lock *latch= &btr_search_sys.get_part(*this)->latch; + latch->rd_lock(); ulint ref_count= search_info->ref_count; - rw_lock_s_unlock(latch); + latch->rd_unlock(); return ref_count; } diff --git a/storage/innobase/include/btr0sea.ic b/storage/innobase/include/btr0sea.ic index 40eb5d86ead..f0d35203c3a 100644 --- a/storage/innobase/include/btr0sea.ic +++ b/storage/innobase/include/btr0sea.ic @@ -59,9 +59,6 @@ btr_search_info_update( dict_index_t* index, /*!< in: index of the cursor */ btr_cur_t* cursor) /*!< in: cursor which was just positioned */ { - ut_ad(!btr_search_own_any(RW_LOCK_S)); - ut_ad(!btr_search_own_any(RW_LOCK_X)); - if (dict_index_is_spatial(index) || !btr_search_enabled) { return; } @@ -88,7 +85,7 @@ btr_search_info_update( static inline void btr_search_x_lock_all() { for (ulint i = 0; i < btr_ahi_parts; ++i) { - rw_lock_x_lock(&btr_search_sys.parts[i].latch); + btr_search_sys.parts[i].latch.wr_lock(); } } @@ -96,7 +93,7 @@ static inline void btr_search_x_lock_all() static inline void btr_search_x_unlock_all() { for (ulint i = 0; i < btr_ahi_parts; ++i) { - rw_lock_x_unlock(&btr_search_sys.parts[i].latch); + btr_search_sys.parts[i].latch.wr_unlock(); } } @@ -104,7 +101,7 @@ static inline void btr_search_x_unlock_all() static inline void btr_search_s_lock_all() { for (ulint i = 0; i < btr_ahi_parts; ++i) { - rw_lock_s_lock(&btr_search_sys.parts[i].latch); + btr_search_sys.parts[i].latch.rd_lock(); } } @@ -112,49 +109,7 @@ static inline void btr_search_s_lock_all() static inline void btr_search_s_unlock_all() { for (ulint i = 0; i < btr_ahi_parts; ++i) { - rw_lock_s_unlock(&btr_search_sys.parts[i].latch); - } -} - -#ifdef UNIV_DEBUG -/** Check if thread owns all the search latches. 
-@param[in] mode lock mode check -@retval true if owns all of them -@retval false if does not own some of them */ -static inline bool btr_search_own_all(ulint mode) -{ - for (ulint i = 0; i < btr_ahi_parts; ++i) { - if (!rw_lock_own(&btr_search_sys.parts[i].latch, mode)) { - return(false); - } - } - return(true); -} - -/** Check if thread owns any of the search latches. -@param[in] mode lock mode check -@retval true if owns any of them -@retval false if owns no search latch */ -static inline bool btr_search_own_any(ulint mode) -{ - for (ulint i = 0; i < btr_ahi_parts; ++i) { - if (rw_lock_own(&btr_search_sys.parts[i].latch, mode)) { - return(true); - } - } - return(false); -} - -/** @return whether this thread holds any of the search latches */ -static inline bool btr_search_own_any() -{ - for (ulint i = btr_ahi_parts; i--; ) { - if (rw_lock_own_flagged(&btr_search_sys.parts[i].latch, - RW_LOCK_FLAG_X | RW_LOCK_FLAG_S)) { - return true; - } + btr_search_sys.parts[i].latch.rd_unlock(); } - return false; } -#endif /* UNIV_DEBUG */ #endif /* BTR_CUR_HASH_ADAPT */ diff --git a/storage/innobase/include/rw_lock.h b/storage/innobase/include/rw_lock.h index 9fcafacc426..1388093dc25 100644 --- a/storage/innobase/include/rw_lock.h +++ b/storage/innobase/include/rw_lock.h @@ -36,9 +36,19 @@ protected: /** Flag to indicate that write_lock() or write_lock_wait() is pending */ static constexpr uint32_t WRITER_PENDING= WRITER | WRITER_WAITING; - /** Start waiting for an exclusive lock. */ - void write_lock_wait_start() - { lock.fetch_or(WRITER_WAITING, std::memory_order_relaxed); } + /** Start waiting for an exclusive lock. + @return current value of the lock word */ + uint32_t write_lock_wait_start() + { return lock.fetch_or(WRITER_WAITING, std::memory_order_relaxed); } + /** Wait for an exclusive lock. + @param l the value of the lock word + @return whether the exclusive lock was acquired */ + bool write_lock_wait_try(uint32_t &l) + { + l= WRITER_WAITING; + return lock.compare_exchange_strong(l, WRITER, std::memory_order_acquire, + std::memory_order_relaxed); + } /** Try to acquire a shared lock. @param l the value of the lock word @return whether the lock was acquired */ @@ -58,9 +68,8 @@ protected: @return whether the exclusive lock was acquired */ bool write_lock_poll() { - auto l= WRITER_WAITING; - if (lock.compare_exchange_strong(l, WRITER, std::memory_order_acquire, - std::memory_order_relaxed)) + uint32_t l; + if (write_lock_wait_try(l)) return true; if (!(l & WRITER_WAITING)) /* write_lock() must have succeeded for another thread */ @@ -72,12 +81,14 @@ public: /** Default constructor */ rw_lock() : lock(UNLOCKED) {} - /** Release a shared lock */ - void read_unlock() + /** Release a shared lock. 
+ @return whether any writers may have to be woken up */ + bool read_unlock() { - IF_DBUG_ASSERT(auto l=,) lock.fetch_sub(1, std::memory_order_release); - DBUG_ASSERT(l & ~WRITER_PENDING); /* at least one read lock */ + auto l= lock.fetch_sub(1, std::memory_order_release); + DBUG_ASSERT(~WRITER_PENDING & l); /* at least one read lock */ DBUG_ASSERT(!(l & WRITER)); /* no write lock must have existed */ + return (~WRITER_PENDING & l) == 1; } /** Release an exclusive lock */ void write_unlock() @@ -106,6 +117,9 @@ public: auto l= lock.load(std::memory_order_relaxed); return (l & ~WRITER_PENDING) && !(l & WRITER); } + /** @return whether any lock is being held or waited for by any thread */ + bool is_locked_or_waiting() const + { return lock.load(std::memory_order_relaxed) != 0; } /** @return whether any lock is being held by any thread */ bool is_locked() const { return (lock.load(std::memory_order_relaxed) & ~WRITER_WAITING) != 0; } diff --git a/storage/innobase/include/srw_lock.h b/storage/innobase/include/srw_lock.h new file mode 100644 index 00000000000..cf7f9c50a21 --- /dev/null +++ b/storage/innobase/include/srw_lock.h @@ -0,0 +1,145 @@ +/***************************************************************************** + +Copyright (c) 2020, MariaDB Corporation. + +This program is free software; you can redistribute it and/or modify it under +the terms of the GNU General Public License as published by the Free Software +Foundation; version 2 of the License. + +This program is distributed in the hope that it will be useful, but WITHOUT +ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. + +You should have received a copy of the GNU General Public License along with +this program; if not, write to the Free Software Foundation, Inc., +51 Franklin Street, Fifth Floor, Boston, MA 02110-1335 USA + +*****************************************************************************/ + +#pragma once +#include "univ.i" + +#if 0 // defined SAFE_MUTEX +# define SRW_LOCK_DUMMY /* Use mysql_rwlock_t for debugging purposes */ +#endif + +#if defined SRW_LOCK_DUMMY || (!defined _WIN32 && !defined __linux__) +#else +# ifdef _WIN32 +# include <windows.h> +# else +# include "rw_lock.h" +# endif +#endif + +class srw_lock final +#if defined __linux__ && !defined SRW_LOCK_DUMMY + : protected rw_lock +#endif +{ +#if defined SRW_LOCK_DUMMY || (!defined _WIN32 && !defined __linux__) + mysql_rwlock_t lock; +public: + void init(mysql_pfs_key_t key) { mysql_rwlock_init(key, &lock); } + void destroy() { mysql_rwlock_destroy(&lock); } + void rd_lock() { mysql_rwlock_rdlock(&lock); } + void rd_unlock() { mysql_rwlock_unlock(&lock); } + void wr_lock() { mysql_rwlock_wrlock(&lock); } + void wr_unlock() { mysql_rwlock_unlock(&lock); } +#else +# ifdef UNIV_PFS_RWLOCK + PSI_rwlock *pfs_psi; +# endif +# ifdef _WIN32 + SRWLOCK lock; + bool read_trylock() { return TryAcquireSRWLockShared(&lock); } + bool write_trylock() { return TryAcquireSRWLockExclusive(&lock); } + void read_lock() { AcquireSRWLockShared(&lock); } + void write_lock() { AcquireSRWLockExclusive(&lock); } +# else + /** @return pointer to the lock word */ + rw_lock *word() { return static_cast<rw_lock*>(this); } + /** Wait for a read lock. 
+ @param l lock word from a failed read_trylock() */ + void read_lock(uint32_t l); + /** Wait for a write lock after a failed write_trylock() */ + void write_lock(); +# endif + +public: + void init(mysql_pfs_key_t key) + { +# ifdef UNIV_PFS_RWLOCK + pfs_psi= PSI_RWLOCK_CALL(init_rwlock)(key, this); +# endif + IF_WIN(lock= SRWLOCK_INIT, static_assert(4 == sizeof(rw_lock), "ABI")); + } + void destroy() + { +# ifdef UNIV_PFS_RWLOCK + if (pfs_psi) + { + PSI_RWLOCK_CALL(destroy_rwlock)(pfs_psi); + pfs_psi= nullptr; + } +# endif + DBUG_ASSERT(!is_locked_or_waiting()); + } + void rd_lock() + { + IF_WIN(, uint32_t l); +# ifdef UNIV_PFS_RWLOCK + if (read_trylock(IF_WIN(, l))) + return; + if (pfs_psi) + { + PSI_rwlock_locker_state state; + PSI_rwlock_locker *locker= PSI_RWLOCK_CALL(start_rwlock_rdwait) + (&state, pfs_psi, PSI_RWLOCK_READLOCK, __FILE__, __LINE__); + read_lock(IF_WIN(, l)); + if (locker) + PSI_RWLOCK_CALL(end_rwlock_rdwait)(locker, 0); + return; + } +# endif /* UNIV_PFS_RWLOCK */ + IF_WIN(read_lock(), if (!read_trylock(l)) read_lock(l)); + } + void wr_lock() + { +# ifdef UNIV_PFS_RWLOCK + if (write_trylock()) + return; + if (pfs_psi) + { + PSI_rwlock_locker_state state; + PSI_rwlock_locker *locker= PSI_RWLOCK_CALL(start_rwlock_wrwait) + (&state, pfs_psi, PSI_RWLOCK_WRITELOCK, __FILE__, __LINE__); + write_lock(); + if (locker) + PSI_RWLOCK_CALL(end_rwlock_rdwait)(locker, 0); + return; + } +# endif /* UNIV_PFS_RWLOCK */ + IF_WIN(, if (!write_trylock())) write_lock(); + } +#ifdef _WIN32 + void rd_unlock() + { +#ifdef UNIV_PFS_RWLOCK + if (pfs_psi) PSI_RWLOCK_CALL(unlock_rwlock)(pfs_psi); +#endif + ReleaseSRWLockShared(&lock); + } + void wr_unlock() + { +#ifdef UNIV_PFS_RWLOCK + if (pfs_psi) PSI_RWLOCK_CALL(unlock_rwlock)(pfs_psi); +#endif + ReleaseSRWLockExclusive(&lock); + } +#else + void rd_unlock(); + void wr_unlock(); +#endif +#endif +}; diff --git a/storage/innobase/include/sync0sync.h b/storage/innobase/include/sync0sync.h index b7f3cff2925..4ba3ceb437c 100644 --- a/storage/innobase/include/sync0sync.h +++ b/storage/innobase/include/sync0sync.h @@ -87,7 +87,6 @@ extern mysql_pfs_key_t read_view_mutex_key; #ifdef UNIV_PFS_RWLOCK /* Following are rwlock keys used to register with MySQL performance schema */ -extern mysql_pfs_key_t btr_search_latch_key; extern mysql_pfs_key_t dict_operation_lock_key; extern mysql_pfs_key_t fil_space_latch_key; extern mysql_pfs_key_t fts_cache_rw_lock_key; diff --git a/storage/innobase/include/sync0types.h b/storage/innobase/include/sync0types.h index feb1e3b45ef..b3b67b106ef 100644 --- a/storage/innobase/include/sync0types.h +++ b/storage/innobase/include/sync0types.h @@ -191,8 +191,6 @@ enum latch_level_t { SYNC_POOL, SYNC_POOL_MANAGER, - SYNC_SEARCH_SYS, - SYNC_WORK_QUEUE, SYNC_FTS_TOKENIZE, @@ -294,7 +292,6 @@ enum latch_id_t { LATCH_ID_ROW_DROP_LIST, LATCH_ID_INDEX_ONLINE_LOG, LATCH_ID_WORK_QUEUE, - LATCH_ID_BTR_SEARCH, LATCH_ID_BUF_BLOCK_LOCK, LATCH_ID_BUF_BLOCK_DEBUG, LATCH_ID_DICT_OPERATION, diff --git a/storage/innobase/row/row0sel.cc b/storage/innobase/row/row0sel.cc index 7a859ab8b09..a5dc4ce0e86 100644 --- a/storage/innobase/row/row0sel.cc +++ b/storage/innobase/row/row0sel.cc @@ -3857,15 +3857,15 @@ row_sel_try_search_shortcut_for_mysql( ut_ad(dict_index_is_clust(index)); ut_ad(!prebuilt->templ_contains_blob); - rw_lock_t* ahi_latch = btr_search_sys.get_latch(*index); - rw_lock_s_lock(ahi_latch); + srw_lock* ahi_latch = btr_search_sys.get_latch(*index); + ahi_latch->rd_lock(); btr_pcur_open_with_no_init(index, search_tuple, PAGE_CUR_GE, 
BTR_SEARCH_LEAF, pcur, ahi_latch, mtr); rec = btr_pcur_get_rec(pcur); if (!page_rec_is_user_rec(rec) || rec_is_metadata(rec, *index)) { retry: - rw_lock_s_unlock(ahi_latch); + ahi_latch->rd_unlock(); return(SEL_RETRY); } @@ -3875,7 +3875,7 @@ retry: if (btr_pcur_get_up_match(pcur) < dtuple_get_n_fields(search_tuple)) { exhausted: - rw_lock_s_unlock(ahi_latch); + ahi_latch->rd_unlock(); return(SEL_EXHAUSTED); } @@ -3899,7 +3899,7 @@ exhausted: *out_rec = rec; - rw_lock_s_unlock(ahi_latch); + ahi_latch->rd_unlock(); return(SEL_FOUND); } #endif /* BTR_CUR_HASH_ADAPT */ diff --git a/storage/innobase/srv/srv0srv.cc b/storage/innobase/srv/srv0srv.cc index 554e2043ad1..c4785192665 100644 --- a/storage/innobase/srv/srv0srv.cc +++ b/storage/innobase/srv/srv0srv.cc @@ -924,13 +924,13 @@ srv_printf_innodb_monitor( #ifdef BTR_CUR_HASH_ADAPT for (ulint i = 0; i < btr_ahi_parts && btr_search_enabled; ++i) { const auto part= &btr_search_sys.parts[i]; - rw_lock_s_lock(&part->latch); + part->latch.rd_lock(); ut_ad(part->heap->type == MEM_HEAP_FOR_BTR_SEARCH); fprintf(file, "Hash table size " ULINTPF ", node heap has " ULINTPF " buffer(s)\n", part->table.n_cells, part->heap->base.count - !part->heap->free_block); - rw_lock_s_unlock(&part->latch); + part->latch.rd_unlock(); } fprintf(file, @@ -1065,14 +1065,14 @@ srv_export_innodb_status(void) ulint mem_adaptive_hash = 0; for (ulong i = 0; i < btr_ahi_parts; i++) { const auto part= &btr_search_sys.parts[i]; - rw_lock_s_lock(&part->latch); + part->latch.rd_lock(); if (part->heap) { ut_ad(part->heap->type == MEM_HEAP_FOR_BTR_SEARCH); mem_adaptive_hash += mem_heap_get_size(part->heap) + part->table.n_cells * sizeof(hash_cell_t); } - rw_lock_s_unlock(&part->latch); + part->latch.rd_unlock(); } export_vars.innodb_mem_adaptive_hash = mem_adaptive_hash; #endif diff --git a/storage/innobase/sync/srw_lock_futex.cc b/storage/innobase/sync/srw_lock_futex.cc new file mode 100644 index 00000000000..98d7744e34a --- /dev/null +++ b/storage/innobase/sync/srw_lock_futex.cc @@ -0,0 +1,107 @@ +/***************************************************************************** + +Copyright (c) 2020, MariaDB Corporation. + +This program is free software; you can redistribute it and/or modify it under +the terms of the GNU General Public License as published by the Free Software +Foundation; version 2 of the License. + +This program is distributed in the hope that it will be useful, but WITHOUT +ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. + +You should have received a copy of the GNU General Public License along with +this program; if not, write to the Free Software Foundation, Inc., +51 Franklin Street, Fifth Floor, Boston, MA 02110-1335 USA + +*****************************************************************************/ + +#ifndef __linux__ +# error "This file is for Linux only" +#endif + +#include "srw_lock.h" + +#ifdef SRW_LOCK_DUMMY +/* Work around a potential build failure by preventing an empty .o file */ +int srw_lock_dummy_function() { return 0; } +#else +# include <linux/futex.h> +# include <sys/syscall.h> + +# include "srv0srv.h" + +/** Wait for a read lock. 
+@param lock word value from a failed read_trylock() */ +void srw_lock::read_lock(uint32_t l) +{ + do + { + if (l == WRITER_WAITING) + wake_writer: + syscall(SYS_futex, word(), FUTEX_WAKE_PRIVATE, 1, nullptr, nullptr, 0); + else + for (auto spin= srv_n_spin_wait_rounds; spin; spin--) + { + ut_delay(srv_spin_wait_delay); + if (read_trylock(l)) + return; + else if (l == WRITER_WAITING) + goto wake_writer; + } + + syscall(SYS_futex, word(), FUTEX_WAIT_PRIVATE, l, nullptr, nullptr, 0); + } + while (!read_trylock(l)); +} + +/** Wait for a write lock after a failed write_trylock() */ +void srw_lock::write_lock() +{ + for (;;) + { + uint32_t l= write_lock_wait_start(); + /* We are the first writer to be granted the lock. Spin for a while. */ + for (auto spin= srv_n_spin_wait_rounds; spin; spin--) + { + if (write_lock_wait_try(l)) + return; + if (!(l & WRITER_WAITING)) + l= write_lock_wait_start(); + ut_delay(srv_spin_wait_delay); + } + + if (write_lock_wait_try(l)) + return; + + if (!(l & WRITER_WAITING)) + { + if (l == UNLOCKED && write_trylock()) + return; + l= write_lock_wait_start() | WRITER_WAITING; + } + else + DBUG_ASSERT(~WRITER_WAITING & l); + + syscall(SYS_futex, word(), FUTEX_WAIT_PRIVATE, l, nullptr, nullptr, 0); + } +} + +void srw_lock::rd_unlock() +{ +#ifdef UNIV_PFS_RWLOCK + if (pfs_psi) PSI_RWLOCK_CALL(unlock_rwlock)(pfs_psi); +#endif + if (read_unlock()) + syscall(SYS_futex, word(), FUTEX_WAKE_PRIVATE, 1, nullptr, nullptr, 0); +} + +void srw_lock::wr_unlock() +{ +#ifdef UNIV_PFS_RWLOCK + if (pfs_psi) PSI_RWLOCK_CALL(unlock_rwlock)(pfs_psi); +#endif + write_unlock(); + syscall(SYS_futex, word(), FUTEX_WAKE_PRIVATE, INT_MAX, nullptr, nullptr, 0); +} +#endif diff --git a/storage/innobase/sync/sync0debug.cc b/storage/innobase/sync/sync0debug.cc index f2f7c13d34c..7af95a3195a 100644 --- a/storage/innobase/sync/sync0debug.cc +++ b/storage/innobase/sync/sync0debug.cc @@ -455,7 +455,6 @@ LatchDebug::LatchDebug() LEVEL_MAP_INSERT(SYNC_ANY_LATCH); LEVEL_MAP_INSERT(SYNC_POOL); LEVEL_MAP_INSERT(SYNC_POOL_MANAGER); - LEVEL_MAP_INSERT(SYNC_SEARCH_SYS); LEVEL_MAP_INSERT(SYNC_WORK_QUEUE); LEVEL_MAP_INSERT(SYNC_FTS_TOKENIZE); LEVEL_MAP_INSERT(SYNC_FTS_OPTIMIZE); @@ -732,7 +731,6 @@ LatchDebug::check_order( case SYNC_FTS_OPTIMIZE: case SYNC_FTS_CACHE: case SYNC_FTS_CACHE_INIT: - case SYNC_SEARCH_SYS: case SYNC_LOCK_SYS: case SYNC_LOCK_WAIT_SYS: case SYNC_RW_TRX_HASH_ELEMENT: @@ -1304,8 +1302,6 @@ sync_latch_meta_init() LATCH_ADD_MUTEX(WORK_QUEUE, SYNC_WORK_QUEUE, PFS_NOT_INSTRUMENTED); // Add the RW locks - LATCH_ADD_RWLOCK(BTR_SEARCH, SYNC_SEARCH_SYS, btr_search_latch_key); - LATCH_ADD_RWLOCK(BUF_BLOCK_LOCK, SYNC_LEVEL_VARYING, PFS_NOT_INSTRUMENTED); diff --git a/storage/innobase/sync/sync0sync.cc b/storage/innobase/sync/sync0sync.cc index 0a6f8bfbebd..8ee9ffd2947 100644 --- a/storage/innobase/sync/sync0sync.cc +++ b/storage/innobase/sync/sync0sync.cc @@ -81,7 +81,6 @@ mysql_pfs_key_t rw_trx_hash_element_mutex_key; mysql_pfs_key_t read_view_mutex_key; #endif /* UNIV_PFS_MUTEX */ #ifdef UNIV_PFS_RWLOCK -mysql_pfs_key_t btr_search_latch_key; mysql_pfs_key_t dict_operation_lock_key; mysql_pfs_key_t index_tree_rw_lock_key; mysql_pfs_key_t index_online_log_key; |