diff options
author | Marko Mäkelä <marko.makela@mariadb.com> | 2021-01-07 11:31:26 +0200 |
---|---|---|
committer | Marko Mäkelä <marko.makela@mariadb.com> | 2021-01-07 11:31:26 +0200 |
commit | 197ae579b817e2de38a93e034820221744edac88 (patch) | |
tree | 215231a043bde33762fc818ceda2f0a7a6a6cfec | |
parent | 92abdcca5a5324f0727112ab2417d10c7a8d5094 (diff) | |
download | mariadb-git-197ae579b817e2de38a93e034820221744edac88.tar.gz |
MDEV-20612 preparation: LockMutexGuard, LockGuard
Let us use the RAII wrapper LockMutexGuard for most operations where
lock_sys.mutex is acquired.
Let us use the RAII wrapper LockGuard for operations that are related
to a buffer pool page.
-rw-r--r-- | storage/innobase/btr/btr0btr.cc | 11 | ||||
-rw-r--r-- | storage/innobase/btr/btr0cur.cc | 13 | ||||
-rw-r--r-- | storage/innobase/buf/buf0buf.cc | 10 | ||||
-rw-r--r-- | storage/innobase/gis/gis0sea.cc | 25 | ||||
-rw-r--r-- | storage/innobase/handler/ha_innodb.cc | 19 | ||||
-rw-r--r-- | storage/innobase/ibuf/ibuf0ibuf.cc | 8 | ||||
-rw-r--r-- | storage/innobase/include/lock0lock.h | 29 | ||||
-rw-r--r-- | storage/innobase/include/lock0priv.h | 27 | ||||
-rw-r--r-- | storage/innobase/include/lock0priv.ic | 22 | ||||
-rw-r--r-- | storage/innobase/lock/lock0lock.cc | 1096 | ||||
-rw-r--r-- | storage/innobase/lock/lock0prdt.cc | 235 | ||||
-rw-r--r-- | storage/innobase/lock/lock0wait.cc | 22 | ||||
-rw-r--r-- | storage/innobase/row/row0ins.cc | 11 | ||||
-rw-r--r-- | storage/innobase/row/row0mysql.cc | 9 | ||||
-rw-r--r-- | storage/innobase/trx/trx0i_s.cc | 5 | ||||
-rw-r--r-- | storage/innobase/trx/trx0trx.cc | 31 |
16 files changed, 671 insertions, 902 deletions
diff --git a/storage/innobase/btr/btr0btr.cc b/storage/innobase/btr/btr0btr.cc index a1b1909be48..0ad00bfef11 100644 --- a/storage/innobase/btr/btr0btr.cc +++ b/storage/innobase/btr/btr0btr.cc @@ -2,7 +2,7 @@ Copyright (c) 1994, 2016, Oracle and/or its affiliates. All Rights Reserved. Copyright (c) 2012, Facebook Inc. -Copyright (c) 2014, 2020, MariaDB Corporation. +Copyright (c) 2014, 2021, MariaDB Corporation. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -3295,10 +3295,9 @@ btr_lift_page_up( if (!dict_table_is_locking_disabled(index->table)) { /* Free predicate page locks on the block */ if (dict_index_is_spatial(index)) { - lock_sys.mutex_lock(); + LockMutexGuard g; lock_prdt_page_free_from_discard( block, &lock_sys.prdt_page_hash); - lock_sys.mutex_unlock(); } lock_update_copy_and_discard(father_block, block); } @@ -3548,11 +3547,10 @@ retry: } /* No GAP lock needs to be worrying about */ - lock_sys.mutex_lock(); + LockMutexGuard g; lock_prdt_page_free_from_discard( block, &lock_sys.prdt_page_hash); lock_rec_free_all_from_discard_page(block); - lock_sys.mutex_unlock(); } else { btr_cur_node_ptr_delete(&father_cursor, mtr); if (!dict_table_is_locking_disabled(index->table)) { @@ -3701,11 +3699,10 @@ retry: offsets2, offsets, merge_page, mtr); } - lock_sys.mutex_lock(); + LockMutexGuard g; lock_prdt_page_free_from_discard( block, &lock_sys.prdt_page_hash); lock_rec_free_all_from_discard_page(block); - lock_sys.mutex_unlock(); } else { compressed = btr_cur_pessimistic_delete(&err, TRUE, diff --git a/storage/innobase/btr/btr0cur.cc b/storage/innobase/btr/btr0cur.cc index 2f4db18ca50..47196e1228d 100644 --- a/storage/innobase/btr/btr0cur.cc +++ b/storage/innobase/btr/btr0cur.cc @@ -3,7 +3,7 @@ Copyright (c) 1994, 2019, Oracle and/or its affiliates. All Rights Reserved. Copyright (c) 2008, Google Inc. Copyright (c) 2012, Facebook Inc. 
-Copyright (c) 2015, 2020, MariaDB Corporation. +Copyright (c) 2015, 2021, MariaDB Corporation. Portions of this file contain modifications contributed and copyrighted by Google, Inc. Those modifications are gratefully acknowledged and are described @@ -1997,11 +1997,12 @@ retry_page_get: trx_t* trx = thr_get_trx(cursor->thr); lock_prdt_t prdt; - lock_sys.mutex_lock(); - lock_init_prdt_from_mbr( - &prdt, &cursor->rtr_info->mbr, mode, - trx->lock.lock_heap); - lock_sys.mutex_unlock(); + { + LockMutexGuard g; + lock_init_prdt_from_mbr( + &prdt, &cursor->rtr_info->mbr, mode, + trx->lock.lock_heap); + } if (rw_latch == RW_NO_LATCH && height != 0) { block->lock.s_lock(); diff --git a/storage/innobase/buf/buf0buf.cc b/storage/innobase/buf/buf0buf.cc index cc92cb3fdbe..43e3b6dc5d4 100644 --- a/storage/innobase/buf/buf0buf.cc +++ b/storage/innobase/buf/buf0buf.cc @@ -2034,13 +2034,11 @@ withdraw_retry: message_interval *= 2; } - lock_sys.mutex_lock(); - bool found = false; - trx_sys.trx_list.for_each(find_interesting_trx{ - found, withdraw_started, current_time}); - lock_sys.mutex_unlock(); - + bool found= false; + find_interesting_trx f{found, withdraw_started, current_time}; withdraw_started = current_time; + LockMutexGuard g; + trx_sys.trx_list.for_each(f); } if (should_retry_withdraw) { diff --git a/storage/innobase/gis/gis0sea.cc b/storage/innobase/gis/gis0sea.cc index 9c4f91716a4..9320fcf88fa 100644 --- a/storage/innobase/gis/gis0sea.cc +++ b/storage/innobase/gis/gis0sea.cc @@ -1,7 +1,7 @@ /***************************************************************************** Copyright (c) 2016, 2018, Oracle and/or its affiliates. All Rights Reserved. -Copyright (c) 2017, 2020, MariaDB Corporation. +Copyright (c) 2017, 2021, MariaDB Corporation. 
This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -386,11 +386,12 @@ rtr_pcur_getnext_from_path( trx_t* trx = thr_get_trx( btr_cur->rtr_info->thr); - lock_sys.mutex_lock(); - lock_init_prdt_from_mbr( - &prdt, &btr_cur->rtr_info->mbr, - mode, trx->lock.lock_heap); - lock_sys.mutex_unlock(); + { + LockMutexGuard g; + lock_init_prdt_from_mbr( + &prdt, &btr_cur->rtr_info->mbr, + mode, trx->lock.lock_heap); + } if (rw_latch == RW_NO_LATCH) { block->lock.s_lock(); @@ -1164,7 +1165,7 @@ rtr_check_discard_page( the root page */ buf_block_t* block) /*!< in: block of page to be discarded */ { - const ulint pageno = block->page.id().page_no(); + const page_id_t page_id{block->page.id()}; mysql_mutex_lock(&index->rtr_track->rtr_active_mutex); @@ -1175,8 +1176,8 @@ rtr_check_discard_page( mysql_mutex_lock(&rtr_info->rtr_path_mutex); for (const node_visit_t& node : *rtr_info->path) { - if (node.page_no == pageno) { - rtr_rebuild_path(rtr_info, pageno); + if (node.page_no == page_id.page_no()) { + rtr_rebuild_path(rtr_info, node.page_no); break; } } @@ -1185,8 +1186,7 @@ rtr_check_discard_page( if (rtr_info->matches) { mysql_mutex_lock(&rtr_info->matches->rtr_match_mutex); - if ((&rtr_info->matches->block)->page.id().page_no() - == pageno) { + if (rtr_info->matches->block.page.id() == page_id) { if (!rtr_info->matches->matched_recs->empty()) { rtr_info->matches->matched_recs->clear(); } @@ -1200,10 +1200,9 @@ rtr_check_discard_page( mysql_mutex_unlock(&index->rtr_track->rtr_active_mutex); - lock_sys.mutex_lock(); + LockGuard g(page_id); lock_prdt_page_free_from_discard(block, &lock_sys.prdt_hash); lock_prdt_page_free_from_discard(block, &lock_sys.prdt_page_hash); - lock_sys.mutex_unlock(); } /** Structure acts as functor to get the optimistic access of the page. 
diff --git a/storage/innobase/handler/ha_innodb.cc b/storage/innobase/handler/ha_innodb.cc index a20085d9c1d..451ba256db2 100644 --- a/storage/innobase/handler/ha_innodb.cc +++ b/storage/innobase/handler/ha_innodb.cc @@ -4,7 +4,7 @@ Copyright (c) 2000, 2020, Oracle and/or its affiliates. All Rights Reserved. Copyright (c) 2008, 2009 Google Inc. Copyright (c) 2009, Percona Inc. Copyright (c) 2012, Facebook Inc. -Copyright (c) 2013, 2020, MariaDB Corporation. +Copyright (c) 2013, 2021, MariaDB Corporation. Portions of this file contain modifications contributed and copyrighted by Google, Inc. Those modifications are gratefully acknowledged and are described @@ -2670,17 +2670,18 @@ static bool innobase_query_caching_table_check_low( For read-only transaction: should satisfy (1) and (3) For read-write transaction: should satisfy (1), (2), (3) */ - if (lock_table_get_n_locks(table)) { + if (trx->id && trx->id < table->query_cache_inv_trx_id) { return false; } - if (trx->id && trx->id < table->query_cache_inv_trx_id) { + if (trx->read_view.is_open() + && trx->read_view.low_limit_id() + < table->query_cache_inv_trx_id) { return false; } - return !trx->read_view.is_open() - || trx->read_view.low_limit_id() - >= table->query_cache_inv_trx_id; + LockMutexGuard g; + return UT_LIST_GET_LEN(table->locks) == 0; } /** Checks if MySQL at the moment is allowed for this table to retrieve a @@ -4449,7 +4450,7 @@ static void innobase_kill_query(handlerton*, THD *thd, enum thd_kill_levels) Also, BF thread should own trx mutex for the victim. 
*/ DBUG_VOID_RETURN; #endif /* WITH_WSREP */ - lock_sys.mutex_lock(); + LockMutexGuard g; trx_sys.trx_list.freeze(); trx->mutex.wr_lock(); /* It is possible that innobase_close_connection() is concurrently @@ -4470,7 +4471,6 @@ static void innobase_kill_query(handlerton*, THD *thd, enum thd_kill_levels) if (!cancel); else if (lock_t *lock= trx->lock.wait_lock) lock_cancel_waiting_and_release(lock); - lock_sys.mutex_unlock(); trx->mutex.wr_unlock(); } @@ -18091,11 +18091,10 @@ wsrep_abort_transaction( wsrep_thd_transaction_state_str(victim_thd)); if (victim_trx) { - lock_sys.mutex_lock(); + LockMutexGuard g; victim_trx->mutex.wr_lock(); int rcode= wsrep_innobase_kill_one_trx(bf_thd, victim_trx, signal); - lock_sys.mutex_unlock(); victim_trx->mutex.wr_unlock(); DBUG_RETURN(rcode); } else { diff --git a/storage/innobase/ibuf/ibuf0ibuf.cc b/storage/innobase/ibuf/ibuf0ibuf.cc index 36918640fa8..8af53112c02 100644 --- a/storage/innobase/ibuf/ibuf0ibuf.cc +++ b/storage/innobase/ibuf/ibuf0ibuf.cc @@ -1,7 +1,7 @@ /***************************************************************************** Copyright (c) 1997, 2016, Oracle and/or its affiliates. All Rights Reserved. -Copyright (c) 2016, 2020, MariaDB Corporation. +Copyright (c) 2016, 2021, MariaDB Corporation. 
This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -3280,10 +3280,8 @@ commit_exit: ibuf_mtr_commit(&bitmap_mtr); goto fail_exit; } else { - lock_sys.mutex_lock(); - const auto lock_exists = lock_sys.get_first(page_id); - lock_sys.mutex_unlock(); - if (lock_exists) { + LockGuard g(page_id); + if (lock_sys.get_first(page_id)) { goto commit_exit; } } diff --git a/storage/innobase/include/lock0lock.h b/storage/innobase/include/lock0lock.h index ac3e467708b..d2712f090ee 100644 --- a/storage/innobase/include/lock0lock.h +++ b/storage/innobase/include/lock0lock.h @@ -635,13 +635,6 @@ dberr_t lock_trx_handle_wait( /*=================*/ trx_t* trx); /*!< in/out: trx lock state */ -/*********************************************************************//** -Get the number of locks on a table. -@return number of locks */ -ulint -lock_table_get_n_locks( -/*===================*/ - const dict_table_t* table); /*!< in: table */ /*******************************************************************//** Initialise the trx lock list. 
*/ void @@ -750,6 +743,8 @@ public: void mutex_assert_locked() const { mysql_mutex_assert_owner(&mutex); } /** Assert that mutex_lock() has not been invoked */ void mutex_assert_unlocked() const { mysql_mutex_assert_not_owner(&mutex); } + /** Assert that a page shard is exclusively latched */ + void assert_locked(const page_id_t) { mutex_assert_locked(); } /** Wait for a lock to be granted */ void wait_lock(lock_t **lock, mysql_cond_t *cond) @@ -815,6 +810,23 @@ public: { return get_first(prdt_page_hash, id); } }; +/** The lock system */ +extern lock_sys_t lock_sys; + +/** lock_sys.mutex guard */ +struct LockMutexGuard +{ + LockMutexGuard() { lock_sys.mutex_lock(); } + ~LockMutexGuard() { lock_sys.mutex_unlock(); } +}; + +/** lock_sys.mutex guard for a page_id_t shard */ +struct LockGuard +{ + LockGuard(const page_id_t) { lock_sys.mutex_lock(); } + ~LockGuard() { lock_sys.mutex_unlock(); } +}; + /*********************************************************************//** Creates a new record lock and inserts it to the lock queue. Does NOT check for deadlocks or lock compatibility! 
@@ -923,9 +935,6 @@ lock_rec_free_all_from_discard_page( /*================================*/ const buf_block_t* block); /*!< in: page to be discarded */ -/** The lock system */ -extern lock_sys_t lock_sys; - #ifdef WITH_WSREP /*********************************************************************//** Cancels a waiting lock request and releases possible other transactions diff --git a/storage/innobase/include/lock0priv.h b/storage/innobase/include/lock0priv.h index 28b6fc463b7..eee8a3ef156 100644 --- a/storage/innobase/include/lock0priv.h +++ b/storage/innobase/include/lock0priv.h @@ -515,8 +515,8 @@ lock_rec_set_nth_bit( @return previous value of the bit */ inline byte lock_rec_reset_nth_bit(lock_t* lock, ulint i) { - lock_sys.mutex_assert_locked(); ut_ad(lock_get_type_low(lock) == LOCK_REC); + lock_sys.assert_locked(lock->un_member.rec_lock.page_id); ut_ad(i < lock->un_member.rec_lock.n_bits); byte* b = reinterpret_cast<byte*>(&lock[1]) + (i >> 3); @@ -562,16 +562,21 @@ lock_rec_get_next_const( ulint heap_no,/*!< in: heap number of the record */ const lock_t* lock); /*!< in: lock */ -/*********************************************************************//** -Gets the first explicit lock request on a record. -@return first lock, NULL if none exists */ -UNIV_INLINE -lock_t* -lock_rec_get_first( -/*===============*/ - hash_table_t* hash, /*!< in: hash chain the lock on */ - const buf_block_t* block, /*!< in: block containing the record */ - ulint heap_no);/*!< in: heap number of the record */ +/** Get the first explicit lock request on a record. 
+@param hash lock hash table +@param id page identifier +@param heap_no record identifier in page +@return first lock +@retval nullptr if none exists */ +inline lock_t* +lock_rec_get_first(hash_table_t *hash, const page_id_t id, ulint heap_no) +{ + for (lock_t *lock= lock_sys.get_first(*hash, id); + lock; lock= lock_rec_get_next_on_page(lock)) + if (lock_rec_get_nth_bit(lock, heap_no)) + return lock; + return nullptr; +} /*********************************************************************//** Gets the mode of a lock. diff --git a/storage/innobase/include/lock0priv.ic b/storage/innobase/include/lock0priv.ic index 3f9a01c0667..b2ee4d1121c 100644 --- a/storage/innobase/include/lock0priv.ic +++ b/storage/innobase/include/lock0priv.ic @@ -132,7 +132,7 @@ lock_rec_get_next( ulint heap_no,/*!< in: heap number of the record */ lock_t* lock) /*!< in: lock */ { - lock_sys.mutex_assert_locked(); + lock_sys.assert_locked(lock->un_member.rec_lock.page_id); do { ut_ad(lock_get_type_low(lock) == LOCK_REC); @@ -152,25 +152,7 @@ lock_rec_get_next_const( ulint heap_no,/*!< in: heap number of the record */ const lock_t* lock) /*!< in: lock */ { - return(lock_rec_get_next(heap_no, (lock_t*) lock)); -} - -/*********************************************************************//** -Gets the first explicit lock request on a record. 
-@return first lock, NULL if none exists */ -UNIV_INLINE -lock_t* -lock_rec_get_first( -/*===============*/ - hash_table_t* hash, /*!< in: hash chain the lock on */ - const buf_block_t* block, /*!< in: block containing the record */ - ulint heap_no)/*!< in: heap number of the record */ -{ - for (lock_t *lock= lock_sys.get_first(*hash, block->page.id()); - lock; lock= lock_rec_get_next_on_page(lock)) - if (lock_rec_get_nth_bit(lock, heap_no)) - return lock; - return nullptr; + return lock_rec_get_next(heap_no, const_cast<lock_t*>(lock)); } /*********************************************************************//** diff --git a/storage/innobase/lock/lock0lock.cc b/storage/innobase/lock/lock0lock.cc index 97f1a670c6c..6280d5f86f0 100644 --- a/storage/innobase/lock/lock0lock.cc +++ b/storage/innobase/lock/lock0lock.cc @@ -285,13 +285,8 @@ ib_uint64_t DeadlockChecker::s_lock_mark_counter = 0; DeadlockChecker::state_t DeadlockChecker::s_states[MAX_STACK_SIZE]; #ifdef UNIV_DEBUG -/*********************************************************************//** -Validates the lock system. -@return TRUE if ok */ -static -bool -lock_validate(); -/*============*/ +/** Validate the transactional locks. */ +static void lock_validate(); /** Validate the record lock queues on a page. 
@param block buffer pool block @@ -499,7 +494,7 @@ void lock_sys_t::resize(ulint n_cells) { ut_ad(this == &lock_sys); - mutex_lock(); + LockMutexGuard g; hash_table_t old_hash(rec_hash); rec_hash.create(n_cells); @@ -518,7 +513,6 @@ void lock_sys_t::resize(ulint n_cells) HASH_MIGRATE(&old_hash, &prdt_page_hash, lock_t, hash, lock_rec_lock_fold); old_hash.free(); - mutex_unlock(); } @@ -921,12 +915,13 @@ lock_rec_has_expl( { lock_t* lock; + const page_id_t id{block->page.id()}; lock_sys.mutex_assert_locked(); ut_ad((precise_mode & LOCK_MODE_MASK) == LOCK_S || (precise_mode & LOCK_MODE_MASK) == LOCK_X); ut_ad(!(precise_mode & LOCK_INSERT_INTENTION)); - for (lock = lock_rec_get_first(&lock_sys.rec_hash, block, heap_no); + for (lock = lock_rec_get_first(&lock_sys.rec_hash, id, heap_no); lock != NULL; lock = lock_rec_get_next(heap_no, lock)) { @@ -969,8 +964,6 @@ lock_rec_other_has_expl_req( requests by all transactions are taken into account */ { - - lock_sys.mutex_assert_locked(); ut_ad(mode == LOCK_X || mode == LOCK_S); /* Only GAP lock can be on SUPREMUM, and we are not looking for @@ -979,8 +972,12 @@ lock_rec_other_has_expl_req( return(NULL); } + const page_id_t id{block->page.id()}; + + lock_sys.mutex_assert_locked(); + for (lock_t* lock = lock_rec_get_first(&lock_sys.rec_hash, - block, heap_no); + id, heap_no); lock != NULL; lock = lock_rec_get_next(heap_no, lock)) { @@ -1078,13 +1075,12 @@ lock_rec_other_has_conflicting( ulint heap_no,/*!< in: heap number of the record */ const trx_t* trx) /*!< in: our transaction */ { - lock_t* lock; - + const page_id_t id{block->page.id()}; lock_sys.mutex_assert_locked(); bool is_supremum = (heap_no == PAGE_HEAP_NO_SUPREMUM); - for (lock = lock_rec_get_first(&lock_sys.rec_hash, block, heap_no); + for (lock_t *lock= lock_rec_get_first(&lock_sys.rec_hash, id, heap_no); lock != NULL; lock = lock_rec_get_next(heap_no, lock)) { @@ -1685,8 +1681,8 @@ lock_rec_lock( (mode & LOCK_TYPE_MASK) == 0); ut_ad(dict_index_is_clust(index) || 
!dict_index_is_online_ddl(index)); DBUG_EXECUTE_IF("innodb_report_deadlock", return DB_DEADLOCK;); - - lock_sys.mutex_lock(); + MONITOR_ATOMIC_INC(MONITOR_NUM_RECLOCK_REQ); + LockMutexGuard g; ut_ad((LOCK_MODE_MASK & mode) != LOCK_S || lock_table_has(trx, index->table, LOCK_IS)); ut_ad((LOCK_MODE_MASK & mode) != LOCK_X || @@ -1760,8 +1756,6 @@ lock_rec_lock( err= DB_SUCCESS_LOCKED_REC; } - lock_sys.mutex_unlock(); - MONITOR_ATOMIC_INC(MONITOR_NUM_RECLOCK_REQ); return err; } @@ -1863,7 +1857,6 @@ lock_rec_cancel( /*============*/ lock_t* lock) /*!< in: waiting record lock request */ { - lock_sys.mutex_assert_locked(); ut_ad(lock_get_type_low(lock) == LOCK_REC); /* Reset the bit (there can be only one set bit) in the lock bitmap */ @@ -2008,20 +2001,15 @@ lock_rec_reset_and_release_wait_low( the record */ ulint heap_no)/*!< in: heap number of record */ { - lock_t* lock; - - lock_sys.mutex_assert_locked(); - - for (lock = lock_rec_get_first(hash, block, heap_no); - lock != NULL; - lock = lock_rec_get_next(heap_no, lock)) { + const page_id_t page_id{block->page.id()}; + lock_sys.mutex_assert_locked(); - if (lock_get_wait(lock)) { - lock_rec_cancel(lock); - } else { - lock_rec_reset_nth_bit(lock, heap_no); - } - } + for (lock_t *lock= lock_rec_get_first(hash, page_id, heap_no); lock; + lock= lock_rec_get_next(heap_no, lock)) + if (lock_get_wait(lock)) + lock_rec_cancel(lock); + else + lock_rec_reset_nth_bit(lock, heap_no); } /*************************************************************//** @@ -2064,8 +2052,7 @@ lock_rec_inherit_to_gap( ulint heap_no) /*!< in: heap_no of the donating record */ { - lock_t* lock; - + const page_id_t id{block->page.id()}; lock_sys.mutex_assert_locked(); /* At READ UNCOMMITTED or READ COMMITTED isolation level, @@ -2074,7 +2061,7 @@ lock_rec_inherit_to_gap( DO want S-locks/X-locks(taken for replace) set by a consistency constraint to be inherited also then. 
*/ - for (lock = lock_rec_get_first(&lock_sys.rec_hash, block, heap_no); + for (lock_t *lock= lock_rec_get_first(&lock_sys.rec_hash, id, heap_no); lock != NULL; lock = lock_rec_get_next(heap_no, lock)) { @@ -2106,11 +2093,10 @@ lock_rec_inherit_to_gap_if_gap_lock( does NOT reset the locks on this record */ { - lock_t* lock; - - lock_sys.mutex_lock(); + const page_id_t id{block->page.id()}; + LockGuard g(id); - for (lock = lock_rec_get_first(&lock_sys.rec_hash, block, heap_no); + for (lock_t *lock= lock_rec_get_first(&lock_sys.rec_hash, id, heap_no); lock != NULL; lock = lock_rec_get_next(heap_no, lock)) { @@ -2124,8 +2110,6 @@ lock_rec_inherit_to_gap_if_gap_lock( lock->trx, FALSE); } } - - lock_sys.mutex_unlock(); } /*************************************************************//** @@ -2147,18 +2131,18 @@ lock_rec_move_low( ulint donator_heap_no)/*!< in: heap_no of the record which gives the locks */ { - lock_t* lock; + const page_id_t donator_id{donator->page.id()}; lock_sys.mutex_assert_locked(); /* If the lock is predicate lock, it resides on INFIMUM record */ - ut_ad(lock_rec_get_first( - lock_hash, receiver, receiver_heap_no) == NULL + ut_ad(!lock_rec_get_first(lock_hash, receiver->page.id(), + receiver_heap_no) || lock_hash == &lock_sys.prdt_hash || lock_hash == &lock_sys.prdt_page_hash); - for (lock = lock_rec_get_first(lock_hash, - donator, donator_heap_no); + for (lock_t *lock = lock_rec_get_first(lock_hash, donator_id, + donator_heap_no); lock != NULL; lock = lock_rec_get_next(donator_heap_no, lock)) { @@ -2179,7 +2163,7 @@ lock_rec_move_low( } ut_ad(!lock_rec_get_first(&lock_sys.rec_hash, - donator, donator_heap_no)); + donator_id, donator_heap_no)); } /** Move all the granted locks to the front of the given lock list. 
@@ -2250,122 +2234,119 @@ lock_move_reorganize_page( const buf_block_t* oblock) /*!< in: copy of the old, not reorganized page */ { - lock_t* lock; - UT_LIST_BASE_NODE_T(lock_t) old_locks; - mem_heap_t* heap = NULL; - ulint comp; - - lock_sys.mutex_lock(); - - /* FIXME: This needs to deal with predicate lock too */ - lock = lock_sys.get_first(block->page.id()); - - if (lock == NULL) { - lock_sys.mutex_unlock(); + mem_heap_t *heap; - return; - } + { + UT_LIST_BASE_NODE_T(lock_t) old_locks; + UT_LIST_INIT(old_locks, &lock_t::trx_locks); - heap = mem_heap_create(256); + const page_id_t id{block->page.id()}; + LockGuard g{id}; - /* Copy first all the locks on the page to heap and reset the - bitmaps in the original locks; chain the copies of the locks - using the trx_locks field in them. */ + /* FIXME: This needs to deal with predicate lock too */ + lock_t *lock= lock_sys.get_first(id); - UT_LIST_INIT(old_locks, &lock_t::trx_locks); + if (!lock) + return; - do { - /* Make a copy of the lock */ - lock_t* old_lock = lock_rec_copy(lock, heap); + heap= mem_heap_create(256); - UT_LIST_ADD_LAST(old_locks, old_lock); + /* Copy first all the locks on the page to heap and reset the + bitmaps in the original locks; chain the copies of the locks + using the trx_locks field in them. 
*/ - /* Reset bitmap of lock */ - lock_rec_bitmap_reset(lock); + do + { + /* Make a copy of the lock */ + lock_t *old_lock= lock_rec_copy(lock, heap); - if (lock_get_wait(lock)) { + UT_LIST_ADD_LAST(old_locks, old_lock); - lock_reset_lock_and_trx_wait(lock); - } + /* Reset bitmap of lock */ + lock_rec_bitmap_reset(lock); - lock = lock_rec_get_next_on_page(lock); - } while (lock != NULL); + if (lock_get_wait(lock)) + lock_reset_lock_and_trx_wait(lock); - comp = page_is_comp(block->frame); - ut_ad(comp == page_is_comp(oblock->frame)); + lock= lock_rec_get_next_on_page(lock); + } + while (lock); - lock_move_granted_locks_to_front(old_locks); + const ulint comp= page_is_comp(block->frame); + ut_ad(comp == page_is_comp(oblock->frame)); - DBUG_EXECUTE_IF("do_lock_reverse_page_reorganize", - ut_list_reverse(old_locks);); + lock_move_granted_locks_to_front(old_locks); - for (lock = UT_LIST_GET_FIRST(old_locks); lock; - lock = UT_LIST_GET_NEXT(trx_locks, lock)) { + DBUG_EXECUTE_IF("do_lock_reverse_page_reorganize", + ut_list_reverse(old_locks);); - /* NOTE: we copy also the locks set on the infimum and - supremum of the page; the infimum may carry locks if an - update of a record is occurring on the page, and its locks - were temporarily stored on the infimum */ - const rec_t* rec1 = page_get_infimum_rec( - buf_block_get_frame(block)); - const rec_t* rec2 = page_get_infimum_rec( - buf_block_get_frame(oblock)); - - /* Set locks according to old locks */ - for (;;) { - ulint old_heap_no; - ulint new_heap_no; - ut_d(const rec_t* const orec = rec1); - ut_ad(page_rec_is_metadata(rec1) - == page_rec_is_metadata(rec2)); - - if (comp) { - old_heap_no = rec_get_heap_no_new(rec2); - new_heap_no = rec_get_heap_no_new(rec1); - - rec1 = page_rec_get_next_low(rec1, TRUE); - rec2 = page_rec_get_next_low(rec2, TRUE); - } else { - old_heap_no = rec_get_heap_no_old(rec2); - new_heap_no = rec_get_heap_no_old(rec1); - ut_ad(!memcmp(rec1, rec2, - rec_get_data_size_old(rec2))); + for (lock= 
UT_LIST_GET_FIRST(old_locks); lock; + lock= UT_LIST_GET_NEXT(trx_locks, lock)) + { + /* NOTE: we copy also the locks set on the infimum and + supremum of the page; the infimum may carry locks if an + update of a record is occurring on the page, and its locks + were temporarily stored on the infimum */ + const rec_t *rec1= page_get_infimum_rec(block->frame); + const rec_t *rec2= page_get_infimum_rec(oblock->frame); + + /* Set locks according to old locks */ + for (;;) + { + ulint old_heap_no; + ulint new_heap_no; + ut_d(const rec_t* const orec= rec1); + ut_ad(page_rec_is_metadata(rec1) == page_rec_is_metadata(rec2)); - rec1 = page_rec_get_next_low(rec1, FALSE); - rec2 = page_rec_get_next_low(rec2, FALSE); - } + if (comp) + { + old_heap_no= rec_get_heap_no_new(rec2); + new_heap_no= rec_get_heap_no_new(rec1); - /* Clear the bit in old_lock. */ - if (old_heap_no < lock->un_member.rec_lock.n_bits - && lock_rec_reset_nth_bit(lock, old_heap_no)) { - ut_ad(!page_rec_is_metadata(orec)); + rec1= page_rec_get_next_low(rec1, TRUE); + rec2= page_rec_get_next_low(rec2, TRUE); + } + else + { + old_heap_no= rec_get_heap_no_old(rec2); + new_heap_no= rec_get_heap_no_old(rec1); + ut_ad(!memcmp(rec1, rec2, rec_get_data_size_old(rec2))); - /* NOTE that the old lock bitmap could be too - small for the new heap number! */ + rec1= page_rec_get_next_low(rec1, FALSE); + rec2= page_rec_get_next_low(rec2, FALSE); + } - lock_rec_add_to_queue( - lock->type_mode, block, new_heap_no, - lock->index, lock->trx, FALSE); - } + /* Clear the bit in old_lock. */ + if (old_heap_no < lock->un_member.rec_lock.n_bits && + lock_rec_reset_nth_bit(lock, old_heap_no)) + { + ut_ad(!page_rec_is_metadata(orec)); - if (new_heap_no == PAGE_HEAP_NO_SUPREMUM) { - ut_ad(old_heap_no == PAGE_HEAP_NO_SUPREMUM); - break; - } - } + /* NOTE that the old lock bitmap could be too + small for the new heap number! 
*/ + lock_rec_add_to_queue(lock->type_mode, block, new_heap_no, + lock->index, lock->trx, FALSE); + } - ut_ad(lock_rec_find_set_bit(lock) == ULINT_UNDEFINED); - } + if (new_heap_no == PAGE_HEAP_NO_SUPREMUM) + { + ut_ad(old_heap_no == PAGE_HEAP_NO_SUPREMUM); + break; + } + } - lock_sys.mutex_unlock(); + ut_ad(lock_rec_find_set_bit(lock) == ULINT_UNDEFINED); + } + } - mem_heap_free(heap); + mem_heap_free(heap); #ifdef UNIV_DEBUG_LOCK_VALIDATE - if (fil_space_t* space = fil_space_t::get(page_id.space())) { - ut_ad(lock_rec_validate_page(block, space->is_latched())); - space->release(); - } + if (fil_space_t *space= fil_space_t::get(id.space())) + { + ut_ad(lock_rec_validate_page(block, space->is_latched())); + space->release(); + } #endif } @@ -2380,109 +2361,98 @@ lock_move_rec_list_end( const rec_t* rec) /*!< in: record on page: this is the first record moved */ { - lock_t* lock; - const ulint comp = page_rec_is_comp(rec); + const ulint comp= page_rec_is_comp(rec); - ut_ad(buf_block_get_frame(block) == page_align(rec)); - ut_ad(comp == page_is_comp(buf_block_get_frame(new_block))); + ut_ad(block->frame == page_align(rec)); + ut_ad(comp == page_is_comp(new_block->frame)); - lock_sys.mutex_lock(); - - /* Note: when we move locks from record to record, waiting locks - and possible granted gap type locks behind them are enqueued in - the original order, because new elements are inserted to a hash - table to the end of the hash chain, and lock_rec_add_to_queue - does not reuse locks if there are waiters in the queue. 
*/ - - for (lock = lock_sys.get_first(block->page.id()); - lock; - lock = lock_rec_get_next_on_page(lock)) { - const rec_t* rec1 = rec; - const rec_t* rec2; - const auto type_mode = lock->type_mode; - - if (comp) { - if (page_offset(rec1) == PAGE_NEW_INFIMUM) { - rec1 = page_rec_get_next_low(rec1, TRUE); - } - - rec2 = page_rec_get_next_low( - buf_block_get_frame(new_block) - + PAGE_NEW_INFIMUM, TRUE); - } else { - if (page_offset(rec1) == PAGE_OLD_INFIMUM) { - rec1 = page_rec_get_next_low(rec1, FALSE); - } - - rec2 = page_rec_get_next_low( - buf_block_get_frame(new_block) - + PAGE_OLD_INFIMUM, FALSE); - } - - /* Copy lock requests on user records to new page and - reset the lock bits on the old */ - - for (;;) { - ut_ad(page_rec_is_metadata(rec1) - == page_rec_is_metadata(rec2)); - ut_d(const rec_t* const orec = rec1); - - ulint rec1_heap_no; - ulint rec2_heap_no; + { + const page_id_t id{block->page.id()}; + LockMutexGuard g; + + /* Note: when we move locks from record to record, waiting locks + and possible granted gap type locks behind them are enqueued in + the original order, because new elements are inserted to a hash + table to the end of the hash chain, and lock_rec_add_to_queue + does not reuse locks if there are waiters in the queue. 
*/ + for (lock_t *lock= lock_sys.get_first(id); lock; + lock= lock_rec_get_next_on_page(lock)) + { + const rec_t *rec1= rec; + const rec_t *rec2; + const auto type_mode= lock->type_mode; - if (comp) { - rec1_heap_no = rec_get_heap_no_new(rec1); + if (comp) + { + if (page_offset(rec1) == PAGE_NEW_INFIMUM) + rec1= page_rec_get_next_low(rec1, TRUE); + rec2= page_rec_get_next_low(new_block->frame + PAGE_NEW_INFIMUM, TRUE); + } + else + { + if (page_offset(rec1) == PAGE_OLD_INFIMUM) + rec1= page_rec_get_next_low(rec1, FALSE); + rec2= page_rec_get_next_low(new_block->frame + PAGE_OLD_INFIMUM, FALSE); + } - if (rec1_heap_no == PAGE_HEAP_NO_SUPREMUM) { - break; - } + /* Copy lock requests on user records to new page and + reset the lock bits on the old */ + for (;;) + { + ut_ad(page_rec_is_metadata(rec1) == page_rec_is_metadata(rec2)); + ut_d(const rec_t* const orec = rec1); - rec2_heap_no = rec_get_heap_no_new(rec2); - rec1 = page_rec_get_next_low(rec1, TRUE); - rec2 = page_rec_get_next_low(rec2, TRUE); - } else { - rec1_heap_no = rec_get_heap_no_old(rec1); + ulint rec1_heap_no; + ulint rec2_heap_no; - if (rec1_heap_no == PAGE_HEAP_NO_SUPREMUM) { - break; - } + if (comp) + { + rec1_heap_no= rec_get_heap_no_new(rec1); + if (rec1_heap_no == PAGE_HEAP_NO_SUPREMUM) + break; - rec2_heap_no = rec_get_heap_no_old(rec2); + rec2_heap_no= rec_get_heap_no_new(rec2); + rec1= page_rec_get_next_low(rec1, TRUE); + rec2= page_rec_get_next_low(rec2, TRUE); + } + else + { + rec1_heap_no= rec_get_heap_no_old(rec1); - ut_ad(rec_get_data_size_old(rec1) - == rec_get_data_size_old(rec2)); + if (rec1_heap_no == PAGE_HEAP_NO_SUPREMUM) + break; + rec2_heap_no= rec_get_heap_no_old(rec2); - ut_ad(!memcmp(rec1, rec2, - rec_get_data_size_old(rec1))); + ut_ad(rec_get_data_size_old(rec1) == rec_get_data_size_old(rec2)); + ut_ad(!memcmp(rec1, rec2, rec_get_data_size_old(rec1))); - rec1 = page_rec_get_next_low(rec1, FALSE); - rec2 = page_rec_get_next_low(rec2, FALSE); - } + rec1= 
page_rec_get_next_low(rec1, FALSE); + rec2= page_rec_get_next_low(rec2, FALSE); + } - if (rec1_heap_no < lock->un_member.rec_lock.n_bits - && lock_rec_reset_nth_bit(lock, rec1_heap_no)) { - ut_ad(!page_rec_is_metadata(orec)); + if (rec1_heap_no < lock->un_member.rec_lock.n_bits && + lock_rec_reset_nth_bit(lock, rec1_heap_no)) + { + ut_ad(!page_rec_is_metadata(orec)); - if (type_mode & LOCK_WAIT) { - lock_reset_lock_and_trx_wait(lock); - } + if (type_mode & LOCK_WAIT) + lock_reset_lock_and_trx_wait(lock); - lock_rec_add_to_queue( - type_mode, new_block, rec2_heap_no, - lock->index, lock->trx, FALSE); - } - } + lock_rec_add_to_queue(type_mode, new_block, rec2_heap_no, + lock->index, lock->trx, FALSE); } - - lock_sys.mutex_unlock(); + } + } + } #ifdef UNIV_DEBUG_LOCK_VALIDATE - if (fil_space_t* space = fil_space_t::get(page_id.space())) { - const bool is_latched{space->is_latched()}; - ut_ad(lock_rec_validate_page(block, is_latched)); - ut_ad(lock_rec_validate_page(new_block, is_latched)); - space->release(); - } + if (fil_space_t *space= fil_space_t::get(page_id.space())) + { + const bool is_latched{space->is_latched()}; + ut_ad(lock_rec_validate_page(block, is_latched)); + ut_ad(lock_rec_validate_page(new_block, is_latched)); + space->release(); + } #endif } @@ -2504,98 +2474,88 @@ lock_move_rec_list_start( before the records were copied */ { - lock_t* lock; - const ulint comp = page_rec_is_comp(rec); + const ulint comp= page_rec_is_comp(rec); - ut_ad(block->frame == page_align(rec)); - ut_ad(new_block->frame == page_align(old_end)); - ut_ad(comp == page_rec_is_comp(old_end)); - ut_ad(!page_rec_is_metadata(rec)); + ut_ad(block->frame == page_align(rec)); + ut_ad(comp == page_is_comp(new_block->frame)); + ut_ad(new_block->frame == page_align(old_end)); + ut_ad(!page_rec_is_metadata(rec)); - lock_sys.mutex_lock(); + { + const page_id_t id{block->page.id()}; + LockMutexGuard g; - for (lock = lock_sys.get_first(block->page.id()); - lock; - lock = 
lock_rec_get_next_on_page(lock)) { - const rec_t* rec1; - const rec_t* rec2; - const auto type_mode = lock->type_mode; - - if (comp) { - rec1 = page_rec_get_next_low( - buf_block_get_frame(block) - + PAGE_NEW_INFIMUM, TRUE); - rec2 = page_rec_get_next_low(old_end, TRUE); - } else { - rec1 = page_rec_get_next_low( - buf_block_get_frame(block) - + PAGE_OLD_INFIMUM, FALSE); - rec2 = page_rec_get_next_low(old_end, FALSE); - } + for (lock_t *lock= lock_sys.get_first(id); lock; + lock= lock_rec_get_next_on_page(lock)) + { + const rec_t *rec1; + const rec_t *rec2; + const auto type_mode= lock->type_mode; - /* Copy lock requests on user records to new page and - reset the lock bits on the old */ + if (comp) + { + rec1= page_rec_get_next_low(block->frame + PAGE_NEW_INFIMUM, TRUE); + rec2= page_rec_get_next_low(old_end, TRUE); + } + else + { + rec1= page_rec_get_next_low(block->frame + PAGE_OLD_INFIMUM, FALSE); + rec2= page_rec_get_next_low(old_end, FALSE); + } - while (rec1 != rec) { - ut_ad(page_rec_is_metadata(rec1) - == page_rec_is_metadata(rec2)); - ut_d(const rec_t* const prev = rec1); + /* Copy lock requests on user records to new page and + reset the lock bits on the old */ - ulint rec1_heap_no; - ulint rec2_heap_no; + while (rec1 != rec) + { + ut_ad(page_rec_is_metadata(rec1) == page_rec_is_metadata(rec2)); + ut_d(const rec_t* const prev= rec1); - if (comp) { - rec1_heap_no = rec_get_heap_no_new(rec1); - rec2_heap_no = rec_get_heap_no_new(rec2); + ulint rec1_heap_no; + ulint rec2_heap_no; - rec1 = page_rec_get_next_low(rec1, TRUE); - rec2 = page_rec_get_next_low(rec2, TRUE); - } else { - rec1_heap_no = rec_get_heap_no_old(rec1); - rec2_heap_no = rec_get_heap_no_old(rec2); + if (comp) + { + rec1_heap_no= rec_get_heap_no_new(rec1); + rec2_heap_no= rec_get_heap_no_new(rec2); - ut_ad(!memcmp(rec1, rec2, - rec_get_data_size_old(rec2))); + rec1= page_rec_get_next_low(rec1, TRUE); + rec2= page_rec_get_next_low(rec2, TRUE); + } + else + { + rec1_heap_no= 
rec_get_heap_no_old(rec1); + rec2_heap_no= rec_get_heap_no_old(rec2); - rec1 = page_rec_get_next_low(rec1, FALSE); - rec2 = page_rec_get_next_low(rec2, FALSE); - } + ut_ad(!memcmp(rec1, rec2, rec_get_data_size_old(rec2))); - if (rec1_heap_no < lock->un_member.rec_lock.n_bits - && lock_rec_reset_nth_bit(lock, rec1_heap_no)) { - ut_ad(!page_rec_is_metadata(prev)); + rec1= page_rec_get_next_low(rec1, FALSE); + rec2= page_rec_get_next_low(rec2, FALSE); + } - if (type_mode & LOCK_WAIT) { - lock_reset_lock_and_trx_wait(lock); - } + if (rec1_heap_no < lock->un_member.rec_lock.n_bits && + lock_rec_reset_nth_bit(lock, rec1_heap_no)) + { + ut_ad(!page_rec_is_metadata(prev)); - lock_rec_add_to_queue( - type_mode, new_block, rec2_heap_no, - lock->index, lock->trx, FALSE); - } - } + if (type_mode & LOCK_WAIT) + lock_reset_lock_and_trx_wait(lock); + + lock_rec_add_to_queue(type_mode, new_block, rec2_heap_no, + lock->index, lock->trx, FALSE); + } + } #ifdef UNIV_DEBUG - if (page_rec_is_supremum(rec)) { - ulint i; - - for (i = PAGE_HEAP_NO_USER_LOW; - i < lock_rec_get_n_bits(lock); i++) { - if (lock_rec_get_nth_bit(lock, i)) { - ib::fatal() - << "lock_move_rec_list_start():" - << i << " not moved in " - << (void*) lock; - } - } - } + if (page_rec_is_supremum(rec)) + for (auto i= lock_rec_get_n_bits(lock); --i > PAGE_HEAP_NO_USER_LOW; ) + ut_ad(!lock_rec_get_nth_bit(lock, i)); #endif /* UNIV_DEBUG */ - } - - lock_sys.mutex_unlock(); + } + } #ifdef UNIV_DEBUG_LOCK_VALIDATE - ut_ad(lock_rec_validate_page(block)); + ut_ad(lock_rec_validate_page(block)); #endif } @@ -2612,74 +2572,69 @@ lock_rtr_move_rec_list( moved */ ulint num_move) /*!< in: num of rec to move */ { - lock_t* lock; - ulint comp; + if (!num_move) + return; - if (!num_move) { - return; - } + const ulint comp= page_rec_is_comp(rec_move[0].old_rec); - comp = page_rec_is_comp(rec_move[0].old_rec); + ut_ad(block->frame == page_align(rec_move[0].old_rec)); + ut_ad(new_block->frame == page_align(rec_move[0].new_rec)); + 
ut_ad(comp == page_rec_is_comp(rec_move[0].new_rec)); - ut_ad(block->frame == page_align(rec_move[0].old_rec)); - ut_ad(new_block->frame == page_align(rec_move[0].new_rec)); - ut_ad(comp == page_rec_is_comp(rec_move[0].new_rec)); - - lock_sys.mutex_lock(); - - for (lock = lock_sys.get_first(block->page.id()); - lock; - lock = lock_rec_get_next_on_page(lock)) { - ulint moved = 0; - const rec_t* rec1; - const rec_t* rec2; - const auto type_mode = lock->type_mode; - - /* Copy lock requests on user records to new page and - reset the lock bits on the old */ + { + const page_id_t id{block->page.id()}; + LockGuard g{id}; - while (moved < num_move) { - ulint rec1_heap_no; - ulint rec2_heap_no; + for (lock_t *lock= lock_sys.get_first(id); lock; + lock= lock_rec_get_next_on_page(lock)) + { + const rec_t *rec1; + const rec_t *rec2; + const auto type_mode= lock->type_mode; - rec1 = rec_move[moved].old_rec; - rec2 = rec_move[moved].new_rec; - ut_ad(!page_rec_is_metadata(rec1)); - ut_ad(!page_rec_is_metadata(rec2)); + /* Copy lock requests on user records to new page and + reset the lock bits on the old */ - if (comp) { - rec1_heap_no = rec_get_heap_no_new(rec1); - rec2_heap_no = rec_get_heap_no_new(rec2); + for (ulint moved= 0; moved < num_move; moved++) + { + ulint rec1_heap_no; + ulint rec2_heap_no; - } else { - rec1_heap_no = rec_get_heap_no_old(rec1); - rec2_heap_no = rec_get_heap_no_old(rec2); + rec1= rec_move[moved].old_rec; + rec2= rec_move[moved].new_rec; + ut_ad(!page_rec_is_metadata(rec1)); + ut_ad(!page_rec_is_metadata(rec2)); - ut_ad(!memcmp(rec1, rec2, - rec_get_data_size_old(rec2))); - } + if (comp) + { + rec1_heap_no= rec_get_heap_no_new(rec1); + rec2_heap_no= rec_get_heap_no_new(rec2); + } + else + { + rec1_heap_no= rec_get_heap_no_old(rec1); + rec2_heap_no= rec_get_heap_no_old(rec2); - if (rec1_heap_no < lock->un_member.rec_lock.n_bits - && lock_rec_reset_nth_bit(lock, rec1_heap_no)) { - if (type_mode & LOCK_WAIT) { - lock_reset_lock_and_trx_wait(lock); - } + 
ut_ad(!memcmp(rec1, rec2, rec_get_data_size_old(rec2))); + } - lock_rec_add_to_queue( - type_mode, new_block, rec2_heap_no, - lock->index, lock->trx, FALSE); + if (rec1_heap_no < lock->un_member.rec_lock.n_bits && + lock_rec_reset_nth_bit(lock, rec1_heap_no)) + { + if (type_mode & LOCK_WAIT) + lock_reset_lock_and_trx_wait(lock); - rec_move[moved].moved = true; - } + lock_rec_add_to_queue(type_mode, new_block, rec2_heap_no, + lock->index, lock->trx, FALSE); - moved++; - } + rec_move[moved].moved= true; } - - lock_sys.mutex_unlock(); + } + } + } #ifdef UNIV_DEBUG_LOCK_VALIDATE - ut_ad(lock_rec_validate_page(block)); + ut_ad(lock_rec_validate_page(block)); #endif } /*************************************************************//** @@ -2692,7 +2647,7 @@ lock_update_split_right( { ulint heap_no = lock_get_min_heap_no(right_block); - lock_sys.mutex_lock(); + LockMutexGuard g; /* Move the locks on the supremum of the left page to the supremum of the right page */ @@ -2705,8 +2660,6 @@ lock_update_split_right( lock_rec_inherit_to_gap(left_block, right_block, PAGE_HEAP_NO_SUPREMUM, heap_no); - - lock_sys.mutex_unlock(); } /*************************************************************//** @@ -2726,7 +2679,7 @@ lock_update_merge_right( { ut_ad(!page_rec_is_metadata(orig_succ)); - lock_sys.mutex_lock(); + LockMutexGuard g; /* Inherit the locks from the supremum of the left page to the original successor of infimum on the right page, to which the left @@ -2747,8 +2700,6 @@ lock_update_merge_right( ut_ad(!lock_sys.get_first_prdt_page(left_block->page.id())); lock_rec_free_all_from_discard_page(left_block); - - lock_sys.mutex_unlock(); } /*************************************************************//** @@ -2764,14 +2715,13 @@ lock_update_root_raise( const buf_block_t* block, /*!< in: index page to which copied */ const buf_block_t* root) /*!< in: root page */ { - lock_sys.mutex_lock(); + LockMutexGuard g; /* Move the locks on the supremum of the root to the supremum of block */ 
lock_rec_move(block, root, PAGE_HEAP_NO_SUPREMUM, PAGE_HEAP_NO_SUPREMUM); - lock_sys.mutex_unlock(); } /*************************************************************//** @@ -2785,7 +2735,7 @@ lock_update_copy_and_discard( const buf_block_t* block) /*!< in: index page; NOT the root! */ { - lock_sys.mutex_lock(); + LockMutexGuard g; /* Move the locks on the supremum of the old page to the supremum of new_page */ @@ -2793,8 +2743,6 @@ lock_update_copy_and_discard( lock_rec_move(new_block, block, PAGE_HEAP_NO_SUPREMUM, PAGE_HEAP_NO_SUPREMUM); lock_rec_free_all_from_discard_page(block); - - lock_sys.mutex_unlock(); } /*************************************************************//** @@ -2807,15 +2755,13 @@ lock_update_split_left( { ulint heap_no = lock_get_min_heap_no(right_block); - lock_sys.mutex_lock(); + LockMutexGuard g; /* Inherit the locks to the supremum of the left page from the successor of the infimum on the right page */ lock_rec_inherit_to_gap(left_block, right_block, PAGE_HEAP_NO_SUPREMUM, heap_no); - - lock_sys.mutex_unlock(); } /*************************************************************//** @@ -2835,7 +2781,7 @@ lock_update_merge_left( ut_ad(left_block->frame == page_align(orig_pred)); - lock_sys.mutex_lock(); + LockMutexGuard g; left_next_rec = page_rec_get_next_const(orig_pred); @@ -2866,8 +2812,6 @@ lock_update_merge_left( ut_ad(!lock_sys.get_first_prdt_page(right_block->page.id())); lock_rec_free_all_from_discard_page(right_block); - - lock_sys.mutex_unlock(); } /*************************************************************//** @@ -2887,13 +2831,11 @@ lock_rec_reset_and_inherit_gap_locks( ulint heap_no) /*!< in: heap_no of the donating record */ { - lock_sys.mutex_lock(); + LockMutexGuard g; lock_rec_reset_and_release_wait(heir_block, heir_heap_no); lock_rec_inherit_to_gap(heir_block, block, heir_heap_no, heap_no); - - lock_sys.mutex_unlock(); } /*************************************************************//** @@ -2913,7 +2855,7 @@ 
lock_update_discard( ulint heap_no; const page_id_t page_id(block->page.id()); - lock_sys.mutex_lock(); + LockMutexGuard g; if (lock_sys.get_first(page_id)) { ut_ad(!lock_sys.get_first_prdt(page_id)); @@ -2959,8 +2901,6 @@ lock_update_discard( lock_rec_free_all_from_discard_page_low( page_id, &lock_sys.prdt_page_hash); } - - lock_sys.mutex_unlock(); } /*************************************************************//** @@ -3021,7 +2961,7 @@ lock_update_delete( FALSE)); } - lock_sys.mutex_lock(); + LockMutexGuard g; /* Let the next record inherit the locks from rec, in gap mode */ @@ -3030,8 +2970,6 @@ lock_update_delete( /* Reset the lock bits on rec and release waiting transactions */ lock_rec_reset_and_release_wait(block, heap_no); - - lock_sys.mutex_unlock(); } /*********************************************************************//** @@ -3055,11 +2993,9 @@ lock_rec_store_on_page_infimum( ut_ad(block->frame == page_align(rec)); - lock_sys.mutex_lock(); + LockMutexGuard g; lock_rec_move(block, block, PAGE_HEAP_NO_INFIMUM, heap_no); - - lock_sys.mutex_unlock(); } /*********************************************************************//** @@ -3079,11 +3015,9 @@ lock_rec_restore_from_page_infimum( { ulint heap_no = page_rec_get_heap_no(rec); - lock_sys.mutex_lock(); + LockMutexGuard g; lock_rec_move(block, donator, heap_no, PAGE_HEAP_NO_INFIMUM); - - lock_sys.mutex_unlock(); } /*========================= TABLE LOCKS ==============================*/ @@ -3524,7 +3458,7 @@ lock_table( trx_set_rw_mode(trx); } - lock_sys.mutex_lock(); + LockMutexGuard g; /* We have to check if the new lock is compatible with any locks other transactions have in the table lock queue. 
*/ @@ -3552,8 +3486,6 @@ lock_table( err = DB_SUCCESS; } - lock_sys.mutex_unlock(); - trx->mutex.wr_unlock(); return(err); @@ -3575,7 +3507,7 @@ lock_table_ix_resurrect( auto mutex= &trx->mutex; - lock_sys.mutex_lock(); + LockMutexGuard g; /* We have to check if the new lock is compatible with any locks other transactions have in the table lock queue. */ @@ -3585,7 +3517,6 @@ lock_table_ix_resurrect( mutex->wr_lock(); lock_table_create(table, LOCK_IX, trx); - lock_sys.mutex_unlock(); mutex->wr_unlock(); } @@ -3737,9 +3668,11 @@ lock_rec_unlock( heap_no = page_rec_get_heap_no(rec); - lock_sys.mutex_lock(); + const page_id_t id{block->page.id()}; - first_lock = lock_rec_get_first(&lock_sys.rec_hash, block, heap_no); + LockGuard g(id); + + first_lock = lock_rec_get_first(&lock_sys.rec_hash, id, heap_no); /* Find the last lock with the same lock_mode and transaction on the record. */ @@ -3751,8 +3684,6 @@ lock_rec_unlock( } } - lock_sys.mutex_unlock(); - { ib::error err; err << "Unlock row could not find a " << lock_mode @@ -3788,8 +3719,6 @@ released: #endif /* WITH_WSREP */ } } - - lock_sys.mutex_unlock(); } #ifdef UNIV_DEBUG @@ -3840,7 +3769,7 @@ void lock_release(trx_t* trx) ulint count = 0; trx_id_t max_trx_id = trx_sys.get_max_trx_id(); - lock_sys.mutex_lock(); + LockMutexGuard g; for (lock_t* lock = UT_LIST_GET_LAST(trx->lock.trx_locks); lock != NULL; @@ -3874,16 +3803,12 @@ void lock_release(trx_t* trx) do not monopolize it */ lock_sys.mutex_unlock(); - - lock_sys.mutex_lock(); - count = 0; + lock_sys.mutex_lock(); } ++count; } - - lock_sys.mutex_unlock(); } /* True if a lock mode is S or X */ @@ -4250,14 +4175,12 @@ lock_print_info_all_transactions( /*=============================*/ FILE* file) /*!< in/out: file where to print */ { - lock_sys.mutex_assert_locked(); - fprintf(file, "LIST OF TRANSACTIONS FOR EACH SESSION:\n"); trx_sys.trx_list.for_each(lock_print_info(file, time(nullptr))); lock_sys.mutex_unlock(); - ut_ad(lock_validate()); + 
ut_d(lock_validate()); } #ifdef UNIV_DEBUG @@ -4365,6 +4288,8 @@ lock_rec_queue_validate( heap_no = page_rec_get_heap_no(rec); + const page_id_t id{block->page.id()}; + if (!locked_lock_trx_sys) { lock_sys.mutex_lock(); } @@ -4374,7 +4299,7 @@ lock_rec_queue_validate( if (!page_rec_is_user_rec(rec)) { for (lock = lock_rec_get_first(&lock_sys.rec_hash, - block, heap_no); + id, heap_no); lock != NULL; lock = lock_rec_get_next_const(heap_no, lock)) { @@ -4464,7 +4389,7 @@ func_exit: impl_trx->mutex.wr_unlock(); } - for (lock = lock_rec_get_first(&lock_sys.rec_hash, block, heap_no); + for (lock = lock_rec_get_first(&lock_sys.rec_hash, id, heap_no); lock != NULL; lock = lock_rec_get_next_const(heap_no, lock)) { @@ -4529,9 +4454,10 @@ static bool lock_rec_validate_page(const buf_block_t *block, bool latched) rec_offs* offsets = offsets_; rec_offs_init(offsets_); - lock_sys.mutex_lock(); + const page_id_t id{block->page.id()}; + LockGuard g(id); loop: - lock = lock_sys.get_first(block->page.id()); + lock = lock_sys.get_first(id); if (!lock) { goto function_exit; @@ -4586,9 +4512,7 @@ loop: goto loop; function_exit: - lock_sys.mutex_unlock(); - - if (heap != NULL) { + if (UNIV_LIKELY_NULL(heap)) { mem_heap_free(heap); } return(TRUE); @@ -4691,44 +4615,30 @@ static my_bool lock_validate_table_locks(rw_trx_hash_element_t *element, void*) } -/*********************************************************************//** -Validates the lock system. -@return TRUE if ok */ -static -bool -lock_validate() -/*===========*/ +/** Validate the transactional locks. */ +static void lock_validate() { - std::set<page_id_t> pages; - - lock_sys.mutex_lock(); - - /* Validate table locks */ - trx_sys.rw_trx_hash.iterate(lock_validate_table_locks); - - /* Iterate over all the record locks and validate the locks. We - don't want to hog the lock_sys_t::mutex. Release it during the - validation check. 
*/ - - for (ulint i = 0; i < lock_sys.rec_hash.n_cells; i++) { - page_id_t limit(0, 0); - - while (const lock_t* lock = lock_rec_validate(i, &limit)) { - if (lock_rec_find_set_bit(lock) == ULINT_UNDEFINED) { - /* The lock bitmap is empty; ignore it. */ - continue; - } - pages.insert(lock->un_member.rec_lock.page_id); - } - } - - lock_sys.mutex_unlock(); + std::set<page_id_t> pages; + { + LockMutexGuard g; + /* Validate table locks */ + trx_sys.rw_trx_hash.iterate(lock_validate_table_locks); - for (page_id_t page_id : pages) { - lock_rec_block_validate(page_id); - } + for (ulint i= 0; i < lock_sys.rec_hash.n_cells; i++) + { + page_id_t limit{0, 0}; + while (const lock_t *lock= lock_rec_validate(i, &limit)) + { + if (lock_rec_find_set_bit(lock) == ULINT_UNDEFINED) + /* The lock bitmap is empty; ignore it. */ + continue; + pages.insert(lock->un_member.rec_lock.page_id); + } + } + } - return(true); + for (page_id_t page_id : pages) + lock_rec_block_validate(page_id); } #endif /* UNIV_DEBUG */ /*============ RECORD LOCK CHECKS FOR ROW OPERATIONS ====================*/ @@ -4755,136 +4665,107 @@ lock_rec_insert_check_and_lock( LOCK_GAP type locks from the successor record */ { - ut_ad(block->frame == page_align(rec)); - ut_ad(!dict_index_is_online_ddl(index) - || index->is_primary() - || (flags & BTR_CREATE_FLAG)); - ut_ad(mtr->is_named_space(index->table->space)); - ut_ad(page_rec_is_leaf(rec)); + ut_ad(block->frame == page_align(rec)); + ut_ad(!dict_index_is_online_ddl(index) || index->is_primary() || + (flags & BTR_CREATE_FLAG)); + ut_ad(mtr->is_named_space(index->table->space)); + ut_ad(page_is_leaf(block->frame)); - if (flags & BTR_NO_LOCKING_FLAG) { + if (flags & BTR_NO_LOCKING_FLAG) + return(DB_SUCCESS); - return(DB_SUCCESS); - } - - ut_ad(!index->table->is_temporary()); - ut_ad(page_is_leaf(block->frame)); - - dberr_t err; - lock_t* lock; - bool inherit_in = *inherit; - trx_t* trx = thr_get_trx(thr); - const rec_t* next_rec = page_rec_get_next_const(rec); - ulint 
heap_no = page_rec_get_heap_no(next_rec); - ut_ad(!rec_is_metadata(next_rec, *index)); - - lock_sys.mutex_lock(); - /* Because this code is invoked for a running transaction by - the thread that is serving the transaction, it is not necessary - to hold trx->mutex here. */ - - /* When inserting a record into an index, the table must be at - least IX-locked. When we are building an index, we would pass - BTR_NO_LOCKING_FLAG and skip the locking altogether. */ - ut_ad(lock_table_has(trx, index->table, LOCK_IX)); - - lock = lock_rec_get_first(&lock_sys.rec_hash, block, heap_no); - - if (lock == NULL) { - /* We optimize CPU time usage in the simplest case */ - - lock_sys.mutex_unlock(); - - if (inherit_in && !dict_index_is_clust(index)) { - /* Update the page max trx id field */ - page_update_max_trx_id(block, - buf_block_get_page_zip(block), - trx->id, mtr); - } - - *inherit = false; + ut_ad(!index->table->is_temporary()); - return(DB_SUCCESS); - } - - /* Spatial index does not use GAP lock protection. It uses - "predicate lock" to protect the "range" */ - if (dict_index_is_spatial(index)) { - return(DB_SUCCESS); - } - - *inherit = true; + dberr_t err= DB_SUCCESS; + bool inherit_in= *inherit; + trx_t *trx= thr_get_trx(thr); + const rec_t *next_rec= page_rec_get_next_const(rec); + ulint heap_no= page_rec_get_heap_no(next_rec); + const page_id_t id{block->page.id()}; + ut_ad(!rec_is_metadata(next_rec, *index)); - /* If another transaction has an explicit lock request which locks - the gap, waiting or granted, on the successor, the insert has to wait. + { + LockGuard g{id}; + /* Because this code is invoked for a running transaction by + the thread that is serving the transaction, it is not necessary + to hold trx->mutex here. */ - An exception is the case where the lock by the another transaction - is a gap type lock which it placed to wait for its turn to insert. We - do not consider that kind of a lock conflicting with our insert. 
This - eliminates an unnecessary deadlock which resulted when 2 transactions - had to wait for their insert. Both had waiting gap type lock requests - on the successor, which produced an unnecessary deadlock. */ + /* When inserting a record into an index, the table must be at + least IX-locked. When we are building an index, we would pass + BTR_NO_LOCKING_FLAG and skip the locking altogether. */ + ut_ad(lock_table_has(trx, index->table, LOCK_IX)); - const unsigned type_mode = LOCK_X | LOCK_GAP | LOCK_INSERT_INTENTION; + *inherit= lock_rec_get_first(&lock_sys.rec_hash, id, heap_no); - if ( + if (*inherit) + { + /* Spatial index does not use GAP lock protection. It uses + "predicate lock" to protect the "range" */ + if (index->is_spatial()) + return DB_SUCCESS; + + /* If another transaction has an explicit lock request which locks + the gap, waiting or granted, on the successor, the insert has to wait. + + An exception is the case where the lock by the another transaction + is a gap type lock which it placed to wait for its turn to insert. We + do not consider that kind of a lock conflicting with our insert. This + eliminates an unnecessary deadlock which resulted when 2 transactions + had to wait for their insert. Both had waiting gap type lock requests + on the successor, which produced an unnecessary deadlock. */ + const unsigned type_mode= LOCK_X | LOCK_GAP | LOCK_INSERT_INTENTION; + + if ( #ifdef WITH_WSREP - lock_t* c_lock = + lock_t *c_lock= #endif /* WITH_WSREP */ - lock_rec_other_has_conflicting(type_mode, block, heap_no, trx)) { - /* Note that we may get DB_SUCCESS also here! */ - trx->mutex.wr_lock(); + lock_rec_other_has_conflicting(type_mode, block, heap_no, trx)) + { + /* Note that we may get DB_SUCCESS also here! 
*/ + trx->mutex.wr_lock(); - err = lock_rec_enqueue_waiting( + err= lock_rec_enqueue_waiting( #ifdef WITH_WSREP - c_lock, + c_lock, #endif /* WITH_WSREP */ - type_mode, block, heap_no, index, thr, NULL); - - trx->mutex.wr_unlock(); - } else { - err = DB_SUCCESS; - } - - lock_sys.mutex_unlock(); - - switch (err) { - case DB_SUCCESS_LOCKED_REC: - err = DB_SUCCESS; - /* fall through */ - case DB_SUCCESS: - if (!inherit_in || dict_index_is_clust(index)) { - break; - } + type_mode, block, heap_no, index, thr, nullptr); + trx->mutex.wr_unlock(); + } + } + } - /* Update the page max trx id field */ - page_update_max_trx_id( - block, buf_block_get_page_zip(block), trx->id, mtr); - default: - /* We only care about the two return values. */ - break; - } + switch (err) { + case DB_SUCCESS_LOCKED_REC: + err = DB_SUCCESS; + /* fall through */ + case DB_SUCCESS: + if (!inherit_in || index->is_clust()) + break; + /* Update the page max trx id field */ + page_update_max_trx_id(block, buf_block_get_page_zip(block), trx->id, mtr); + default: + /* We only care about the two return values. 
*/ + break; + } #ifdef UNIV_DEBUG - { - mem_heap_t* heap = NULL; - rec_offs offsets_[REC_OFFS_NORMAL_SIZE]; - const rec_offs* offsets; - rec_offs_init(offsets_); + { + mem_heap_t *heap= nullptr; + rec_offs offsets_[REC_OFFS_NORMAL_SIZE]; + const rec_offs *offsets; + rec_offs_init(offsets_); - offsets = rec_get_offsets(next_rec, index, offsets_, true, - ULINT_UNDEFINED, &heap); + offsets= rec_get_offsets(next_rec, index, offsets_, true, + ULINT_UNDEFINED, &heap); - ut_ad(lock_rec_queue_validate( - FALSE, block, next_rec, index, offsets)); + ut_ad(lock_rec_queue_validate(FALSE, block, next_rec, index, offsets)); - if (heap != NULL) { - mem_heap_free(heap); - } - } + if (UNIV_LIKELY_NULL(heap)) + mem_heap_free(heap); + } #endif /* UNIV_DEBUG */ - return(err); + return err; } /*********************************************************************//** @@ -4902,27 +4783,26 @@ lock_rec_convert_impl_to_expl_for_trx( trx_t* trx, /*!< in/out: active transaction */ ulint heap_no)/*!< in: rec heap number to lock */ { - ut_ad(trx->is_referenced()); - ut_ad(page_rec_is_leaf(rec)); - ut_ad(!rec_is_metadata(rec, *index)); + ut_ad(trx->is_referenced()); + ut_ad(page_rec_is_leaf(rec)); + ut_ad(!rec_is_metadata(rec, *index)); - DEBUG_SYNC_C("before_lock_rec_convert_impl_to_expl_for_trx"); - lock_sys.mutex_lock(); - trx->mutex.wr_lock(); - ut_ad(!trx_state_eq(trx, TRX_STATE_NOT_STARTED)); + DEBUG_SYNC_C("before_lock_rec_convert_impl_to_expl_for_trx"); + { + LockMutexGuard g; + trx->mutex.wr_lock(); + ut_ad(!trx_state_eq(trx, TRX_STATE_NOT_STARTED)); - if (!trx_state_eq(trx, TRX_STATE_COMMITTED_IN_MEMORY) - && !lock_rec_has_expl(LOCK_X | LOCK_REC_NOT_GAP, - block, heap_no, trx)) { - lock_rec_add_to_queue(LOCK_REC | LOCK_X | LOCK_REC_NOT_GAP, - block, heap_no, index, trx, true); - } + if (!trx_state_eq(trx, TRX_STATE_COMMITTED_IN_MEMORY) && + !lock_rec_has_expl(LOCK_X | LOCK_REC_NOT_GAP, block, heap_no, trx)) + lock_rec_add_to_queue(LOCK_REC | LOCK_X | LOCK_REC_NOT_GAP, + block, heap_no, 
index, trx, true); + } - lock_sys.mutex_unlock(); - trx->mutex.wr_unlock(); - trx->release_reference(); + trx->mutex.wr_unlock(); + trx->release_reference(); - DEBUG_SYNC_C("after_lock_rec_convert_impl_to_expl_for_trx"); + DEBUG_SYNC_C("after_lock_rec_convert_impl_to_expl_for_trx"); } @@ -4980,21 +4860,17 @@ static void lock_rec_other_trx_holds_expl(trx_t *caller_trx, trx_t *trx, if (trx) { ut_ad(!page_rec_is_metadata(rec)); - lock_sys.mutex_lock(); + LockMutexGuard g; ut_ad(trx->is_referenced()); const trx_state_t state{trx->state}; ut_ad(state != TRX_STATE_NOT_STARTED); if (state == TRX_STATE_COMMITTED_IN_MEMORY) - { - /* The transaction was committed before our lock_sys.mutex_lock(). */ - lock_sys.mutex_unlock(); + /* The transaction was committed before our LockMutexGuard. */ return; - } lock_rec_other_trx_holds_expl_arg arg= { page_rec_get_heap_no(rec), block, trx }; trx_sys.rw_trx_hash.iterate(caller_trx, lock_rec_other_trx_holds_expl_callback, &arg); - lock_sys.mutex_unlock(); } } #endif /* UNIV_DEBUG */ @@ -5642,11 +5518,8 @@ lock_unlock_table_autoinc( necessary to hold trx->mutex here. */ if (lock_trx_holds_autoinc_locks(trx)) { - lock_sys.mutex_lock(); - + LockMutexGuard g; lock_release_autoinc_locks(trx); - - lock_sys.mutex_unlock(); } } @@ -5682,33 +5555,13 @@ lock_trx_handle_wait( return lock_trx_handle_wait_low(trx); } #endif /* WITH_WSREP */ - lock_sys.mutex_lock(); + LockMutexGuard g; trx->mutex.wr_lock(); dberr_t err = lock_trx_handle_wait_low(trx); - lock_sys.mutex_unlock(); trx->mutex.wr_unlock(); return err; } -/*********************************************************************//** -Get the number of locks on a table. 
-@return number of locks */ -ulint -lock_table_get_n_locks( -/*===================*/ - const dict_table_t* table) /*!< in: table */ -{ - ulint n_table_locks; - - lock_sys.mutex_lock(); - - n_table_locks = UT_LIST_GET_LEN(table->locks); - - lock_sys.mutex_unlock(); - - return(n_table_locks); -} - #ifdef UNIV_DEBUG /** Do an exhaustive check for any locks (table or rec) against the table. @@ -5760,22 +5613,13 @@ lock_table_has_locks( held on records in this table or on the table itself */ { - ibool has_locks; - - ut_ad(table != NULL); - lock_sys.mutex_lock(); - - has_locks = UT_LIST_GET_LEN(table->locks) > 0 || table->n_rec_locks > 0; - + LockMutexGuard g; + bool has_locks= UT_LIST_GET_LEN(table->locks) > 0 || table->n_rec_locks > 0; #ifdef UNIV_DEBUG - if (!has_locks) { - trx_sys.rw_trx_hash.iterate(lock_table_locks_lookup, table); - } + if (!has_locks) + trx_sys.rw_trx_hash.iterate(lock_table_locks_lookup, table); #endif /* UNIV_DEBUG */ - - lock_sys.mutex_unlock(); - - return(has_locks); + return has_locks; } /*******************************************************************//** @@ -5812,7 +5656,7 @@ lock_trx_has_sys_table_locks( const lock_t* strongest_lock = 0; lock_mode strongest = LOCK_NONE; - lock_sys.mutex_lock(); + LockMutexGuard g; const lock_list::const_iterator end = trx->lock.table_locks.end(); lock_list::const_iterator it = trx->lock.table_locks.begin(); @@ -5833,7 +5677,6 @@ lock_trx_has_sys_table_locks( } if (strongest == LOCK_NONE) { - lock_sys.mutex_unlock(); return(NULL); } @@ -5858,8 +5701,6 @@ lock_trx_has_sys_table_locks( } } - lock_sys.mutex_unlock(); - return(strongest_lock); } @@ -5878,12 +5719,11 @@ lock_trx_has_expl_x_lock( { ut_ad(heap_no > PAGE_HEAP_NO_SUPREMUM); - lock_sys.mutex_lock(); + LockMutexGuard g; ut_ad(lock_table_has(trx, table, LOCK_IX)); ut_ad(lock_table_has(trx, table, LOCK_X) || lock_rec_has_expl(LOCK_X | LOCK_REC_NOT_GAP, block, heap_no, trx)); - lock_sys.mutex_unlock(); return(true); } #endif /* UNIV_DEBUG */ @@ 
-6401,7 +6241,7 @@ lock_update_split_and_merge( ut_ad(page_is_leaf(right_block->frame)); ut_ad(page_align(orig_pred) == left_block->frame); - lock_sys.mutex_lock(); + LockMutexGuard g; left_next_rec = page_rec_get_next_const(orig_pred); ut_ad(!page_rec_is_metadata(left_next_rec)); @@ -6423,6 +6263,4 @@ lock_update_split_and_merge( lock_rec_inherit_to_gap(left_block, right_block, PAGE_HEAP_NO_SUPREMUM, lock_get_min_heap_no(right_block)); - - lock_sys.mutex_unlock(); } diff --git a/storage/innobase/lock/lock0prdt.cc b/storage/innobase/lock/lock0prdt.cc index 1ba05aa01b0..77e70768431 100644 --- a/storage/innobase/lock/lock0prdt.cc +++ b/storage/innobase/lock/lock0prdt.cc @@ -1,7 +1,7 @@ /***************************************************************************** Copyright (c) 2014, 2016, Oracle and/or its affiliates. All Rights Reserved. -Copyright (c) 2018, 2020, MariaDB Corporation. +Copyright (c) 2018, 2021, MariaDB Corporation. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -235,15 +235,14 @@ lock_prdt_has_lock( attached to the new lock */ const trx_t* trx) /*!< in: transaction */ { - lock_t* lock; - - lock_sys.mutex_assert_locked(); + const page_id_t id{block->page.id()}; + lock_sys.assert_locked(id); ut_ad((precise_mode & LOCK_MODE_MASK) == LOCK_S || (precise_mode & LOCK_MODE_MASK) == LOCK_X); ut_ad(!(precise_mode & LOCK_INSERT_INTENTION)); - for (lock = lock_rec_get_first( - lock_hash_get(type_mode), block, PRDT_HEAPNO); + for (lock_t* lock = lock_rec_get_first( + lock_hash_get(type_mode), id, PRDT_HEAPNO); lock != NULL; lock = lock_rec_get_next(PRDT_HEAPNO, lock)) { ut_ad(lock->type_mode & (LOCK_PREDICATE | LOCK_PRDT_PAGE)); @@ -295,10 +294,11 @@ lock_prdt_other_has_conflicting( the new lock will be on */ const trx_t* trx) /*!< in: our transaction */ { - lock_sys.mutex_assert_locked(); + const page_id_t id{block->page.id()}; + 
lock_sys.assert_locked(id); for (lock_t* lock = lock_rec_get_first( - lock_hash_get(mode), block, PRDT_HEAPNO); + lock_hash_get(mode), id, PRDT_HEAPNO); lock != NULL; lock = lock_rec_get_next(PRDT_HEAPNO, lock)) { @@ -390,12 +390,11 @@ lock_prdt_find_on_page( lock_prdt_t* prdt, /*!< in: MBR with the lock */ const trx_t* trx) /*!< in: transaction */ { - lock_t* lock; + const page_id_t id{block->page.id()}; - lock_sys.mutex_assert_locked(); + lock_sys.assert_locked(id); - for (lock = lock_sys.get_first(*lock_hash_get(type_mode), - block->page.id()); + for (lock_t*lock = lock_sys.get_first(*lock_hash_get(type_mode), id); lock != NULL; lock = lock_rec_get_next_on_page(lock)) { @@ -437,7 +436,8 @@ lock_prdt_add_to_queue( /*!< in: TRUE if caller owns the transaction mutex */ { - lock_sys.mutex_assert_locked(); + const page_id_t id{block->page.id()}; + lock_sys.assert_locked(id); ut_ad(!dict_index_is_clust(index) && !dict_index_is_online_ddl(index)); ut_ad(type_mode & (LOCK_PREDICATE | LOCK_PRDT_PAGE)); @@ -457,8 +457,7 @@ lock_prdt_add_to_queue( lock_t* lock; - for (lock = lock_sys.get_first(*lock_hash_get(type_mode), - block->page.id()); + for (lock = lock_sys.get_first(*lock_hash_get(type_mode), id); lock != NULL; lock = lock_rec_get_next_on_page(lock)) { @@ -519,95 +518,65 @@ lock_prdt_insert_check_and_lock( lock_prdt_t* prdt) /*!< in: Predicates with Minimum Bound Rectangle */ { - ut_ad(block->frame == page_align(rec)); - - if (flags & BTR_NO_LOCKING_FLAG) { - - return(DB_SUCCESS); - } - - ut_ad(!index->table->is_temporary()); - ut_ad(!dict_index_is_clust(index)); - - trx_t* trx = thr_get_trx(thr); - - lock_sys.mutex_lock(); - - /* Because this code is invoked for a running transaction by - the thread that is serving the transaction, it is not necessary - to hold trx->mutex here. 
*/ - - ut_ad(lock_table_has(trx, index->table, LOCK_IX)); - - lock_t* lock; - - /* Only need to check locks on prdt_hash */ - lock = lock_rec_get_first(&lock_sys.prdt_hash, block, PRDT_HEAPNO); - - if (lock == NULL) { - lock_sys.mutex_unlock(); - - /* Update the page max trx id field */ - page_update_max_trx_id(block, buf_block_get_page_zip(block), - trx->id, mtr); - - return(DB_SUCCESS); - } - - ut_ad(lock->type_mode & LOCK_PREDICATE); - - dberr_t err; - - /* If another transaction has an explicit lock request which locks - the predicate, waiting or granted, on the successor, the insert - has to wait. - - Similar to GAP lock, we do not consider lock from inserts conflicts - with each other */ - - const ulint mode = LOCK_X | LOCK_PREDICATE | LOCK_INSERT_INTENTION; - - const lock_t* wait_for = lock_prdt_other_has_conflicting( - mode, block, prdt, trx); - - if (wait_for != NULL) { - rtr_mbr_t* mbr = prdt_get_mbr_from_prdt(prdt); - - /* Allocate MBR on the lock heap */ - lock_init_prdt_from_mbr(prdt, mbr, 0, trx->lock.lock_heap); - - /* Note that we may get DB_SUCCESS also here! */ - trx->mutex.wr_lock(); - - err = lock_rec_enqueue_waiting( + ut_ad(block->frame == page_align(rec)); + if (flags & BTR_NO_LOCKING_FLAG) + return DB_SUCCESS; + + ut_ad(!index->table->is_temporary()); + ut_ad(index->is_spatial()); + + trx_t *trx= thr_get_trx(thr); + const page_id_t id{block->page.id()}; + dberr_t err= DB_SUCCESS; + + { + LockGuard g(id); + /* Because this code is invoked for a running transaction by + the thread that is serving the transaction, it is not necessary + to hold trx->mutex here. */ + ut_ad(lock_table_has(trx, index->table, LOCK_IX)); + + /* Only need to check locks on prdt_hash */ + if (ut_d(lock_t *lock=) + lock_rec_get_first(&lock_sys.prdt_hash, id, PRDT_HEAPNO)) + { + ut_ad(lock->type_mode & LOCK_PREDICATE); + + /* If another transaction has an explicit lock request which locks + the predicate, waiting or granted, on the successor, the insert + has to wait. 
+ + Similar to GAP lock, we do not consider lock from inserts conflicts + with each other */ + + const ulint mode= LOCK_X | LOCK_PREDICATE | LOCK_INSERT_INTENTION; + + if (lock_prdt_other_has_conflicting(mode, block, prdt, trx)) + { + rtr_mbr_t *mbr= prdt_get_mbr_from_prdt(prdt); + /* Allocate MBR on the lock heap */ + lock_init_prdt_from_mbr(prdt, mbr, 0, trx->lock.lock_heap); + /* Note that we may get DB_SUCCESS also here! */ + trx->mutex.wr_lock(); + err= lock_rec_enqueue_waiting( #ifdef WITH_WSREP - NULL, /* FIXME: replicate SPATIAL INDEX locks */ + nullptr, /* FIXME: replicate SPATIAL INDEX locks */ #endif - LOCK_X | LOCK_PREDICATE | LOCK_INSERT_INTENTION, - block, PRDT_HEAPNO, index, thr, prdt); - - trx->mutex.wr_unlock(); - } else { - err = DB_SUCCESS; - } - - lock_sys.mutex_unlock(); - - switch (err) { - case DB_SUCCESS_LOCKED_REC: - err = DB_SUCCESS; - /* fall through */ - case DB_SUCCESS: - /* Update the page max trx id field */ - page_update_max_trx_id(block, - buf_block_get_page_zip(block), - trx->id, mtr); - default: - /* We only care about the two return values. 
*/ - break; - } - - return(err); + mode, block, PRDT_HEAPNO, index, thr, prdt); + trx->mutex.wr_unlock(); + } + } + } + + switch (err) { + case DB_SUCCESS_LOCKED_REC: + case DB_SUCCESS: + /* Update the page max trx id field */ + page_update_max_trx_id(block, buf_block_get_page_zip(block), trx->id, mtr); + return DB_SUCCESS; + default: + return err; + } } /**************************************************************//** @@ -622,7 +591,7 @@ lock_prdt_update_parent( lock_prdt_t* right_prdt, /*!< in: MBR on the new page */ const page_id_t page_id) /*!< in: parent page */ { - lock_sys.mutex_lock(); + LockMutexGuard g; /* Get all locks in parent */ for (lock_t *lock = lock_sys.get_first_prdt(page_id); @@ -659,8 +628,6 @@ lock_prdt_update_parent( lock_prdt, FALSE); } } - - lock_sys.mutex_unlock(); } /**************************************************************//** @@ -728,15 +695,13 @@ lock_prdt_update_split( lock_prdt_t* new_prdt, /*!< in: MBR on the new page */ const page_id_t page_id) /*!< in: page number */ { - lock_sys.mutex_lock(); + LockMutexGuard g; lock_prdt_update_split_low(new_block, prdt, new_prdt, page_id, LOCK_PREDICATE); lock_prdt_update_split_low(new_block, NULL, NULL, page_id, LOCK_PRDT_PAGE); - - lock_sys.mutex_unlock(); } /*********************************************************************//** @@ -801,10 +766,12 @@ lock_prdt_lock( index record, and this would not have been possible if another active transaction had modified this secondary index record. 
*/ - lock_sys.mutex_lock(); + const page_id_t page_id{block->page.id()}; + + LockGuard g(page_id); const unsigned prdt_mode = type_mode | mode; - lock_t* lock = lock_sys.get_first(hash, block->page.id()); + lock_t* lock = lock_sys.get_first(hash, page_id); if (lock == NULL) { lock = lock_rec_create( @@ -867,8 +834,6 @@ lock_prdt_lock( } } - lock_sys.mutex_unlock(); - if (status == LOCK_REC_SUCCESS_CREATED && type_mode == LOCK_PREDICATE) { /* Append the predicate in the lock record */ lock_prdt_set_prdt(lock, prdt); @@ -897,7 +862,7 @@ lock_place_prdt_page_lock( index record, and this would not have been possible if another active transaction had modified this secondary index record. */ - lock_sys.mutex_lock(); + LockGuard g(page_id); const lock_t* lock = lock_sys.get_first_prdt_page(page_id); const ulint mode = LOCK_S | LOCK_PRDT_PAGE; @@ -927,8 +892,6 @@ lock_place_prdt_page_lock( #endif /* PRDT_DIAG */ } - lock_sys.mutex_unlock(); - return(DB_SUCCESS); } @@ -938,15 +901,9 @@ lock_place_prdt_page_lock( @return true if there is none */ bool lock_test_prdt_page_lock(const trx_t *trx, const page_id_t page_id) { - lock_t* lock; - - lock_sys.mutex_lock(); - - lock = lock_sys.get_first_prdt_page(page_id); - - lock_sys.mutex_unlock(); - - return(!lock || trx == lock->trx); + LockGuard g(page_id); + lock_t *lock= lock_sys.get_first_prdt_page(page_id); + return !lock || trx == lock->trx; } /*************************************************************//** @@ -960,10 +917,11 @@ lock_prdt_rec_move( const buf_block_t* donator) /*!< in: buffer block containing the donating record */ { - lock_sys.mutex_lock(); + const page_id_t donator_id{donator->page.id()}; + LockMutexGuard g; for (lock_t *lock = lock_rec_get_first(&lock_sys.prdt_hash, - donator, PRDT_HEAPNO); + donator_id, PRDT_HEAPNO); lock != NULL; lock = lock_rec_get_next(PRDT_HEAPNO, lock)) { @@ -977,8 +935,6 @@ lock_prdt_rec_move( type_mode, receiver, lock->index, lock->trx, lock_prdt, FALSE); } - - 
lock_sys.mutex_unlock(); } /** Removes predicate lock objects set on an index page which is discarded. @@ -989,18 +945,13 @@ lock_prdt_page_free_from_discard( const buf_block_t* block, hash_table_t* lock_hash) { - lock_t* lock; - lock_t* next_lock; - - lock_sys.mutex_assert_locked(); - - lock = lock_sys.get_first(*lock_hash, block->page.id()); - - while (lock != NULL) { - next_lock = lock_rec_get_next_on_page(lock); - - lock_rec_discard(lock); - - lock = next_lock; - } + const page_id_t page_id{block->page.id()}; + lock_sys.assert_locked(page_id); + + for (lock_t *lock= lock_sys.get_first(*lock_hash, page_id), *next; + lock; lock= next) + { + next= lock_rec_get_next_on_page(lock); + lock_rec_discard(lock); + } } diff --git a/storage/innobase/lock/lock0wait.cc b/storage/innobase/lock/lock0wait.cc index 5c5caef9df8..74797a78381 100644 --- a/storage/innobase/lock/lock0wait.cc +++ b/storage/innobase/lock/lock0wait.cc @@ -1,7 +1,7 @@ /***************************************************************************** Copyright (c) 1996, 2016, Oracle and/or its affiliates. All Rights Reserved. -Copyright (c) 2014, 2020, MariaDB Corporation. +Copyright (c) 2014, 2021, MariaDB Corporation. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -92,13 +92,12 @@ lock_wait_table_release_slot( trx_t::mutex. To reduce contention on the lock mutex when reserving the slot we avoid acquiring the lock mutex. */ - lock_sys.mutex_lock(); - - slot->thr->slot = NULL; - slot->thr = NULL; - slot->in_use = FALSE; - - lock_sys.mutex_unlock(); + { + LockMutexGuard g; + slot->thr->slot = NULL; + slot->thr = NULL; + slot->in_use = FALSE; + } /* Scan backwards and adjust the last free slot pointer. */ for (slot = lock_sys.last_slot; @@ -284,12 +283,11 @@ lock_wait_suspend_thread( current thread which owns the transaction. Only acquire the mutex if the wait_lock is still active. 
*/ if (const lock_t* wait_lock = trx->lock.wait_lock) { - lock_sys.mutex_lock(); + LockMutexGuard g; wait_lock = trx->lock.wait_lock; if (wait_lock) { lock_type = lock_get_type_low(wait_lock); } - lock_sys.mutex_unlock(); } ulint had_dict_lock = trx->dict_operation_lock_mode; @@ -449,7 +447,7 @@ lock_wait_check_and_cancel( possible that the lock has already been granted: in that case do nothing */ - lock_sys.mutex_lock(); + LockMutexGuard g; if (trx->lock.wait_lock != NULL) { ut_a(trx->lock.que_state == TRX_QUE_LOCK_WAIT); @@ -464,8 +462,6 @@ lock_wait_check_and_cancel( } #endif /* WITH_WSREP */ } - - lock_sys.mutex_unlock(); } } diff --git a/storage/innobase/row/row0ins.cc b/storage/innobase/row/row0ins.cc index 78a1da92736..f4537325374 100644 --- a/storage/innobase/row/row0ins.cc +++ b/storage/innobase/row/row0ins.cc @@ -704,11 +704,12 @@ row_ins_foreign_trx_print( ut_ad(!srv_read_only_mode); - lock_sys.mutex_lock(); - n_rec_locks = trx->lock.n_rec_locks; - n_trx_locks = UT_LIST_GET_LEN(trx->lock.trx_locks); - heap_size = mem_heap_get_size(trx->lock.lock_heap); - lock_sys.mutex_unlock(); + { + LockMutexGuard g; + n_rec_locks = trx->lock.n_rec_locks; + n_trx_locks = UT_LIST_GET_LEN(trx->lock.trx_locks); + heap_size = mem_heap_get_size(trx->lock.lock_heap); + } mysql_mutex_lock(&dict_foreign_err_mutex); rewind(dict_foreign_err_file); diff --git a/storage/innobase/row/row0mysql.cc b/storage/innobase/row/row0mysql.cc index 07394d9d91a..7d3b361a492 100644 --- a/storage/innobase/row/row0mysql.cc +++ b/storage/innobase/row/row0mysql.cc @@ -1,7 +1,7 @@ /***************************************************************************** Copyright (c) 2000, 2018, Oracle and/or its affiliates. All Rights Reserved. -Copyright (c) 2015, 2020, MariaDB Corporation. +Copyright (c) 2015, 2021, MariaDB Corporation. 
This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -2663,9 +2663,10 @@ skip: } if (!srv_fast_shutdown && !trx_sys.any_active_transactions()) { - lock_sys.mutex_lock(); - skip = UT_LIST_GET_LEN(table->locks) != 0; - lock_sys.mutex_unlock(); + { + LockMutexGuard g; + skip = UT_LIST_GET_LEN(table->locks) != 0; + } if (skip) { /* We cannot drop tables that are locked by XA PREPARE transactions. */ diff --git a/storage/innobase/trx/trx0i_s.cc b/storage/innobase/trx/trx0i_s.cc index a24562211b6..9330183407a 100644 --- a/storage/innobase/trx/trx0i_s.cc +++ b/storage/innobase/trx/trx0i_s.cc @@ -1193,7 +1193,7 @@ static void fetch_data_into_cache_low(trx_i_s_cache_t *cache, const trx_t *trx) static void fetch_data_into_cache(trx_i_s_cache_t *cache) { - lock_sys.mutex_assert_locked(); + LockMutexGuard g; trx_i_s_cache_clear(cache); /* Capture the state of transactions */ @@ -1223,10 +1223,7 @@ trx_i_s_possibly_fetch_data_into_cache( } /* We need to read trx_sys and record/table lock queues */ - - lock_sys.mutex_lock(); fetch_data_into_cache(cache); - lock_sys.mutex_unlock(); /* update cache last read time */ cache->last_read = my_interval_timer(); diff --git a/storage/innobase/trx/trx0trx.cc b/storage/innobase/trx/trx0trx.cc index 1d4452041c3..ff6bf345eee 100644 --- a/storage/innobase/trx/trx0trx.cc +++ b/storage/innobase/trx/trx0trx.cc @@ -1265,12 +1265,12 @@ trx_update_mod_tables_timestamp( /* recheck while holding the mutex that blocks table->acquire() */ dict_sys.mutex_lock(); - lock_sys.mutex_lock(); - const bool do_evict = !table->get_ref_count() - && !UT_LIST_GET_LEN(table->locks); - lock_sys.mutex_unlock(); - if (do_evict) { - dict_sys.remove(table, true); + { + LockMutexGuard g; + if (!table->get_ref_count() + && !UT_LIST_GET_LEN(table->locks)) { + dict_sys.remove(table, true); + } } dict_sys.mutex_unlock(); #endif @@ -1888,18 +1888,15 @@ trx_print( ulint 
max_query_len) /*!< in: max query length to print, or 0 to use the default max length */ { - ulint n_rec_locks; - ulint n_trx_locks; - ulint heap_size; - - lock_sys.mutex_lock(); - n_rec_locks = trx->lock.n_rec_locks; - n_trx_locks = UT_LIST_GET_LEN(trx->lock.trx_locks); - heap_size = mem_heap_get_size(trx->lock.lock_heap); - lock_sys.mutex_unlock(); + ulint n_rec_locks, n_trx_locks, heap_size; + { + LockMutexGuard g; + n_rec_locks= trx->lock.n_rec_locks; + n_trx_locks= UT_LIST_GET_LEN(trx->lock.trx_locks); + heap_size= mem_heap_get_size(trx->lock.lock_heap); + } - trx_print_low(f, trx, max_query_len, - n_rec_locks, n_trx_locks, heap_size); + trx_print_low(f, trx, max_query_len, n_rec_locks, n_trx_locks, heap_size); } /** Prepare a transaction. |