summary | refs | log | tree | commit | diff
path: root/storage/innobase/lock/lock0lock.cc
diff options
context:
space:
mode:
authorMarko Mäkelä <marko.makela@mariadb.com>2020-06-18 12:26:28 +0300
committerMarko Mäkelä <marko.makela@mariadb.com>2020-06-18 14:16:01 +0300
commitcfd3d70ccbbfcf3fdec034be446317741dfae824 (patch)
tree205817c9ea9252de9f8988ab76da82dc7d0bd7e2 /storage/innobase/lock/lock0lock.cc
parentbf3c862faa8efed4a662725ec27586cd69e9228e (diff)
downloadmariadb-git-cfd3d70ccbbfcf3fdec034be446317741dfae824.tar.gz
MDEV-22871: Remove pointer indirection for InnoDB hash_table_t
hash_get_n_cells(): Remove. Access n_cells directly. hash_get_nth_cell(): Remove. Access array directly. hash_table_clear(): Replaced with hash_table_t::clear(). hash_table_create(), hash_table_free(): Remove. hash0hash.cc: Remove.
Diffstat (limited to 'storage/innobase/lock/lock0lock.cc')
-rw-r--r--  storage/innobase/lock/lock0lock.cc | 161
1 files changed, 77 insertions, 84 deletions
diff --git a/storage/innobase/lock/lock0lock.cc b/storage/innobase/lock/lock0lock.cc
index f11a2a35750..1ea39da6c4d 100644
--- a/storage/innobase/lock/lock0lock.cc
+++ b/storage/innobase/lock/lock0lock.cc
@@ -463,9 +463,9 @@ void lock_sys_t::create(ulint n_cells)
mutex_create(LATCH_ID_LOCK_SYS_WAIT, &wait_mutex);
- rec_hash = hash_create(n_cells);
- prdt_hash = hash_create(n_cells);
- prdt_page_hash = hash_create(n_cells);
+ rec_hash.create(n_cells);
+ prdt_hash.create(n_cells);
+ prdt_page_hash.create(n_cells);
if (!srv_read_only_mode) {
lock_latest_err_file = os_file_create_tmpfile();
@@ -498,23 +498,23 @@ void lock_sys_t::resize(ulint n_cells)
mutex_enter(&mutex);
- hash_table_t* old_hash = rec_hash;
- rec_hash = hash_create(n_cells);
- HASH_MIGRATE(old_hash, rec_hash, lock_t, hash,
+ hash_table_t old_hash(rec_hash);
+ rec_hash.create(n_cells);
+ HASH_MIGRATE(&old_hash, &rec_hash, lock_t, hash,
lock_rec_lock_fold);
- hash_table_free(old_hash);
+ old_hash.free();
old_hash = prdt_hash;
- prdt_hash = hash_create(n_cells);
- HASH_MIGRATE(old_hash, prdt_hash, lock_t, hash,
+ prdt_hash.create(n_cells);
+ HASH_MIGRATE(&old_hash, &prdt_hash, lock_t, hash,
lock_rec_lock_fold);
- hash_table_free(old_hash);
+ old_hash.free();
old_hash = prdt_page_hash;
- prdt_page_hash = hash_create(n_cells);
- HASH_MIGRATE(old_hash, prdt_page_hash, lock_t, hash,
+ prdt_page_hash.create(n_cells);
+ HASH_MIGRATE(&old_hash, &prdt_page_hash, lock_t, hash,
lock_rec_lock_fold);
- hash_table_free(old_hash);
+ old_hash.free();
/* need to update block->lock_hash_val */
mutex_enter(&buf_pool.mutex);
@@ -543,9 +543,9 @@ void lock_sys_t::close()
lock_latest_err_file = NULL;
}
- hash_table_free(rec_hash);
- hash_table_free(prdt_hash);
- hash_table_free(prdt_page_hash);
+ rec_hash.free();
+ prdt_hash.free();
+ prdt_page_hash.free();
mutex_destroy(&mutex);
mutex_destroy(&wait_mutex);
@@ -871,7 +871,7 @@ lock_rec_expl_exist_on_page(
lock_mutex_enter();
/* Only used in ibuf pages, so rec_hash is good enough */
- lock = lock_rec_get_first_on_page_addr(lock_sys.rec_hash,
+ lock = lock_rec_get_first_on_page_addr(&lock_sys.rec_hash,
space, page_no);
lock_mutex_exit();
@@ -989,7 +989,7 @@ lock_rec_has_expl(
|| (precise_mode & LOCK_MODE_MASK) == LOCK_X);
ut_ad(!(precise_mode & LOCK_INSERT_INTENTION));
- for (lock = lock_rec_get_first(lock_sys.rec_hash, block, heap_no);
+ for (lock = lock_rec_get_first(&lock_sys.rec_hash, block, heap_no);
lock != NULL;
lock = lock_rec_get_next(heap_no, lock)) {
@@ -1042,8 +1042,8 @@ lock_rec_other_has_expl_req(
return(NULL);
}
- for (lock_t* lock = lock_rec_get_first(lock_sys.rec_hash,
- block, heap_no);
+ for (lock_t* lock = lock_rec_get_first(&lock_sys.rec_hash,
+ block, heap_no);
lock != NULL;
lock = lock_rec_get_next(heap_no, lock)) {
@@ -1148,7 +1148,7 @@ lock_rec_other_has_conflicting(
bool is_supremum = (heap_no == PAGE_HEAP_NO_SUPREMUM);
- for (lock = lock_rec_get_first(lock_sys.rec_hash, block, heap_no);
+ for (lock = lock_rec_get_first(&lock_sys.rec_hash, block, heap_no);
lock != NULL;
lock = lock_rec_get_next(heap_no, lock)) {
@@ -1468,7 +1468,7 @@ lock_rec_create_low(
&& innodb_lock_schedule_algorithm
== INNODB_LOCK_SCHEDULE_ALGORITHM_VATS
&& !thd_is_replication_slave_thread(trx->mysql_thd)) {
- HASH_PREPEND(lock_t, hash, lock_sys.rec_hash,
+ HASH_PREPEND(lock_t, hash, &lock_sys.rec_hash,
lock_rec_fold(space, page_no), lock);
} else {
HASH_INSERT(lock_t, hash, lock_hash_get(type_mode),
@@ -1538,8 +1538,7 @@ lock_rec_insert_by_trx_age(
page_no = in_lock->un_member.rec_lock.page_no;
rec_fold = lock_rec_fold(space, page_no);
hash = lock_hash_get(in_lock->type_mode);
- cell = hash_get_nth_cell(hash,
- hash_calc_hash(rec_fold, hash));
+ cell = &hash->array[hash->calc_hash(rec_fold)];
node = (lock_t *) cell->node;
// If in_lock is not a wait lock, we insert it to the head of the list.
@@ -1597,8 +1596,7 @@ lock_queue_validate(
page_no = in_lock->un_member.rec_lock.page_no;
rec_fold = lock_rec_fold(space, page_no);
hash = lock_hash_get(in_lock->type_mode);
- cell = hash_get_nth_cell(hash,
- hash_calc_hash(rec_fold, hash));
+ cell = &hash->array[hash->calc_hash(rec_fold)];
next = (lock_t *) cell->node;
while (next != NULL) {
// If this is a granted lock, check that there's no wait lock before it.
@@ -1628,8 +1626,7 @@ lock_rec_insert_to_head(
}
hash = lock_hash_get(in_lock->type_mode);
- cell = hash_get_nth_cell(hash,
- hash_calc_hash(rec_fold, hash));
+ cell = &hash->array[hash->calc_hash(rec_fold)];
node = (lock_t *) cell->node;
if (node != in_lock) {
cell->node = in_lock;
@@ -1745,7 +1742,7 @@ lock_rec_enqueue_waiting(
== INNODB_LOCK_SCHEDULE_ALGORITHM_VATS
&& !prdt
&& !thd_is_replication_slave_thread(lock->trx->mysql_thd)) {
- HASH_DELETE(lock_t, hash, lock_sys.rec_hash,
+ HASH_DELETE(lock_t, hash, &lock_sys.rec_hash,
lock_rec_lock_fold(lock), lock);
dberr_t res = lock_rec_insert_by_trx_age(lock);
if (res != DB_SUCCESS) {
@@ -1928,7 +1925,7 @@ lock_rec_lock(
if (lock_table_has(trx, index->table,
static_cast<lock_mode>(LOCK_MODE_MASK & mode)));
- else if (lock_t *lock= lock_rec_get_first_on_page(lock_sys.rec_hash, block))
+ else if (lock_t *lock= lock_rec_get_first_on_page(&lock_sys.rec_hash, block))
{
trx_mutex_enter(trx);
if (lock_rec_get_next_on_page(lock) ||
@@ -2151,9 +2148,8 @@ lock_grant_and_move_on_page(ulint rec_fold, ulint space, ulint page_no)
{
lock_t* lock;
lock_t* previous = static_cast<lock_t*>(
- hash_get_nth_cell(lock_sys.rec_hash,
- hash_calc_hash(rec_fold, lock_sys.rec_hash))
- ->node);
+ lock_sys.rec_hash.array[lock_sys.rec_hash.calc_hash(rec_fold)].
+ node);
if (previous == NULL) {
return;
}
@@ -2229,7 +2225,7 @@ static void lock_rec_dequeue_from_page(lock_t* in_lock)
if (innodb_lock_schedule_algorithm
== INNODB_LOCK_SCHEDULE_ALGORITHM_FCFS
- || lock_hash != lock_sys.rec_hash
+ || lock_hash != &lock_sys.rec_hash
|| thd_is_replication_slave_thread(in_lock->trx->mysql_thd)) {
/* Check if waiting locks in the queue can now be granted:
grant locks if there are no conflicting locks ahead. Stop at
@@ -2331,11 +2327,11 @@ lock_rec_free_all_from_discard_page(
page_no = block->page.id().page_no();
lock_rec_free_all_from_discard_page_low(
- space, page_no, lock_sys.rec_hash);
+ space, page_no, &lock_sys.rec_hash);
lock_rec_free_all_from_discard_page_low(
- space, page_no, lock_sys.prdt_hash);
+ space, page_no, &lock_sys.prdt_hash);
lock_rec_free_all_from_discard_page_low(
- space, page_no, lock_sys.prdt_page_hash);
+ space, page_no, &lock_sys.prdt_page_hash);
}
/*============= RECORD LOCK MOVING AND INHERITING ===================*/
@@ -2380,12 +2376,12 @@ lock_rec_reset_and_release_wait(
ulint heap_no)/*!< in: heap number of record */
{
lock_rec_reset_and_release_wait_low(
- lock_sys.rec_hash, block, heap_no);
+ &lock_sys.rec_hash, block, heap_no);
lock_rec_reset_and_release_wait_low(
- lock_sys.prdt_hash, block, PAGE_HEAP_NO_INFIMUM);
+ &lock_sys.prdt_hash, block, PAGE_HEAP_NO_INFIMUM);
lock_rec_reset_and_release_wait_low(
- lock_sys.prdt_page_hash, block, PAGE_HEAP_NO_INFIMUM);
+ &lock_sys.prdt_page_hash, block, PAGE_HEAP_NO_INFIMUM);
}
/*************************************************************//**
@@ -2418,7 +2414,7 @@ lock_rec_inherit_to_gap(
DO want S-locks/X-locks(taken for replace) set by a consistency
constraint to be inherited also then. */
- for (lock = lock_rec_get_first(lock_sys.rec_hash, block, heap_no);
+ for (lock = lock_rec_get_first(&lock_sys.rec_hash, block, heap_no);
lock != NULL;
lock = lock_rec_get_next(heap_no, lock)) {
@@ -2454,7 +2450,7 @@ lock_rec_inherit_to_gap_if_gap_lock(
lock_mutex_enter();
- for (lock = lock_rec_get_first(lock_sys.rec_hash, block, heap_no);
+ for (lock = lock_rec_get_first(&lock_sys.rec_hash, block, heap_no);
lock != NULL;
lock = lock_rec_get_next(heap_no, lock)) {
@@ -2498,8 +2494,8 @@ lock_rec_move_low(
/* If the lock is predicate lock, it resides on INFIMUM record */
ut_ad(lock_rec_get_first(
lock_hash, receiver, receiver_heap_no) == NULL
- || lock_hash == lock_sys.prdt_hash
- || lock_hash == lock_sys.prdt_page_hash);
+ || lock_hash == &lock_sys.prdt_hash
+ || lock_hash == &lock_sys.prdt_page_hash);
for (lock = lock_rec_get_first(lock_hash,
donator, donator_heap_no);
@@ -2522,8 +2518,8 @@ lock_rec_move_low(
lock->index, lock->trx, FALSE);
}
- ut_ad(lock_rec_get_first(lock_sys.rec_hash,
- donator, donator_heap_no) == NULL);
+ ut_ad(!lock_rec_get_first(&lock_sys.rec_hash,
+ donator, donator_heap_no));
}
/** Move all the granted locks to the front of the given lock list.
@@ -2577,7 +2573,7 @@ lock_rec_move(
ulint donator_heap_no)/*!< in: heap_no of the record
which gives the locks */
{
- lock_rec_move_low(lock_sys.rec_hash, receiver, donator,
+ lock_rec_move_low(&lock_sys.rec_hash, receiver, donator,
receiver_heap_no, donator_heap_no);
}
@@ -2602,7 +2598,7 @@ lock_move_reorganize_page(
lock_mutex_enter();
/* FIXME: This needs to deal with predicate lock too */
- lock = lock_rec_get_first_on_page(lock_sys.rec_hash, block);
+ lock = lock_rec_get_first_on_page(&lock_sys.rec_hash, block);
if (lock == NULL) {
lock_mutex_exit();
@@ -2735,7 +2731,8 @@ lock_move_rec_list_end(
table to the end of the hash chain, and lock_rec_add_to_queue
does not reuse locks if there are waiters in the queue. */
- for (lock = lock_rec_get_first_on_page(lock_sys.rec_hash, block); lock;
+ for (lock = lock_rec_get_first_on_page(&lock_sys.rec_hash, block);
+ lock;
lock = lock_rec_get_next_on_page(lock)) {
const rec_t* rec1 = rec;
const rec_t* rec2;
@@ -2850,7 +2847,8 @@ lock_move_rec_list_start(
lock_mutex_enter();
- for (lock = lock_rec_get_first_on_page(lock_sys.rec_hash, block); lock;
+ for (lock = lock_rec_get_first_on_page(&lock_sys.rec_hash, block);
+ lock;
lock = lock_rec_get_next_on_page(lock)) {
const rec_t* rec1;
const rec_t* rec2;
@@ -2962,7 +2960,8 @@ lock_rtr_move_rec_list(
lock_mutex_enter();
- for (lock = lock_rec_get_first_on_page(lock_sys.rec_hash, block); lock;
+ for (lock = lock_rec_get_first_on_page(&lock_sys.rec_hash, block);
+ lock;
lock = lock_rec_get_next_on_page(lock)) {
ulint moved = 0;
const rec_t* rec1;
@@ -3074,12 +3073,12 @@ lock_update_merge_right(
waiting transactions */
lock_rec_reset_and_release_wait_low(
- lock_sys.rec_hash, left_block, PAGE_HEAP_NO_SUPREMUM);
+ &lock_sys.rec_hash, left_block, PAGE_HEAP_NO_SUPREMUM);
/* there should exist no page lock on the left page,
otherwise, it will be blocked from merge */
ut_ad(!lock_rec_get_first_on_page_addr(
- lock_sys.prdt_page_hash,
+ &lock_sys.prdt_page_hash,
left_block->page.id().space(),
left_block->page.id().page_no()));
@@ -3189,7 +3188,7 @@ lock_update_merge_left(
releasing waiting transactions */
lock_rec_reset_and_release_wait_low(
- lock_sys.rec_hash, left_block, PAGE_HEAP_NO_SUPREMUM);
+ &lock_sys.rec_hash, left_block, PAGE_HEAP_NO_SUPREMUM);
}
/* Move the locks from the supremum of right page to the supremum
@@ -3201,7 +3200,7 @@ lock_update_merge_left(
/* there should exist no page lock on the right page,
otherwise, it will be blocked from merge */
ut_ad(!lock_rec_get_first_on_page_addr(
- lock_sys.prdt_page_hash,
+ &lock_sys.prdt_page_hash,
right_block->page.id().space(),
right_block->page.id().page_no()));
@@ -3254,9 +3253,9 @@ lock_update_discard(
lock_mutex_enter();
- if (lock_rec_get_first_on_page(lock_sys.rec_hash, block)) {
- ut_ad(!lock_rec_get_first_on_page(lock_sys.prdt_hash, block));
- ut_ad(!lock_rec_get_first_on_page(lock_sys.prdt_page_hash,
+ if (lock_rec_get_first_on_page(&lock_sys.rec_hash, block)) {
+ ut_ad(!lock_rec_get_first_on_page(&lock_sys.prdt_hash, block));
+ ut_ad(!lock_rec_get_first_on_page(&lock_sys.prdt_page_hash,
block));
/* Inherit all the locks on the page to the record and
reset all the locks on the page */
@@ -3293,14 +3292,14 @@ lock_update_discard(
lock_rec_free_all_from_discard_page_low(
block->page.id().space(), block->page.id().page_no(),
- lock_sys.rec_hash);
+ &lock_sys.rec_hash);
} else {
lock_rec_free_all_from_discard_page_low(
block->page.id().space(), block->page.id().page_no(),
- lock_sys.prdt_hash);
+ &lock_sys.prdt_hash);
lock_rec_free_all_from_discard_page_low(
block->page.id().space(), block->page.id().page_no(),
- lock_sys.prdt_page_hash);
+ &lock_sys.prdt_page_hash);
}
lock_mutex_exit();
@@ -4068,12 +4067,10 @@ run_again:
static
void
lock_grant_and_move_on_rec(
- hash_table_t* lock_hash,
lock_t* first_lock,
ulint heap_no)
{
lock_t* lock;
- lock_t* previous;
ulint space;
ulint page_no;
ulint rec_fold;
@@ -4082,8 +4079,9 @@ lock_grant_and_move_on_rec(
page_no = first_lock->un_member.rec_lock.page_no;
rec_fold = lock_rec_fold(space, page_no);
- previous = (lock_t *) hash_get_nth_cell(lock_hash,
- hash_calc_hash(rec_fold, lock_hash))->node;
+ lock_t* previous = static_cast<lock_t*>(
+ lock_sys.rec_hash.array[lock_sys.rec_hash.calc_hash(rec_fold)]
+ .node);
if (previous == NULL) {
return;
}
@@ -4155,7 +4153,7 @@ lock_rec_unlock(
lock_mutex_enter();
trx_mutex_enter(trx);
- first_lock = lock_rec_get_first(lock_sys.rec_hash, block, heap_no);
+ first_lock = lock_rec_get_first(&lock_sys.rec_hash, block, heap_no);
/* Find the last lock with the same lock_mode and transaction
on the record. */
@@ -4204,7 +4202,7 @@ released:
}
}
} else {
- lock_grant_and_move_on_rec(lock_sys.rec_hash, first_lock, heap_no);
+ lock_grant_and_move_on_rec(first_lock, heap_no);
}
lock_mutex_exit();
@@ -4501,21 +4499,18 @@ http://bugs.mysql.com/36942 */
/*********************************************************************//**
Calculates the number of record lock structs in the record lock hash table.
@return number of record locks */
-static
-ulint
-lock_get_n_rec_locks(void)
-/*======================*/
+static ulint lock_get_n_rec_locks()
{
ulint n_locks = 0;
ulint i;
ut_ad(lock_mutex_own());
- for (i = 0; i < hash_get_n_cells(lock_sys.rec_hash); i++) {
+ for (i = 0; i < lock_sys.rec_hash.n_cells; i++) {
const lock_t* lock;
for (lock = static_cast<const lock_t*>(
- HASH_GET_FIRST(lock_sys.rec_hash, i));
+ HASH_GET_FIRST(&lock_sys.rec_hash, i));
lock != 0;
lock = static_cast<const lock_t*>(
HASH_GET_NEXT(hash, lock))) {
@@ -4811,7 +4806,7 @@ lock_rec_queue_validate(
if (!page_rec_is_user_rec(rec)) {
- for (lock = lock_rec_get_first(lock_sys.rec_hash,
+ for (lock = lock_rec_get_first(&lock_sys.rec_hash,
block, heap_no);
lock != NULL;
lock = lock_rec_get_next_const(heap_no, lock)) {
@@ -4896,7 +4891,7 @@ func_exit:
mutex_exit(&impl_trx->mutex);
}
- for (lock = lock_rec_get_first(lock_sys.rec_hash, block, heap_no);
+ for (lock = lock_rec_get_first(&lock_sys.rec_hash, block, heap_no);
lock != NULL;
lock = lock_rec_get_next_const(heap_no, lock)) {
@@ -4965,7 +4960,7 @@ lock_rec_validate_page(
lock_mutex_enter();
loop:
lock = lock_rec_get_first_on_page_addr(
- lock_sys.rec_hash,
+ &lock_sys.rec_hash,
block->page.id().space(), block->page.id().page_no());
if (!lock) {
@@ -5044,7 +5039,7 @@ lock_rec_validate(
ut_ad(lock_mutex_own());
for (const lock_t* lock = static_cast<const lock_t*>(
- HASH_GET_FIRST(lock_sys.rec_hash, start));
+ HASH_GET_FIRST(&lock_sys.rec_hash, start));
lock != NULL;
lock = static_cast<const lock_t*>(HASH_GET_NEXT(hash, lock))) {
@@ -5164,7 +5159,7 @@ lock_validate()
don't want to hog the lock_sys_t::mutex. Release it during the
validation check. */
- for (ulint i = 0; i < hash_get_n_cells(lock_sys.rec_hash); i++) {
+ for (ulint i = 0; i < lock_sys.rec_hash.n_cells; i++) {
ib_uint64_t limit = 0;
while (const lock_t* lock = lock_rec_validate(i, &limit)) {
@@ -5245,7 +5240,7 @@ lock_rec_insert_check_and_lock(
BTR_NO_LOCKING_FLAG and skip the locking altogether. */
ut_ad(lock_table_has(trx, index->table, LOCK_IX));
- lock = lock_rec_get_first(lock_sys.rec_hash, block, heap_no);
+ lock = lock_rec_get_first(&lock_sys.rec_hash, block, heap_no);
if (lock == NULL) {
/* We optimize CPU time usage in the simplest case */
@@ -6479,11 +6474,9 @@ DeadlockChecker::get_first_lock(ulint* heap_no) const
const lock_t* lock = m_wait_lock;
if (lock_get_type_low(lock) == LOCK_REC) {
- hash_table_t* lock_hash;
-
- lock_hash = lock->type_mode & LOCK_PREDICATE
- ? lock_sys.prdt_hash
- : lock_sys.rec_hash;
+ hash_table_t* lock_hash = lock->type_mode & LOCK_PREDICATE
+ ? &lock_sys.prdt_hash
+ : &lock_sys.rec_hash;
/* We are only interested in records that match the heap_no. */
*heap_no = lock_rec_find_set_bit(lock);