Diffstat (limited to 'innobase/lock/lock0lock.c')
-rw-r--r--  innobase/lock/lock0lock.c | 96
1 file changed, 94 insertions(+), 2 deletions(-)
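The patch applies two patterns throughout the lock subsystem: every ut_ad(mutex_own(&kernel_mutex)) ownership assertion is wrapped in UNIV_SYNC_DEBUG guards, and two ut_a(0) calls on unreachable branches become ut_error. A minimal sketch of both patterns follows; it assumes the InnoDB debug macros (ut_ad, ut_a, ut_error) from ut0dbg.h, and that mutex_own() is, presumably, only compiled when UNIV_SYNC_DEBUG is defined, which would be why the assertion itself must be conditional:

/* Pattern 1: ownership assertion compiled only in sync-debug
builds, so non-debug builds never reference mutex_own(). */
#ifdef UNIV_SYNC_DEBUG
	ut_ad(mutex_own(&kernel_mutex));
#endif /* UNIV_SYNC_DEBUG */

/* Pattern 2: unconditional failure for a branch that must never
run; ut_error aborts just like ut_a(0) but states the intent. */
	if (que_thr_stop(thr)) {
		ut_error;

		return(DB_QUE_THR_SUSPENDED);
	}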
diff --git a/innobase/lock/lock0lock.c b/innobase/lock/lock0lock.c
index 4e6abf32c9e..2430380d65c 100644
--- a/innobase/lock/lock0lock.c
+++ b/innobase/lock/lock0lock.c
@@ -953,7 +953,9 @@ lock_rec_get_next_on_page(
ulint space;
ulint page_no;
+#ifdef UNIV_SYNC_DEBUG
ut_ad(mutex_own(&kernel_mutex));
+#endif /* UNIV_SYNC_DEBUG */
ut_ad(lock_get_type(lock) == LOCK_REC);
space = lock->un_member.rec_lock.space;
@@ -990,7 +992,9 @@ lock_rec_get_first_on_page_addr(
{
lock_t* lock;
+#ifdef UNIV_SYNC_DEBUG
ut_ad(mutex_own(&kernel_mutex));
+#endif /* UNIV_SYNC_DEBUG */
lock = HASH_GET_FIRST(lock_sys->rec_hash,
lock_rec_hash(space, page_no));
@@ -1048,7 +1052,9 @@ lock_rec_get_first_on_page(
ulint space;
ulint page_no;
+#ifdef UNIV_SYNC_DEBUG
ut_ad(mutex_own(&kernel_mutex));
+#endif /* UNIV_SYNC_DEBUG */
hash = buf_frame_get_lock_hash_val(ptr);
@@ -1080,7 +1086,9 @@ lock_rec_get_next(
rec_t* rec, /* in: record on a page */
lock_t* lock) /* in: lock */
{
+#ifdef UNIV_SYNC_DEBUG
ut_ad(mutex_own(&kernel_mutex));
+#endif /* UNIV_SYNC_DEBUG */
ut_ad(lock_get_type(lock) == LOCK_REC);
for (;;) {
@@ -1109,7 +1117,9 @@ lock_rec_get_first(
{
lock_t* lock;
+#ifdef UNIV_SYNC_DEBUG
ut_ad(mutex_own(&kernel_mutex));
+#endif /* UNIV_SYNC_DEBUG */
lock = lock_rec_get_first_on_page(rec);
@@ -1197,7 +1207,9 @@ lock_rec_get_prev(
ulint page_no;
lock_t* found_lock = NULL;
+#ifdef UNIV_SYNC_DEBUG
ut_ad(mutex_own(&kernel_mutex));
+#endif /* UNIV_SYNC_DEBUG */
ut_ad(lock_get_type(in_lock) == LOCK_REC);
space = in_lock->un_member.rec_lock.space;
@@ -1237,7 +1249,9 @@ lock_table_has(
{
lock_t* lock;
+#ifdef UNIV_SYNC_DEBUG
ut_ad(mutex_own(&kernel_mutex));
+#endif /* UNIV_SYNC_DEBUG */
/* Look for stronger locks the same trx already has on the table */
@@ -1281,7 +1295,9 @@ lock_rec_has_expl(
{
lock_t* lock;
+#ifdef UNIV_SYNC_DEBUG
ut_ad(mutex_own(&kernel_mutex));
+#endif /* UNIV_SYNC_DEBUG */
ut_ad((precise_mode & LOCK_MODE_MASK) == LOCK_S
|| (precise_mode & LOCK_MODE_MASK) == LOCK_X);
ut_ad(!(precise_mode & LOCK_INSERT_INTENTION));
@@ -1328,7 +1344,9 @@ lock_rec_other_has_expl_req(
{
lock_t* lock;
+#ifdef UNIV_SYNC_DEBUG
ut_ad(mutex_own(&kernel_mutex));
+#endif /* UNIV_SYNC_DEBUG */
ut_ad(mode == LOCK_X || mode == LOCK_S);
ut_ad(gap == 0 || gap == LOCK_GAP);
ut_ad(wait == 0 || wait == LOCK_WAIT);
@@ -1367,7 +1385,9 @@ lock_rec_other_has_conflicting(
{
lock_t* lock;
+#ifdef UNIV_SYNC_DEBUG
ut_ad(mutex_own(&kernel_mutex));
+#endif /* UNIV_SYNC_DEBUG */
lock = lock_rec_get_first(rec);
@@ -1399,7 +1419,9 @@ lock_rec_find_similar_on_page(
lock_t* lock;
ulint heap_no;
+#ifdef UNIV_SYNC_DEBUG
ut_ad(mutex_own(&kernel_mutex));
+#endif /* UNIV_SYNC_DEBUG */
heap_no = rec_get_heap_no(rec);
@@ -1433,7 +1455,9 @@ lock_sec_rec_some_has_impl_off_kernel(
{
page_t* page;
+#ifdef UNIV_SYNC_DEBUG
ut_ad(mutex_own(&kernel_mutex));
+#endif /* UNIV_SYNC_DEBUG */
ut_ad(!(index->type & DICT_CLUSTERED));
ut_ad(page_rec_is_user_rec(rec));
@@ -1491,7 +1515,9 @@ lock_rec_create(
ulint n_bits;
ulint n_bytes;
+#ifdef UNIV_SYNC_DEBUG
ut_ad(mutex_own(&kernel_mutex));
+#endif /* UNIV_SYNC_DEBUG */
page = buf_frame_align(rec);
space = buf_frame_get_space_id(page);
@@ -1575,7 +1601,9 @@ lock_rec_enqueue_waiting(
lock_t* lock;
trx_t* trx;
+#ifdef UNIV_SYNC_DEBUG
ut_ad(mutex_own(&kernel_mutex));
+#endif /* UNIV_SYNC_DEBUG */
/* Test if there already is some other reason to suspend thread:
we do not enqueue a lock request if the query thread should be
@@ -1583,7 +1611,7 @@ lock_rec_enqueue_waiting(
if (que_thr_stop(thr)) {
- ut_a(0);
+ ut_error;
return(DB_QUE_THR_SUSPENDED);
}
@@ -1661,7 +1689,9 @@ lock_rec_add_to_queue(
page_t* page;
ibool somebody_waits = FALSE;
+#ifdef UNIV_SYNC_DEBUG
ut_ad(mutex_own(&kernel_mutex));
+#endif /* UNIV_SYNC_DEBUG */
ut_ad((type_mode & (LOCK_WAIT | LOCK_GAP))
|| ((type_mode & LOCK_MODE_MASK) != LOCK_S)
|| !lock_rec_other_has_expl_req(LOCK_X, 0, LOCK_WAIT, rec, trx));
@@ -1741,7 +1771,9 @@ lock_rec_lock_fast(
lock_t* lock;
ulint heap_no;
+#ifdef UNIV_SYNC_DEBUG
ut_ad(mutex_own(&kernel_mutex));
+#endif /* UNIV_SYNC_DEBUG */
ut_ad((LOCK_MODE_MASK & mode) != LOCK_S
|| lock_table_has(thr_get_trx(thr), index->table, LOCK_IS));
ut_ad((LOCK_MODE_MASK & mode) != LOCK_X
@@ -1805,7 +1837,9 @@ lock_rec_lock_slow(
trx_t* trx;
ulint err;
+#ifdef UNIV_SYNC_DEBUG
ut_ad(mutex_own(&kernel_mutex));
+#endif /* UNIV_SYNC_DEBUG */
ut_ad((LOCK_MODE_MASK & mode) != LOCK_S
|| lock_table_has(thr_get_trx(thr), index->table, LOCK_IS));
ut_ad((LOCK_MODE_MASK & mode) != LOCK_X
@@ -1867,7 +1901,9 @@ lock_rec_lock(
{
ulint err;
+#ifdef UNIV_SYNC_DEBUG
ut_ad(mutex_own(&kernel_mutex));
+#endif /* UNIV_SYNC_DEBUG */
ut_ad((LOCK_MODE_MASK & mode) != LOCK_S
|| lock_table_has(thr_get_trx(thr), index->table, LOCK_IS));
ut_ad((LOCK_MODE_MASK & mode) != LOCK_X
@@ -1905,7 +1941,9 @@ lock_rec_has_to_wait_in_queue(
ulint page_no;
ulint heap_no;
+#ifdef UNIV_SYNC_DEBUG
ut_ad(mutex_own(&kernel_mutex));
+#endif /* UNIV_SYNC_DEBUG */
ut_ad(lock_get_wait(wait_lock));
ut_ad(lock_get_type(wait_lock) == LOCK_REC);
@@ -1938,7 +1976,9 @@ lock_grant(
/*=======*/
lock_t* lock) /* in: waiting lock request */
{
+#ifdef UNIV_SYNC_DEBUG
ut_ad(mutex_own(&kernel_mutex));
+#endif /* UNIV_SYNC_DEBUG */
lock_reset_lock_and_trx_wait(lock);
@@ -1980,7 +2020,9 @@ lock_rec_cancel(
/*============*/
lock_t* lock) /* in: waiting record lock request */
{
+#ifdef UNIV_SYNC_DEBUG
ut_ad(mutex_own(&kernel_mutex));
+#endif /* UNIV_SYNC_DEBUG */
ut_ad(lock_get_type(lock) == LOCK_REC);
/* Reset the bit (there can be only one set bit) in the lock bitmap */
@@ -2013,7 +2055,9 @@ lock_rec_dequeue_from_page(
lock_t* lock;
trx_t* trx;
+#ifdef UNIV_SYNC_DEBUG
ut_ad(mutex_own(&kernel_mutex));
+#endif /* UNIV_SYNC_DEBUG */
ut_ad(lock_get_type(in_lock) == LOCK_REC);
trx = in_lock->trx;
@@ -2056,7 +2100,9 @@ lock_rec_discard(
ulint page_no;
trx_t* trx;
+#ifdef UNIV_SYNC_DEBUG
ut_ad(mutex_own(&kernel_mutex));
+#endif /* UNIV_SYNC_DEBUG */
ut_ad(lock_get_type(in_lock) == LOCK_REC);
trx = in_lock->trx;
@@ -2085,7 +2131,9 @@ lock_rec_free_all_from_discard_page(
lock_t* lock;
lock_t* next_lock;
+#ifdef UNIV_SYNC_DEBUG
ut_ad(mutex_own(&kernel_mutex));
+#endif /* UNIV_SYNC_DEBUG */
space = buf_frame_get_space_id(page);
page_no = buf_frame_get_page_no(page);
@@ -2118,7 +2166,9 @@ lock_rec_reset_and_release_wait(
lock_t* lock;
ulint heap_no;
+#ifdef UNIV_SYNC_DEBUG
ut_ad(mutex_own(&kernel_mutex));
+#endif /* UNIV_SYNC_DEBUG */
heap_no = rec_get_heap_no(rec);
@@ -2150,7 +2200,9 @@ lock_rec_inherit_to_gap(
{
lock_t* lock;
+#ifdef UNIV_SYNC_DEBUG
ut_ad(mutex_own(&kernel_mutex));
+#endif /* UNIV_SYNC_DEBUG */
lock = lock_rec_get_first(rec);
@@ -2180,7 +2232,9 @@ lock_rec_inherit_to_gap_if_gap_lock(
{
lock_t* lock;
+#ifdef UNIV_SYNC_DEBUG
ut_ad(mutex_own(&kernel_mutex));
+#endif /* UNIV_SYNC_DEBUG */
lock = lock_rec_get_first(rec);
@@ -2213,7 +2267,9 @@ lock_rec_move(
ulint heap_no;
ulint type_mode;
+#ifdef UNIV_SYNC_DEBUG
ut_ad(mutex_own(&kernel_mutex));
+#endif /* UNIV_SYNC_DEBUG */
heap_no = rec_get_heap_no(donator);
@@ -2849,7 +2905,9 @@ lock_deadlock_occurs(
char* err_buf;
ut_ad(trx && lock);
+#ifdef UNIV_SYNC_DEBUG
ut_ad(mutex_own(&kernel_mutex));
+#endif /* UNIV_SYNC_DEBUG */
retry:
/* We check that adding this trx to the waits-for graph
does not produce a cycle. First mark all active transactions
@@ -2925,7 +2983,9 @@ lock_deadlock_recursive(
ulint ret;
ut_a(trx && start && wait_lock);
+#ifdef UNIV_SYNC_DEBUG
ut_ad(mutex_own(&kernel_mutex));
+#endif /* UNIV_SYNC_DEBUG */
if (trx->deadlock_mark == 1) {
/* We have already exhaustively searched the subtree starting
@@ -3116,7 +3176,9 @@ lock_table_create(
lock_t* lock;
ut_ad(table && trx);
+#ifdef UNIV_SYNC_DEBUG
ut_ad(mutex_own(&kernel_mutex));
+#endif /* UNIV_SYNC_DEBUG */
if (type_mode == LOCK_AUTO_INC) {
/* Only one trx can have the lock on the table
@@ -3166,7 +3228,9 @@ lock_table_remove_low(
dict_table_t* table;
trx_t* trx;
+#ifdef UNIV_SYNC_DEBUG
ut_ad(mutex_own(&kernel_mutex));
+#endif /* UNIV_SYNC_DEBUG */
table = lock->un_member.tab_lock.table;
trx = lock->trx;
@@ -3200,14 +3264,16 @@ lock_table_enqueue_waiting(
lock_t* lock;
trx_t* trx;
+#ifdef UNIV_SYNC_DEBUG
ut_ad(mutex_own(&kernel_mutex));
+#endif /* UNIV_SYNC_DEBUG */
/* Test if there already is some other reason to suspend thread:
we do not enqueue a lock request if the query thread should be
stopped anyway */
if (que_thr_stop(thr)) {
- ut_a(0);
+ ut_error;
return(DB_QUE_THR_SUSPENDED);
}
@@ -3270,7 +3336,9 @@ lock_table_other_has_incompatible(
{
lock_t* lock;
+#ifdef UNIV_SYNC_DEBUG
ut_ad(mutex_own(&kernel_mutex));
+#endif /* UNIV_SYNC_DEBUG */
lock = UT_LIST_GET_LAST(table->locks);
@@ -3420,7 +3488,9 @@ lock_table_dequeue(
{
lock_t* lock;
+#ifdef UNIV_SYNC_DEBUG
ut_ad(mutex_own(&kernel_mutex));
+#endif /* UNIV_SYNC_DEBUG */
ut_ad(lock_get_type(in_lock) == LOCK_TABLE);
lock = UT_LIST_GET_NEXT(un_member.tab_lock.locks, in_lock);
@@ -3476,7 +3546,9 @@ lock_release_off_kernel(
ulint count;
lock_t* lock;
+#ifdef UNIV_SYNC_DEBUG
ut_ad(mutex_own(&kernel_mutex));
+#endif /* UNIV_SYNC_DEBUG */
lock = UT_LIST_GET_LAST(trx->trx_locks);
@@ -3537,7 +3609,9 @@ lock_cancel_waiting_and_release(
/*============================*/
lock_t* lock) /* in: waiting lock request */
{
+#ifdef UNIV_SYNC_DEBUG
ut_ad(mutex_own(&kernel_mutex));
+#endif /* UNIV_SYNC_DEBUG */
if (lock_get_type(lock) == LOCK_REC) {
@@ -3570,7 +3644,9 @@ lock_reset_all_on_table_for_trx(
lock_t* lock;
lock_t* prev_lock;
+#ifdef UNIV_SYNC_DEBUG
ut_ad(mutex_own(&kernel_mutex));
+#endif /* UNIV_SYNC_DEBUG */
lock = UT_LIST_GET_LAST(trx->trx_locks);
@@ -3632,7 +3708,9 @@ lock_table_print(
500 bytes */
lock_t* lock) /* in: table type lock */
{
+#ifdef UNIV_SYNC_DEBUG
ut_ad(mutex_own(&kernel_mutex));
+#endif /* UNIV_SYNC_DEBUG */
ut_a(lock_get_type(lock) == LOCK_TABLE);
buf += sprintf(buf, "TABLE LOCK table %s trx id %lu %lu",
@@ -3679,7 +3757,9 @@ lock_rec_print(
char* buf_start = buf;
mtr_t mtr;
+#ifdef UNIV_SYNC_DEBUG
ut_ad(mutex_own(&kernel_mutex));
+#endif /* UNIV_SYNC_DEBUG */
ut_a(lock_get_type(lock) == LOCK_REC);
space = lock->un_member.rec_lock.space;
@@ -3743,7 +3823,9 @@ lock_rec_print(
}
if (page) {
+#ifdef UNIV_SYNC_DEBUG
buf_page_dbg_add_level(page, SYNC_NO_ORDER_CHECK);
+#endif /* UNIV_SYNC_DEBUG */
}
for (i = 0; i < lock_rec_get_n_bits(lock); i++) {
@@ -3788,7 +3870,9 @@ lock_get_n_rec_locks(void)
ulint n_locks = 0;
ulint i;
+#ifdef UNIV_SYNC_DEBUG
ut_ad(mutex_own(&kernel_mutex));
+#endif /* UNIV_SYNC_DEBUG */
for (i = 0; i < hash_get_n_cells(lock_sys->rec_hash); i++) {
@@ -4060,7 +4144,9 @@ lock_table_queue_validate(
lock_t* lock;
ibool is_waiting;
+#ifdef UNIV_SYNC_DEBUG
ut_ad(mutex_own(&kernel_mutex));
+#endif /* UNIV_SYNC_DEBUG */
is_waiting = FALSE;
@@ -4213,12 +4299,16 @@ lock_rec_validate_page(
ulint i;
mtr_t mtr;
+#ifdef UNIV_SYNC_DEBUG
ut_ad(!mutex_own(&kernel_mutex));
+#endif /* UNIV_SYNC_DEBUG */
mtr_start(&mtr);
page = buf_page_get(space, page_no, RW_X_LATCH, &mtr);
+#ifdef UNIV_SYNC_DEBUG
buf_page_dbg_add_level(page, SYNC_NO_ORDER_CHECK);
+#endif /* UNIV_SYNC_DEBUG */
lock_mutex_enter_kernel();
loop:
@@ -4464,7 +4554,9 @@ lock_rec_convert_impl_to_expl(
{
trx_t* impl_trx;
+#ifdef UNIV_SYNC_DEBUG
ut_ad(mutex_own(&kernel_mutex));
+#endif /* UNIV_SYNC_DEBUG */
ut_ad(page_rec_is_user_rec(rec));
if (index->type & DICT_CLUSTERED) {