diff options
author | marko@hundin.mysql.fi <> | 2004-03-12 17:14:51 +0200 |
---|---|---|
committer | marko@hundin.mysql.fi <> | 2004-03-12 17:14:51 +0200 |
commit | 4573eb566c6201103cda1ef5e079d010cd137802 (patch) | |
tree | 6a59a674bf740a7b4046a20e847ad2578a573d69 /innobase/lock | |
parent | 7362b9a9eef314161c81ad7df3f8e78b892f2f6c (diff) | |
download | mariadb-git-4573eb566c6201103cda1ef5e079d010cd137802.tar.gz |
Allow UNIV_SYNC_DEBUG to be disabled while UNIV_DEBUG is enabled
Diffstat (limited to 'innobase/lock')
-rw-r--r-- | innobase/lock/lock0lock.c | 92 |
1 file changed, 92 insertions, 0 deletions
diff --git a/innobase/lock/lock0lock.c b/innobase/lock/lock0lock.c index dd033b037ad..fab6dda98cb 100644 --- a/innobase/lock/lock0lock.c +++ b/innobase/lock/lock0lock.c @@ -953,7 +953,9 @@ lock_rec_get_next_on_page( ulint space; ulint page_no; +#ifdef UNIV_SYNC_DEBUG ut_ad(mutex_own(&kernel_mutex)); +#endif /* UNIV_SYNC_DEBUG */ ut_ad(lock_get_type(lock) == LOCK_REC); space = lock->un_member.rec_lock.space; @@ -990,7 +992,9 @@ lock_rec_get_first_on_page_addr( { lock_t* lock; +#ifdef UNIV_SYNC_DEBUG ut_ad(mutex_own(&kernel_mutex)); +#endif /* UNIV_SYNC_DEBUG */ lock = HASH_GET_FIRST(lock_sys->rec_hash, lock_rec_hash(space, page_no)); @@ -1048,7 +1052,9 @@ lock_rec_get_first_on_page( ulint space; ulint page_no; +#ifdef UNIV_SYNC_DEBUG ut_ad(mutex_own(&kernel_mutex)); +#endif /* UNIV_SYNC_DEBUG */ hash = buf_frame_get_lock_hash_val(ptr); @@ -1080,7 +1086,9 @@ lock_rec_get_next( rec_t* rec, /* in: record on a page */ lock_t* lock) /* in: lock */ { +#ifdef UNIV_SYNC_DEBUG ut_ad(mutex_own(&kernel_mutex)); +#endif /* UNIV_SYNC_DEBUG */ ut_ad(lock_get_type(lock) == LOCK_REC); for (;;) { @@ -1109,7 +1117,9 @@ lock_rec_get_first( { lock_t* lock; +#ifdef UNIV_SYNC_DEBUG ut_ad(mutex_own(&kernel_mutex)); +#endif /* UNIV_SYNC_DEBUG */ lock = lock_rec_get_first_on_page(rec); @@ -1197,7 +1207,9 @@ lock_rec_get_prev( ulint page_no; lock_t* found_lock = NULL; +#ifdef UNIV_SYNC_DEBUG ut_ad(mutex_own(&kernel_mutex)); +#endif /* UNIV_SYNC_DEBUG */ ut_ad(lock_get_type(in_lock) == LOCK_REC); space = in_lock->un_member.rec_lock.space; @@ -1237,7 +1249,9 @@ lock_table_has( { lock_t* lock; +#ifdef UNIV_SYNC_DEBUG ut_ad(mutex_own(&kernel_mutex)); +#endif /* UNIV_SYNC_DEBUG */ /* Look for stronger locks the same trx already has on the table */ @@ -1281,7 +1295,9 @@ lock_rec_has_expl( { lock_t* lock; +#ifdef UNIV_SYNC_DEBUG ut_ad(mutex_own(&kernel_mutex)); +#endif /* UNIV_SYNC_DEBUG */ ut_ad((precise_mode & LOCK_MODE_MASK) == LOCK_S || (precise_mode & LOCK_MODE_MASK) == LOCK_X); 
ut_ad(!(precise_mode & LOCK_INSERT_INTENTION)); @@ -1328,7 +1344,9 @@ lock_rec_other_has_expl_req( { lock_t* lock; +#ifdef UNIV_SYNC_DEBUG ut_ad(mutex_own(&kernel_mutex)); +#endif /* UNIV_SYNC_DEBUG */ ut_ad(mode == LOCK_X || mode == LOCK_S); ut_ad(gap == 0 || gap == LOCK_GAP); ut_ad(wait == 0 || wait == LOCK_WAIT); @@ -1367,7 +1385,9 @@ lock_rec_other_has_conflicting( { lock_t* lock; +#ifdef UNIV_SYNC_DEBUG ut_ad(mutex_own(&kernel_mutex)); +#endif /* UNIV_SYNC_DEBUG */ lock = lock_rec_get_first(rec); @@ -1399,7 +1419,9 @@ lock_rec_find_similar_on_page( lock_t* lock; ulint heap_no; +#ifdef UNIV_SYNC_DEBUG ut_ad(mutex_own(&kernel_mutex)); +#endif /* UNIV_SYNC_DEBUG */ heap_no = rec_get_heap_no(rec); @@ -1433,7 +1455,9 @@ lock_sec_rec_some_has_impl_off_kernel( { page_t* page; +#ifdef UNIV_SYNC_DEBUG ut_ad(mutex_own(&kernel_mutex)); +#endif /* UNIV_SYNC_DEBUG */ ut_ad(!(index->type & DICT_CLUSTERED)); ut_ad(page_rec_is_user_rec(rec)); @@ -1491,7 +1515,9 @@ lock_rec_create( ulint n_bits; ulint n_bytes; +#ifdef UNIV_SYNC_DEBUG ut_ad(mutex_own(&kernel_mutex)); +#endif /* UNIV_SYNC_DEBUG */ page = buf_frame_align(rec); space = buf_frame_get_space_id(page); @@ -1575,7 +1601,9 @@ lock_rec_enqueue_waiting( lock_t* lock; trx_t* trx; +#ifdef UNIV_SYNC_DEBUG ut_ad(mutex_own(&kernel_mutex)); +#endif /* UNIV_SYNC_DEBUG */ /* Test if there already is some other reason to suspend thread: we do not enqueue a lock request if the query thread should be @@ -1660,7 +1688,9 @@ lock_rec_add_to_queue( page_t* page; ibool somebody_waits = FALSE; +#ifdef UNIV_SYNC_DEBUG ut_ad(mutex_own(&kernel_mutex)); +#endif /* UNIV_SYNC_DEBUG */ ut_ad((type_mode & (LOCK_WAIT | LOCK_GAP)) || ((type_mode & LOCK_MODE_MASK) != LOCK_S) || !lock_rec_other_has_expl_req(LOCK_X, 0, LOCK_WAIT, rec, trx)); @@ -1740,7 +1770,9 @@ lock_rec_lock_fast( lock_t* lock; ulint heap_no; +#ifdef UNIV_SYNC_DEBUG ut_ad(mutex_own(&kernel_mutex)); +#endif /* UNIV_SYNC_DEBUG */ ut_ad((LOCK_MODE_MASK & mode) != LOCK_S || 
lock_table_has(thr_get_trx(thr), index->table, LOCK_IS)); ut_ad((LOCK_MODE_MASK & mode) != LOCK_X @@ -1804,7 +1836,9 @@ lock_rec_lock_slow( trx_t* trx; ulint err; +#ifdef UNIV_SYNC_DEBUG ut_ad(mutex_own(&kernel_mutex)); +#endif /* UNIV_SYNC_DEBUG */ ut_ad((LOCK_MODE_MASK & mode) != LOCK_S || lock_table_has(thr_get_trx(thr), index->table, LOCK_IS)); ut_ad((LOCK_MODE_MASK & mode) != LOCK_X @@ -1866,7 +1900,9 @@ lock_rec_lock( { ulint err; +#ifdef UNIV_SYNC_DEBUG ut_ad(mutex_own(&kernel_mutex)); +#endif /* UNIV_SYNC_DEBUG */ ut_ad((LOCK_MODE_MASK & mode) != LOCK_S || lock_table_has(thr_get_trx(thr), index->table, LOCK_IS)); ut_ad((LOCK_MODE_MASK & mode) != LOCK_X @@ -1904,7 +1940,9 @@ lock_rec_has_to_wait_in_queue( ulint page_no; ulint heap_no; +#ifdef UNIV_SYNC_DEBUG ut_ad(mutex_own(&kernel_mutex)); +#endif /* UNIV_SYNC_DEBUG */ ut_ad(lock_get_wait(wait_lock)); ut_ad(lock_get_type(wait_lock) == LOCK_REC); @@ -1937,7 +1975,9 @@ lock_grant( /*=======*/ lock_t* lock) /* in: waiting lock request */ { +#ifdef UNIV_SYNC_DEBUG ut_ad(mutex_own(&kernel_mutex)); +#endif /* UNIV_SYNC_DEBUG */ lock_reset_lock_and_trx_wait(lock); @@ -1979,7 +2019,9 @@ lock_rec_cancel( /*============*/ lock_t* lock) /* in: waiting record lock request */ { +#ifdef UNIV_SYNC_DEBUG ut_ad(mutex_own(&kernel_mutex)); +#endif /* UNIV_SYNC_DEBUG */ ut_ad(lock_get_type(lock) == LOCK_REC); /* Reset the bit (there can be only one set bit) in the lock bitmap */ @@ -2012,7 +2054,9 @@ lock_rec_dequeue_from_page( lock_t* lock; trx_t* trx; +#ifdef UNIV_SYNC_DEBUG ut_ad(mutex_own(&kernel_mutex)); +#endif /* UNIV_SYNC_DEBUG */ ut_ad(lock_get_type(in_lock) == LOCK_REC); trx = in_lock->trx; @@ -2055,7 +2099,9 @@ lock_rec_discard( ulint page_no; trx_t* trx; +#ifdef UNIV_SYNC_DEBUG ut_ad(mutex_own(&kernel_mutex)); +#endif /* UNIV_SYNC_DEBUG */ ut_ad(lock_get_type(in_lock) == LOCK_REC); trx = in_lock->trx; @@ -2084,7 +2130,9 @@ lock_rec_free_all_from_discard_page( lock_t* lock; lock_t* next_lock; +#ifdef UNIV_SYNC_DEBUG 
ut_ad(mutex_own(&kernel_mutex)); +#endif /* UNIV_SYNC_DEBUG */ space = buf_frame_get_space_id(page); page_no = buf_frame_get_page_no(page); @@ -2117,7 +2165,9 @@ lock_rec_reset_and_release_wait( lock_t* lock; ulint heap_no; +#ifdef UNIV_SYNC_DEBUG ut_ad(mutex_own(&kernel_mutex)); +#endif /* UNIV_SYNC_DEBUG */ heap_no = rec_get_heap_no(rec); @@ -2149,7 +2199,9 @@ lock_rec_inherit_to_gap( { lock_t* lock; +#ifdef UNIV_SYNC_DEBUG ut_ad(mutex_own(&kernel_mutex)); +#endif /* UNIV_SYNC_DEBUG */ lock = lock_rec_get_first(rec); @@ -2179,7 +2231,9 @@ lock_rec_inherit_to_gap_if_gap_lock( { lock_t* lock; +#ifdef UNIV_SYNC_DEBUG ut_ad(mutex_own(&kernel_mutex)); +#endif /* UNIV_SYNC_DEBUG */ lock = lock_rec_get_first(rec); @@ -2212,7 +2266,9 @@ lock_rec_move( ulint heap_no; ulint type_mode; +#ifdef UNIV_SYNC_DEBUG ut_ad(mutex_own(&kernel_mutex)); +#endif /* UNIV_SYNC_DEBUG */ heap_no = rec_get_heap_no(donator); @@ -2848,7 +2904,9 @@ lock_deadlock_occurs( char* err_buf; ut_ad(trx && lock); +#ifdef UNIV_SYNC_DEBUG ut_ad(mutex_own(&kernel_mutex)); +#endif /* UNIV_SYNC_DEBUG */ retry: /* We check that adding this trx to the waits-for graph does not produce a cycle. 
First mark all active transactions @@ -2924,7 +2982,9 @@ lock_deadlock_recursive( ulint ret; ut_a(trx && start && wait_lock); +#ifdef UNIV_SYNC_DEBUG ut_ad(mutex_own(&kernel_mutex)); +#endif /* UNIV_SYNC_DEBUG */ if (trx->deadlock_mark == 1) { /* We have already exhaustively searched the subtree starting @@ -3115,7 +3175,9 @@ lock_table_create( lock_t* lock; ut_ad(table && trx); +#ifdef UNIV_SYNC_DEBUG ut_ad(mutex_own(&kernel_mutex)); +#endif /* UNIV_SYNC_DEBUG */ if (type_mode == LOCK_AUTO_INC) { /* Only one trx can have the lock on the table @@ -3165,7 +3227,9 @@ lock_table_remove_low( dict_table_t* table; trx_t* trx; +#ifdef UNIV_SYNC_DEBUG ut_ad(mutex_own(&kernel_mutex)); +#endif /* UNIV_SYNC_DEBUG */ table = lock->un_member.tab_lock.table; trx = lock->trx; @@ -3199,7 +3263,9 @@ lock_table_enqueue_waiting( lock_t* lock; trx_t* trx; +#ifdef UNIV_SYNC_DEBUG ut_ad(mutex_own(&kernel_mutex)); +#endif /* UNIV_SYNC_DEBUG */ /* Test if there already is some other reason to suspend thread: we do not enqueue a lock request if the query thread should be @@ -3269,7 +3335,9 @@ lock_table_other_has_incompatible( { lock_t* lock; +#ifdef UNIV_SYNC_DEBUG ut_ad(mutex_own(&kernel_mutex)); +#endif /* UNIV_SYNC_DEBUG */ lock = UT_LIST_GET_LAST(table->locks); @@ -3419,7 +3487,9 @@ lock_table_dequeue( { lock_t* lock; +#ifdef UNIV_SYNC_DEBUG ut_ad(mutex_own(&kernel_mutex)); +#endif /* UNIV_SYNC_DEBUG */ ut_ad(lock_get_type(in_lock) == LOCK_TABLE); lock = UT_LIST_GET_NEXT(un_member.tab_lock.locks, in_lock); @@ -3475,7 +3545,9 @@ lock_release_off_kernel( ulint count; lock_t* lock; +#ifdef UNIV_SYNC_DEBUG ut_ad(mutex_own(&kernel_mutex)); +#endif /* UNIV_SYNC_DEBUG */ lock = UT_LIST_GET_LAST(trx->trx_locks); @@ -3535,7 +3607,9 @@ lock_cancel_waiting_and_release( /*============================*/ lock_t* lock) /* in: waiting lock request */ { +#ifdef UNIV_SYNC_DEBUG ut_ad(mutex_own(&kernel_mutex)); +#endif /* UNIV_SYNC_DEBUG */ if (lock_get_type(lock) == LOCK_REC) { @@ -3568,7 +3642,9 @@ 
lock_reset_all_on_table_for_trx( lock_t* lock; lock_t* prev_lock; +#ifdef UNIV_SYNC_DEBUG ut_ad(mutex_own(&kernel_mutex)); +#endif /* UNIV_SYNC_DEBUG */ lock = UT_LIST_GET_LAST(trx->trx_locks); @@ -3630,7 +3706,9 @@ lock_table_print( 500 bytes */ lock_t* lock) /* in: table type lock */ { +#ifdef UNIV_SYNC_DEBUG ut_ad(mutex_own(&kernel_mutex)); +#endif /* UNIV_SYNC_DEBUG */ ut_a(lock_get_type(lock) == LOCK_TABLE); buf += sprintf(buf, "TABLE LOCK table %s trx id %lu %lu", @@ -3677,7 +3755,9 @@ lock_rec_print( char* buf_start = buf; mtr_t mtr; +#ifdef UNIV_SYNC_DEBUG ut_ad(mutex_own(&kernel_mutex)); +#endif /* UNIV_SYNC_DEBUG */ ut_a(lock_get_type(lock) == LOCK_REC); space = lock->un_member.rec_lock.space; @@ -3739,7 +3819,9 @@ lock_rec_print( } if (page) { +#ifdef UNIV_SYNC_DEBUG buf_page_dbg_add_level(page, SYNC_NO_ORDER_CHECK); +#endif /* UNIV_SYNC_DEBUG */ } for (i = 0; i < lock_rec_get_n_bits(lock); i++) { @@ -3783,7 +3865,9 @@ lock_get_n_rec_locks(void) ulint n_locks = 0; ulint i; +#ifdef UNIV_SYNC_DEBUG ut_ad(mutex_own(&kernel_mutex)); +#endif /* UNIV_SYNC_DEBUG */ for (i = 0; i < hash_get_n_cells(lock_sys->rec_hash); i++) { @@ -4055,7 +4139,9 @@ lock_table_queue_validate( lock_t* lock; ibool is_waiting; +#ifdef UNIV_SYNC_DEBUG ut_ad(mutex_own(&kernel_mutex)); +#endif /* UNIV_SYNC_DEBUG */ is_waiting = FALSE; @@ -4208,12 +4294,16 @@ lock_rec_validate_page( ulint i; mtr_t mtr; +#ifdef UNIV_SYNC_DEBUG ut_ad(!mutex_own(&kernel_mutex)); +#endif /* UNIV_SYNC_DEBUG */ mtr_start(&mtr); page = buf_page_get(space, page_no, RW_X_LATCH, &mtr); +#ifdef UNIV_SYNC_DEBUG buf_page_dbg_add_level(page, SYNC_NO_ORDER_CHECK); +#endif /* UNIV_SYNC_DEBUG */ lock_mutex_enter_kernel(); loop: @@ -4458,7 +4548,9 @@ lock_rec_convert_impl_to_expl( { trx_t* impl_trx; +#ifdef UNIV_SYNC_DEBUG ut_ad(mutex_own(&kernel_mutex)); +#endif /* UNIV_SYNC_DEBUG */ ut_ad(page_rec_is_user_rec(rec)); if (index->type & DICT_CLUSTERED) { |