Diffstat (limited to 'storage/innobase/trx/trx0i_s.cc')
-rw-r--r-- | storage/innobase/trx/trx0i_s.cc | 183
1 files changed, 71 insertions, 112 deletions
diff --git a/storage/innobase/trx/trx0i_s.cc b/storage/innobase/trx/trx0i_s.cc
index 54425ae934b..2aaba63885c 100644
--- a/storage/innobase/trx/trx0i_s.cc
+++ b/storage/innobase/trx/trx0i_s.cc
@@ -179,31 +179,13 @@ INFORMATION SCHEMA tables is fetched and later retrieved by the
 code in handler/i_s.cc. */
 trx_i_s_cache_t*        trx_i_s_cache = &trx_i_s_cache_static;
 
-/*******************************************************************//**
-For a record lock that is in waiting state retrieves the only bit that
-is set, for a table lock returns ULINT_UNDEFINED.
-@return record number within the heap */
-static
-ulint
-wait_lock_get_heap_no(
-/*==================*/
-        const lock_t*   lock)   /*!< in: lock */
+/** @return the heap number of a record lock
+@retval 0 for table locks */
+static uint16_t wait_lock_get_heap_no(const lock_t* lock)
 {
-        ulint   ret;
-
-        switch (lock_get_type(lock)) {
-        case LOCK_REC:
-                ret = lock_rec_find_set_bit(lock);
-                ut_a(ret != ULINT_UNDEFINED);
-                break;
-        case LOCK_TABLE:
-                ret = ULINT_UNDEFINED;
-                break;
-        default:
-                ut_error;
-        }
-
-        return(ret);
+        return lock_get_type(lock) == LOCK_REC
+                ? static_cast<uint16_t>(lock_rec_find_set_bit(lock))
+                : uint16_t{0};
 }
 
 /*******************************************************************//**
@@ -405,25 +387,20 @@ i_s_locks_row_validate(
 /*===================*/
         const i_s_locks_row_t*  row)    /*!< in: row to validate */
 {
-        ut_ad(row->lock_mode != NULL);
-        ut_ad(row->lock_type != NULL);
+        ut_ad(row->lock_mode);
         ut_ad(row->lock_table != NULL);
         ut_ad(row->lock_table_id != 0);
 
-        if (row->lock_space == ULINT_UNDEFINED) {
+        if (!row->lock_index) {
                 /* table lock */
-                ut_ad(!strcmp("TABLE", row->lock_type));
-                ut_ad(row->lock_index == NULL);
-                ut_ad(row->lock_data == NULL);
-                ut_ad(row->lock_page == ULINT_UNDEFINED);
-                ut_ad(row->lock_rec == ULINT_UNDEFINED);
+                ut_ad(!row->lock_data);
+                ut_ad(!row->lock_space);
+                ut_ad(!row->lock_page);
+                ut_ad(!row->lock_rec);
         } else {
                 /* record lock */
-                ut_ad(!strcmp("RECORD", row->lock_type));
-                ut_ad(row->lock_index != NULL);
                 /* row->lock_data == NULL if buf_page_try_get() == NULL */
-                ut_ad(row->lock_page != ULINT_UNDEFINED);
-                ut_ad(row->lock_rec != ULINT_UNDEFINED);
+                ut_ad(row->lock_page);
         }
 
         return(TRUE);
@@ -504,21 +481,7 @@ fill_trx_row(
         }
 
 thd_done:
-        s = trx->op_info;
-
-        if (s != NULL && s[0] != '\0') {
-
-                TRX_I_S_STRING_COPY(s, row->trx_operation_state,
-                                    TRX_I_S_TRX_OP_STATE_MAX_LEN, cache);
-
-                if (row->trx_operation_state == NULL) {
-
-                        return(FALSE);
-                }
-        } else {
-
-                row->trx_operation_state = NULL;
-        }
+        row->trx_operation_state = trx->op_info;
 
         row->trx_tables_in_use = trx->n_mysql_tables_in_use;
 
@@ -538,23 +501,7 @@ thd_done:
 
         row->trx_concurrency_tickets = trx->n_tickets_to_enter_innodb;
 
-        switch (trx->isolation_level) {
-        case TRX_ISO_READ_UNCOMMITTED:
-                row->trx_isolation_level = "READ UNCOMMITTED";
-                break;
-        case TRX_ISO_READ_COMMITTED:
-                row->trx_isolation_level = "READ COMMITTED";
-                break;
-        case TRX_ISO_REPEATABLE_READ:
-                row->trx_isolation_level = "REPEATABLE READ";
-                break;
-        case TRX_ISO_SERIALIZABLE:
-                row->trx_isolation_level = "SERIALIZABLE";
-                break;
-        /* Should not happen as TRX_ISO_READ_COMMITTED is default */
-        default:
-                row->trx_isolation_level = "UNKNOWN";
-        }
+        row->trx_isolation_level = trx->isolation_level;
 
         row->trx_unique_checks = (ibool) trx->check_unique_secondary;
 
@@ -686,8 +633,8 @@ fill_lock_data(
 
         mtr_start(&mtr);
 
-        block = buf_page_try_get(page_id_t(lock_rec_get_space_id(lock),
-                                           lock_rec_get_page_no(lock)),
+        block = buf_page_try_get(page_id_t(lock->un_member.rec_lock.space,
+                                           lock->un_member.rec_lock.page_no),
                                  &mtr);
 
         if (block == NULL) {
@@ -751,22 +698,42 @@ fill_lock_data(
 /*******************************************************************//**
 Fills i_s_locks_row_t object. Returns its first argument.
 If memory can not be allocated then FALSE is returned.
-@return FALSE if allocation fails */
-static
-ibool
-fill_locks_row(
-/*===========*/
+@return false if allocation fails */
+static bool fill_locks_row(
         i_s_locks_row_t* row,   /*!< out: result object that's filled */
         const lock_t*   lock,   /*!< in: lock to get data from */
-        ulint           heap_no,/*!< in: lock's record number
-                                or ULINT_UNDEFINED if the lock
+        uint16_t        heap_no,/*!< in: lock's record number
+                                or 0 if the lock
                                 is a table lock */
         trx_i_s_cache_t* cache) /*!< in/out: cache into which to copy
                                 volatile strings */
 {
-        row->lock_trx_id = lock_get_trx_id(lock);
-        row->lock_mode = lock_get_mode_str(lock);
-        row->lock_type = lock_get_type_str(lock);
+        row->lock_trx_id = lock->trx->id;
+        const auto lock_type = lock_get_type(lock);
+        ut_ad(lock_type == LOCK_REC || lock_type == LOCK_TABLE);
+
+        const bool is_gap_lock = lock_type == LOCK_REC
+                && (lock->type_mode & LOCK_GAP);
+        switch (lock->type_mode & LOCK_MODE_MASK) {
+        case LOCK_S:
+                row->lock_mode = uint8_t(1 + is_gap_lock);
+                break;
+        case LOCK_X:
+                row->lock_mode = uint8_t(3 + is_gap_lock);
+                break;
+        case LOCK_IS:
+                row->lock_mode = uint8_t(5 + is_gap_lock);
+                break;
+        case LOCK_IX:
+                row->lock_mode = uint8_t(7 + is_gap_lock);
+                break;
+        case LOCK_AUTO_INC:
+                row->lock_mode = 9;
+                break;
+        default:
+                ut_ad("unknown lock mode" == 0);
+                row->lock_mode = 0;
+        }
 
         row->lock_table = ha_storage_put_str_memlim(
                 cache->storage, lock_get_table_name(lock).m_name,
@@ -775,11 +742,10 @@ fill_locks_row(
         /* memory could not be allocated */
         if (row->lock_table == NULL) {
 
-                return(FALSE);
+                return false;
         }
 
-        switch (lock_get_type(lock)) {
-        case LOCK_REC:
+        if (lock_type == LOCK_REC) {
                 row->lock_index = ha_storage_put_str_memlim(
                         cache->storage, lock_rec_get_index_name(lock),
                         MAX_ALLOWED_FOR_STORAGE(cache));
@@ -787,32 +753,26 @@ fill_locks_row(
                 /* memory could not be allocated */
                 if (row->lock_index == NULL) {
 
-                        return(FALSE);
+                        return false;
                 }
 
-                row->lock_space = lock_rec_get_space_id(lock);
-                row->lock_page = lock_rec_get_page_no(lock);
+                row->lock_space = lock->un_member.rec_lock.space;
+                row->lock_page = lock->un_member.rec_lock.page_no;
                 row->lock_rec = heap_no;
 
                 if (!fill_lock_data(&row->lock_data, lock, heap_no, cache)) {
 
                         /* memory could not be allocated */
-                        return(FALSE);
+                        return false;
                 }
-
-                break;
-        case LOCK_TABLE:
+        } else {
                 row->lock_index = NULL;
 
-                row->lock_space = ULINT_UNDEFINED;
-                row->lock_page = ULINT_UNDEFINED;
-                row->lock_rec = ULINT_UNDEFINED;
+                row->lock_space = 0;
+                row->lock_page = 0;
+                row->lock_rec = 0;
                 row->lock_data = NULL;
-
-                break;
-        default:
-                ut_error;
         }
 
         row->lock_table_id = lock_get_table_id(lock);
 
@@ -820,7 +780,7 @@ fill_locks_row(
         row->hash_chain.value = row;
         ut_ad(i_s_locks_row_validate(row));
 
-        return(TRUE);
+        return true;
 }
 
 /*******************************************************************//**
@@ -874,11 +834,11 @@ fold_lock(
         case LOCK_REC:
                 ut_a(heap_no != ULINT_UNDEFINED);
 
-                ret = ut_fold_ulint_pair((ulint) lock_get_trx_id(lock),
-                                         lock_rec_get_space_id(lock));
+                ret = ut_fold_ulint_pair((ulint) lock->trx->id,
+                                         lock->un_member.rec_lock.space);
 
                 ret = ut_fold_ulint_pair(ret,
-                                         lock_rec_get_page_no(lock));
+                                         lock->un_member.rec_lock.page_no);
 
                 ret = ut_fold_ulint_pair(ret,
                                          heap_no);
@@ -921,9 +881,9 @@ locks_row_eq_lock(
         case LOCK_REC:
                 ut_a(heap_no != ULINT_UNDEFINED);
 
-                return(row->lock_trx_id == lock_get_trx_id(lock)
-                       && row->lock_space == lock_rec_get_space_id(lock)
-                       && row->lock_page == lock_rec_get_page_no(lock)
+                return(row->lock_trx_id == lock->trx->id
+                       && row->lock_space == lock->un_member.rec_lock.space
+                       && row->lock_page == lock->un_member.rec_lock.page_no
                        && row->lock_rec == heap_no);
 
         case LOCK_TABLE:
@@ -932,7 +892,7 @@
                 it fails. */
                 ut_a(heap_no == ULINT_UNDEFINED);
 
-                return(row->lock_trx_id == lock_get_trx_id(lock)
+                return(row->lock_trx_id == lock->trx->id
                        && row->lock_table_id == lock_get_table_id(lock));
 
         default:
@@ -953,7 +913,7 @@ search_innodb_locks(
         trx_i_s_cache_t*        cache,  /*!< in: cache */
         const lock_t*           lock,   /*!< in: lock to search for */
-        ulint                   heap_no)/*!< in: lock's record number
+        uint16_t                heap_no)/*!< in: lock's record number
                                         or ULINT_UNDEFINED if the lock
                                         is a table lock */
 {
@@ -996,8 +956,8 @@ add_lock_to_cache(
 /*==============*/
         trx_i_s_cache_t*        cache,  /*!< in/out: cache */
         const lock_t*           lock,   /*!< in: the element to add */
-        ulint                   heap_no)/*!< in: lock's record number
-                                or ULINT_UNDEFINED if the lock
+        uint16_t                heap_no)/*!< in: lock's record number
+                                or 0 if the lock
                                 is a table lock */
 {
         i_s_locks_row_t* dst_row;
@@ -1111,13 +1071,12 @@ add_trx_relevant_locks_to_cache(
         if (trx->lock.que_state == TRX_QUE_LOCK_WAIT) {
 
                 const lock_t*           curr_lock;
-                ulint                   wait_lock_heap_no;
                 i_s_locks_row_t*        blocking_lock_row;
                 lock_queue_iterator_t   iter;
 
                 ut_a(trx->lock.wait_lock != NULL);
 
-                wait_lock_heap_no
+                uint16_t wait_lock_heap_no
                         = wait_lock_get_heap_no(trx->lock.wait_lock);
 
                 /* add the requested lock */
@@ -1521,11 +1480,11 @@ trx_i_s_create_lock_id(
         /* please adjust TRX_I_S_LOCK_ID_MAX_LEN if you change this */
 
-        if (row->lock_space != ULINT_UNDEFINED) {
+        if (row->lock_index) {
                 /* record lock */
                 res_len = snprintf(lock_id, lock_id_size,
                                    TRX_ID_FMT
-                                   ":" ULINTPF ":" ULINTPF ":" ULINTPF,
+                                   ":%u:%u:%u",
                                    row->lock_trx_id, row->lock_space,
                                    row->lock_page, row->lock_rec);
         } else {
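Note on the lock_mode change: fill_locks_row() now stores lock_mode as a small numeric code (1/3/5/7 for S/X/IS/IX, plus one when the LOCK_GAP flag is set, and 9 for AUTO-INC locks) instead of the string previously produced by lock_get_mode_str(), and trx_isolation_level likewise becomes the raw numeric trx->isolation_level. The standalone C++ sketch below only illustrates that encoding by mapping the code back to human-readable names; the name table and the lock_mode_str() helper are hypothetical and are not the actual decoding code in handler/i_s.cc.

#include <cstdint>
#include <cstdio>

/* Hypothetical decoder mirroring the switch in fill_locks_row():
   odd codes 1/3/5/7 are S/X/IS/IX, the next even code is the same
   mode with LOCK_GAP set, and 9 is an AUTO-INC table lock. */
static const char* const innodb_lock_mode_names[] = {
        "UNKNOWN",      /* 0: fallback stored for unexpected modes */
        "S",            /* 1 */
        "S,GAP",        /* 2 */
        "X",            /* 3 */
        "X,GAP",        /* 4 */
        "IS",           /* 5 */
        "IS,GAP",       /* 6 */
        "IX",           /* 7 */
        "IX,GAP",       /* 8 */
        "AUTO_INC"      /* 9 */
};

static const char* lock_mode_str(uint8_t lock_mode)
{
        return lock_mode < 10 ? innodb_lock_mode_names[lock_mode] : "UNKNOWN";
}

int main()
{
        /* fill_locks_row() computes uint8_t(7 + is_gap_lock) for an IX lock;
           with the gap flag set the stored value is 8, printed as "IX,GAP". */
        std::printf("%s\n", lock_mode_str(uint8_t(7 + true)));
        return 0;
}

Any consumer of the cache presumably performs a similar numeric-to-name translation for both lock_mode and trx_isolation_level when filling the INFORMATION_SCHEMA result sets.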