Diffstat (limited to 'storage/innobase/row/row0sel.cc')
-rw-r--r--  storage/innobase/row/row0sel.cc  357
1 file changed, 159 insertions, 198 deletions
diff --git a/storage/innobase/row/row0sel.cc b/storage/innobase/row/row0sel.cc
index f3445a340d6..aaac86c93e2 100644
--- a/storage/innobase/row/row0sel.cc
+++ b/storage/innobase/row/row0sel.cc
@@ -121,7 +121,7 @@ row_sel_sec_rec_is_for_blob(
field_ref_zero, BTR_EXTERN_FIELD_REF_SIZE)) {
/* The externally stored field was not written yet.
This record should only be seen by
- recv_recovery_rollback_active() or any
+ trx_rollback_recovered() or any
TRX_ISO_READ_UNCOMMITTED transactions. */
return(FALSE);
}
@@ -530,7 +530,7 @@ row_sel_fetch_columns(
externally stored field was not
written yet. This record
should only be seen by
- recv_recovery_rollback_active() or any
+ trx_rollback_recovered() or any
TRX_ISO_READ_UNCOMMITTED
transactions. The InnoDB SQL parser
(the sole caller of this function)
@@ -953,27 +953,17 @@ row_sel_get_clust_rec(
if (!node->read_view) {
/* Try to place a lock on the index record */
- ulint lock_type;
- trx_t* trx;
-
- trx = thr_get_trx(thr);
+ trx_t* trx = thr_get_trx(thr);
- /* If innodb_locks_unsafe_for_binlog option is used
- or this session is using READ COMMITTED or lower isolation level
+ /* At READ UNCOMMITTED or READ COMMITTED isolation level
we lock only the record, i.e., next-key locking is
not used. */
- if (srv_locks_unsafe_for_binlog
- || trx->isolation_level <= TRX_ISO_READ_COMMITTED) {
- lock_type = LOCK_REC_NOT_GAP;
- } else {
- lock_type = LOCK_ORDINARY;
- }
-
err = lock_clust_rec_read_check_and_lock(
0, btr_pcur_get_block(&plan->clust_pcur),
clust_rec, index, offsets,
- static_cast<lock_mode>(node->row_lock_mode),
- lock_type,
+ node->row_lock_mode,
+ trx->isolation_level <= TRX_ISO_READ_COMMITTED
+ ? LOCK_REC_NOT_GAP : LOCK_ORDINARY,
thr);
switch (err) {
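The hunk above folds the removed srv_locks_unsafe_for_binlog test into a single ternary on the isolation level. A minimal standalone sketch of that selection logic, using stand-in enums rather than the real InnoDB types:

#include <cstdio>

// Sketch only: simplified stand-ins for TRX_ISO_* and the record lock flags.
enum isolation_level { ISO_READ_UNCOMMITTED, ISO_READ_COMMITTED,
                       ISO_REPEATABLE_READ, ISO_SERIALIZABLE };
enum rec_lock_type { REC_NOT_GAP, ORDINARY };

static rec_lock_type pick_lock_type(isolation_level iso)
{
	// At READ UNCOMMITTED/READ COMMITTED only the record itself is locked;
	// above that, next-key (record + preceding gap) locking is used.
	return iso <= ISO_READ_COMMITTED ? REC_NOT_GAP : ORDINARY;
}

int main()
{
	std::printf("%d %d\n", pick_lock_type(ISO_READ_COMMITTED),
		    pick_lock_type(ISO_REPEATABLE_READ));
	return 0;
}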
@@ -1066,8 +1056,8 @@ sel_set_rtr_rec_lock(
const rec_t* first_rec,/*!< in: record */
dict_index_t* index, /*!< in: index */
const rec_offs* offsets,/*!< in: rec_get_offsets(rec, index) */
- ulint mode, /*!< in: lock mode */
- ulint type, /*!< in: LOCK_ORDINARY, LOCK_GAP, or
+ unsigned mode, /*!< in: lock mode */
+ unsigned type, /*!< in: LOCK_ORDINARY, LOCK_GAP, or
LOCK_REC_NOT_GAP */
que_thr_t* thr, /*!< in: query thread */
mtr_t* mtr) /*!< in: mtr */
@@ -1124,7 +1114,7 @@ re_scan:
/* MDEV-14059 FIXME: why re-latch the block?
pcur is already positioned on it! */
- ulint page_no = page_get_page_no(
+ uint32_t page_no = page_get_page_no(
btr_pcur_get_page(pcur));
cur_block = buf_page_get_gen(
@@ -1233,8 +1223,8 @@ sel_set_rec_lock(
const rec_t* rec, /*!< in: record */
dict_index_t* index, /*!< in: index */
const rec_offs* offsets,/*!< in: rec_get_offsets(rec, index) */
- ulint mode, /*!< in: lock mode */
- ulint type, /*!< in: LOCK_ORDINARY, LOCK_GAP, or
+ unsigned mode, /*!< in: lock mode */
+ unsigned type, /*!< in: LOCK_ORDINARY, LOCK_GAP, or
LOCK_REC_NOT_GAP */
que_thr_t* thr, /*!< in: query thread */
mtr_t* mtr) /*!< in: mtr */
@@ -1247,11 +1237,9 @@ sel_set_rec_lock(
trx = thr_get_trx(thr);
- if (UT_LIST_GET_LEN(trx->lock.trx_locks) > 10000) {
- if (buf_LRU_buf_pool_running_out()) {
-
- return(DB_LOCK_TABLE_FULL);
- }
+ if (UT_LIST_GET_LEN(trx->lock.trx_locks) > 10000
+ && buf_pool.running_out()) {
+ return DB_LOCK_TABLE_FULL;
}
if (dict_index_is_clust(index)) {
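Because && short-circuits, the collapsed condition above behaves exactly like the removed nested ifs: the cheap lock-count test still runs first, and the buffer-pool check only when it passes. A trivial sketch, with a hypothetical helper standing in for buf_pool.running_out():

#include <cstddef>

// Stand-in for buf_pool.running_out(); defined trivially so the sketch links.
static bool pool_running_out() { return false; }

static int nested_form(size_t n_locks)
{
	if (n_locks > 10000) {
		if (pool_running_out()) {
			return -1;	// stand-in for DB_LOCK_TABLE_FULL
		}
	}
	return 0;
}

static int combined_form(size_t n_locks)
{
	// Same behaviour: pool_running_out() is evaluated only when the
	// counter test already succeeded.
	if (n_locks > 10000 && pool_running_out()) {
		return -1;
	}
	return 0;
}

int main() { return nested_form(20000) == combined_form(20000) ? 0 : 1; }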
@@ -1711,7 +1699,7 @@ rec_loop:
if (!consistent_read) {
rec_t* next_rec = page_rec_get_next(rec);
- ulint lock_type;
+ unsigned lock_type;
trx_t* trx;
trx = thr_get_trx(thr);
@@ -1720,16 +1708,11 @@ rec_loop:
index->n_core_fields,
ULINT_UNDEFINED, &heap);
- /* If innodb_locks_unsafe_for_binlog option is used
- or this session is using READ COMMITTED or lower isolation
- level, we lock only the record, i.e., next-key
- locking is not used. */
- if (srv_locks_unsafe_for_binlog
- || trx->isolation_level
- <= TRX_ISO_READ_COMMITTED) {
-
+ /* At READ UNCOMMITTED or READ COMMITTED
+ isolation level, we lock only the record,
+ i.e., next-key locking is not used. */
+ if (trx->isolation_level <= TRX_ISO_READ_COMMITTED) {
if (page_rec_is_supremum(next_rec)) {
-
goto skip_lock;
}
@@ -1779,7 +1762,7 @@ skip_lock:
if (!consistent_read) {
/* Try to place a lock on the index record */
- ulint lock_type;
+ unsigned lock_type;
trx_t* trx;
offsets = rec_get_offsets(rec, index, offsets,
@@ -1788,12 +1771,10 @@ skip_lock:
trx = thr_get_trx(thr);
- /* If innodb_locks_unsafe_for_binlog option is used
- or this session is using READ COMMITTED or lower isolation level,
+ /* At READ UNCOMMITTED or READ COMMITTED isolation level,
we lock only the record, i.e., next-key locking is
not used. */
- if (srv_locks_unsafe_for_binlog
- || trx->isolation_level <= TRX_ISO_READ_COMMITTED
+ if (trx->isolation_level <= TRX_ISO_READ_COMMITTED
|| dict_index_is_spatial(index)) {
if (page_rec_is_supremum(rec)) {
@@ -2726,7 +2707,7 @@ row_sel_field_store_in_mysql_format_func(
}
/* Copy the actual data */
- ut_memcpy(dest, data, len);
+ memcpy(dest, data, len);
/* Pad with trailing spaces. */
@@ -2918,7 +2899,7 @@ row_sel_store_mysql_field(
if (UNIV_UNLIKELY(!data)) {
/* The externally stored field was not written
yet. This record should only be seen by
- recv_recovery_rollback_active() or any
+ trx_rollback_recovered() or any
TRX_ISO_READ_UNCOMMITTED transactions. */
if (heap != prebuilt->blob_heap) {
@@ -2952,8 +2933,15 @@ row_sel_store_mysql_field(
MEM_CHECK_DEFINED(prebuilt->default_rec
+ templ->mysql_col_offset,
templ->mysql_col_len);
+#if defined __GNUC__ && !defined __clang__ && __GNUC__ < 6
+# pragma GCC diagnostic push
+# pragma GCC diagnostic ignored "-Wconversion" /* GCC 5 may need this here */
+#endif
mysql_rec[templ->mysql_null_byte_offset]
|= (byte) templ->mysql_null_bit_mask;
+#if defined __GNUC__ && !defined __clang__ && __GNUC__ < 6
+# pragma GCC diagnostic pop
+#endif
memcpy(mysql_rec + templ->mysql_col_offset,
(const byte*) prebuilt->default_rec
+ templ->mysql_col_offset,
@@ -2996,7 +2984,7 @@ row_sel_store_mysql_field(
/* It is a nullable column with a non-NULL
value */
mysql_rec[templ->mysql_null_byte_offset]
- &= ~(byte) templ->mysql_null_bit_mask;
+ &= static_cast<byte>(~templ->mysql_null_bit_mask);
}
DBUG_RETURN(TRUE);
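The static_cast here and the GCC pragma block in the preceding hunk address the same -Wconversion pitfall: bitwise operators promote the byte operands to int (and a wide null_bit_mask keeps the result wide), so assigning back into a byte narrows. A small sketch of the two remedies used in this patch, with a plain uint8_t standing in for InnoDB's byte typedef and an illustrative mask parameter:

#include <cstdint>

typedef uint8_t byte;	// stand-in for InnoDB's byte typedef

static void clear_null_bit(byte* rec, unsigned offset, unsigned mask)
{
	// ~mask is a full-width int that does not fit in a byte; casting the
	// whole complement keeps the &= free of -Wconversion warnings.
	rec[offset] &= static_cast<byte>(~mask);
}

static void set_null_bit(byte* rec, unsigned offset, unsigned mask)
{
#if defined __GNUC__ && !defined __clang__ && __GNUC__ < 6
# pragma GCC diagnostic push
# pragma GCC diagnostic ignored "-Wconversion"	/* old GCC may still warn on |= */
#endif
	rec[offset] |= (byte) mask;
#if defined __GNUC__ && !defined __clang__ && __GNUC__ < 6
# pragma GCC diagnostic pop
#endif
}

int main()
{
	byte rec[1] = { 0 };
	set_null_bit(rec, 0, 0x4);
	clear_null_bit(rec, 0, 0x4);
	return rec[0];
}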
@@ -3048,8 +3036,15 @@ static bool row_sel_store_mysql_rec(
|| !prebuilt->read_just_key) {
/* Initialize the NULL bit. */
if (templ->mysql_null_bit_mask) {
+#if defined __GNUC__ && !defined __clang__ && __GNUC__ < 6
+# pragma GCC diagnostic push
+# pragma GCC diagnostic ignored "-Wconversion" /* GCC 5 may need this here */
+#endif
mysql_rec[templ->mysql_null_byte_offset]
|= (byte) templ->mysql_null_bit_mask;
+#if defined __GNUC__ && !defined __clang__ && __GNUC__ < 6
+# pragma GCC diagnostic pop
+#endif
}
continue;
}
@@ -3069,8 +3064,15 @@ static bool row_sel_store_mysql_rec(
}
if (dfield->len == UNIV_SQL_NULL) {
+#if defined __GNUC__ && !defined __clang__ && __GNUC__ < 6
+# pragma GCC diagnostic push
+# pragma GCC diagnostic ignored "-Wconversion" /* GCC 5 may need this here */
+#endif
mysql_rec[templ->mysql_null_byte_offset]
|= (byte) templ->mysql_null_bit_mask;
+#if defined __GNUC__ && !defined __clang__ && __GNUC__ < 6
+# pragma GCC diagnostic pop
+#endif
memcpy(mysql_rec
+ templ->mysql_col_offset,
(const byte*) prebuilt->default_rec
@@ -3083,8 +3085,9 @@ static bool row_sel_store_mysql_rec(
(const byte*)dfield->data, dfield->len);
if (templ->mysql_null_bit_mask) {
mysql_rec[
- templ->mysql_null_byte_offset]
- &= ~(byte) templ->mysql_null_bit_mask;
+ templ->mysql_null_byte_offset]
+ &= static_cast<byte>
+ (~templ->mysql_null_bit_mask);
}
}
@@ -3316,7 +3319,7 @@ Row_sel_get_clust_rec_for_mysql::operator()(
same as btr_pcur_get_block(prebuilt->pcur),
and is it not unsafe to use RW_NO_LATCH here? */
buf_block_t* block = buf_page_get_gen(
- btr_pcur_get_block(prebuilt->pcur)->page.id,
+ btr_pcur_get_block(prebuilt->pcur)->page.id(),
btr_pcur_get_block(prebuilt->pcur)->zip_size(),
RW_NO_LATCH, NULL, BUF_GET,
__FILE__, __LINE__, mtr, &err);
@@ -3384,7 +3387,7 @@ Row_sel_get_clust_rec_for_mysql::operator()(
err = lock_clust_rec_read_check_and_lock(
0, btr_pcur_get_block(prebuilt->clust_pcur),
clust_rec, clust_index, *offsets,
- static_cast<lock_mode>(prebuilt->select_lock_type),
+ prebuilt->select_lock_type,
LOCK_REC_NOT_GAP,
thr);
@@ -3411,14 +3414,11 @@ Row_sel_get_clust_rec_for_mysql::operator()(
const buf_page_t& bpage = btr_pcur_get_block(
prebuilt->clust_pcur)->page;
- lsn_t lsn = bpage.newest_modification;
- if (!lsn) {
- lsn = mach_read_from_8(
- page_align(clust_rec) + FIL_PAGE_LSN);
- }
+ const lsn_t lsn = mach_read_from_8(
+ page_align(clust_rec) + FIL_PAGE_LSN);
if (lsn != cached_lsn
- || bpage.id != cached_page_id
+ || bpage.id() != cached_page_id
|| clust_rec != cached_clust_rec) {
/* The following call returns 'offsets' associated with
'old_vers' */
@@ -3432,7 +3432,7 @@ Row_sel_get_clust_rec_for_mysql::operator()(
goto err_exit;
}
cached_lsn = lsn;
- cached_page_id = bpage.id;
+ cached_page_id = bpage.id();
cached_clust_rec = clust_rec;
cached_old_vers = old_vers;
} else {
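The cache key above now comes straight from the FIL_PAGE_LSN field of the page frame rather than bpage.newest_modification. InnoDB stores such header fields big-endian; a hedged sketch of what a mach_read_from_8-style read looks like (the offset constant and helper names are only illustrative):

#include <cstdint>
#include <cstddef>

// Assumption for illustration only; not necessarily the real header offset.
static const size_t PAGE_LSN_OFFSET = 16;

// Read an 8-byte big-endian integer, the way a mach_read_from_8-style
// helper would.
static uint64_t read_be64(const unsigned char* p)
{
	uint64_t v = 0;
	for (int i = 0; i < 8; i++) {
		v = (v << 8) | p[i];	// most significant byte first
	}
	return v;
}

static uint64_t page_lsn(const unsigned char* page_frame)
{
	return read_be64(page_frame + PAGE_LSN_OFFSET);
}

int main()
{
	unsigned char frame[64] = { 0 };
	frame[PAGE_LSN_OFFSET + 7] = 42;	// LSN == 42 in big-endian layout
	return page_lsn(frame) == 42 ? 0 : 1;
}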
@@ -3645,7 +3645,7 @@ row_sel_copy_cached_field_for_mysql(
len = templ->mysql_col_len;
}
- ut_memcpy(buf, cache, len);
+ memcpy(buf, cache, len);
}
/** Copy used fields from cached row.
@@ -3675,10 +3675,17 @@ row_sel_copy_cached_fields_for_mysql(
/* Copy NULL bit of the current field from cached_rec
to buf */
if (templ->mysql_null_bit_mask) {
+#if defined __GNUC__ && !defined __clang__ && __GNUC__ < 6
+# pragma GCC diagnostic push
+# pragma GCC diagnostic ignored "-Wconversion" /* GCC 5 may need this here */
+#endif
buf[templ->mysql_null_byte_offset]
^= (buf[templ->mysql_null_byte_offset]
^ cached_rec[templ->mysql_null_byte_offset])
& (byte) templ->mysql_null_bit_mask;
+#if defined __GNUC__ && !defined __clang__ && __GNUC__ < 6
+# pragma GCC diagnostic pop
+#endif
}
}
}
@@ -3712,7 +3719,7 @@ row_sel_dequeue_cached_row_for_mysql(
MEM_UNDEFINED(buf, prebuilt->mysql_prefix_len);
/* First copy the NULL bits. */
- ut_memcpy(buf, cached_rec, prebuilt->null_bitmap_len);
+ memcpy(buf, cached_rec, prebuilt->null_bitmap_len);
/* Then copy the requested fields. */
for (i = 0; i < prebuilt->n_template; i++) {
@@ -3729,7 +3736,7 @@ row_sel_dequeue_cached_row_for_mysql(
buf, cached_rec, templ);
}
} else {
- ut_memcpy(buf, cached_rec, prebuilt->mysql_prefix_len);
+ memcpy(buf, cached_rec, prebuilt->mysql_prefix_len);
}
prebuilt->n_fetch_cached--;
@@ -3812,9 +3819,8 @@ row_sel_enqueue_cache_row_for_mysql(
next fetch cache slot. */
if (prebuilt->pk_filter || prebuilt->idx_cond) {
- byte* dest = row_sel_fetch_last_buf(prebuilt);
-
- ut_memcpy(dest, mysql_rec, prebuilt->mysql_row_len);
+ memcpy(row_sel_fetch_last_buf(prebuilt), mysql_rec,
+ prebuilt->mysql_row_len);
}
++prebuilt->n_fetch_cached;
@@ -3846,7 +3852,7 @@ row_sel_try_search_shortcut_for_mysql(
ut_ad(dict_index_is_clust(index));
ut_ad(!prebuilt->templ_contains_blob);
- rw_lock_t* ahi_latch = btr_get_search_latch(index);
+ rw_lock_t* ahi_latch = btr_search_sys.get_latch(*index);
rw_lock_s_lock(ahi_latch);
btr_pcur_open_with_no_init(index, search_tuple, PAGE_CUR_GE,
BTR_SEARCH_LEAF, pcur, ahi_latch, mtr);
@@ -4096,7 +4102,7 @@ rec_field_len_in_chars(
return SIZE_T_MAX;
}
- return(cs->cset->numchars(cs, rec_field, rec_field + rec_field_len));
+ return cs->numchars(rec_field, rec_field + rec_field_len);
}
/** Avoid the clustered index lookup if all the following conditions
@@ -4231,29 +4237,18 @@ row_search_mvcc(
const rec_t* result_rec = NULL;
const rec_t* clust_rec;
Row_sel_get_clust_rec_for_mysql row_sel_get_clust_rec_for_mysql;
- dberr_t err = DB_SUCCESS;
ibool unique_search = FALSE;
ibool mtr_has_extra_clust_latch = FALSE;
ibool moves_up = FALSE;
- ibool set_also_gap_locks = TRUE;
- /* if the query is a plain locking SELECT, and the isolation level
- is <= TRX_ISO_READ_COMMITTED, then this is set to FALSE */
- ibool did_semi_consistent_read = FALSE;
/* if the returned record was locked and we did a semi-consistent
read (fetch the newest committed version), then this is set to
TRUE */
ulint next_offs;
ibool same_user_rec;
- mtr_t mtr;
- mem_heap_t* heap = NULL;
- rec_offs offsets_[REC_OFFS_NORMAL_SIZE];
- rec_offs* offsets = offsets_;
ibool table_lock_waited = FALSE;
byte* next_buf = 0;
bool spatial_search = false;
- rec_offs_init(offsets_);
-
ut_ad(index && pcur && search_tuple);
ut_a(prebuilt->magic_n == ROW_PREBUILT_ALLOCATED);
ut_a(prebuilt->magic_n2 == ROW_PREBUILT_ALLOCATED);
@@ -4286,8 +4281,8 @@ row_search_mvcc(
bool need_vrow = dict_index_has_virtual(prebuilt->index)
&& prebuilt->read_just_key;
- /* Reset the new record lock info if srv_locks_unsafe_for_binlog
- is set or session is using a READ COMMITTED isolation level. Then
+ /* Reset the new record lock info if READ UNCOMMITTED or
+ READ COMMITTED isolation level is used. Then
we are able to remove the record locks set here on an individual
row. */
prebuilt->new_rec_locks = 0;
@@ -4330,20 +4325,18 @@ row_search_mvcc(
row_sel_dequeue_cached_row_for_mysql(buf, prebuilt);
prebuilt->n_rows_fetched++;
-
- err = DB_SUCCESS;
- goto func_exit;
+ trx->op_info = "";
+ DBUG_RETURN(DB_SUCCESS);
}
if (prebuilt->fetch_cache_first > 0
&& prebuilt->fetch_cache_first < MYSQL_FETCH_CACHE_SIZE) {
-
+early_not_found:
/* The previous returned row was popped from the fetch
cache, but the cache was not full at the time of the
popping: no more rows can exist in the result set */
-
- err = DB_RECORD_NOT_FOUND;
- goto func_exit;
+ trx->op_info = "";
+ DBUG_RETURN(DB_RECORD_NOT_FOUND);
}
prebuilt->n_rows_fetched++;
@@ -4387,22 +4380,28 @@ row_search_mvcc(
if (UNIV_UNLIKELY(direction != 0
&& !prebuilt->used_in_HANDLER)) {
-
- err = DB_RECORD_NOT_FOUND;
- goto func_exit;
+ goto early_not_found;
}
}
/* We don't support sequential scan for Rtree index, because it
is meaningless to do so. */
- if (dict_index_is_spatial(index)
- && !RTREE_SEARCH_MODE(mode)) {
- err = DB_END_OF_INDEX;
- goto func_exit;
+ if (dict_index_is_spatial(index) && !RTREE_SEARCH_MODE(mode)) {
+ trx->op_info = "";
+ DBUG_RETURN(DB_END_OF_INDEX);
}
+ /* if the query is a plain locking SELECT, and the isolation level
+ is <= TRX_ISO_READ_COMMITTED, then this is set to FALSE */
+ bool did_semi_consistent_read = false;
+ mtr_t mtr;
mtr.start();
+ mem_heap_t* heap = NULL;
+ rec_offs offsets_[REC_OFFS_NORMAL_SIZE];
+ rec_offs* offsets = offsets_;
+ rec_offs_init(offsets_);
+
#ifdef BTR_CUR_HASH_ADAPT
/*-------------------------------------------------------------*/
/* PHASE 2: Try fast adaptive hash index search if possible */
@@ -4432,6 +4431,7 @@ row_search_mvcc(
let us try a search shortcut through the hash
index. */
+ dberr_t err = DB_SUCCESS;
switch (row_sel_try_search_shortcut_for_mysql(
&rec, prebuilt, &offsets, &heap,
&mtr)) {
@@ -4448,16 +4448,20 @@ row_search_mvcc(
buf, prebuilt,
rec, offsets)) {
case CHECK_ABORTED_BY_USER:
- err = DB_INTERRUPTED;
- mtr.commit();
- goto func_exit;
+ goto aborted;
case CHECK_NEG:
case CHECK_OUT_OF_RANGE:
case CHECK_ERROR:
- goto shortcut_mismatch;
+ err = DB_RECORD_NOT_FOUND;
+ goto shortcut_done;
case CHECK_POS:
- goto shortcut_match;
+ goto shortcut_done;
}
+
+ ut_ad("incorrect code" == 0);
+aborted:
+ err = DB_INTERRUPTED;
+ goto shortcut_done;
}
if (!row_sel_store_mysql_rec(
@@ -4479,20 +4483,22 @@ row_search_mvcc(
break;
}
- shortcut_match:
- mtr.commit();
- /* NOTE that we do NOT store the cursor
- position */
- err = DB_SUCCESS;
- goto func_exit;
+ goto shortcut_done;
case SEL_EXHAUSTED:
- shortcut_mismatch:
+ err = DB_RECORD_NOT_FOUND;
+ shortcut_done:
mtr.commit();
+
/* NOTE that we do NOT store the cursor
position */
- err = DB_RECORD_NOT_FOUND;
- goto func_exit;
+ trx->op_info = "";
+ ut_ad(!sync_check_iterate(sync_check()));
+ ut_ad(!did_semi_consistent_read);
+ if (UNIV_LIKELY_NULL(heap)) {
+ mem_heap_free(heap);
+ }
+ DBUG_RETURN(err);
case SEL_RETRY:
break;
@@ -4530,22 +4536,15 @@ row_search_mvcc(
|| prebuilt->table->no_rollback()
|| srv_read_only_mode);
- if (trx->isolation_level <= TRX_ISO_READ_COMMITTED
- && prebuilt->select_lock_type != LOCK_NONE
- && trx->mysql_thd != NULL
- && thd_is_select(trx->mysql_thd)) {
- /* It is a plain locking SELECT and the isolation
- level is low: do not lock gaps */
-
- set_also_gap_locks = FALSE;
- }
+ /* Do not lock gaps at READ UNCOMMITTED or READ COMMITTED
+ isolation level */
+ const bool set_also_gap_locks =
+ prebuilt->select_lock_type != LOCK_NONE
+ && trx->isolation_level > TRX_ISO_READ_COMMITTED
#ifdef WITH_WSREP
- else if (wsrep_thd_skip_locking(trx->mysql_thd)) {
- ut_ad(!strcmp(wsrep_get_sr_table_name(),
- prebuilt->table->name.m_name));
- set_also_gap_locks = FALSE;
- }
+ && !wsrep_thd_skip_locking(trx->mysql_thd)
#endif /* WITH_WSREP */
+ ;
/* Note that if the search mode was GE or G, then the cursor
naturally moves upward (in fetch next) in alphabetical order,
@@ -4562,10 +4561,12 @@ row_search_mvcc(
thr = que_fork_get_first_thr(prebuilt->sel_graph);
- que_thr_move_to_run_state_for_mysql(thr, trx);
+ thr->start_running();
clust_index = dict_table_get_first_index(prebuilt->table);
+ dberr_t err = DB_SUCCESS;
+
/* Do some start-of-statement preparations */
if (prebuilt->table->no_rollback()) {
@@ -4634,18 +4635,9 @@ wait_table_again:
pcur->btr_cur.thr = thr;
if (dict_index_is_spatial(index)) {
- bool need_pred_lock;
-
- need_pred_lock = (set_also_gap_locks
- && !(srv_locks_unsafe_for_binlog
- || trx->isolation_level
- <= TRX_ISO_READ_COMMITTED)
- && prebuilt->select_lock_type
- != LOCK_NONE);
-
if (!prebuilt->rtr_info) {
prebuilt->rtr_info = rtr_create_rtr_info(
- need_pred_lock, true,
+ set_also_gap_locks, true,
btr_pcur_get_btr_cur(pcur), index);
prebuilt->rtr_info->search_tuple = search_tuple;
prebuilt->rtr_info->search_mode = mode;
@@ -4654,7 +4646,7 @@ wait_table_again:
} else {
rtr_info_reinit_in_cursor(
btr_pcur_get_btr_cur(pcur),
- index, need_pred_lock);
+ index, set_also_gap_locks);
prebuilt->rtr_info->search_tuple = search_tuple;
prebuilt->rtr_info->search_mode = mode;
}
@@ -4675,11 +4667,8 @@ wait_table_again:
ut_ad(page_rec_is_leaf(rec));
if (!moves_up
- && !page_rec_is_supremum(rec)
&& set_also_gap_locks
- && !(srv_locks_unsafe_for_binlog
- || trx->isolation_level <= TRX_ISO_READ_COMMITTED)
- && prebuilt->select_lock_type != LOCK_NONE
+ && !page_rec_is_supremum(rec)
&& !dict_index_is_spatial(index)) {
/* Try to place a gap lock on the next index record
@@ -4759,16 +4748,13 @@ rec_loop:
if (page_rec_is_supremum(rec)) {
if (set_also_gap_locks
- && !(srv_locks_unsafe_for_binlog
- || trx->isolation_level <= TRX_ISO_READ_COMMITTED)
- && prebuilt->select_lock_type != LOCK_NONE
&& !dict_index_is_spatial(index)) {
/* Try to place a lock on the index record */
- /* If innodb_locks_unsafe_for_binlog option is used
- or this session is using a READ COMMITTED or lower isolation
- level we do not lock gaps. Supremum record is really
+ /* If the transaction isolation level is
+ READ UNCOMMITTED or READ COMMITTED,
+ we do not lock gaps. Supremum record is really
a gap and therefore we do not set locks there. */
offsets = rec_get_offsets(rec, index, offsets,
@@ -4833,14 +4819,13 @@ wrong_offs:
ib::error() << "Rec address "
<< static_cast<const void*>(rec)
<< ", buf block fix count "
- << btr_cur_get_block(
- btr_pcur_get_btr_cur(pcur))->page
- .buf_fix_count;
+ << btr_pcur_get_block(pcur)->page
+ .buf_fix_count();
ib::error() << "Index corruption: rec offs "
<< page_offset(rec) << " next offs "
- << next_offs << ", page no "
- << page_get_page_no(page_align(rec))
+ << next_offs
+ << btr_pcur_get_block(pcur)->page.id()
<< ", index " << index->name
<< " of table " << index->table->name
<< ". Run CHECK TABLE. You may need to"
@@ -4856,8 +4841,8 @@ wrong_offs:
ib::info() << "Index corruption: rec offs "
<< page_offset(rec) << " next offs "
- << next_offs << ", page no "
- << page_get_page_no(page_align(rec))
+ << next_offs
+ << btr_pcur_get_block(pcur)->page.id()
<< ", index " << index->name
<< " of table " << index->table->name
<< ". We try to skip the rest of the page.";
@@ -4884,8 +4869,8 @@ wrong_offs:
ib::error() << "Index corruption: rec offs "
<< page_offset(rec) << " next offs "
- << next_offs << ", page no "
- << page_get_page_no(page_align(rec))
+ << next_offs
+ << btr_pcur_get_block(pcur)->page.id()
<< ", index " << index->name
<< " of table " << index->table->name
<< ". We try to skip the record.";
@@ -4908,17 +4893,7 @@ wrong_offs:
if (0 != cmp_dtuple_rec(search_tuple, rec, offsets)) {
if (set_also_gap_locks
- && !(srv_locks_unsafe_for_binlog
- || trx->isolation_level
- <= TRX_ISO_READ_COMMITTED)
- && prebuilt->select_lock_type != LOCK_NONE
&& !dict_index_is_spatial(index)) {
-
- /* Try to place a gap lock on the index
- record only if innodb_locks_unsafe_for_binlog
- option is not set or this session is not
- using a READ COMMITTED or lower isolation level. */
-
err = sel_set_rec_lock(
pcur,
rec, index, offsets,
@@ -4953,17 +4928,7 @@ wrong_offs:
if (!cmp_dtuple_is_prefix_of_rec(search_tuple, rec, offsets)) {
if (set_also_gap_locks
- && !(srv_locks_unsafe_for_binlog
- || trx->isolation_level
- <= TRX_ISO_READ_COMMITTED)
- && prebuilt->select_lock_type != LOCK_NONE
&& !dict_index_is_spatial(index)) {
-
- /* Try to place a gap lock on the index
- record only if innodb_locks_unsafe_for_binlog
- option is not set or this session is not
- using a READ COMMITTED or lower isolation level. */
-
err = sel_set_rec_lock(
pcur,
rec, index, offsets,
@@ -5003,15 +4968,9 @@ wrong_offs:
is a non-delete marked record, then it is enough to lock its
existence with LOCK_REC_NOT_GAP. */
- /* If innodb_locks_unsafe_for_binlog option is used
- or this session is using a READ COMMITTED isolation
- level we lock only the record, i.e., next-key locking is
- not used. */
-
- ulint lock_type;
+ unsigned lock_type;
- if (srv_locks_unsafe_for_binlog
- || trx->isolation_level <= TRX_ISO_READ_COMMITTED) {
+ if (trx->isolation_level <= TRX_ISO_READ_COMMITTED) {
/* At READ COMMITTED or READ UNCOMMITTED
isolation levels, do not lock committed
delete-marked records. */
@@ -5054,8 +5013,16 @@ wrong_offs:
goto no_gap_lock;
}
- if (!set_also_gap_locks
- || (unique_search && !rec_get_deleted_flag(rec, comp))
+#ifdef WITH_WSREP
+ if (UNIV_UNLIKELY(!set_also_gap_locks)) {
+ ut_ad(wsrep_thd_skip_locking(trx->mysql_thd));
+ goto no_gap_lock;
+ }
+#else /* WITH_WSREP */
+ ut_ad(set_also_gap_locks);
+#endif /* WITH_WSREP */
+
+ if ((unique_search && !rec_get_deleted_flag(rec, comp))
|| dict_index_is_spatial(index)) {
goto no_gap_lock;
@@ -5092,9 +5059,7 @@ no_gap_lock:
switch (err) {
const rec_t* old_vers;
case DB_SUCCESS_LOCKED_REC:
- if (srv_locks_unsafe_for_binlog
- || trx->isolation_level
- <= TRX_ISO_READ_COMMITTED) {
+ if (trx->isolation_level <= TRX_ISO_READ_COMMITTED) {
/* Note that a record of
prebuilt->index was locked. */
prebuilt->new_rec_locks = 1;
@@ -5158,7 +5123,7 @@ no_gap_lock:
goto next_rec;
}
- did_semi_consistent_read = TRUE;
+ did_semi_consistent_read = true;
rec = old_vers;
break;
case DB_RECORD_NOT_FOUND:
@@ -5349,9 +5314,7 @@ requires_clust_rec:
break;
case DB_SUCCESS_LOCKED_REC:
ut_a(clust_rec != NULL);
- if (srv_locks_unsafe_for_binlog
- || trx->isolation_level
- <= TRX_ISO_READ_COMMITTED) {
+ if (trx->isolation_level <= TRX_ISO_READ_COMMITTED) {
/* Note that the clustered index record
was locked. */
prebuilt->new_rec_locks = 2;
@@ -5367,8 +5330,7 @@ requires_clust_rec:
/* The record is delete marked: we can skip it */
- if ((srv_locks_unsafe_for_binlog
- || trx->isolation_level <= TRX_ISO_READ_COMMITTED)
+ if (trx->isolation_level <= TRX_ISO_READ_COMMITTED
&& prebuilt->select_lock_type != LOCK_NONE) {
/* No need to keep a lock on a delete-marked
@@ -5591,7 +5553,7 @@ next_rec:
== ROW_READ_DID_SEMI_CONSISTENT)) {
prebuilt->row_read_type = ROW_READ_TRY_SEMI_CONSISTENT;
}
- did_semi_consistent_read = FALSE;
+ did_semi_consistent_read = false;
prebuilt->new_rec_locks = 0;
vrow = NULL;
@@ -5690,7 +5652,7 @@ page_read_error:
== ROW_READ_DID_SEMI_CONSISTENT)) {
prebuilt->row_read_type = ROW_READ_TRY_SEMI_CONSISTENT;
}
- did_semi_consistent_read = FALSE;
+ did_semi_consistent_read = false;
lock_table_wait:
mtr.commit();
@@ -5726,8 +5688,7 @@ lock_table_wait:
moves_up, &mtr);
}
- if ((srv_locks_unsafe_for_binlog
- || trx->isolation_level <= TRX_ISO_READ_COMMITTED)
+ if (trx->isolation_level <= TRX_ISO_READ_COMMITTED
&& !same_user_rec) {
/* Since we were not able to restore the cursor
@@ -5763,10 +5724,10 @@ normal_return:
{
/* handler_index_cond_check() may pull TR_table search
which initiates another row_search_mvcc(). */
- ulint n_active_thrs= trx->lock.n_active_thrs;
- trx->lock.n_active_thrs= 1;
- que_thr_stop_for_mysql_no_error(thr, trx);
- trx->lock.n_active_thrs= n_active_thrs - 1;
+ ut_d(ulint n_active_thrs= trx->lock.n_active_thrs);
+ ut_d(trx->lock.n_active_thrs= 1);
+ thr->stop_no_error();
+ ut_d(trx->lock.n_active_thrs= n_active_thrs - 1);
}
mtr.commit();
@@ -5805,7 +5766,7 @@ normal_return:
func_exit:
trx->op_info = "";
- if (heap != NULL) {
+ if (UNIV_LIKELY_NULL(heap)) {
mem_heap_free(heap);
}