path: root/storage/innobase/btr/btr0cur.cc
Diffstat (limited to 'storage/innobase/btr/btr0cur.cc')
-rw-r--r--  storage/innobase/btr/btr0cur.cc  679
1 file changed, 172 insertions, 507 deletions
diff --git a/storage/innobase/btr/btr0cur.cc b/storage/innobase/btr/btr0cur.cc
index 7954c216c86..c6f11aa518f 100644
--- a/storage/innobase/btr/btr0cur.cc
+++ b/storage/innobase/btr/btr0cur.cc
@@ -204,15 +204,15 @@ btr_cur_latch_leaves(
compile_time_assert(int(MTR_MEMO_PAGE_S_FIX) == int(RW_S_LATCH));
compile_time_assert(int(MTR_MEMO_PAGE_X_FIX) == int(RW_X_LATCH));
compile_time_assert(int(MTR_MEMO_PAGE_SX_FIX) == int(RW_SX_LATCH));
- ut_ad(block->page.id().space() == cursor->index->table->space->id);
+ ut_ad(block->page.id().space() == cursor->index()->table->space->id);
ut_ad(block->page.in_file());
ut_ad(srv_read_only_mode
- || mtr->memo_contains_flagged(&cursor->index->lock,
+ || mtr->memo_contains_flagged(&cursor->index()->lock,
MTR_MEMO_S_LOCK
| MTR_MEMO_X_LOCK
| MTR_MEMO_SX_LOCK));
auto rtr_info = cursor->rtr_info;
- if (UNIV_LIKELY_NULL(rtr_info) && !cursor->index->is_spatial()) {
+ if (UNIV_LIKELY_NULL(rtr_info) && !cursor->index()->is_spatial()) {
rtr_info = nullptr;
}
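
The dominant mechanical change in this patch: btr_cur_t no longer carries its own dict_index_t* member, so every former read of cursor->index becomes a call to the cursor->index() accessor, and the assignments move to the embedded page cursor (page_cur.index, as the open_leaf() hunks below show). A minimal standalone sketch of the assumed shape; the member layout here is illustrative, not the real InnoDB declarations:

struct dict_index_t;
struct buf_block_t;

struct page_cur_t {
  dict_index_t* index;  /* set once, when the search begins */
  buf_block_t*  block;  /* current page; also moved into page_cur_t */
};

struct btr_cur_t {
  page_cur_t page_cur;
  /* assumed accessor: forwards to the embedded page cursor */
  dict_index_t* index() const { return page_cur.index; }
};
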
@@ -249,7 +249,7 @@ latch_block:
case BTR_MODIFY_TREE:
/* It is exclusive for other operations which calls
btr_page_set_prev() */
- ut_ad(mtr->memo_contains_flagged(&cursor->index->lock,
+ ut_ad(mtr->memo_contains_flagged(&cursor->index()->lock,
MTR_MEMO_X_LOCK
| MTR_MEMO_SX_LOCK));
save = mtr->get_savepoint();
@@ -258,7 +258,7 @@ latch_block:
if (left_page_no != FIL_NULL) {
buf_block_t *b = btr_block_get(
- *cursor->index, left_page_no, RW_X_LATCH,
+ *cursor->index(), left_page_no, RW_X_LATCH,
true, mtr);
if (latch_leaves) {
@@ -299,7 +299,7 @@ latch_block:
save = mtr->get_savepoint();
buf_block_t* b = btr_block_get(
- *cursor->index, right_page_no, RW_X_LATCH,
+ *cursor->index(), right_page_no, RW_X_LATCH,
true, mtr);
if (latch_leaves) {
latch_leaves->savepoints[2] = save;
@@ -331,7 +331,8 @@ latch_block:
if (left_page_no != FIL_NULL) {
save = mtr->get_savepoint();
cursor->left_block = btr_block_get(
- *cursor->index, left_page_no, mode, true, mtr);
+ *cursor->index(), left_page_no,
+ mode, true, mtr);
if (latch_leaves) {
latch_leaves->savepoints[0] = save;
latch_leaves->blocks[0] = cursor->left_block;
@@ -340,7 +341,7 @@ latch_block:
goto latch_block;
case BTR_CONT_MODIFY_TREE:
- ut_ad(cursor->index->is_spatial());
+ ut_ad(cursor->index()->is_spatial());
return;
}
@@ -756,8 +757,8 @@ btr_cur_optimistic_latch_leaves(
case BTR_MODIFY_LEAF:
return(buf_page_optimistic_get(*latch_mode, block,
modify_clock, mtr));
- case BTR_SEARCH_PREV:
- case BTR_MODIFY_PREV:
+ case BTR_SEARCH_PREV: /* btr_pcur_move_backward_from_page() */
+ case BTR_MODIFY_PREV: /* Ditto, or ibuf_insert() */
uint32_t curr_page_no, left_page_no;
{
transactional_shared_lock_guard<block_lock> g{
@@ -779,18 +780,17 @@ btr_cur_optimistic_latch_leaves(
if (left_page_no != FIL_NULL) {
cursor->left_block = buf_page_get_gen(
- page_id_t(cursor->index->table->space_id,
+ page_id_t(cursor->index()->table->space_id,
left_page_no),
- cursor->index->table->space->zip_size(),
+ cursor->index()->table->space->zip_size(),
mode, nullptr, BUF_GET_POSSIBLY_FREED, mtr);
if (cursor->left_block
&& btr_page_get_next(
cursor->left_block->page.frame)
!= curr_page_no) {
- /* release the left block */
- btr_leaf_page_release(
- cursor->left_block, mode, mtr);
+release_left_block:
+ mtr->release_last_page();
return false;
}
} else {
@@ -807,17 +807,14 @@ btr_cur_optimistic_latch_leaves(
ut_ad(2 <= block->page.buf_fix_count());
*latch_mode = btr_latch_mode(mode);
return(true);
- } else {
- /* release the block and decrement of
- buf_fix_count which was incremented
- in buf_page_optimistic_get() */
- btr_leaf_page_release(block, mode, mtr);
}
+
+ mtr->release_last_page();
}
ut_ad(block->page.buf_fix_count());
if (cursor->left_block) {
- btr_leaf_page_release(cursor->left_block, mode, mtr);
+ goto release_left_block;
}
}
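
Both early-return paths above now end in mtr->release_last_page() rather than btr_leaf_page_release(). That substitution is sound because the page being dropped is, by construction, the most recently acquired entry in the mini-transaction memo, so popping the last memo slot releases exactly that latch and buffer-fix. A toy model of the assumed semantics (not the real mtr_t):

#include <vector>

struct page_slot {
  int page_no;  /* stands in for the latch + buffer-fix handle */
};

struct mini_trx {
  std::vector<page_slot> memo;
  void acquire(int page_no) { memo.push_back({page_no}); }
  /* undo the latest acquire: release its latch and buffer-fix */
  void release_last_page() { memo.pop_back(); }
};
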
@@ -1212,7 +1209,6 @@ If mode is PAGE_CUR_LE , cursor is left at the place where an insert of the
search tuple should be performed in the B-tree. InnoDB does an insert
immediately after the cursor. Thus, the cursor may end up on a user record,
or on a page infimum record.
-@param index index
@param level the tree level of search
@param tuple data tuple; NOTE: n_fields_cmp in tuple must be set so that
it cannot get compared to the node ptr page number field!
@@ -1231,7 +1227,7 @@ or on a page infimum record.
@param autoinc PAGE_ROOT_AUTO_INC to be written (0 if none)
@return DB_SUCCESS on success or error code otherwise */
TRANSACTIONAL_TARGET
-dberr_t btr_cur_search_to_nth_level(dict_index_t *index, ulint level,
+dberr_t btr_cur_search_to_nth_level(ulint level,
const dtuple_t *tuple,
page_cur_mode_t mode,
btr_latch_mode latch_mode,
@@ -1271,6 +1267,7 @@ dberr_t btr_cur_search_to_nth_level(dict_index_t *index, ulint level,
bool rtree_parent_modified = false;
bool mbr_adj = false;
bool found = false;
+ dict_index_t * const index = cursor->index();
DBUG_ENTER("btr_cur_search_to_nth_level");
@@ -1358,7 +1355,6 @@ dberr_t btr_cur_search_to_nth_level(dict_index_t *index, ulint level,
ut_ad(autoinc == 0 || level == 0);
cursor->flag = BTR_CUR_BINARY;
- cursor->index = index;
#ifndef BTR_CUR_ADAPT
guess = NULL;
@@ -1470,6 +1466,7 @@ x_latch_index:
latch_mode);
page_cursor = btr_cur_get_page_cur(cursor);
+ page_cursor->index = index;
const ulint zip_size = index->table->space->zip_size();
@@ -1845,6 +1842,8 @@ retry_page_get:
}
}
+ page_cursor->block = block;
+
if (dict_index_is_spatial(index) && page_mode >= PAGE_CUR_CONTAIN) {
ut_ad(need_path);
found = rtr_cur_search_with_match(
@@ -1881,7 +1880,7 @@ retry_page_get:
We only need the byte prefix comparison for the purpose
of updating the adaptive hash index. */
if (page_cur_search_with_match_bytes(
- block, index, tuple, page_mode, &up_match, &up_bytes,
+ tuple, page_mode, &up_match, &up_bytes,
&low_match, &low_bytes, page_cursor)) {
err = DB_CORRUPTION;
goto func_exit;
@@ -1891,7 +1890,7 @@ retry_page_get:
/* Search for complete index fields. */
up_bytes = low_bytes = 0;
if (page_cur_search_with_match(
- block, index, tuple, page_mode, &up_match,
+ tuple, page_mode, &up_match,
&low_match, page_cursor,
need_path ? cursor->rtr_info : nullptr)) {
err = DB_CORRUPTION;
@@ -2140,15 +2139,15 @@ need_opposite_intention:
/* we should sx-latch root page, if released already.
It contains seg_header. */
if (n_releases > 0) {
- mtr_block_sx_latch_at_savepoint(
- mtr, tree_savepoints[0],
+ mtr->sx_latch_at_savepoint(
+ tree_savepoints[0],
tree_blocks[0]);
}
/* x-latch the branch blocks not released yet. */
for (ulint i = n_releases; i <= n_blocks; i++) {
- mtr_block_x_latch_at_savepoint(
- mtr, tree_savepoints[i],
+ mtr->x_latch_at_savepoint(
+ tree_savepoints[i],
tree_blocks[i]);
}
}
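
The free-standing mtr_block_sx_latch_at_savepoint() and mtr_block_x_latch_at_savepoint() helpers become mtr_t member functions here. The names come straight from the hunk; the signatures below are an assumption for illustration:

#include <cstddef>

struct buf_block_t;

struct mtr_t {
  void sx_latch_at_savepoint(std::size_t savepoint, buf_block_t* block);
  void x_latch_at_savepoint(std::size_t savepoint, buf_block_t* block);
};
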
@@ -2212,8 +2211,9 @@ need_opposite_intention:
? cursor->rtr_info : NULL;
for (ulint i = 0; i < n_blocks; i++) {
+ page_cursor->block = tree_blocks[i];
if (page_cur_search_with_match(
- tree_blocks[i], index, tuple,
+ tuple,
page_mode, &up_match,
&low_match, page_cursor,
rtr_info)) {
@@ -2415,10 +2415,7 @@ dberr_t btr_cur_t::open_leaf(bool first, dict_index_t *index,
{
ulint node_ptr_max_size= srv_page_size / 2;
btr_intention_t lock_intention;
- buf_block_t *tree_blocks[BTR_MAX_LEVELS]; // FIXME: just use mtr->m_memo
- ulint tree_savepoints[BTR_MAX_LEVELS];
ulint n_blocks= 0;
- ulint n_releases= 0;
mem_heap_t *heap= nullptr;
rec_offs offsets_[REC_OFFS_NORMAL_SIZE];
rec_offs *offsets= offsets_;
@@ -2453,7 +2450,7 @@ dberr_t btr_cur_t::open_leaf(bool first, dict_index_t *index,
/* Most of delete-intended operations are purging. Free blocks
and read IO bandwidth should be prioritized for them, when the
history list is growing huge. */
- savepoint+= sizeof(mtr_memo_slot_t);
+ savepoint++;
if (lock_intention == BTR_INTENTION_DELETE
&& buf_pool.n_pend_reads
&& trx_sys.history_size_approx() > BTR_CUR_FINE_HISTORY_LENGTH)
@@ -2469,7 +2466,7 @@ dberr_t btr_cur_t::open_leaf(bool first, dict_index_t *index,
if (latch_by_caller)
break;
ut_ad(latch_mode != BTR_SEARCH_TREE);
- savepoint+= sizeof(mtr_memo_slot_t);
+ savepoint++;
mtr_s_lock_index(index, mtr);
}
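
In both hunks above, savepoint += sizeof(mtr_memo_slot_t) becomes savepoint++, which implies that a savepoint is now an index into a container of memo slots rather than a byte offset into a dynamic array. A hedged model of that change:

#include <cstddef>
#include <vector>

struct mtr_memo_slot_t {
  void* object;
  unsigned type;
};

struct mtr_memo_model {
  std::vector<mtr_memo_slot_t> memo;
  /* a savepoint is simply the current number of memo entries */
  std::size_t get_savepoint() const { return memo.size(); }
  /* before: the next slot lived sizeof(mtr_memo_slot_t) bytes further;
     after:  the next slot is just index savepoint + 1 */
};
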
@@ -2478,9 +2475,9 @@ dberr_t btr_cur_t::open_leaf(bool first, dict_index_t *index,
const rw_lock_type_t root_leaf_rw_latch=
btr_cur_latch_for_root_leaf(latch_mode);
- this->index = index;
+ page_cur.index = index;
- page_id_t page_id(index->table->space_id, index->page);
+ uint32_t page= index->page;
const auto zip_size= index->table->space->zip_size();
if (root_leaf_rw_latch == RW_X_LATCH)
@@ -2489,19 +2486,16 @@ dberr_t btr_cur_t::open_leaf(bool first, dict_index_t *index,
for (ulint height= ULINT_UNDEFINED;;)
{
ut_ad(n_blocks < BTR_MAX_LEVELS);
-#if 0 // FIXME: encryption.innodb_onlinealter_encryption innodb.alter_algorithm
- ut_ad(savepoint + n_blocks * sizeof(mtr_memo_slot_t) == mtr->get_savepoint());
-#endif
- tree_savepoints[n_blocks]= mtr->get_savepoint();
+ ut_ad(savepoint + n_blocks == mtr->get_savepoint());
const rw_lock_type_t rw_latch= height && latch_mode != BTR_MODIFY_TREE
? upper_rw_latch
: RW_NO_LATCH;
- buf_block_t *block= buf_page_get_gen(page_id, zip_size, rw_latch, nullptr,
- BUF_GET, mtr, &err,
- !height && !index->is_clust());
+ buf_block_t* block=
+ btr_block_get(*index, page, rw_latch, !height && !index->is_clust(), mtr,
+ &err);
+
ut_ad(!block == (err != DB_SUCCESS));
- tree_blocks[n_blocks]= block;
if (!block)
{
@@ -2510,20 +2504,17 @@ dberr_t btr_cur_t::open_leaf(bool first, dict_index_t *index,
break;
}
- const page_t *page= buf_block_get_frame(block);
-
if (first)
page_cur_set_before_first(block, &page_cur);
else
page_cur_set_after_last(block, &page_cur);
- ut_ad(fil_page_index_page_check(page));
- ut_ad(index->id == btr_page_get_index_id(page));
+ const uint32_t l= btr_page_get_level(block->page.frame);
if (height == ULINT_UNDEFINED)
{
/* We are in the root node */
- height= btr_page_get_level(page);
+ height= l;
if (height);
else if (upper_rw_latch != root_leaf_rw_latch)
{
@@ -2538,6 +2529,9 @@ dberr_t btr_cur_t::open_leaf(bool first, dict_index_t *index,
else
{
reached_leaf:
+ const auto leaf_savepoint= mtr->get_savepoint();
+ ut_ad(leaf_savepoint);
+
if (rw_latch == RW_NO_LATCH)
btr_cur_latch_leaves(block, latch_mode, this, mtr);
@@ -2547,28 +2541,14 @@ dberr_t btr_cur_t::open_leaf(bool first, dict_index_t *index,
case BTR_CONT_SEARCH_TREE:
break;
default:
- if (UNIV_UNLIKELY(srv_read_only_mode))
- break;
- if (!latch_by_caller)
- /* Release the tree s-latch */
- mtr->release_s_latch_at_savepoint(savepoint -
- sizeof(mtr_memo_slot_t),
- &index->lock);
-
- /* release upper blocks */
- for (; n_releases < n_blocks; n_releases++)
- mtr_release_block_at_savepoint(mtr,
-#if 0
- savepoint + n_releases,
-#else
- tree_savepoints[n_releases],
-#endif
- tree_blocks[n_releases]);
+ /* Release index->lock if needed, and the non-leaf pages. */
+ mtr->rollback_to_savepoint(savepoint - !latch_by_caller,
+ leaf_savepoint - 1);
}
break;
}
}
- else if (UNIV_UNLIKELY(height != btr_page_get_level(page)))
+ else if (UNIV_UNLIKELY(height != l))
{
corrupted:
err= DB_CORRUPTION;
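
The per-block release loop collapses into one ranged call. Judging by the call site, rollback_to_savepoint(begin, end) releases only the memo entries in the half-open range [begin, end): index->lock (when this function acquired it, i.e. !latch_by_caller) and the upper pages are dropped, while the leaf latches registered from leaf_savepoint - 1 onwards survive. A toy version of that assumed behaviour, continuing the vector model above:

#include <cstddef>
#include <vector>

struct memo_slot {
  void* object;
  unsigned type;
};

/* release slots [begin, end) and compact the memo; slots at and past
   `end` (e.g. the leaf page) keep their latches */
inline void rollback_to_savepoint(std::vector<memo_slot>& memo,
                                  std::size_t begin, std::size_t end)
{
  /* the real mtr_t would release each latch/buffer-fix first */
  memo.erase(memo.begin() + static_cast<std::ptrdiff_t>(begin),
             memo.begin() + static_cast<std::ptrdiff_t>(end));
}
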
@@ -2592,7 +2572,8 @@ dberr_t btr_cur_t::open_leaf(bool first, dict_index_t *index,
ut_ad(latch_mode != BTR_MODIFY_TREE || upper_rw_latch == RW_X_LATCH);
if (latch_mode != BTR_MODIFY_TREE);
- else if (btr_cur_need_opposite_intention(page, lock_intention, node_ptr))
+ else if (btr_cur_need_opposite_intention(block->page.frame,
+ lock_intention, node_ptr))
{
/* If the rec is the first or last in the page for pessimistic
delete intention, it might cause node_ptr insert for the upper
@@ -2600,40 +2581,45 @@ dberr_t btr_cur_t::open_leaf(bool first, dict_index_t *index,
mtr->rollback_to_savepoint(savepoint);
lock_intention= BTR_INTENTION_BOTH;
- page_id.set_page_no(index->page);
+ page= index->page;
height= ULINT_UNDEFINED;
n_blocks= 0;
- n_releases= 0;
continue;
}
- else if (!btr_cur_will_modify_tree(index, page, lock_intention, node_ptr,
- node_ptr_max_size, zip_size, mtr))
- {
- ut_ad(n_releases <= n_blocks);
- /* release non-leaf pages (except the root) */
- for (; n_releases < n_blocks; n_releases++)
- if (n_releases)
- mtr_release_block_at_savepoint(mtr, tree_savepoints[n_releases],
- tree_blocks[n_releases]);
- }
-
- if (latch_mode == BTR_MODIFY_TREE && !height)
+ else
{
- ut_ad(upper_rw_latch == RW_X_LATCH);
- /* we should U-latch root page, if released already.
- It contains seg_header. */
- if (n_releases)
- mtr_block_sx_latch_at_savepoint(mtr, tree_savepoints[0],
- tree_blocks[0]);
+ if (!btr_cur_will_modify_tree(index, block->page.frame,
+ lock_intention, node_ptr,
+ node_ptr_max_size, zip_size, mtr))
+ {
+ ut_ad(n_blocks);
+ /* release buffer-fixes on pages that will not be modified
+ (except the root) */
+ if (n_blocks > 1)
+ {
+ mtr->rollback_to_savepoint(savepoint + 1, savepoint + n_blocks - 1);
+ n_blocks= 1;
+ }
+ }
- /* x-latch the branch blocks not released yet. */
- for (ulint i= n_releases; i <= n_blocks; i++)
- mtr_block_x_latch_at_savepoint(mtr,
- tree_savepoints[i], tree_blocks[i]);
+ if (!height)
+ {
+ if (page == index->page)
+ mtr->upgrade_buffer_fix(savepoint, RW_X_LATCH);
+ else
+ {
+ /* The U-latch protects BTR_SEG_HEAP, BTR_SEG_TOP. */
+ mtr->upgrade_buffer_fix(savepoint, RW_SX_LATCH);
+
+ /* Upgrade buffer-fix to exclusive latches on all remaining pages. */
+ for (ulint i= 1; i <= n_blocks; i++)
+ mtr->upgrade_buffer_fix(savepoint + i, RW_X_LATCH);
+ }
+ }
}
/* Go to the child node */
- page_id.set_page_no(btr_node_ptr_get_child_page_no(node_ptr, offsets));
+ page= btr_node_ptr_get_child_page_no(node_ptr, offsets);
n_blocks++;
}
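
When the leaf level is reached under BTR_MODIFY_TREE, the rewritten code upgrades latches in place instead of re-latching at saved positions: the root is upgraded to RW_X_LATCH if it is itself the leaf, and otherwise to RW_SX_LATCH (the U-latch that protects the BTR_SEG_HEAP and BTR_SEG_TOP segment headers, per the comment above) while every remaining page on the path goes to RW_X_LATCH. A sketch of the assumed upgrade_buffer_fix() primitive:

enum rw_lock_type_t { RW_NO_LATCH, RW_S_LATCH, RW_SX_LATCH, RW_X_LATCH };

struct memo_entry {
  void* block;
  rw_lock_type_t held;
};

/* upgrade a memo entry that holds only a buffer-fix (RW_NO_LATCH)
   to a real page latch of the requested mode */
inline void upgrade_buffer_fix(memo_entry& e, rw_lock_type_t latch)
{
  /* the real mtr_t would acquire block->page.lock in `latch` mode */
  e.held = latch;
}
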
@@ -2643,326 +2629,6 @@ dberr_t btr_cur_t::open_leaf(bool first, dict_index_t *index,
return err;
}
-/*****************************************************************//**
-Opens a cursor at either end of an index. */
-dberr_t
-btr_cur_open_at_index_side(
- bool from_left, /*!< in: true if open to the low end,
- false if to the high end */
- dict_index_t* index, /*!< in: index */
- btr_latch_mode latch_mode, /*!< in: latch mode */
- btr_cur_t* cursor, /*!< in/out: cursor */
- ulint level, /*!< in: level to search for
- (0=leaf). */
- mtr_t* mtr) /*!< in/out: mini-transaction */
-{
- page_cur_t* page_cursor;
- ulint node_ptr_max_size = srv_page_size / 2;
- ulint height;
- rec_t* node_ptr;
- btr_intention_t lock_intention;
- buf_block_t* tree_blocks[BTR_MAX_LEVELS];
- ulint tree_savepoints[BTR_MAX_LEVELS];
- ulint n_blocks = 0;
- ulint n_releases = 0;
- mem_heap_t* heap = NULL;
- rec_offs offsets_[REC_OFFS_NORMAL_SIZE];
- rec_offs* offsets = offsets_;
- dberr_t err;
-
- rec_offs_init(offsets_);
-
- ut_ad(level != ULINT_UNDEFINED);
-
- const bool latch_by_caller = latch_mode & BTR_ALREADY_S_LATCHED;
- latch_mode = btr_latch_mode(latch_mode & ~BTR_ALREADY_S_LATCHED);
-
- lock_intention = btr_cur_get_and_clear_intention(&latch_mode);
-
- /* This function doesn't need to lock left page of the leaf page */
- if (latch_mode == BTR_SEARCH_PREV) {
- latch_mode = BTR_SEARCH_LEAF;
- } else if (latch_mode == BTR_MODIFY_PREV) {
- latch_mode = BTR_MODIFY_LEAF;
- }
-
- /* Store the position of the tree latch we push to mtr so that we
- know how to release it when we have latched the leaf node */
-
- ulint savepoint = mtr_set_savepoint(mtr);
-
- rw_lock_type_t upper_rw_latch;
-
- switch (latch_mode) {
- case BTR_CONT_MODIFY_TREE:
- case BTR_CONT_SEARCH_TREE:
- upper_rw_latch = RW_NO_LATCH;
- break;
- case BTR_MODIFY_TREE:
- /* Most of delete-intended operations are purging.
- Free blocks and read IO bandwidth should be prior
- for them, when the history list is glowing huge. */
- if (lock_intention == BTR_INTENTION_DELETE
- && buf_pool.n_pend_reads
- && trx_sys.history_size_approx()
- > BTR_CUR_FINE_HISTORY_LENGTH) {
- mtr_x_lock_index(index, mtr);
- } else {
- mtr_sx_lock_index(index, mtr);
- }
- upper_rw_latch = RW_X_LATCH;
- break;
- default:
- ut_ad(!latch_by_caller
- || mtr->memo_contains_flagged(&index->lock,
- MTR_MEMO_SX_LOCK
- | MTR_MEMO_S_LOCK));
- if (!srv_read_only_mode) {
- if (!latch_by_caller) {
- ut_ad(latch_mode != BTR_SEARCH_TREE);
- mtr_s_lock_index(index, mtr);
- }
- upper_rw_latch = RW_S_LATCH;
- } else {
- upper_rw_latch = RW_NO_LATCH;
- }
- }
-
- const rw_lock_type_t root_leaf_rw_latch = btr_cur_latch_for_root_leaf(
- latch_mode);
-
- page_cursor = btr_cur_get_page_cur(cursor);
- cursor->index = index;
-
- page_id_t page_id(index->table->space_id, index->page);
- const ulint zip_size = index->table->space->zip_size();
-
- if (root_leaf_rw_latch == RW_X_LATCH) {
- node_ptr_max_size = btr_node_ptr_max_size(index);
- }
-
- height = ULINT_UNDEFINED;
-
- for (;;) {
- ut_ad(n_blocks < BTR_MAX_LEVELS);
- tree_savepoints[n_blocks] = mtr_set_savepoint(mtr);
-
- const ulint rw_latch = height
- && (latch_mode != BTR_MODIFY_TREE || height == level)
- ? upper_rw_latch : RW_NO_LATCH;
- buf_block_t* block = buf_page_get_gen(page_id, zip_size,
- rw_latch, NULL, BUF_GET,
- mtr, &err,
- height == 0
- && !index->is_clust());
- ut_ad((block != NULL) == (err == DB_SUCCESS));
- tree_blocks[n_blocks] = block;
-
- if (!block) {
- if (err == DB_DECRYPTION_FAILED) {
- btr_decryption_failed(*index);
- }
-
- goto exit_loop;
- }
-
- const page_t* page = buf_block_get_frame(block);
-
- if (height == ULINT_UNDEFINED
- && page_is_leaf(page)
- && rw_latch != RW_NO_LATCH
- && rw_latch != root_leaf_rw_latch) {
- /* We should retry to get the page, because the root page
- is latched with different level as a leaf page. */
- ut_ad(root_leaf_rw_latch != RW_NO_LATCH);
- ut_ad(rw_latch == RW_S_LATCH);
-
- ut_ad(n_blocks == 0);
- mtr_release_block_at_savepoint(
- mtr, tree_savepoints[n_blocks],
- tree_blocks[n_blocks]);
-
- upper_rw_latch = root_leaf_rw_latch;
- continue;
- }
-
- ut_ad(fil_page_index_page_check(page));
- ut_ad(index->id == btr_page_get_index_id(page));
-
- if (height == ULINT_UNDEFINED) {
- /* We are in the root node */
-
- height = btr_page_get_level(page);
- ut_a(height >= level);
- } else {
- /* TODO: flag the index corrupted if this fails */
- ut_ad(height == btr_page_get_level(page));
- }
-
- if (height == 0) {
- if (rw_latch == RW_NO_LATCH) {
- btr_cur_latch_leaves(block, latch_mode,
- cursor, mtr);
- }
-
- /* In versions <= 3.23.52 we had forgotten to
- release the tree latch here. If in an index
- scan we had to scan far to find a record
- visible to the current transaction, that could
- starve others waiting for the tree latch. */
-
- switch (latch_mode) {
- case BTR_MODIFY_TREE:
- case BTR_CONT_MODIFY_TREE:
- case BTR_CONT_SEARCH_TREE:
- break;
- default:
- if (UNIV_UNLIKELY(srv_read_only_mode)) {
- break;
- }
- if (!latch_by_caller) {
- /* Release the tree s-latch */
- mtr_release_s_latch_at_savepoint(
- mtr, savepoint, &index->lock);
- }
-
- /* release upper blocks */
- for (; n_releases < n_blocks; n_releases++) {
- mtr_release_block_at_savepoint(
- mtr,
- tree_savepoints[n_releases],
- tree_blocks[n_releases]);
- }
- }
- } else if (height == level /* height != 0 */
- && UNIV_LIKELY(!srv_read_only_mode)) {
- /* We already have the block latched. */
- ut_ad(latch_mode == BTR_SEARCH_TREE);
- ut_ad(latch_by_caller);
- ut_ad(upper_rw_latch == RW_S_LATCH);
- ut_ad(mtr->memo_contains_flagged(block,
- MTR_MEMO_PAGE_S_FIX));
- ut_ad(mtr->memo_contains(index->lock,
- MTR_MEMO_SX_LOCK));
- /* because has sx-latch of index,
- can release upper blocks. */
- for (; n_releases < n_blocks; n_releases++) {
- mtr_release_block_at_savepoint(
- mtr,
- tree_savepoints[n_releases],
- tree_blocks[n_releases]);
- }
- }
-
- if (from_left) {
- page_cur_set_before_first(block, page_cursor);
- } else {
- page_cur_set_after_last(block, page_cursor);
- }
-
- if (height == level) {
- break;
- }
-
- ut_ad(height > 0);
-
- if (from_left
- ? !page_cur_move_to_next(page_cursor)
- : !page_cur_move_to_prev(page_cursor)) {
- err = DB_CORRUPTION;
- goto exit_loop;
- }
-
- height--;
-
- node_ptr = page_cur_get_rec(page_cursor);
- offsets = rec_get_offsets(node_ptr, cursor->index, offsets,
- 0, ULINT_UNDEFINED, &heap);
-
- /* If the rec is the first or last in the page for
- pessimistic delete intention, it might cause node_ptr insert
- for the upper level. We should change the intention and retry.
- */
- if (latch_mode == BTR_MODIFY_TREE
- && btr_cur_need_opposite_intention(
- page, lock_intention, node_ptr)) {
-
- ut_ad(upper_rw_latch == RW_X_LATCH);
- /* release all blocks */
- for (; n_releases <= n_blocks; n_releases++) {
- mtr_release_block_at_savepoint(
- mtr, tree_savepoints[n_releases],
- tree_blocks[n_releases]);
- }
-
- lock_intention = BTR_INTENTION_BOTH;
-
- page_id.set_page_no(dict_index_get_page(index));
-
- height = ULINT_UNDEFINED;
-
- n_blocks = 0;
- n_releases = 0;
-
- continue;
- }
-
- if (latch_mode == BTR_MODIFY_TREE
- && !btr_cur_will_modify_tree(
- cursor->index, page, lock_intention, node_ptr,
- node_ptr_max_size, zip_size, mtr)) {
- ut_ad(upper_rw_latch == RW_X_LATCH);
- ut_ad(n_releases <= n_blocks);
-
- /* we can release upper blocks */
- for (; n_releases < n_blocks; n_releases++) {
- if (n_releases == 0) {
- /* we should not release root page
- to pin to same block. */
- continue;
- }
-
- /* release unused blocks to unpin */
- mtr_release_block_at_savepoint(
- mtr, tree_savepoints[n_releases],
- tree_blocks[n_releases]);
- }
- }
-
- if (height == level
- && latch_mode == BTR_MODIFY_TREE) {
- ut_ad(upper_rw_latch == RW_X_LATCH);
- /* we should sx-latch root page, if released already.
- It contains seg_header. */
- if (n_releases > 0) {
- mtr_block_sx_latch_at_savepoint(
- mtr, tree_savepoints[0],
- tree_blocks[0]);
- }
-
- /* x-latch the branch blocks not released yet. */
- for (ulint i = n_releases; i <= n_blocks; i++) {
- mtr_block_x_latch_at_savepoint(
- mtr, tree_savepoints[i],
- tree_blocks[i]);
- }
- }
-
- /* Go to the child node */
- page_id.set_page_no(
- btr_node_ptr_get_child_page_no(node_ptr, offsets));
-
- n_blocks++;
- }
-
- exit_loop:
- if (UNIV_LIKELY_NULL(heap)) {
- mem_heap_free(heap);
- }
-
- return err;
-}
-
/**********************************************************************//**
Positions a cursor at a randomly chosen position within a B-tree.
@return true if the index is available and we have put the cursor, false
@@ -3046,7 +2712,7 @@ btr_cur_open_at_rnd_pos(
latch_mode);
page_cursor = btr_cur_get_page_cur(cursor);
- cursor->index = index;
+ page_cursor->index = index;
page_id_t page_id(index->table->space_id, index->page);
const ulint zip_size = index->table->space->zip_size();
@@ -3146,7 +2812,8 @@ btr_cur_open_at_rnd_pos(
}
}
- page_cur_open_on_rnd_user_rec(block, page_cursor);
+ page_cursor->block = block;
+ page_cur_open_on_rnd_user_rec(page_cursor);
if (height == 0) {
@@ -3158,8 +2825,8 @@ btr_cur_open_at_rnd_pos(
height--;
node_ptr = page_cur_get_rec(page_cursor);
- offsets = rec_get_offsets(node_ptr, cursor->index, offsets,
- 0, ULINT_UNDEFINED, &heap);
+ offsets = rec_get_offsets(node_ptr, page_cursor->index,
+ offsets, 0, ULINT_UNDEFINED, &heap);
/* If the rec is the first or last in the page for
pessimistic delete intention, it might cause node_ptr insert
@@ -3191,8 +2858,8 @@ btr_cur_open_at_rnd_pos(
if (latch_mode == BTR_MODIFY_TREE
&& !btr_cur_will_modify_tree(
- cursor->index, page, lock_intention, node_ptr,
- node_ptr_max_size, zip_size, mtr)) {
+ page_cursor->index, page, lock_intention,
+ node_ptr, node_ptr_max_size, zip_size, mtr)) {
ut_ad(upper_rw_latch == RW_X_LATCH);
ut_ad(n_releases <= n_blocks);
@@ -3217,15 +2884,15 @@ btr_cur_open_at_rnd_pos(
/* we should sx-latch root page, if released already.
It contains seg_header. */
if (n_releases > 0) {
- mtr_block_sx_latch_at_savepoint(
- mtr, tree_savepoints[0],
+ mtr->sx_latch_at_savepoint(
+ tree_savepoints[0],
tree_blocks[0]);
}
/* x-latch the branch blocks not released yet. */
for (ulint i = n_releases; i <= n_blocks; i++) {
- mtr_block_x_latch_at_savepoint(
- mtr, tree_savepoints[i],
+ mtr->x_latch_at_savepoint(
+ tree_savepoints[i],
tree_blocks[i]);
}
}
@@ -3281,21 +2948,19 @@ btr_cur_insert_if_possible(
page_cursor = btr_cur_get_page_cur(cursor);
/* Now, try the insert */
- rec = page_cur_tuple_insert(page_cursor, tuple, cursor->index,
- offsets, heap, n_ext, mtr);
+ rec = page_cur_tuple_insert(page_cursor, tuple, offsets, heap, n_ext,
+ mtr);
/* If the record did not fit, reorganize.
For compressed pages, page_cur_tuple_insert()
attempted this already. */
if (!rec && !page_cur_get_page_zip(page_cursor)
- && btr_page_reorganize(page_cursor, cursor->index, mtr)
- == DB_SUCCESS) {
- rec = page_cur_tuple_insert(
- page_cursor, tuple, cursor->index,
- offsets, heap, n_ext, mtr);
+ && btr_page_reorganize(page_cursor, mtr) == DB_SUCCESS) {
+ rec = page_cur_tuple_insert(page_cursor, tuple, offsets, heap,
+ n_ext, mtr);
}
- ut_ad(!rec || rec_offs_validate(rec, cursor->index, *offsets));
+ ut_ad(!rec || rec_offs_validate(rec, page_cursor->index, *offsets));
return(rec);
}
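
As elsewhere in the patch, page_cur_tuple_insert() and btr_page_reorganize() drop their dict_index_t* argument: the page cursor now carries both the index and the block, so passing the index separately was redundant. A sketch of the implied signature change, with stub types and an illustrative _old suffix (not the exact InnoDB declarations):

struct page_cur_t;
struct dtuple_t;
struct mem_heap_t;
struct mtr_t;
struct dict_index_t;
typedef unsigned short rec_offs;
typedef unsigned char rec_t;
typedef unsigned long ulint;

/* before: the index travelled alongside the cursor */
rec_t* page_cur_tuple_insert_old(page_cur_t* cur, const dtuple_t* tuple,
                                 dict_index_t* index, rec_offs** offsets,
                                 mem_heap_t** heap, ulint n_ext, mtr_t* mtr);

/* after: the cursor's own index (page_cur_t::index) is used */
rec_t* page_cur_tuple_insert(page_cur_t* cur, const dtuple_t* tuple,
                             rec_offs** offsets, mem_heap_t** heap,
                             ulint n_ext, mtr_t* mtr);
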
@@ -3325,7 +2990,7 @@ btr_cur_ins_lock_and_undo(
request if yes */
rec_t* rec = btr_cur_get_rec(cursor);
- dict_index_t* index = cursor->index;
+ dict_index_t* index = cursor->index();
ut_ad(!dict_index_is_online_ddl(index)
|| dict_index_is_clust(index)
@@ -3497,7 +3162,7 @@ btr_cur_optimistic_insert(
block = btr_cur_get_block(cursor);
page = buf_block_get_frame(block);
- index = cursor->index;
+ index = cursor->index();
ut_ad(mtr->memo_contains_flagged(block, MTR_MEMO_PAGE_X_FIX));
ut_ad(!dict_index_is_online_ddl(index)
@@ -3613,8 +3278,7 @@ fail_err:
<< ib::hex(thr ? thr->graph->trx->id : 0)
<< ' ' << rec_printer(entry).str());
DBUG_EXECUTE_IF("do_page_reorganize",
- if (n_recs)
- ut_a(btr_page_reorganize(page_cursor, index, mtr)
+ ut_a(!n_recs || btr_page_reorganize(page_cursor, mtr)
== DB_SUCCESS););
/* Now, try the insert */
@@ -3657,9 +3321,8 @@ fail_err:
}
#endif
- *rec = page_cur_tuple_insert(
- page_cursor, entry, index, offsets, heap,
- n_ext, mtr);
+ *rec = page_cur_tuple_insert(page_cursor, entry, offsets, heap,
+ n_ext, mtr);
reorg = page_cursor_rec != page_cur_get_rec(page_cursor);
}
@@ -3681,10 +3344,10 @@ fail_err:
reorg = true;
/* If the record did not fit, reorganize */
- err = btr_page_reorganize(page_cursor, index, mtr);
+ err = btr_page_reorganize(page_cursor, mtr);
if (err != DB_SUCCESS
|| page_get_max_insert_size(page, 1) != max_size
- || !(*rec = page_cur_tuple_insert(page_cursor, entry, index,
+ || !(*rec = page_cur_tuple_insert(page_cursor, entry,
offsets, heap, n_ext,
mtr))) {
err = DB_CORRUPTION;
@@ -3780,7 +3443,7 @@ btr_cur_pessimistic_insert(
| BTR_NO_UNDO_LOG_FLAG)) */
mtr_t* mtr) /*!< in/out: mini-transaction */
{
- dict_index_t* index = cursor->index;
+ dict_index_t* index = cursor->index();
big_rec_t* big_rec_vec = NULL;
bool inherit = false;
uint32_t n_reserved = 0;
@@ -3943,7 +3606,7 @@ btr_cur_upd_lock_and_undo(
ut_ad((thr != NULL) || (flags & BTR_NO_LOCKING_FLAG));
rec = btr_cur_get_rec(cursor);
- index = cursor->index;
+ index = cursor->index();
ut_ad(rec_offs_validate(rec, index, offsets));
ut_ad(mtr->is_named_space(index->table->space));
@@ -4111,7 +3774,6 @@ btr_cur_update_alloc_zip_func(
/*==========================*/
page_zip_des_t* page_zip,/*!< in/out: compressed page */
page_cur_t* cursor, /*!< in/out: B-tree page cursor */
- dict_index_t* index, /*!< in: the index corresponding to cursor */
#ifdef UNIV_DEBUG
rec_offs* offsets,/*!< in/out: offsets of the cursor record */
#endif /* UNIV_DEBUG */
@@ -4120,6 +3782,7 @@ btr_cur_update_alloc_zip_func(
false=update-in-place */
mtr_t* mtr) /*!< in/out: mini-transaction */
{
+ dict_index_t* index = cursor->index;
/* Have a local copy of the variables as these can change
dynamically. */
@@ -4146,7 +3809,7 @@ btr_cur_update_alloc_zip_func(
return(false);
}
- if (btr_page_reorganize(cursor, index, mtr) == DB_SUCCESS) {
+ if (btr_page_reorganize(cursor, mtr) == DB_SUCCESS) {
rec_offs_make_valid(page_cur_get_rec(cursor), index,
page_is_leaf(page), offsets);
@@ -4345,7 +4008,7 @@ btr_cur_update_in_place(
ut_ad(page_is_leaf(cursor->page_cur.block->page.frame));
rec = btr_cur_get_rec(cursor);
- index = cursor->index;
+ index = cursor->index();
ut_ad(!index->is_ibuf());
ut_ad(rec_offs_validate(rec, index, offsets));
ut_ad(!!page_rec_is_comp(rec) == dict_table_is_comp(index->table));
@@ -4377,7 +4040,7 @@ btr_cur_update_in_place(
if (!btr_cur_update_alloc_zip(
page_zip, btr_cur_get_page_cur(cursor),
- index, offsets, rec_offs_size(offsets),
+ offsets, rec_offs_size(offsets),
false, mtr)) {
return(DB_ZIP_OVERFLOW);
}
@@ -4654,7 +4317,7 @@ btr_cur_optimistic_update(
block = btr_cur_get_block(cursor);
page = buf_block_get_frame(block);
rec = btr_cur_get_rec(cursor);
- index = cursor->index;
+ index = cursor->index();
ut_ad(index->has_locking());
ut_ad(trx_id > 0 || (flags & BTR_KEEP_SYS_FLAG)
|| index->table->is_temporary());
@@ -4758,7 +4421,7 @@ any_extern:
}
if (!btr_cur_update_alloc_zip(
- page_zip, page_cursor, index, *offsets,
+ page_zip, page_cursor, *offsets,
new_rec_size, true, mtr)) {
return(DB_ZIP_OVERFLOW);
}
@@ -4853,7 +4516,7 @@ any_extern:
btr_search_update_hash_on_delete(cursor);
}
- page_cur_delete_rec(page_cursor, index, *offsets, mtr);
+ page_cur_delete_rec(page_cursor, *offsets, mtr);
if (!page_cur_move_to_prev(page_cursor)) {
return DB_CORRUPTION;
@@ -4874,7 +4537,7 @@ any_extern:
was a rollback, the shortened metadata record
would have too many fields, and we would be unable to
know the size of the freed record. */
- err = btr_page_reorganize(page_cursor, index, mtr);
+ err = btr_page_reorganize(page_cursor, mtr);
if (err != DB_SUCCESS) {
goto func_exit;
}
@@ -5013,7 +4676,7 @@ btr_cur_pessimistic_update(
block = btr_cur_get_block(cursor);
page_zip = buf_block_get_page_zip(block);
- index = cursor->index;
+ index = cursor->index();
ut_ad(index->has_locking());
ut_ad(mtr->memo_contains_flagged(&index->lock, MTR_MEMO_X_LOCK |
@@ -5219,7 +4882,7 @@ btr_cur_pessimistic_update(
#endif /* UNIV_ZIP_DEBUG */
page_cursor = btr_cur_get_page_cur(cursor);
- page_cur_delete_rec(page_cursor, index, *offsets, mtr);
+ page_cur_delete_rec(page_cursor, *offsets, mtr);
if (!page_cur_move_to_prev(page_cursor)) {
err = DB_CORRUPTION;
@@ -5237,7 +4900,7 @@ btr_cur_pessimistic_update(
was a rollback, the shortened metadata record
would have too many fields, and we would be unable to
know the size of the freed record. */
- err = btr_page_reorganize(page_cursor, index, mtr);
+ err = btr_page_reorganize(page_cursor, mtr);
if (err != DB_SUCCESS) {
goto return_after_reservations;
}
@@ -5292,10 +4955,9 @@ btr_cur_pessimistic_update(
&& !big_rec_vec
&& page_is_leaf(block->page.frame)
&& !dict_index_is_online_ddl(index)) {
-
- mtr_memo_release(mtr, &index->lock,
- MTR_MEMO_X_LOCK | MTR_MEMO_SX_LOCK);
-
+#if 0 // FIXME: this used to be a no-op, and will cause trouble if enabled
+ mtr->release(index->lock);
+#endif
/* NOTE: We cannot release root block latch here, because it
has segment header and already modified in most of cases.*/
}
@@ -5327,7 +4989,7 @@ btr_cur_pessimistic_update(
/* btr_page_split_and_insert() in
btr_cur_pessimistic_insert() invokes
- mtr_memo_release(mtr, index->lock, MTR_MEMO_SX_LOCK).
+ mtr->release(index->lock).
We must keep the index->lock when we created a
big_rec, so that row_upd_clust_rec() can store the
big_rec in the same mini-transaction. */
@@ -5355,7 +5017,7 @@ btr_cur_pessimistic_update(
ut_a(err == DB_SUCCESS);
ut_a(rec);
ut_a(dummy_big_rec == NULL);
- ut_ad(rec_offs_validate(rec, cursor->index, *offsets));
+ ut_ad(rec_offs_validate(rec, cursor->index(), *offsets));
page_cursor->rec = rec;
/* Multiple transactions cannot simultaneously operate on the
@@ -5392,7 +5054,7 @@ btr_cur_pessimistic_update(
was a rollback, the shortened metadata record
would have too many fields, and we would be unable to
know the size of the freed record. */
- err = btr_page_reorganize(page_cursor, index, mtr);
+ err = btr_page_reorganize(page_cursor, mtr);
if (err != DB_SUCCESS) {
goto return_after_reservations;
}
@@ -5545,12 +5207,12 @@ btr_cur_compress_if_useful(
adjusted even when compression occurs */
mtr_t* mtr) /*!< in/out: mini-transaction */
{
- ut_ad(mtr->memo_contains_flagged(&cursor->index->lock,
+ ut_ad(mtr->memo_contains_flagged(&cursor->index()->lock,
MTR_MEMO_X_LOCK | MTR_MEMO_SX_LOCK));
ut_ad(mtr->memo_contains_flagged(btr_cur_get_block(cursor),
MTR_MEMO_PAGE_X_FIX));
- if (cursor->index->is_spatial()) {
+ if (cursor->index()->is_spatial()) {
const trx_t* trx = cursor->rtr_info->thr
? thr_get_trx(cursor->rtr_info->thr)
: NULL;
@@ -5595,23 +5257,23 @@ btr_cur_optimistic_delete(
ut_ad(flags == 0 || flags == BTR_CREATE_FLAG);
ut_ad(mtr->memo_contains_flagged(btr_cur_get_block(cursor),
MTR_MEMO_PAGE_X_FIX));
- ut_ad(mtr->is_named_space(cursor->index->table->space));
- ut_ad(!cursor->index->is_dummy);
+ ut_ad(mtr->is_named_space(cursor->index()->table->space));
+ ut_ad(!cursor->index()->is_dummy);
/* This is intended only for leaf page deletions */
block = btr_cur_get_block(cursor);
- ut_ad(block->page.id().space() == cursor->index->table->space->id);
+ ut_ad(block->page.id().space() == cursor->index()->table->space->id);
ut_ad(page_is_leaf(buf_block_get_frame(block)));
- ut_ad(!dict_index_is_online_ddl(cursor->index)
- || dict_index_is_clust(cursor->index)
+ ut_ad(!dict_index_is_online_ddl(cursor->index())
+ || cursor->index()->is_clust()
|| (flags & BTR_CREATE_FLAG));
rec = btr_cur_get_rec(cursor);
- offsets = rec_get_offsets(rec, cursor->index, offsets,
- cursor->index->n_core_fields,
+ offsets = rec_get_offsets(rec, cursor->index(), offsets,
+ cursor->index()->n_core_fields,
ULINT_UNDEFINED, &heap);
dberr_t err = DB_SUCCESS;
@@ -5621,23 +5283,24 @@ btr_cur_optimistic_delete(
mtr)) {
/* prefetch siblings of the leaf for the pessimistic
operation. */
- btr_cur_prefetch_siblings(block, cursor->index);
+ btr_cur_prefetch_siblings(block, cursor->index());
err = DB_FAIL;
goto func_exit;
}
- if (UNIV_UNLIKELY(block->page.id().page_no() == cursor->index->page
+ if (UNIV_UNLIKELY(block->page.id().page_no() == cursor->index()->page
&& page_get_n_recs(block->page.frame) == 1
- + (cursor->index->is_instant()
- && !rec_is_metadata(rec, *cursor->index))
- && !cursor->index->must_avoid_clear_instant_add())) {
+ + (cursor->index()->is_instant()
+ && !rec_is_metadata(rec, *cursor->index()))
+ && !cursor->index()
+ ->must_avoid_clear_instant_add())) {
/* The whole index (and table) becomes logically empty.
Empty the whole page. That is, if we are deleting the
only user record, also delete the metadata record
if one exists for instant ADD COLUMN (not generic ALTER TABLE).
If we are deleting the metadata record and the
table becomes empty, clean up the whole page. */
- dict_index_t* index = cursor->index;
+ dict_index_t* index = cursor->index();
const rec_t* first_rec = page_rec_get_next_const(
page_get_infimum_rec(block->page.frame));
if (UNIV_UNLIKELY(!first_rec)) {
@@ -5682,17 +5345,17 @@ btr_cur_optimistic_delete(
If this is a recovered transaction, then
index->is_instant() will hold until the
insert into SYS_COLUMNS is rolled back. */
- ut_ad(cursor->index->table->supports_instant());
- ut_ad(cursor->index->is_primary());
+ ut_ad(cursor->index()->table->supports_instant());
+ ut_ad(cursor->index()->is_primary());
ut_ad(!page_zip);
page_cur_delete_rec(btr_cur_get_page_cur(cursor),
- cursor->index, offsets, mtr);
+ offsets, mtr);
/* We must empty the PAGE_FREE list, because
after rollback, this deleted metadata record
would have too many fields, and we would be
unable to know the size of the freed record. */
err = btr_page_reorganize(btr_cur_get_page_cur(cursor),
- cursor->index, mtr);
+ mtr);
goto func_exit;
} else {
if (!flags) {
@@ -5704,12 +5367,14 @@ btr_cur_optimistic_delete(
if (page_zip) {
#ifdef UNIV_ZIP_DEBUG
- ut_a(page_zip_validate(page_zip, page, cursor->index));
+ ut_a(page_zip_validate(page_zip, page,
+ cursor->index()));
#endif /* UNIV_ZIP_DEBUG */
page_cur_delete_rec(btr_cur_get_page_cur(cursor),
- cursor->index, offsets, mtr);
+ offsets, mtr);
#ifdef UNIV_ZIP_DEBUG
- ut_a(page_zip_validate(page_zip, page, cursor->index));
+ ut_a(page_zip_validate(page_zip, page,
+ cursor->index()));
#endif /* UNIV_ZIP_DEBUG */
/* On compressed pages, the IBUF_BITMAP_FREE
@@ -5723,14 +5388,14 @@ btr_cur_optimistic_delete(
page, 1);
page_cur_delete_rec(btr_cur_get_page_cur(cursor),
- cursor->index, offsets, mtr);
+ offsets, mtr);
/* The change buffer does not handle inserts
into non-leaf pages, into clustered indexes,
or into the change buffer. */
- if (!dict_index_is_clust(cursor->index)
- && !cursor->index->table->is_temporary()
- && !dict_index_is_ibuf(cursor->index)) {
+ if (!cursor->index()->is_clust()
+ && !cursor->index()->table->is_temporary()
+ && !dict_index_is_ibuf(cursor->index())) {
ibuf_update_free_bits_low(block, max_ins, mtr);
}
}
@@ -5900,13 +5565,13 @@ btr_cur_pessimistic_delete(
btr_search_update_hash_on_delete(cursor);
} else {
page_cur_delete_rec(btr_cur_get_page_cur(cursor),
- index, offsets, mtr);
+ offsets, mtr);
/* We must empty the PAGE_FREE list, because
after rollback, this deleted metadata record
would carry too many fields, and we would be
unable to know the size of the freed record. */
*err = btr_page_reorganize(btr_cur_get_page_cur(cursor),
- index, mtr);
+ mtr);
ut_ad(!ret);
goto err_exit;
}
@@ -5929,6 +5594,10 @@ discard_page:
goto err_exit;
}
+ btr_cur_t cursor;
+ cursor.page_cur.index = index;
+ cursor.page_cur.block = block;
+
if (!page_has_prev(page)) {
/* If we delete the leftmost node pointer on a
non-leaf level, we must mark the new leftmost node
@@ -5940,22 +5609,19 @@ discard_page:
we need to update parent page. */
rtr_mbr_t father_mbr;
rec_t* father_rec;
- btr_cur_t father_cursor;
rec_offs* offsets;
ulint len;
- rtr_page_get_father_block(NULL, heap, index,
- block, mtr, NULL,
- &father_cursor);
- offsets = rec_get_offsets(
- btr_cur_get_rec(&father_cursor), index, NULL,
- 0, ULINT_UNDEFINED, &heap);
+ rtr_page_get_father_block(NULL, heap, mtr, NULL,
+ &cursor);
+ father_rec = btr_cur_get_rec(&cursor);
+ offsets = rec_get_offsets(father_rec, index, NULL,
+ 0, ULINT_UNDEFINED, &heap);
- father_rec = btr_cur_get_rec(&father_cursor);
rtr_read_mbr(rec_get_nth_field(
father_rec, offsets, 0, &len), &father_mbr);
- rtr_update_mbr_field(&father_cursor, offsets, NULL,
+ rtr_update_mbr_field(&cursor, offsets, NULL,
page, &father_mbr, next_rec, mtr);
ut_d(parent_latched = true);
} else {
@@ -5963,8 +5629,7 @@ discard_page:
on a page, we have to change the parent node pointer
so that it is equal to the new leftmost node pointer
on the page */
- btr_cur_t cursor;
- ret = btr_page_get_father(index, block, mtr, &cursor);
+ ret = btr_page_get_father(mtr, &cursor);
if (!ret) {
*err = DB_CORRUPTION;
goto err_exit;
@@ -6004,7 +5669,7 @@ got_err:
index, page, BTR_INTENTION_DELETE, rec,
btr_node_ptr_max_size(index),
block->zip_size(), mtr);
- page_cur_delete_rec(btr_cur_get_page_cur(cursor), index,
+ page_cur_delete_rec(btr_cur_get_page_cur(cursor),
offsets, mtr);
if (min_mark_next_rec) {
@@ -6040,10 +5705,9 @@ err_exit:
if (!srv_read_only_mode
&& page_is_leaf(page)
&& !dict_index_is_online_ddl(index)) {
-
- mtr_memo_release(mtr, &index->lock,
- MTR_MEMO_X_LOCK | MTR_MEMO_SX_LOCK);
-
+#if 0 // FIXME: this used to be a no-op, and will cause trouble if enabled
+ mtr->release(index->lock);
+#endif
/* NOTE: We cannot release root block latch here, because it
has segment header and already modified in most of cases.*/
}
@@ -6194,7 +5858,8 @@ public:
if (dtuple_get_n_fields(&m_tuple) > 0)
{
m_up_bytes= m_low_bytes= 0;
- if (page_cur_search_with_match(m_block, index(), &m_tuple, m_page_mode,
+ m_page_cur.block= m_block;
+ if (page_cur_search_with_match(&m_tuple, m_page_mode,
&m_up_match, &m_low_match, &m_page_cur,
nullptr))
return false;