summary refs log tree commit diff
path: root/storage/innobase/btr
diff options
context:
space:
mode:
author Marko Mäkelä <marko.makela@mariadb.com> 2023-03-17 15:04:38 +0200
committer Marko Mäkelä <marko.makela@mariadb.com> 2023-03-17 15:04:38 +0200
commit 6e58d5ab6a42f22f9c705faea83fbc8889d429c3 (patch)
tree 1695b1b6e214c7380b848194c479366ff0c0636b /storage/innobase/btr
parent fc18f9c9ec15035894154fb7dcdd85caac73cfc2 (diff)
parent 4c355d4e81ac009abcfb94df4285d48c5229f958 (diff)
download mariadb-git-6e58d5ab6a42f22f9c705faea83fbc8889d429c3.tar.gz
Merge 11.0 into 11.1
Diffstat (limited to 'storage/innobase/btr')
-rw-r--r-- storage/innobase/btr/btr0btr.cc 147
-rw-r--r-- storage/innobase/btr/btr0cur.cc 157
2 files changed, 146 insertions, 158 deletions
diff --git a/storage/innobase/btr/btr0btr.cc b/storage/innobase/btr/btr0btr.cc
index d4471076780..dddff5e22b8 100644
--- a/storage/innobase/btr/btr0btr.cc
+++ b/storage/innobase/btr/btr0btr.cc
@@ -250,7 +250,7 @@ Gets the root node of a tree and x- or s-latches it.
buf_block_t*
btr_root_block_get(
/*===============*/
- const dict_index_t* index, /*!< in: index tree */
+ dict_index_t* index, /*!< in: index tree */
rw_lock_type_t mode, /*!< in: either RW_S_LATCH
or RW_X_LATCH */
mtr_t* mtr, /*!< in: mtr */
@@ -262,18 +262,42 @@ btr_root_block_get(
return nullptr;
}
- buf_block_t *block = btr_block_get(*index, index->page, mode, mtr, err);
- if (block)
+ buf_block_t *block;
+#ifndef BTR_CUR_ADAPT
+ static constexpr buf_block_t *guess= nullptr;
+#else
+ buf_block_t *&guess= btr_search_get_info(index)->root_guess;
+ guess=
+#endif
+ block=
+ buf_page_get_gen(page_id_t{index->table->space->id, index->page},
+ index->table->space->zip_size(), mode, guess, BUF_GET,
+ mtr, err);
+ ut_ad(!block == (*err != DB_SUCCESS));
+
+ if (UNIV_LIKELY(block != nullptr))
{
- if (!btr_root_fseg_validate(FIL_PAGE_DATA + PAGE_BTR_SEG_LEAF,
- *block, *index->table->space) ||
- !btr_root_fseg_validate(FIL_PAGE_DATA + PAGE_BTR_SEG_TOP,
- *block, *index->table->space))
+ if (!!page_is_comp(block->page.frame) != index->table->not_redundant() ||
+ btr_page_get_index_id(block->page.frame) != index->id ||
+ !fil_page_index_page_check(block->page.frame) ||
+ index->is_spatial() !=
+ (fil_page_get_type(block->page.frame) == FIL_PAGE_RTREE))
+ {
+ *err= DB_PAGE_CORRUPTED;
+ block= nullptr;
+ }
+ else if (!btr_root_fseg_validate(FIL_PAGE_DATA + PAGE_BTR_SEG_LEAF,
+ *block, *index->table->space) ||
+ !btr_root_fseg_validate(FIL_PAGE_DATA + PAGE_BTR_SEG_TOP,
+ *block, *index->table->space))
{
*err= DB_CORRUPTION;
block= nullptr;
}
}
+ else if (*err == DB_DECRYPTION_FAILED)
+ btr_decryption_failed(*index);
+
return block;
}
@@ -284,7 +308,7 @@ static
page_t*
btr_root_get(
/*=========*/
- const dict_index_t* index, /*!< in: index tree */
+ dict_index_t* index, /*!< in: index tree */
mtr_t* mtr, /*!< in: mtr */
dberr_t* err) /*!< out: error code */
{
@@ -496,9 +520,7 @@ btr_block_reget(mtr_t *mtr, const dict_index_t &index,
return block;
}
-#if 0 /* MDEV-29385 FIXME: Acquire the page latch upfront. */
ut_ad(mtr->memo_contains_flagged(&index.lock, MTR_MEMO_X_LOCK));
-#endif
return btr_block_get(index, id.page_no(), rw_latch, mtr, err);
}
@@ -686,9 +708,7 @@ btr_page_get_father_node_ptr_for_validate(
const uint32_t page_no = btr_cur_get_block(cursor)->page.id().page_no();
dict_index_t* index = btr_cur_get_index(cursor);
ut_ad(!dict_index_is_spatial(index));
-
- ut_ad(mtr->memo_contains_flagged(&index->lock, MTR_MEMO_X_LOCK
- | MTR_MEMO_SX_LOCK));
+ ut_ad(mtr->memo_contains(index->lock, MTR_MEMO_X_LOCK));
ut_ad(dict_index_get_page(index) != page_no);
const auto level = btr_page_get_level(btr_cur_get_page(cursor));
@@ -706,10 +726,6 @@ btr_page_get_father_node_ptr_for_validate(
}
const rec_t* node_ptr = btr_cur_get_rec(cursor);
-#if 0 /* MDEV-29835 FIXME */
- ut_ad(!btr_cur_get_block(cursor)->page.lock.not_recursive()
- || mtr->memo_contains(index->lock, MTR_MEMO_X_LOCK));
-#endif
offsets = rec_get_offsets(node_ptr, index, offsets, 0,
ULINT_UNDEFINED, &heap);
@@ -2287,11 +2303,10 @@ btr_insert_on_non_leaf_level(
}
ut_ad(cursor.flag == BTR_CUR_BINARY);
-#if 0 /* MDEV-29835 FIXME */
- ut_ad(!btr_cur_get_block(&cursor)->page.lock.not_recursive()
+ ut_ad(btr_cur_get_block(&cursor)
+ != mtr->at_savepoint(mtr->get_savepoint() - 1)
|| index->is_spatial()
|| mtr->memo_contains(index->lock, MTR_MEMO_X_LOCK));
-#endif
if (UNIV_LIKELY(err == DB_SUCCESS)) {
err = btr_cur_optimistic_insert(flags,
@@ -2399,10 +2414,8 @@ btr_attach_half_pages(
prev_block = mtr->get_already_latched(id, MTR_MEMO_PAGE_X_FIX);
#if 1 /* MDEV-29835 FIXME: acquire page latches upfront */
if (!prev_block) {
-# if 0 /* MDEV-29835 FIXME */
ut_ad(mtr->memo_contains(index->lock,
MTR_MEMO_X_LOCK));
-# endif
prev_block = btr_block_get(*index, prev_page_no,
RW_X_LATCH, mtr);
}
@@ -2413,10 +2426,8 @@ btr_attach_half_pages(
next_block = mtr->get_already_latched(id, MTR_MEMO_PAGE_X_FIX);
#if 1 /* MDEV-29835 FIXME: acquire page latches upfront */
if (!next_block) {
-# if 0 /* MDEV-29835 FIXME */
ut_ad(mtr->memo_contains(index->lock,
MTR_MEMO_X_LOCK));
-# endif
next_block = btr_block_get(*index, next_page_no,
RW_X_LATCH, mtr);
}
@@ -2757,6 +2768,8 @@ btr_page_split_and_insert(
ut_ad(dtuple_check_typed(tuple));
ut_ad(!cursor->index()->is_spatial());
+ buf_pool.pages_split++;
+
if (!*heap) {
*heap = mem_heap_create(1024);
}
@@ -3124,8 +3137,6 @@ insert_failed:
}
func_exit:
- MONITOR_INC(MONITOR_INDEX_SPLIT);
-
ut_ad(page_validate(buf_block_get_frame(left_block),
page_cursor->index));
ut_ad(page_validate(buf_block_get_frame(right_block),
@@ -3160,9 +3171,7 @@ dberr_t btr_level_list_remove(const buf_block_t& block,
#if 1 /* MDEV-29835 FIXME: acquire page latches upfront */
if (!prev)
{
-# if 0 /* MDEV-29835 FIXME */
ut_ad(mtr->memo_contains(index.lock, MTR_MEMO_X_LOCK));
-# endif
prev= btr_block_get(index, id.page_no(), RW_X_LATCH, mtr, &err);
if (UNIV_UNLIKELY(!prev))
return err;
@@ -3177,9 +3186,7 @@ dberr_t btr_level_list_remove(const buf_block_t& block,
#if 1 /* MDEV-29835 FIXME: acquire page latches upfront */
if (!next)
{
-# if 0 /* MDEV-29835 FIXME */
ut_ad(mtr->memo_contains(index.lock, MTR_MEMO_X_LOCK));
-# endif
next= btr_block_get(index, id.page_no(), RW_X_LATCH, mtr, &err);
if (UNIV_UNLIKELY(!next))
return err;
@@ -4009,7 +4016,7 @@ btr_discard_page(
if (UNIV_UNLIKELY(!merge_block)) {
return err;
}
-#if 0 /* MDEV-29385 FIXME: Acquire the page latch upfront. */
+#if 1 /* MDEV-29835 FIXME: Acquire the page latch upfront. */
ut_ad(!memcmp_aligned<4>(merge_block->page.frame
+ FIL_PAGE_NEXT,
block->page.frame + FIL_PAGE_OFFSET,
@@ -4035,7 +4042,7 @@ btr_discard_page(
if (UNIV_UNLIKELY(!merge_block)) {
return err;
}
-#if 0 /* MDEV-29385 FIXME: Acquire the page latch upfront. */
+#if 1 /* MDEV-29835 FIXME: Acquire the page latch upfront. */
ut_ad(!memcmp_aligned<4>(merge_block->page.frame
+ FIL_PAGE_PREV,
block->page.frame + FIL_PAGE_OFFSET,
@@ -4606,8 +4613,7 @@ btr_validate_level(
/*===============*/
dict_index_t* index, /*!< in: index tree */
const trx_t* trx, /*!< in: transaction or NULL */
- ulint level, /*!< in: level number */
- bool lockout)/*!< in: true if X-latch index is intended */
+ ulint level) /*!< in: level number */
{
buf_block_t* block;
page_t* page;
@@ -4626,18 +4632,10 @@ btr_validate_level(
#ifdef UNIV_ZIP_DEBUG
page_zip_des_t* page_zip;
#endif /* UNIV_ZIP_DEBUG */
- ulint savepoint = 0;
- uint32_t parent_page_no = FIL_NULL;
- uint32_t parent_right_page_no = FIL_NULL;
- bool rightmost_child = false;
mtr.start();
- if (lockout) {
- mtr_x_lock_index(index, &mtr);
- } else {
- mtr_sx_lock_index(index, &mtr);
- }
+ mtr_x_lock_index(index, &mtr);
dberr_t err;
block = btr_root_block_get(index, RW_SX_LATCH, &mtr, &err);
@@ -4732,11 +4730,7 @@ func_exit:
mem_heap_empty(heap);
offsets = offsets2 = NULL;
- if (lockout) {
- mtr_x_lock_index(index, &mtr);
- } else {
- mtr_sx_lock_index(index, &mtr);
- }
+ mtr_x_lock_index(index, &mtr);
page = block->page.frame;
@@ -4780,7 +4774,6 @@ func_exit:
if (right_page_no != FIL_NULL) {
const rec_t* right_rec;
- savepoint = mtr.get_savepoint();
right_block = btr_block_get(*index, right_page_no, RW_SX_LATCH,
&mtr, &err);
@@ -4883,11 +4876,6 @@ broken_links:
father_page = btr_cur_get_page(&node_cur);
node_ptr = btr_cur_get_rec(&node_cur);
- parent_page_no = page_get_page_no(father_page);
- parent_right_page_no = btr_page_get_next(father_page);
- rightmost_child = page_rec_is_supremum(
- page_rec_get_next(node_ptr));
-
rec = page_rec_get_prev(page_get_supremum_rec(page));
if (rec) {
btr_cur_position(index, rec, block, &node_cur);
@@ -4969,35 +4957,6 @@ broken_links:
}
} else if (const rec_t* right_node_ptr
= page_rec_get_next(node_ptr)) {
- if (!lockout && rightmost_child) {
-
- /* To obey latch order of tree blocks,
- we should release the right_block once to
- obtain lock of the uncle block. */
- ut_ad(right_block
- == mtr.at_savepoint(savepoint));
- mtr.rollback_to_savepoint(savepoint,
- savepoint + 1);
-
- if (parent_right_page_no != FIL_NULL) {
- btr_block_get(*index,
- parent_right_page_no,
- RW_SX_LATCH, &mtr);
- }
-
- right_block = btr_block_get(*index,
- right_page_no,
- RW_SX_LATCH,
- &mtr, &err);
- if (!right_block) {
- btr_validate_report1(index, level,
- block);
- fputs("InnoDB: broken FIL_PAGE_NEXT"
- " link\n", stderr);
- goto invalid_page;
- }
- }
-
btr_cur_position(
index,
page_get_infimum_rec(right_block->page.frame),
@@ -5069,19 +5028,6 @@ node_ptr_fails:
mtr.start();
- if (!lockout) {
- if (rightmost_child) {
- if (parent_right_page_no != FIL_NULL) {
- btr_block_get(*index,
- parent_right_page_no,
- RW_SX_LATCH, &mtr);
- }
- } else if (parent_page_no != FIL_NULL) {
- btr_block_get(*index, parent_page_no,
- RW_SX_LATCH, &mtr);
- }
- }
-
block = btr_block_get(*index, right_page_no, RW_SX_LATCH,
&mtr, &err);
goto loop;
@@ -5099,21 +5045,16 @@ btr_validate_index(
dict_index_t* index, /*!< in: index */
const trx_t* trx) /*!< in: transaction or NULL */
{
- const bool lockout= index->is_spatial();
-
mtr_t mtr;
mtr.start();
- if (lockout)
- mtr_x_lock_index(index, &mtr);
- else
- mtr_sx_lock_index(index, &mtr);
+ mtr_x_lock_index(index, &mtr);
dberr_t err;
if (page_t *root= btr_root_get(index, &mtr, &err))
for (auto level= btr_page_get_level(root);; level--)
{
- if (dberr_t err_level= btr_validate_level(index, trx, level, lockout))
+ if (dberr_t err_level= btr_validate_level(index, trx, level))
err= err_level;
if (!level)
break;
diff --git a/storage/innobase/btr/btr0cur.cc b/storage/innobase/btr/btr0cur.cc
index 2941a765fa4..74db3fa3d8f 100644
--- a/storage/innobase/btr/btr0cur.cc
+++ b/storage/innobase/btr/btr0cur.cc
@@ -750,29 +750,34 @@ btr_cur_will_modify_tree(
/** Detects whether the modifying record might need a opposite modification
to the intention.
-@param[in] page page
-@param[in] lock_intention lock intention for the tree operation
-@param[in] rec record (current node_ptr)
+@param page page
+@param lock_intention lock intention for the tree operation
+@param node_ptr_max_size the maximum size of a node pointer
+@param compress_limit BTR_CUR_PAGE_COMPRESS_LIMIT(index)
+@param rec record (current node_ptr)
@return true if tree modification is needed */
-static
-bool
-btr_cur_need_opposite_intention(
- const page_t* page,
- btr_intention_t lock_intention,
- const rec_t* rec)
+static bool btr_cur_need_opposite_intention(const page_t *page,
+ btr_intention_t lock_intention,
+ ulint node_ptr_max_size,
+ ulint compress_limit,
+ const rec_t *rec)
{
- switch (lock_intention) {
- case BTR_INTENTION_DELETE:
- return (page_has_prev(page) && page_rec_is_first(rec, page)) ||
- (page_has_next(page) && page_rec_is_last(rec, page));
- case BTR_INTENTION_INSERT:
- return page_has_next(page) && page_rec_is_last(rec, page);
- case BTR_INTENTION_BOTH:
- return(false);
- }
-
- MY_ASSERT_UNREACHABLE();
- return(false);
+ if (lock_intention != BTR_INTENTION_INSERT)
+ {
+ /* We compensate also for btr_cur_compress_recommendation() */
+ if (!page_has_siblings(page) ||
+ page_rec_is_first(rec, page) || page_rec_is_last(rec, page) ||
+ page_get_data_size(page) < node_ptr_max_size + compress_limit)
+ return true;
+ if (lock_intention == BTR_INTENTION_DELETE)
+ return false;
+ }
+ else if (page_has_next(page) && page_rec_is_last(rec, page))
+ return true;
+ LIMIT_OPTIMISTIC_INSERT_DEBUG(page_get_n_recs(page), return true);
+ const ulint max_size= page_get_max_insert_size_after_reorganize(page, 2);
+ return max_size < BTR_CUR_PAGE_REORGANIZE_LIMIT + node_ptr_max_size ||
+ max_size < node_ptr_max_size * 2;
}
/**
@@ -997,7 +1002,7 @@ dberr_t btr_cur_t::search_leaf(const dtuple_t *tuple, page_cur_mode_t mode,
const ulint savepoint= mtr->get_savepoint();
- ulint node_ptr_max_size= 0;
+ ulint node_ptr_max_size= 0, compress_limit= 0;
rw_lock_type_t rw_latch= RW_S_LATCH;
switch (latch_mode) {
@@ -1009,13 +1014,19 @@ dberr_t btr_cur_t::search_leaf(const dtuple_t *tuple, page_cur_mode_t mode,
ut_ad(mtr->memo_contains_flagged(&index()->lock, MTR_MEMO_X_LOCK));
break;
}
- if (lock_intention == BTR_INTENTION_DELETE && buf_pool.n_pend_reads &&
- trx_sys.history_size_approx() > BTR_CUR_FINE_HISTORY_LENGTH)
- /* Most delete-intended operations are due to the purge of history.
- Prioritize them when the history list is growing huge. */
- mtr_x_lock_index(index(), mtr);
- else
- mtr_sx_lock_index(index(), mtr);
+ if (lock_intention == BTR_INTENTION_DELETE)
+ {
+ compress_limit= BTR_CUR_PAGE_COMPRESS_LIMIT(index());
+ if (buf_pool.n_pend_reads &&
+ trx_sys.history_size_approx() > BTR_CUR_FINE_HISTORY_LENGTH)
+ {
+ /* Most delete-intended operations are due to the purge of history.
+ Prioritize them when the history list is growing huge. */
+ mtr_x_lock_index(index(), mtr);
+ break;
+ }
+ }
+ mtr_sx_lock_index(index(), mtr);
break;
#ifdef UNIV_DEBUG
case BTR_CONT_MODIFY_TREE:
@@ -1221,6 +1232,10 @@ release_tree:
!btr_block_get(*index(), btr_page_get_next(block->page.frame),
RW_X_LATCH, mtr, &err))
goto func_exit;
+ if (btr_cur_need_opposite_intention(block->page.frame, lock_intention,
+ node_ptr_max_size, compress_limit,
+ page_cur.rec))
+ goto need_opposite_intention;
}
reached_latched_leaf:
@@ -1274,6 +1289,7 @@ release_tree:
break;
case BTR_MODIFY_TREE:
if (btr_cur_need_opposite_intention(block->page.frame, lock_intention,
+ node_ptr_max_size, compress_limit,
page_cur.rec))
/* If the rec is the first or last in the page for pessimistic
delete intention, it might cause node_ptr insert for the upper
@@ -1417,6 +1433,17 @@ release_tree:
goto search_loop;
}
+ATTRIBUTE_COLD void mtr_t::index_lock_upgrade()
+{
+ auto &slot= m_memo[get_savepoint() - 1];
+ if (slot.type == MTR_MEMO_X_LOCK)
+ return;
+ ut_ad(slot.type == MTR_MEMO_SX_LOCK);
+ index_lock *lock= static_cast<index_lock*>(slot.object);
+ lock->u_x_upgrade(SRW_LOCK_CALL);
+ slot.type= MTR_MEMO_X_LOCK;
+}
+
ATTRIBUTE_COLD
dberr_t btr_cur_t::pessimistic_search_leaf(const dtuple_t *tuple,
page_cur_mode_t mode, mtr_t *mtr)
@@ -1434,8 +1461,7 @@ dberr_t btr_cur_t::pessimistic_search_leaf(const dtuple_t *tuple,
ut_ad(block->page.id().page_no() == index()->page);
block->page.fix();
mtr->rollback_to_savepoint(1);
- ut_ad(mtr->memo_contains_flagged(&index()->lock,
- MTR_MEMO_SX_LOCK | MTR_MEMO_X_LOCK));
+ mtr->index_lock_upgrade();
const page_cur_mode_t page_mode{btr_cur_nonleaf_mode(mode)};
@@ -1665,7 +1691,6 @@ search_loop:
dberr_t btr_cur_t::open_leaf(bool first, dict_index_t *index,
btr_latch_mode latch_mode, mtr_t *mtr)
{
- btr_intention_t lock_intention;
ulint n_blocks= 0;
mem_heap_t *heap= nullptr;
rec_offs offsets_[REC_OFFS_NORMAL_SIZE];
@@ -1677,7 +1702,7 @@ dberr_t btr_cur_t::open_leaf(bool first, dict_index_t *index,
const bool latch_by_caller= latch_mode & BTR_ALREADY_S_LATCHED;
latch_mode= btr_latch_mode(latch_mode & ~BTR_ALREADY_S_LATCHED);
- lock_intention= btr_cur_get_and_clear_intention(&latch_mode);
+ btr_intention_t lock_intention= btr_cur_get_and_clear_intention(&latch_mode);
/* Store the position of the tree latch we push to mtr so that we
know how to release it when we have latched the leaf node */
@@ -1685,7 +1710,7 @@ dberr_t btr_cur_t::open_leaf(bool first, dict_index_t *index,
auto savepoint= mtr->get_savepoint();
rw_lock_type_t upper_rw_latch= RW_X_LATCH;
- ulint node_ptr_max_size= 0;
+ ulint node_ptr_max_size= 0, compress_limit= 0;
if (latch_mode == BTR_MODIFY_TREE)
{
@@ -1694,12 +1719,18 @@ dberr_t btr_cur_t::open_leaf(bool first, dict_index_t *index,
and read IO bandwidth should be prioritized for them, when the
history list is growing huge. */
savepoint++;
- if (lock_intention == BTR_INTENTION_DELETE
- && buf_pool.n_pend_reads
- && trx_sys.history_size_approx() > BTR_CUR_FINE_HISTORY_LENGTH)
- mtr_x_lock_index(index, mtr);
- else
- mtr_sx_lock_index(index, mtr);
+ if (lock_intention == BTR_INTENTION_DELETE)
+ {
+ compress_limit= BTR_CUR_PAGE_COMPRESS_LIMIT(index);
+
+ if (buf_pool.n_pend_reads &&
+ trx_sys.history_size_approx() > BTR_CUR_FINE_HISTORY_LENGTH)
+ {
+ mtr_x_lock_index(index, mtr);
+ goto index_locked;
+ }
+ }
+ mtr_sx_lock_index(index, mtr);
}
else
{
@@ -1720,6 +1751,7 @@ dberr_t btr_cur_t::open_leaf(bool first, dict_index_t *index,
}
}
+index_locked:
ut_ad(savepoint == mtr->get_savepoint());
const rw_lock_type_t root_leaf_rw_latch=
@@ -1792,15 +1824,28 @@ dberr_t btr_cur_t::open_leaf(bool first, dict_index_t *index,
!btr_block_get(*index, btr_page_get_next(block->page.frame),
RW_X_LATCH, mtr, &err))
break;
+
+ if (!index->lock.have_x() &&
+ btr_cur_need_opposite_intention(block->page.frame,
+ lock_intention,
+ node_ptr_max_size,
+ compress_limit, page_cur.rec))
+ goto need_opposite_intention;
}
else
{
if (rw_latch == RW_NO_LATCH)
mtr->upgrade_buffer_fix(leaf_savepoint - 1,
- rw_lock_type_t(latch_mode));
- /* Release index->lock if needed, and the non-leaf pages. */
- mtr->rollback_to_savepoint(savepoint - !latch_by_caller,
- leaf_savepoint - 1);
+ rw_lock_type_t(latch_mode &
+ (RW_X_LATCH | RW_S_LATCH)));
+ if (latch_mode != BTR_CONT_MODIFY_TREE)
+ {
+ ut_ad(latch_mode == BTR_MODIFY_LEAF ||
+ latch_mode == BTR_SEARCH_LEAF);
+ /* Release index->lock if needed, and the non-leaf pages. */
+ mtr->rollback_to_savepoint(savepoint - !latch_by_caller,
+ leaf_savepoint - 1);
+ }
}
break;
}
@@ -1822,22 +1867,25 @@ dberr_t btr_cur_t::open_leaf(bool first, dict_index_t *index,
: !page_cur_move_to_prev(&page_cur))
goto corrupted;
- const rec_t *node_ptr= page_cur.rec;
- offsets= rec_get_offsets(node_ptr, index, offsets, 0, ULINT_UNDEFINED,
+ offsets= rec_get_offsets(page_cur.rec, index, offsets, 0, ULINT_UNDEFINED,
&heap);
ut_ad(latch_mode != BTR_MODIFY_TREE || upper_rw_latch == RW_X_LATCH);
if (latch_mode != BTR_MODIFY_TREE);
- else if (btr_cur_need_opposite_intention(block->page.frame,
- lock_intention, node_ptr))
+ else if (btr_cur_need_opposite_intention(block->page.frame, lock_intention,
+ node_ptr_max_size, compress_limit,
+ page_cur.rec))
{
+ need_opposite_intention:
/* If the rec is the first or last in the page for pessimistic
delete intention, it might cause node_ptr insert for the upper
level. We should change the intention and retry. */
mtr->rollback_to_savepoint(savepoint);
- lock_intention= BTR_INTENTION_BOTH;
+ mtr->index_lock_upgrade();
+ /* X-latch all pages from now on */
+ latch_mode= BTR_CONT_MODIFY_TREE;
page= index->page;
height= ULINT_UNDEFINED;
n_blocks= 0;
@@ -1846,7 +1894,7 @@ dberr_t btr_cur_t::open_leaf(bool first, dict_index_t *index,
else
{
if (!btr_cur_will_modify_tree(index, block->page.frame,
- lock_intention, node_ptr,
+ lock_intention, page_cur.rec,
node_ptr_max_size, zip_size, mtr))
{
ut_ad(n_blocks);
@@ -1876,7 +1924,7 @@ dberr_t btr_cur_t::open_leaf(bool first, dict_index_t *index,
}
/* Go to the child node */
- page= btr_node_ptr_get_child_page_no(node_ptr, offsets);
+ page= btr_node_ptr_get_child_page_no(page_cur.rec, offsets);
n_blocks++;
}
@@ -2178,8 +2226,7 @@ convert_big_rec:
return(DB_TOO_BIG_RECORD);
}
- LIMIT_OPTIMISTIC_INSERT_DEBUG(page_get_n_recs(page),
- goto fail);
+ LIMIT_OPTIMISTIC_INSERT_DEBUG(page_get_n_recs(page), goto fail);
if (block->page.zip.data && leaf
&& (page_get_data_size(page) + rec_size
@@ -2193,7 +2240,7 @@ fail:
/* prefetch siblings of the leaf for the pessimistic
operation, if the page is leaf. */
- if (page_is_leaf(page)) {
+ if (leaf) {
btr_cur_prefetch_siblings(block, index);
}
fail_err:
@@ -2262,7 +2309,7 @@ fail_err:
#ifdef UNIV_DEBUG
if (!(flags & BTR_CREATE_FLAG)
- && index->is_primary() && page_is_leaf(page)) {
+ && leaf && index->is_primary()) {
const dfield_t* trx_id = dtuple_get_nth_field(
entry, dict_col_get_clust_pos(
dict_table_get_sys_col(index->table,