Diffstat (limited to 'storage/innobase/btr/btr0btr.cc')
-rw-r--r--  storage/innobase/btr/btr0btr.cc  136
1 file changed, 136 insertions(+), 0 deletions(-)
diff --git a/storage/innobase/btr/btr0btr.cc b/storage/innobase/btr/btr0btr.cc
index 4f9ccbe061a..ff27b470974 100644
--- a/storage/innobase/btr/btr0btr.cc
+++ b/storage/innobase/btr/btr0btr.cc
@@ -2826,6 +2826,134 @@ btr_page_tuple_smaller(
return(cmp_dtuple_rec(tuple, first_rec, *offsets) < 0);
}
+/** Insert the tuple into the right sibling page, if the cursor is at the end
+of a page.
+@param[in] flags undo logging and locking flags
+@param[in,out] cursor cursor at which to insert; when the function succeeds,
+ the cursor is positioned before the insert point.
+@param[out] offsets offsets on inserted record
+@param[in,out] heap memory heap for allocating offsets
+@param[in] tuple tuple to insert
+@param[in] n_ext number of externally stored columns
+@param[in,out] mtr mini-transaction
+@return inserted record (first record on the right sibling page);
+ the cursor will be positioned on the page infimum
+@retval NULL if the operation was not performed */
+static
+rec_t*
+btr_insert_into_right_sibling(
+ ulint flags,
+ btr_cur_t* cursor,
+ ulint** offsets,
+ mem_heap_t* heap,
+ const dtuple_t* tuple,
+ ulint n_ext,
+ mtr_t* mtr)
+{
+ buf_block_t* block = btr_cur_get_block(cursor);
+ page_t* page = buf_block_get_frame(block);
+ ulint next_page_no = btr_page_get_next(page, mtr);
+
+ ut_ad(mtr_memo_contains(mtr, dict_index_get_lock(cursor->index),
+ MTR_MEMO_X_LOCK));
+ ut_ad(mtr_memo_contains(mtr, block, MTR_MEMO_PAGE_X_FIX));
+ ut_ad(heap);
+
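+ /* This fast path applies only when a right sibling page
+ exists and the insert position is just before the supremum,
+ i.e. after the last user record on this page. */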
+ if (next_page_no == FIL_NULL || !page_rec_is_supremum(
+ page_rec_get_next(btr_cur_get_rec(cursor)))) {
+
+ return(NULL);
+ }
+
+ page_cur_t next_page_cursor;
+ buf_block_t* next_block;
+ page_t* next_page;
+ btr_cur_t next_father_cursor;
+ rec_t* rec = NULL;
+ ulint zip_size = buf_block_get_zip_size(block);
+ ulint max_size;
+
+ next_block = btr_block_get(
+ buf_block_get_space(block), zip_size,
+ next_page_no, RW_X_LATCH, cursor->index, mtr);
+ next_page = buf_block_get_frame(next_block);
+
+ bool is_leaf = page_is_leaf(next_page);
+
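+ /* Position a cursor on the node pointer to next_page in the
+ parent page. If the insert succeeds, the new record becomes
+ the first record on next_page, and this node pointer has to
+ be replaced (see below). */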
+ btr_page_get_father(
+ cursor->index, next_block, mtr, &next_father_cursor);
+
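+ /* The tuple sorts below every user record on next_page, so
+ the PAGE_CUR_LE search positions the cursor on the page
+ infimum. */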
+ page_cur_search(
+ next_block, cursor->index, tuple, PAGE_CUR_LE,
+ &next_page_cursor);
+
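+ /* Remember how much space next_page could hold after
+ reorganization; used below for updating the insert buffer
+ free bits on an uncompressed leaf page. */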
+ max_size = page_get_max_insert_size_after_reorganize(next_page, 1);
+
+ /* Extend the gap locks as in a split to the left: locks on
+ the first record of next_page are inherited by the supremum
+ of this page */
+ lock_update_split_left(next_block, block);
+
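+ /* Attempt the insert; on a compressed page,
+ page_cur_tuple_insert() may also try a page reorganization
+ before giving up, hence the bitmap reset in the failure
+ path below. */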
+ rec = page_cur_tuple_insert(
+ &next_page_cursor, tuple, cursor->index, offsets, &heap,
+ n_ext, mtr);
+
+ if (rec == NULL) {
+ if (zip_size && is_leaf
+ && !dict_index_is_clust(cursor->index)) {
+ /* Reset the IBUF_BITMAP_FREE bits, because
+ page_cur_tuple_insert() will have attempted page
+ reorganize before failing. */
+ ibuf_reset_free_bits(next_block);
+ }
+ return(NULL);
+ }
+
+ ibool compressed;
+ dberr_t err;
+ ulint level = btr_page_get_level(next_page, mtr);
+
+ /* Adjust the cursor: it now points to the infimum of
+ next_page, just before the inserted record */
+ *btr_cur_get_page_cur(cursor) = next_page_cursor;
+
+ ut_ad(btr_cur_get_rec(cursor) == page_get_infimum_rec(next_page));
+ ut_ad(page_rec_get_next(page_get_infimum_rec(next_page)) == rec);
+
+ /* We have to change the parent node pointer: delete the old
+ node pointer to next_page and insert a new one built from the
+ new first record, so that the parent key correctly bounds the
+ records on next_page */
+
+ compressed = btr_cur_pessimistic_delete(
+ &err, TRUE, &next_father_cursor,
+ BTR_CREATE_FLAG, RB_NONE, mtr);
+
+ ut_a(err == DB_SUCCESS);
+
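+ /* btr_cur_pessimistic_delete() reports whether it compressed
+ the parent page; if it did not, attempt a merge with a
+ sibling now. */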
+ if (!compressed) {
+ btr_cur_compress_if_useful(&next_father_cursor, FALSE, mtr);
+ }
+
+ dtuple_t* node_ptr = dict_index_build_node_ptr(
+ cursor->index, rec, buf_block_get_page_no(next_block),
+ heap, level);
+
+ btr_insert_on_non_leaf_level(
+ flags, cursor->index, level + 1, node_ptr, mtr);
+
+ ut_ad(rec_offs_validate(rec, cursor->index, *offsets));
+
+ if (is_leaf && !dict_index_is_clust(cursor->index)) {
+ /* Update the free bits of the B-tree page in the
+ insert buffer bitmap. */
+
+ if (zip_size) {
+ ibuf_update_free_bits_zip(next_block, mtr);
+ } else {
+ ibuf_update_free_bits_if_full(
+ next_block, max_size,
+ rec_offs_size(*offsets) + PAGE_DIR_SLOT_SIZE);
+ }
+ }
+
+ return(rec);
+}
+
/*************************************************************//**
Splits an index page to halves and inserts the tuple. It is assumed
that mtr holds an x-latch to the index tree. NOTE: the tree x-latch is
@@ -2896,6 +3024,14 @@ func_start:
ut_ad(mtr_memo_contains(mtr, block, MTR_MEMO_PAGE_X_FIX));
ut_ad(!page_is_empty(page));
+ /* Try to insert the tuple into the right sibling page before
+ resorting to a page split */
+ rec = btr_insert_into_right_sibling(
+ flags, cursor, offsets, *heap, tuple, n_ext, mtr);
+
+ if (rec != NULL) {
+ return(rec);
+ }
+
page_no = buf_block_get_page_no(block);
/* 1. Decide the split record; split_rec == NULL means that the