author | Sergei Golubchik <sergii@pisem.net> | 2013-01-31 09:48:19 +0100
committer | Sergei Golubchik <sergii@pisem.net> | 2013-01-31 09:48:19 +0100
commit | ab83952f293ea46c00e421c81b81a394c9cae2b4 (patch)
tree | 6a3e995bd1330828a0e988d896bca218863fc74c /storage/xtradb/btr/btr0cur.c
parent | 055b62f404ee0a0463ee8ff98a0e24c083b95f1d (diff)
parent | 5267af5e9e5e601d4f4b1ef40730da1e36d38d9d (diff)
download | mariadb-git-ab83952f293ea46c00e421c81b81a394c9cae2b4.tar.gz
10.0-base merge
Diffstat (limited to 'storage/xtradb/btr/btr0cur.c')
-rw-r--r-- | storage/xtradb/btr/btr0cur.c | 68
1 file changed, 57 insertions, 11 deletions
diff --git a/storage/xtradb/btr/btr0cur.c b/storage/xtradb/btr/btr0cur.c
index 61c07ac792e..687853a422e 100644
--- a/storage/xtradb/btr/btr0cur.c
+++ b/storage/xtradb/btr/btr0cur.c
@@ -239,6 +239,7 @@ btr_cur_latch_leaves(
 	mtr_t*		mtr)	/*!< in: mtr */
 {
 	ulint		mode;
+	ulint		sibling_mode;
 	ulint		left_page_no;
 	ulint		right_page_no;
 	buf_block_t*	get_block;
@@ -261,14 +262,21 @@ btr_cur_latch_leaves(
 #endif /* UNIV_BTR_DEBUG */
 		get_block->check_index_page_at_flush = TRUE;
 		return;
+	case BTR_SEARCH_TREE:
 	case BTR_MODIFY_TREE:
-		/* x-latch also brothers from left to right */
+		if (UNIV_UNLIKELY(latch_mode == BTR_SEARCH_TREE)) {
+			mode = RW_S_LATCH;
+			sibling_mode = RW_NO_LATCH;
+		} else {
+			mode = sibling_mode = RW_X_LATCH;
+		}
+		/* Fetch and possibly latch also brothers from left to right */
 		left_page_no = btr_page_get_prev(page, mtr);
 
 		if (left_page_no != FIL_NULL) {
 			get_block = btr_block_get(
 				space, zip_size, left_page_no,
-				RW_X_LATCH, cursor->index, mtr);
+				sibling_mode, cursor->index, mtr);
 
 			if (srv_pass_corrupt_table && !get_block) {
 				return;
@@ -280,12 +288,21 @@ btr_cur_latch_leaves(
 			ut_a(btr_page_get_next(get_block->frame, mtr)
 			     == page_get_page_no(page));
 #endif /* UNIV_BTR_DEBUG */
-			get_block->check_index_page_at_flush = TRUE;
+			if (sibling_mode == RW_NO_LATCH) {
+				/* btr_block_get() called with RW_NO_LATCH will
+				fix the read block in the buffer. This serves
+				no purpose for the fake changes prefetching,
+				thus we unfix the sibling blocks immediately.*/
+				mtr_memo_release(mtr, get_block,
+						 MTR_MEMO_BUF_FIX);
+			} else {
+				get_block->check_index_page_at_flush = TRUE;
+			}
 		}
 
 		get_block = btr_block_get(
 			space, zip_size, page_no,
-			RW_X_LATCH, cursor->index, mtr);
+			mode, cursor->index, mtr);
 
 		if (srv_pass_corrupt_table && !get_block) {
 			return;
@@ -301,7 +318,7 @@ btr_cur_latch_leaves(
 		if (right_page_no != FIL_NULL) {
 			get_block = btr_block_get(
 				space, zip_size, right_page_no,
-				RW_X_LATCH, cursor->index, mtr);
+				sibling_mode, cursor->index, mtr);
 
 			if (srv_pass_corrupt_table && !get_block) {
 				return;
@@ -313,7 +330,12 @@ btr_cur_latch_leaves(
 			ut_a(btr_page_get_prev(get_block->frame, mtr)
 			     == page_get_page_no(page));
 #endif /* UNIV_BTR_DEBUG */
-			get_block->check_index_page_at_flush = TRUE;
+			if (sibling_mode == RW_NO_LATCH) {
+				mtr_memo_release(mtr, get_block,
+						 MTR_MEMO_BUF_FIX);
+			} else {
+				get_block->check_index_page_at_flush = TRUE;
+			}
 		}
 
 		return;
@@ -1566,6 +1588,9 @@ btr_cur_pessimistic_insert(
 	}
 
 	if (!(flags & BTR_NO_UNDO_LOG_FLAG)) {
+
+		ut_a(cursor->tree_height != ULINT_UNDEFINED);
+
 		/* First reserve enough free space for the file segments
 		of the index tree, so that the insert will not fail because
 		of lack of space */
@@ -1860,7 +1885,8 @@ btr_cur_update_alloc_zip(
 	ulint		length,	/*!< in: size needed */
 	ibool		create,	/*!< in: TRUE=delete-and-insert,
 				FALSE=update-in-place */
-	mtr_t*		mtr)	/*!< in: mini-transaction */
+	mtr_t*		mtr,	/*!< in: mini-transaction */
+	trx_t*		trx)	/*!< in: NULL or transaction */
 {
 	ut_a(page_zip == buf_block_get_page_zip(block));
 	ut_ad(page_zip);
@@ -1877,6 +1903,14 @@ btr_cur_update_alloc_zip(
 		return(FALSE);
 	}
 
+	if (trx && trx->fake_changes) {
+		/* Don't call page_zip_compress_write_log_no_data as that has
+		assert which would fail. Assume there won't be a compression
+		failure. */
+
+		return TRUE;
+	}
+
 	if (!page_zip_compress(page_zip, buf_block_get_frame(block),
 			       index, mtr)) {
 		/* Unable to compress the page */
@@ -1960,7 +1994,8 @@ btr_cur_update_in_place(
 
 	/* Check that enough space is available on the compressed page. */
 	if (page_zip
 	    && !btr_cur_update_alloc_zip(page_zip, block, index,
-					 rec_offs_size(offsets), FALSE, mtr)) {
+					 rec_offs_size(offsets), FALSE, mtr,
+					 trx)) {
 		return(DB_ZIP_OVERFLOW);
 	}
@@ -2159,7 +2194,8 @@ any_extern:
 
 	if (page_zip
 	    && !btr_cur_update_alloc_zip(page_zip, block, index,
-					 new_rec_size, TRUE, mtr)) {
+					 new_rec_size, TRUE, mtr,
+					 thr_get_trx(thr))) {
 		err = DB_ZIP_OVERFLOW;
 		goto err_exit;
 	}
@@ -2402,7 +2438,15 @@ btr_cur_pessimistic_update(
 		of the index tree, so that the update will not fail because
 		of lack of space */
 
-		n_extents = cursor->tree_height / 16 + 3;
+		if (UNIV_UNLIKELY(cursor->tree_height == ULINT_UNDEFINED)) {
+			/* When the tree height is uninitialized due to fake
+			changes, reserve some hardcoded number of extents. */
+			ut_a(thr && thr_get_trx(thr)->fake_changes);
+			n_extents = 3;
+		}
+		else {
+			n_extents = cursor->tree_height / 16 + 3;
+		}
 
 		if (flags & BTR_NO_UNDO_LOG_FLAG) {
 			reserve_flag = FSP_CLEANING;
@@ -2439,7 +2483,7 @@ btr_cur_pessimistic_update(
 	itself. Thus the following call is safe. */
 	row_upd_index_replace_new_col_vals_index_pos(new_entry, index, update,
 						     FALSE, *heap);
-	if (!(flags & BTR_KEEP_SYS_FLAG)) {
+	if (!(flags & BTR_KEEP_SYS_FLAG) && !trx->fake_changes) {
 		row_upd_index_entry_sys_field(new_entry, index, DATA_ROLL_PTR,
 					      roll_ptr);
 		row_upd_index_entry_sys_field(new_entry, index, DATA_TRX_ID,
@@ -3210,6 +3254,8 @@ btr_cur_pessimistic_delete(
 	of the index tree, so that the node pointer updates will
 	not fail because of lack of space */
 
+	ut_a(cursor->tree_height != ULINT_UNDEFINED);
+
 	n_extents = cursor->tree_height / 32 + 1;
 
 	success = fsp_reserve_free_extents(&n_reserved,
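Note: the btr_cur_latch_leaves() hunks above reduce to one small dispatch. The new BTR_SEARCH_TREE case, used by XtraDB's fake-changes prefetching, takes only an s-latch on the target page and merely buffer-fixes the siblings (unfixing them right away via mtr_memo_release()), while BTR_MODIFY_TREE keeps the old behaviour of x-latching the page and both siblings. The standalone C sketch below illustrates just that mode-selection logic; the enum values and helper names are simplified stand-ins for the real InnoDB declarations, not code from this commit.

#include <stdio.h>

enum latch_mode { BTR_SEARCH_TREE, BTR_MODIFY_TREE };
enum rw_latch   { RW_NO_LATCH, RW_S_LATCH, RW_X_LATCH };

static const char* latch_name(enum rw_latch m)
{
	switch (m) {
	case RW_NO_LATCH: return "RW_NO_LATCH (buffer-fix only, unfixed at once)";
	case RW_S_LATCH:  return "RW_S_LATCH";
	default:          return "RW_X_LATCH";
	}
}

/* Mirrors the mode/sibling_mode selection the patch adds to
btr_cur_latch_leaves(): searches latch less, modifications latch all. */
static void pick_modes(enum latch_mode latch_mode,
		       enum rw_latch* mode, enum rw_latch* sibling_mode)
{
	if (latch_mode == BTR_SEARCH_TREE) {
		*mode = RW_S_LATCH;          /* target leaf: shared latch */
		*sibling_mode = RW_NO_LATCH; /* siblings: prefetch only */
	} else {
		*mode = *sibling_mode = RW_X_LATCH; /* old behaviour */
	}
}

int main(void)
{
	enum rw_latch mode, sibling;

	pick_modes(BTR_SEARCH_TREE, &mode, &sibling);
	printf("BTR_SEARCH_TREE: page=%s, siblings=%s\n",
	       latch_name(mode), latch_name(sibling));

	pick_modes(BTR_MODIFY_TREE, &mode, &sibling);
	printf("BTR_MODIFY_TREE: page=%s, siblings=%s\n",
	       latch_name(mode), latch_name(sibling));
	return 0;
}

Releasing the sibling buffer-fix immediately (the mtr_memo_release() calls in the real hunks) keeps the read-ahead benefit of touching the neighbour pages without pinning them for the rest of the mini-transaction, which would serve no purpose for a fake-changes transaction that never modifies the tree.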