Diffstat (limited to 'storage/innobase/btr/btr0cur.c')
-rw-r--r--  storage/innobase/btr/btr0cur.c  105
1 file changed, 56 insertions(+), 49 deletions(-)
diff --git a/storage/innobase/btr/btr0cur.c b/storage/innobase/btr/btr0cur.c
index 69dd1e428e1..f63ca20ef24 100644
--- a/storage/innobase/btr/btr0cur.c
+++ b/storage/innobase/btr/btr0cur.c
@@ -249,7 +249,8 @@ btr_cur_latch_leaves(
case BTR_SEARCH_LEAF:
case BTR_MODIFY_LEAF:
mode = latch_mode == BTR_SEARCH_LEAF ? RW_S_LATCH : RW_X_LATCH;
- get_block = btr_block_get(space, zip_size, page_no, mode, mtr);
+ get_block = btr_block_get(
+ space, zip_size, page_no, mode, cursor->index, mtr);
#ifdef UNIV_BTR_DEBUG
ut_a(page_is_comp(get_block->frame) == page_is_comp(page));
#endif /* UNIV_BTR_DEBUG */
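
This hunk and the ones below thread the cursor's index through to btr_block_get(), so the page fetch knows which index tree it is reading. Inferred from these call sites, the extended signature looks roughly like the sketch below (hedged: the authoritative prototype is in btr0btr.h, which this diff does not show):

	buf_block_t*
	btr_block_get(
		ulint		space,		/*!< in: space id */
		ulint		zip_size,	/*!< in: compressed page size,
						or 0 for uncompressed pages */
		ulint		page_no,	/*!< in: page number */
		ulint		mode,		/*!< in: latch mode, e.g.
						RW_S_LATCH or RW_X_LATCH */
		dict_index_t*	index,		/*!< in: index tree that the
						page belongs to */
		mtr_t*		mtr);		/*!< in/out: mini-transaction */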
@@ -260,9 +261,9 @@ btr_cur_latch_leaves(
left_page_no = btr_page_get_prev(page, mtr);
if (left_page_no != FIL_NULL) {
- get_block = btr_block_get(space, zip_size,
- left_page_no,
- RW_X_LATCH, mtr);
+ get_block = btr_block_get(
+ space, zip_size, left_page_no,
+ RW_X_LATCH, cursor->index, mtr);
#ifdef UNIV_BTR_DEBUG
ut_a(page_is_comp(get_block->frame)
== page_is_comp(page));
@@ -272,8 +273,9 @@ btr_cur_latch_leaves(
get_block->check_index_page_at_flush = TRUE;
}
- get_block = btr_block_get(space, zip_size, page_no,
- RW_X_LATCH, mtr);
+ get_block = btr_block_get(
+ space, zip_size, page_no,
+ RW_X_LATCH, cursor->index, mtr);
#ifdef UNIV_BTR_DEBUG
ut_a(page_is_comp(get_block->frame) == page_is_comp(page));
#endif /* UNIV_BTR_DEBUG */
@@ -282,9 +284,9 @@ btr_cur_latch_leaves(
right_page_no = btr_page_get_next(page, mtr);
if (right_page_no != FIL_NULL) {
- get_block = btr_block_get(space, zip_size,
- right_page_no,
- RW_X_LATCH, mtr);
+ get_block = btr_block_get(
+ space, zip_size, right_page_no,
+ RW_X_LATCH, cursor->index, mtr);
#ifdef UNIV_BTR_DEBUG
ut_a(page_is_comp(get_block->frame)
== page_is_comp(page));
@@ -303,8 +305,9 @@ btr_cur_latch_leaves(
left_page_no = btr_page_get_prev(page, mtr);
if (left_page_no != FIL_NULL) {
- get_block = btr_block_get(space, zip_size,
- left_page_no, mode, mtr);
+ get_block = btr_block_get(
+ space, zip_size,
+ left_page_no, mode, cursor->index, mtr);
cursor->left_block = get_block;
#ifdef UNIV_BTR_DEBUG
ut_a(page_is_comp(get_block->frame)
@@ -315,7 +318,8 @@ btr_cur_latch_leaves(
get_block->check_index_page_at_flush = TRUE;
}
- get_block = btr_block_get(space, zip_size, page_no, mode, mtr);
+ get_block = btr_block_get(
+ space, zip_size, page_no, mode, cursor->index, mtr);
#ifdef UNIV_BTR_DEBUG
ut_a(page_is_comp(get_block->frame) == page_is_comp(page));
#endif /* UNIV_BTR_DEBUG */
@@ -669,7 +673,9 @@ retry_page_get:
ut_a(!page_zip || page_zip_validate(page_zip, page));
#endif /* UNIV_ZIP_DEBUG */
- buf_block_dbg_add_level(block, SYNC_TREE_NODE);
+ buf_block_dbg_add_level(
+ block, dict_index_is_ibuf(index)
+ ? SYNC_IBUF_TREE_NODE : SYNC_TREE_NODE);
}
ut_ad(index->id == btr_page_get_index_id(page));
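
buf_block_dbg_add_level() only has an effect in UNIV_SYNC_DEBUG builds, where it records a latching-order level for the page latch; giving the insert buffer tree its own level (SYNC_IBUF_TREE_NODE) keeps the latching-order checker from conflating it with ordinary index trees. From memory, and hedged, the macro is defined in buf0buf.h along these lines:

	#ifdef UNIV_SYNC_DEBUG
	# define buf_block_dbg_add_level(block, level)		\
		sync_thread_add_level(&(block)->lock, level)
	#else /* UNIV_SYNC_DEBUG */
	# define buf_block_dbg_add_level(block, level)	/* nothing */
	#endif /* UNIV_SYNC_DEBUG */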
@@ -767,7 +773,7 @@ retry_page_get:
if (level != 0) {
/* x-latch the page */
page = btr_page_get(
- space, zip_size, page_no, RW_X_LATCH, mtr);
+ space, zip_size, page_no, RW_X_LATCH, index, mtr);
ut_a((ibool)!!page_is_comp(page)
== dict_table_is_comp(index->table));
@@ -1839,6 +1845,7 @@ btr_cur_update_in_place(
roll_ptr_t roll_ptr = 0;
trx_t* trx;
ulint was_delete_marked;
+ ibool is_hashed;
mem_heap_t* heap = NULL;
ulint offsets_[REC_OFFS_NORMAL_SIZE];
ulint* offsets = offsets_;
@@ -1880,7 +1887,21 @@ btr_cur_update_in_place(
return(err);
}
- if (block->is_hashed) {
+ if (!(flags & BTR_KEEP_SYS_FLAG)) {
+ row_upd_rec_sys_fields(rec, NULL,
+ index, offsets, trx, roll_ptr);
+ }
+
+ was_delete_marked = rec_get_deleted_flag(
+ rec, page_is_comp(buf_block_get_frame(block)));
+
+ is_hashed = block->is_hashed;
+
+ if (is_hashed) {
+ /* TODO: Can we skip this if none of the first
+ index->search_info->curr_n_fields fields
+ are being updated? */
+
/* The function row_upd_changes_ord_field_binary works only
if the update vector was built for a clustered index, we must
NOT call it if index is secondary */
@@ -1896,17 +1917,9 @@ btr_cur_update_in_place(
rw_lock_x_lock(&btr_search_latch);
}
- if (!(flags & BTR_KEEP_SYS_FLAG)) {
- row_upd_rec_sys_fields(rec, NULL,
- index, offsets, trx, roll_ptr);
- }
-
- was_delete_marked = rec_get_deleted_flag(
- rec, page_is_comp(buf_block_get_frame(block)));
-
row_upd_rec_in_place(rec, index, offsets, update, page_zip);
- if (block->is_hashed) {
+ if (is_hashed) {
rw_lock_x_unlock(&btr_search_latch);
}
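
The reordering above moves the system-field update and the delete-mark read out of the btr_search_latch critical section (the adaptive hash index depends on neither), and snapshots block->is_hashed once so that the unlock decision always matches the lock decision even if another thread changes the flag concurrently. Condensed, the resulting pattern is:

	ibool	is_hashed = block->is_hashed;	/* read the flag once */

	if (is_hashed) {
		rw_lock_x_lock(&btr_search_latch);
	}

	/* only the work that can invalidate adaptive hash index
	entries happens under the latch */
	row_upd_rec_in_place(rec, index, offsets, update, page_zip);

	if (is_hashed) {
		rw_lock_x_unlock(&btr_search_latch);
	}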
@@ -2607,7 +2620,8 @@ btr_cur_parse_del_mark_set_clust_rec(
/* We do not need to reserve btr_search_latch, as the page
is only being recovered, and there cannot be a hash index to
- it. */
+ it. Besides, these fields are being updated in place
+ and the adaptive hash index does not depend on them. */
btr_rec_set_deleted_flag(rec, page_zip, val);
@@ -2687,9 +2701,9 @@ btr_cur_del_mark_set_clust_rec(
return(err);
}
- if (block->is_hashed) {
- rw_lock_x_lock(&btr_search_latch);
- }
+ /* The btr_search_latch is not needed here, because
+ the adaptive hash index does not depend on the delete-mark
+ and the delete-mark is being updated in place. */
page_zip = buf_block_get_page_zip(block);
@@ -2703,10 +2717,6 @@ btr_cur_del_mark_set_clust_rec(
index, offsets, trx, roll_ptr);
}
- if (block->is_hashed) {
- rw_lock_x_unlock(&btr_search_latch);
- }
-
btr_cur_del_mark_set_clust_rec_log(flags, rec, index, val, trx,
roll_ptr, mtr);
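
The latch can be dropped here because the adaptive hash index hashes prefixes of the indexed columns only: the delete-mark lives in the record's info bits, and the system fields (DB_TRX_ID, DB_ROLL_PTR) are fixed-length and updated in place, so neither can invalidate a hash entry. Using only the names visible in this hunk, the delete-mark step reduces to this sketch:

	page_zip = buf_block_get_page_zip(block);

	/* no btr_search_latch: info bits and system fields are not
	part of any adaptive hash index key */
	btr_rec_set_deleted_flag(rec, page_zip, val);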
@@ -2782,7 +2792,8 @@ btr_cur_parse_del_mark_set_sec_rec(
/* We do not need to reserve btr_search_latch, as the page
is only being recovered, and there cannot be a hash index to
- it. */
+ it. Besides, the delete-mark flag is being updated in place
+ and the adaptive hash index does not depend on it. */
btr_rec_set_deleted_flag(rec, page_zip, val);
}
@@ -2830,16 +2841,11 @@ btr_cur_del_mark_set_sec_rec(
ut_ad(!!page_rec_is_comp(rec)
== dict_table_is_comp(cursor->index->table));
- if (block->is_hashed) {
- rw_lock_x_lock(&btr_search_latch);
- }
-
+ /* We do not need to reserve btr_search_latch, as the
+ delete-mark flag is being updated in place and the adaptive
+ hash index does not depend on it. */
btr_rec_set_deleted_flag(rec, buf_block_get_page_zip(block), val);
- if (block->is_hashed) {
- rw_lock_x_unlock(&btr_search_latch);
- }
-
btr_cur_del_mark_set_sec_rec_log(rec, val, mtr);
return(DB_SUCCESS);
@@ -2860,8 +2866,11 @@ btr_cur_set_deleted_flag_for_ibuf(
ibool val, /*!< in: value to set */
mtr_t* mtr) /*!< in: mtr */
{
- /* We do not need to reserve btr_search_latch, as the page has just
- been read to the buffer pool and there cannot be a hash index to it. */
+ /* We do not need to reserve btr_search_latch, as the page
+ has just been read to the buffer pool and there cannot be
+ a hash index to it. Besides, the delete-mark flag is being
+ updated in place and the adaptive hash index does not depend
+ on it. */
btr_rec_set_deleted_flag(rec, page_zip, val);
@@ -3515,7 +3524,6 @@ static
void
btr_record_not_null_field_in_rec(
/*=============================*/
- rec_t* rec, /*!< in: physical record */
ulint n_unique, /*!< in: dict_index_get_n_unique(index),
number of columns uniquely determine
an index entry */
@@ -3535,9 +3543,8 @@ btr_record_not_null_field_in_rec(
for (i = 0; i < n_unique; i++) {
ulint rec_len;
- byte* field;
- field = rec_get_nth_field(rec, offsets, i, &rec_len);
+ rec_get_nth_field_offs(offsets, i, &rec_len);
if (rec_len != UNIV_SQL_NULL) {
n_not_null[i]++;
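
The helper only compares each field's length against UNIV_SQL_NULL, so the record pointer was dead weight: rec_get_nth_field() also computes a pointer to the field data, while rec_get_nth_field_offs() reads the length straight from the precomputed offsets array. Reconstructed from the hunks above, the slimmed-down helper looks like this (a sketch; the n_not_null element type is assumed, not shown in this diff):

	static
	void
	btr_record_not_null_field_in_rec(
		ulint		n_unique,	/* in: number of columns
						uniquely determining an
						index entry */
		const ulint*	offsets,	/* in: rec_get_offsets()
						result for the record */
		ib_int64_t*	n_not_null)	/* in/out: not-NULL counters */
	{
		ulint	i;

		for (i = 0; i < n_unique; i++) {
			ulint	rec_len;

			/* only the length is needed, not the field
			pointer that rec_get_nth_field() would return */
			rec_get_nth_field_offs(offsets, i, &rec_len);

			if (rec_len != UNIV_SQL_NULL) {
				n_not_null[i]++;
			}
		}
	}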
@@ -3652,7 +3659,7 @@ btr_estimate_number_of_different_key_vals(
if (n_not_null) {
btr_record_not_null_field_in_rec(
- rec, n_cols, offsets_rec, n_not_null);
+ n_cols, offsets_rec, n_not_null);
}
}
@@ -3687,7 +3694,7 @@ btr_estimate_number_of_different_key_vals(
if (n_not_null) {
btr_record_not_null_field_in_rec(
- next_rec, n_cols, offsets_next_rec,
+ n_cols, offsets_next_rec,
n_not_null);
}
@@ -4095,7 +4102,7 @@ btr_blob_free(
&& buf_block_get_space(block) == space
&& buf_block_get_page_no(block) == page_no) {
- if (buf_LRU_free_block(&block->page, all) != BUF_LRU_FREED
+ if (!buf_LRU_free_block(&block->page, all)
&& all && block->page.zip.data) {
/* Attempt to deallocate the uncompressed page
if the whole block cannot be deallocated. */
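
This last hunk tracks a change in buf_LRU_free_block(): instead of a BUF_LRU_FREED enum result it now returns an ibool that is TRUE when the block was freed. The fallback logic is unchanged, so the caller in btr_blob_free() reads roughly as follows (a sketch; the second call, which frees only the uncompressed frame, is cut off in this diff):

	if (!buf_LRU_free_block(&block->page, all)
	    && all && block->page.zip.data) {
		/* attempt to deallocate just the uncompressed page
		if the whole block cannot be deallocated */

		buf_LRU_free_block(&block->page, FALSE);
	}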