author    Marko Mäkelä <marko.makela@oracle.com>  2011-09-08 16:25:45 +0300
committer Marko Mäkelä <marko.makela@oracle.com>  2011-09-08 16:25:45 +0300
commit    109185f1ef1573de50d9c5c406e41504dc401226 (patch)
tree      006c2fab77a9755bf98b21c770bcdefd1a3ceeec /storage/innobase/btr/btr0cur.c
parent    a8f704a106c2b1dd0e6155a2f2d799b4dfdf4f4f (diff)
parent    8c545acd5367950d3d03590880781089040016e0 (diff)
Merge mysql-5.1 to mysql-5.5.
Diffstat (limited to 'storage/innobase/btr/btr0cur.c')
-rw-r--r--  storage/innobase/btr/btr0cur.c  61
1 file changed, 32 insertions(+), 29 deletions(-)
diff --git a/storage/innobase/btr/btr0cur.c b/storage/innobase/btr/btr0cur.c
index 4a5a247b022..3021bb71929 100644
--- a/storage/innobase/btr/btr0cur.c
+++ b/storage/innobase/btr/btr0cur.c
@@ -1845,6 +1845,7 @@ btr_cur_update_in_place(
roll_ptr_t roll_ptr = 0;
trx_t* trx;
ulint was_delete_marked;
+ ibool is_hashed;
mem_heap_t* heap = NULL;
ulint offsets_[REC_OFFS_NORMAL_SIZE];
ulint* offsets = offsets_;
@@ -1886,7 +1887,21 @@ btr_cur_update_in_place(
return(err);
}
- if (block->is_hashed) {
+ if (!(flags & BTR_KEEP_SYS_FLAG)) {
+ row_upd_rec_sys_fields(rec, NULL,
+ index, offsets, trx, roll_ptr);
+ }
+
+ was_delete_marked = rec_get_deleted_flag(
+ rec, page_is_comp(buf_block_get_frame(block)));
+
+ is_hashed = block->is_hashed;
+
+ if (is_hashed) {
+ /* TO DO: Can we skip this if none of the fields
+ index->search_info->curr_n_fields
+ are being updated? */
+
/* The function row_upd_changes_ord_field_binary works only
if the update vector was built for a clustered index, we must
NOT call it if index is secondary */
@@ -1902,17 +1917,9 @@ btr_cur_update_in_place(
rw_lock_x_lock(&btr_search_latch);
}
- if (!(flags & BTR_KEEP_SYS_FLAG)) {
- row_upd_rec_sys_fields(rec, NULL,
- index, offsets, trx, roll_ptr);
- }
-
- was_delete_marked = rec_get_deleted_flag(
- rec, page_is_comp(buf_block_get_frame(block)));
-
row_upd_rec_in_place(rec, index, offsets, update, page_zip);
- if (block->is_hashed) {
+ if (is_hashed) {
rw_lock_x_unlock(&btr_search_latch);
}
@@ -2638,7 +2645,8 @@ btr_cur_parse_del_mark_set_clust_rec(
/* We do not need to reserve btr_search_latch, as the page
is only being recovered, and there cannot be a hash index to
- it. */
+ it. Besides, these fields are being updated in place
+ and the adaptive hash index does not depend on them. */
btr_rec_set_deleted_flag(rec, page_zip, val);
@@ -2718,9 +2726,9 @@ btr_cur_del_mark_set_clust_rec(
return(err);
}
- if (block->is_hashed) {
- rw_lock_x_lock(&btr_search_latch);
- }
+ /* The btr_search_latch is not needed here, because
+ the adaptive hash index does not depend on the delete-mark
+ and the delete-mark is being updated in place. */
page_zip = buf_block_get_page_zip(block);
@@ -2734,10 +2742,6 @@ btr_cur_del_mark_set_clust_rec(
index, offsets, trx, roll_ptr);
}
- if (block->is_hashed) {
- rw_lock_x_unlock(&btr_search_latch);
- }
-
btr_cur_del_mark_set_clust_rec_log(flags, rec, index, val, trx,
roll_ptr, mtr);
@@ -2813,7 +2817,8 @@ btr_cur_parse_del_mark_set_sec_rec(
/* We do not need to reserve btr_search_latch, as the page
is only being recovered, and there cannot be a hash index to
- it. */
+ it. Besides, the delete-mark flag is being updated in place
+ and the adaptive hash index does not depend on it. */
btr_rec_set_deleted_flag(rec, page_zip, val);
}
@@ -2861,16 +2866,11 @@ btr_cur_del_mark_set_sec_rec(
ut_ad(!!page_rec_is_comp(rec)
== dict_table_is_comp(cursor->index->table));
- if (block->is_hashed) {
- rw_lock_x_lock(&btr_search_latch);
- }
-
+ /* We do not need to reserve btr_search_latch, as the
+ delete-mark flag is being updated in place and the adaptive
+ hash index does not depend on it. */
btr_rec_set_deleted_flag(rec, buf_block_get_page_zip(block), val);
- if (block->is_hashed) {
- rw_lock_x_unlock(&btr_search_latch);
- }
-
btr_cur_del_mark_set_sec_rec_log(rec, val, mtr);
return(DB_SUCCESS);
@@ -2891,8 +2891,11 @@ btr_cur_set_deleted_flag_for_ibuf(
ibool val, /*!< in: value to set */
mtr_t* mtr) /*!< in: mtr */
{
- /* We do not need to reserve btr_search_latch, as the page has just
- been read to the buffer pool and there cannot be a hash index to it. */
+ /* We do not need to reserve btr_search_latch, as the page
+ has just been read to the buffer pool and there cannot be
+ a hash index to it. Besides, the delete-mark flag is being
+ updated in place and the adaptive hash index does not depend
+ on it. */
btr_rec_set_deleted_flag(rec, page_zip, val);
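
The net effect of the btr_cur_update_in_place() hunks above is a narrower btr_search_latch critical section: the system-column update and the delete-flag read now happen before the latch is taken, only the in-place record update is wrapped in the X-latch (and only when block->is_hashed), and plain delete-marking takes no latch at all, because the adaptive hash index does not depend on the delete-mark. The standalone C sketch below illustrates that locking pattern with a pthreads rwlock standing in for btr_search_latch; it is not InnoDB code, and all names (toy_block_t, update_rec_in_place(), set_delete_mark(), etc.) are hypothetical stand-ins.

/* Minimal standalone sketch of the latching pattern; compile with
   cc sketch.c -lpthread.  None of these identifiers exist in InnoDB. */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

/* Stand-in for btr_search_latch, which protects the adaptive hash index. */
static pthread_rwlock_t search_latch = PTHREAD_RWLOCK_INITIALIZER;

typedef struct {
	bool	is_hashed;	/* does the adaptive hash index point into this page? */
	bool	delete_mark;	/* toy delete-mark flag of a record on the page */
	long	trx_id;		/* toy system column updated in place */
} toy_block_t;

/* System columns are updated in place and the adaptive hash index does
   not depend on them, so this step needs no latch (moved before the
   latch acquisition in the patch). */
static void update_sys_fields(toy_block_t* block, long trx_id)
{
	block->trx_id = trx_id;
}

/* In-place update of indexed fields: only this step can invalidate hash
   pointers, so only it is wrapped in the X-latch, and only when the page
   actually has hashed records. */
static void update_rec_in_place(toy_block_t* block)
{
	bool is_hashed = block->is_hashed;	/* read once, as in the patch */

	if (is_hashed) {
		pthread_rwlock_wrlock(&search_latch);
	}

	/* ... the row_upd_rec_in_place() equivalent would go here ... */

	if (is_hashed) {
		pthread_rwlock_unlock(&search_latch);
	}
}

/* Delete-marking: after this merge, no btr_search_latch at all, because
   the adaptive hash index does not depend on the delete-mark flag. */
static void set_delete_mark(toy_block_t* block, bool val)
{
	block->delete_mark = val;
}

int main(void)
{
	toy_block_t block = { .is_hashed = true, .delete_mark = false, .trx_id = 0 };

	update_sys_fields(&block, 42);
	update_rec_in_place(&block);
	set_delete_mark(&block, true);

	printf("trx_id=%ld delete_mark=%d\n", block.trx_id, block.delete_mark);
	return 0;
}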