Diffstat (limited to 'storage/innobase/row/row0vers.c')
-rw-r--r--  storage/innobase/row/row0vers.c  74
1 file changed, 53 insertions(+), 21 deletions(-)
diff --git a/storage/innobase/row/row0vers.c b/storage/innobase/row/row0vers.c
index b32ab8822f4..ab3b6385146 100644
--- a/storage/innobase/row/row0vers.c
+++ b/storage/innobase/row/row0vers.c
@@ -62,7 +62,7 @@ row_vers_impl_x_locked_off_kernel(
ulint err;
mtr_t mtr;
ulint comp;
-
+
#ifdef UNIV_SYNC_DEBUG
ut_ad(mutex_own(&kernel_mutex));
ut_ad(!rw_lock_own(&(purge_sys->latch), RW_LOCK_SHARED));
@@ -71,12 +71,12 @@ row_vers_impl_x_locked_off_kernel(
mutex_exit(&kernel_mutex);
mtr_start(&mtr);
-
+
/* Search for the clustered index record: this is a time-consuming
operation: therefore we release the kernel mutex; also, the release
is required by the latching order convention. The latch on the
clustered index locks the top of the stack of versions. We also
- reserve purge_latch to lock the bottom of the version stack. */
+ reserve purge_latch to lock the bottom of the version stack. */
clust_rec = row_get_clust_rec(BTR_SEARCH_LEAF, rec, index,
&clust_index, &mtr);
@@ -96,7 +96,7 @@ row_vers_impl_x_locked_off_kernel(
mutex_enter(&kernel_mutex);
mtr_commit(&mtr);
- return(NULL);
+ return(NULL);
}
heap = mem_heap_create(1024);
@@ -107,7 +107,7 @@ row_vers_impl_x_locked_off_kernel(
mtr_s_lock(&(purge_sys->latch), &mtr);
mutex_enter(&kernel_mutex);
-
+
trx = NULL;
if (!trx_is_active(trx_id)) {
/* The transaction that modified or inserted clust_rec is no
@@ -123,7 +123,7 @@ row_vers_impl_x_locked_off_kernel(
comp = page_rec_is_comp(rec);
ut_ad(index->table == clust_index->table);
- ut_ad(!!comp == index->table->comp);
+ ut_ad(!!comp == dict_table_is_comp(index->table));
ut_ad(!comp == !page_rec_is_comp(clust_rec));
/* We look up if some earlier version, which was modified by the trx_id
@@ -179,7 +179,7 @@ row_vers_impl_x_locked_off_kernel(
may assert the following: */
ut_ad(err == DB_SUCCESS);
-
+
if (prev_version == NULL) {
/* It was a freshly inserted version: there is an
implicit x-lock on rec */
@@ -273,7 +273,7 @@ row_vers_must_preserve_del_marked(
/* A purge operation is not yet allowed to remove this
delete marked record */
-
+
return(TRUE);
}
@@ -313,7 +313,7 @@ row_vers_old_has_index_entry(
ulint comp;
ut_ad(mtr_memo_contains(mtr, buf_block_align(rec), MTR_MEMO_PAGE_X_FIX)
- || mtr_memo_contains(mtr, buf_block_align(rec),
+ || mtr_memo_contains(mtr, buf_block_align(rec),
MTR_MEMO_PAGE_S_FIX));
#ifdef UNIV_SYNC_DEBUG
ut_ad(!rw_lock_own(&(purge_sys->latch), RW_LOCK_SHARED));
@@ -323,7 +323,7 @@ row_vers_old_has_index_entry(
clust_index = dict_table_get_first_index(index->table);
comp = page_rec_is_comp(rec);
- ut_ad(!index->table->comp == !comp);
+ ut_ad(!dict_table_is_comp(index->table) == !comp);
heap = mem_heap_create(1024);
clust_offsets = rec_get_offsets(rec, clust_index, NULL,
ULINT_UNDEFINED, &heap);
@@ -333,7 +333,7 @@ row_vers_old_has_index_entry(
rec, clust_offsets, heap);
entry = row_build_index_entry(row, index, heap);
- /* NOTE that we cannot do the comparison as binary
+ /* NOTE that we cannot do the comparison as binary
fields because the row is maybe being modified so that
the clustered index record has already been updated
to a different binary value in a char field, but the
@@ -373,7 +373,7 @@ row_vers_old_has_index_entry(
prev_version, clust_offsets, heap);
entry = row_build_index_entry(row, index, heap);
- /* NOTE that we cannot do the comparison as binary
+ /* NOTE that we cannot do the comparison as binary
fields because maybe the secondary index record has
already been updated to a different binary value in
a char field, but the collation identifies the old
@@ -421,14 +421,14 @@ row_vers_build_for_consistent_read(
{
rec_t* version;
rec_t* prev_version;
- dulint prev_trx_id;
+ dulint trx_id;
mem_heap_t* heap = NULL;
byte* buf;
ulint err;
ut_ad(index->type & DICT_CLUSTERED);
ut_ad(mtr_memo_contains(mtr, buf_block_align(rec), MTR_MEMO_PAGE_X_FIX)
- || mtr_memo_contains(mtr, buf_block_align(rec),
+ || mtr_memo_contains(mtr, buf_block_align(rec),
MTR_MEMO_PAGE_S_FIX));
#ifdef UNIV_SYNC_DEBUG
ut_ad(!rw_lock_own(&(purge_sys->latch), RW_LOCK_SHARED));
@@ -436,16 +436,48 @@ row_vers_build_for_consistent_read(
ut_ad(rec_offs_validate(rec, index, *offsets));
- ut_ad(!read_view_sees_trx_id(view,
- row_get_rec_trx_id(rec, index, *offsets)));
+ trx_id = row_get_rec_trx_id(rec, index, *offsets);
+
+ ut_ad(!read_view_sees_trx_id(view, trx_id));
rw_lock_s_lock(&(purge_sys->latch));
version = rec;
for (;;) {
mem_heap_t* heap2 = heap;
+ trx_undo_rec_t* undo_rec;
+ dulint roll_ptr;
+ dulint undo_no;
heap = mem_heap_create(1024);
+ /* If we have a high-granularity consistent read view and the
+ creating transaction of the view is the same as the trx_id in
+ the record, we see this record only when the undo_no of the
+ record is < the undo_no in the view. */
+
+ if (view->type == VIEW_HIGH_GRANULARITY
+ && ut_dulint_cmp(view->creator_trx_id, trx_id) == 0) {
+
+ roll_ptr = row_get_rec_roll_ptr(version, index,
+ *offsets);
+ undo_rec = trx_undo_get_undo_rec_low(roll_ptr, heap);
+ undo_no = trx_undo_rec_get_undo_no(undo_rec);
+ mem_heap_empty(heap);
+
+ if (ut_dulint_cmp(view->undo_no, undo_no) > 0) {
+ /* The view already sees this version: we can
+ copy it to in_heap and return */
+
+ buf = mem_heap_alloc(in_heap,
+ rec_offs_size(*offsets));
+ *old_vers = rec_copy(buf, version, *offsets);
+ rec_offs_make_valid(*old_vers, index, *offsets);
+ err = DB_SUCCESS;
+
+ break;
+ }
+ }
+
err = trx_undo_prev_version_build(rec, mtr, version, index,
*offsets, heap, &prev_version);
if (heap2) {
@@ -466,10 +498,10 @@ row_vers_build_for_consistent_read(
*offsets = rec_get_offsets(prev_version, index, *offsets,
ULINT_UNDEFINED, offset_heap);
- prev_trx_id = row_get_rec_trx_id(prev_version, index,
- *offsets);
- if (read_view_sees_trx_id(view, prev_trx_id)) {
+ trx_id = row_get_rec_trx_id(prev_version, index, *offsets);
+
+ if (read_view_sees_trx_id(view, trx_id)) {
/* The view already sees this version: we can copy
it to in_heap and return */
@@ -558,8 +590,8 @@ row_vers_build_for_semi_consistent_read(
mutex_exit(&kernel_mutex);
if (!version_trx
- || version_trx->conc_state == TRX_NOT_STARTED
- || version_trx->conc_state == TRX_COMMITTED_IN_MEMORY) {
+ || version_trx->conc_state == TRX_NOT_STARTED
+ || version_trx->conc_state == TRX_COMMITTED_IN_MEMORY) {
/* We found a version that belongs to a
committed transaction: return it. */
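
For reference, the visibility rule this patch adds to row_vers_build_for_consistent_read() can be sketched on its own. The sketch below is not InnoDB code: the struct layout, field widths, and helper names (view_sees_trx_id, version_visible) are simplified stand-ins for the real read_view_t/dulint machinery, assumed here only to illustrate the extra undo_no check for high-granularity views.

/*
 * Standalone sketch (not InnoDB code) of the visibility decision:
 * a high-granularity view created by trx_id sees that transaction's
 * own earlier changes only up to the view's undo_no; all other
 * versions fall back to the ordinary trx id visibility test.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

enum view_type { VIEW_NORMAL, VIEW_HIGH_GRANULARITY };

struct read_view {
	enum view_type	type;
	uint64_t	creator_trx_id;	/* trx that created the view */
	uint64_t	undo_no;	/* first undo number NOT visible */
	uint64_t	up_limit_id;	/* trx ids below this are visible */
};

/* Simplified stand-in for read_view_sees_trx_id(). */
static bool view_sees_trx_id(const struct read_view *view, uint64_t trx_id)
{
	return trx_id < view->up_limit_id;
}

/* Mirrors the two tests done on each version in the patched loop. */
static bool version_visible(const struct read_view *view,
			    uint64_t trx_id, uint64_t rec_undo_no)
{
	if (view->type == VIEW_HIGH_GRANULARITY
	    && view->creator_trx_id == trx_id) {

		/* Our own change: visible only if its undo number
		precedes the view's undo_no. */
		return rec_undo_no < view->undo_no;
	}

	return view_sees_trx_id(view, trx_id);
}

int main(void)
{
	struct read_view view = {
		.type = VIEW_HIGH_GRANULARITY,
		.creator_trx_id = 100,
		.undo_no = 5,
		.up_limit_id = 90,
	};

	printf("%d %d %d\n",
	       version_visible(&view, 100, 3),	/* 1: own change, undo_no 3 < 5 */
	       version_visible(&view, 100, 7),	/* 0: own change made after the view */
	       version_visible(&view, 80, 0));	/* 1: committed before the view */
	return 0;
}

The VIEW_HIGH_GRANULARITY case covers cursor-style views, where the creating transaction must see the state as of the view's creation, including its own changes up to that point but not the ones it made afterwards; hence the comparison on undo_no rather than on trx_id alone.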