Diffstat (limited to 'storage/innobase/row/row0ins.cc')
-rw-r--r--  storage/innobase/row/row0ins.cc  300
1 file changed, 36 insertions(+), 264 deletions(-)
diff --git a/storage/innobase/row/row0ins.cc b/storage/innobase/row/row0ins.cc
index 720ad44b4b8..20c64a4288c 100644
--- a/storage/innobase/row/row0ins.cc
+++ b/storage/innobase/row/row0ins.cc
@@ -155,10 +155,7 @@ row_ins_alloc_sys_fields(
ut_ad(dtuple_get_n_fields(row) == dict_table_get_n_cols(table));
/* allocate buffer to hold the needed system created hidden columns. */
- uint len = DATA_ROW_ID_LEN + DATA_TRX_ID_LEN;
- if (!dict_table_is_intrinsic(table)) {
- len += DATA_ROLL_PTR_LEN;
- }
+ const uint len = DATA_ROW_ID_LEN + DATA_TRX_ID_LEN + DATA_ROLL_PTR_LEN;
ptr = static_cast<byte*>(mem_heap_zalloc(heap, len));
/* 1. Populate row-id */
@@ -183,13 +180,11 @@ row_ins_alloc_sys_fields(
ptr += DATA_TRX_ID_LEN;
- if (!dict_table_is_intrinsic(table)) {
- col = dict_table_get_sys_col(table, DATA_ROLL_PTR);
+ col = dict_table_get_sys_col(table, DATA_ROLL_PTR);
- dfield = dtuple_get_nth_field(row, dict_col_get_no(col));
+ dfield = dtuple_get_nth_field(row, dict_col_get_no(col));
- dfield_set_data(dfield, ptr, DATA_ROLL_PTR_LEN);
- }
+ dfield_set_data(dfield, ptr, DATA_ROLL_PTR_LEN);
}
/*********************************************************************//**
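
The hunk above makes row_ins_alloc_sys_fields() reserve space for all three hidden system columns (DB_ROW_ID, DB_TRX_ID, DB_ROLL_PTR) unconditionally, now that the intrinsic-table case that skipped the roll pointer is gone. Below is a minimal standalone sketch of the resulting buffer layout; it is not part of the patch, the constants are stand-ins for DATA_ROW_ID_LEN, DATA_TRX_ID_LEN and DATA_ROLL_PTR_LEN, and none of it is the real InnoDB API.

#include <cstddef>
#include <cstdint>
#include <vector>

/* Stand-ins for the constants from data0type.h (6, 6 and 7 bytes). */
constexpr std::size_t ROW_ID_LEN   = 6;
constexpr std::size_t TRX_ID_LEN   = 6;
constexpr std::size_t ROLL_PTR_LEN = 7;

int main()
{
        /* One zero-filled allocation holds the three hidden columns back to
        back, mirroring the single mem_heap_zalloc() in the patched code. */
        std::vector<std::uint8_t> sys(ROW_ID_LEN + TRX_ID_LEN + ROLL_PTR_LEN, 0);

        std::uint8_t* row_id   = sys.data();
        std::uint8_t* trx_id   = row_id + ROW_ID_LEN;
        std::uint8_t* roll_ptr = trx_id + TRX_ID_LEN;

        /* Each pointer is what dfield_set_data() would attach to the
        corresponding dfield of the row tuple. */
        (void)row_id; (void)trx_id; (void)roll_ptr;
        return 0;
}
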
@@ -2152,7 +2147,7 @@ row_ins_scan_sec_index_for_duplicate(
cmp = cmp_dtuple_rec(entry, rec, offsets);
- if (cmp == 0 && !index->allow_duplicates) {
+ if (cmp == 0) {
if (row_ins_dupl_error_with_rec(rec, entry,
index, offsets)) {
err = DB_DUPLICATE_KEY;
@@ -2174,7 +2169,7 @@ row_ins_scan_sec_index_for_duplicate(
goto end_scan;
}
} else {
- ut_a(cmp < 0 || index->allow_duplicates);
+ ut_a(cmp < 0);
goto end_scan;
}
} while (btr_pcur_move_to_next(&pcur, mtr));
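
The two hunks above drop the index->allow_duplicates escape hatch from row_ins_scan_sec_index_for_duplicate(): every record that compares equal to the new entry is now handed to row_ins_dupl_error_with_rec(), and the scan stops once the cursor has moved past the entry. A rough standalone sketch of that control flow, using plain int keys and illustrative names rather than the real cursor and record types:

#include <cstdio>
#include <vector>

enum class Err { SUCCESS, DUPLICATE_KEY };

/* Illustrative stand-in: records are plain ints in index order. */
static Err scan_for_duplicate(const std::vector<int>& recs, int entry)
{
        for (int rec : recs) {
                if (rec == entry) {
                        /* cmp == 0 in the patch: this is where
                        row_ins_dupl_error_with_rec() would decide whether
                        the match really is a duplicate. */
                        return Err::DUPLICATE_KEY;
                } else if (rec > entry) {
                        /* cmp < 0 in the patch: the scan has moved past the
                        insert position, so stop looking. */
                        break;
                }
        }
        return Err::SUCCESS;
}

int main()
{
        std::vector<int> recs = {10, 20, 30};
        std::printf("%d\n", scan_for_duplicate(recs, 20) == Err::DUPLICATE_KEY);
        return 0;
}
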
@@ -2507,9 +2502,6 @@ row_ins_clust_index_entry_low(
Disable locking as temp-tables are local to a connection. */
ut_ad(flags & BTR_NO_LOCKING_FLAG);
- ut_ad(!dict_table_is_intrinsic(index->table)
- || (flags & BTR_NO_UNDO_LOG_FLAG));
-
mtr.set_log_mode(MTR_LOG_NO_REDO);
}
@@ -2528,9 +2520,6 @@ row_ins_clust_index_entry_low(
cursor->thr = thr;
}
- ut_ad(!dict_table_is_intrinsic(index->table)
- || cursor->page_cur.block->made_dirty_with_no_latch);
-
#ifdef UNIV_DEBUG
{
page_t* page = btr_cur_get_page(cursor);
@@ -2542,15 +2531,7 @@ row_ins_clust_index_entry_low(
}
#endif /* UNIV_DEBUG */
- /* Allowing duplicates in clustered index is currently enabled
- only for intrinsic table and caller understand the limited
- operation that can be done in this case. */
- ut_ad(!index->allow_duplicates
- || (index->allow_duplicates
- && dict_table_is_intrinsic(index->table)));
-
- if (!index->allow_duplicates
- && n_uniq
+ if (n_uniq
&& (cursor->up_match >= n_uniq || cursor->low_match >= n_uniq)) {
if (flags
@@ -2593,20 +2574,13 @@ err_exit:
}
/* Note: Allowing duplicates would qualify for modification of
- an existing record as the new entry is exactly same as old entry.
- Avoid this check if allow duplicates is enabled. */
- if (!index->allow_duplicates && row_ins_must_modify_rec(cursor)) {
+ an existing record as the new entry is exactly same as old entry. */
+ if (row_ins_must_modify_rec(cursor)) {
/* There is already an index entry with a long enough common
prefix, we must convert the insert into a modify of an
existing record */
mem_heap_t* entry_heap = mem_heap_create(1024);
- /* If the existing record is being modified and the new record
- doesn't fit the provided slot then existing record is added
- to free list and new record is inserted. This also means
- cursor that we have cached for SELECT is now invalid. */
- index->last_sel_cur->invalid = true;
-
err = row_ins_clust_index_entry_by_modify(
&pcur, flags, mode, &offsets, &offsets_heap,
entry_heap, entry, thr, &mtr);
@@ -2690,147 +2664,6 @@ func_exit:
DBUG_RETURN(err);
}
-/** This is a specialized function meant for direct insertion to
-auto-generated clustered index based on cached position from
-last successful insert. To be used when data is sorted.
-
-@param[in] mode BTR_MODIFY_LEAF or BTR_MODIFY_TREE.
- depending on whether we wish optimistic or
- pessimistic descent down the index tree
-@param[in,out] index clustered index
-@param[in,out] entry index entry to insert
-@param[in] thr query thread
-
-@return error code */
-static
-dberr_t
-row_ins_sorted_clust_index_entry(
- ulint mode,
- dict_index_t* index,
- dtuple_t* entry,
- ulint n_ext,
- que_thr_t* thr)
-{
- dberr_t err = DB_SUCCESS;
- mtr_t* mtr;
- const bool commit_mtr = mode == BTR_MODIFY_TREE;
-
- mem_heap_t* offsets_heap = NULL;
- ulint offsets_[REC_OFFS_NORMAL_SIZE];
- ulint* offsets = offsets_;
- rec_offs_init(offsets_);
-
- DBUG_ENTER("row_ins_sorted_clust_index_entry");
-
- ut_ad(index->last_ins_cur != NULL);
- ut_ad(dict_index_is_clust(index));
- ut_ad(dict_table_is_intrinsic(index->table));
- ut_ad(dict_index_is_auto_gen_clust(index));
-
- btr_cur_t cursor;
- cursor.thr = thr;
- mtr = &index->last_ins_cur->mtr;
-
- /* Search for position if tree needs to be split or if last position
- is not cached. */
- if (mode == BTR_MODIFY_TREE
- || index->last_ins_cur->rec == NULL
- || index->last_ins_cur->disable_caching) {
-
- /* Commit the previous mtr. */
- index->last_ins_cur->release();
-
- mtr_start(mtr);
- mtr_set_log_mode(mtr, MTR_LOG_NO_REDO);
-
- err = btr_cur_search_to_nth_level_with_no_latch(
- index, 0, entry, PAGE_CUR_LE, &cursor,
- __FILE__, __LINE__, mtr);
- ut_ad(cursor.page_cur.block != NULL);
- ut_ad(cursor.page_cur.block->made_dirty_with_no_latch);
- } else {
- cursor.index = index;
-
- cursor.page_cur.index = index;
-
- cursor.page_cur.rec = index->last_ins_cur->rec;
-
- cursor.page_cur.block = index->last_ins_cur->block;
- }
-
- const ulint flags = BTR_NO_LOCKING_FLAG | BTR_NO_UNDO_LOG_FLAG;
-
- for (;;) {
- rec_t* insert_rec;
- big_rec_t* big_rec = NULL;
-
- if (mode != BTR_MODIFY_TREE) {
- ut_ad((mode & ~BTR_ALREADY_S_LATCHED)
- == BTR_MODIFY_LEAF);
-
- err = btr_cur_optimistic_insert(
- flags, &cursor, &offsets, &offsets_heap, entry,
- &insert_rec, &big_rec, n_ext, thr, mtr);
- if (err != DB_SUCCESS) {
- break;
- }
- } else {
- /* TODO: Check if this is needed for intrinsic table. */
- if (buf_LRU_buf_pool_running_out()) {
- err = DB_LOCK_TABLE_FULL;
- break;
- }
-
- err = btr_cur_optimistic_insert(
- flags, &cursor, &offsets, &offsets_heap, entry,
- &insert_rec, &big_rec, n_ext, thr, mtr);
-
- if (err == DB_FAIL) {
- err = btr_cur_pessimistic_insert(
- flags, &cursor, &offsets, &offsets_heap,
- entry, &insert_rec, &big_rec, n_ext,
- thr, mtr);
- }
- }
-
- if (big_rec != NULL) {
- /* If index involves big-record optimization is
- turned-off. */
- index->last_ins_cur->release();
- index->last_ins_cur->disable_caching = true;
-
- err = row_ins_index_entry_big_rec(
- entry, big_rec, offsets, &offsets_heap, index,
- thr_get_trx(thr)->mysql_thd, __FILE__, __LINE__);
-
- dtuple_convert_back_big_rec(index, entry, big_rec);
-
- } else if (err == DB_SUCCESS ) {
- if (!commit_mtr
- && !index->last_ins_cur->disable_caching) {
- index->last_ins_cur->rec = insert_rec;
-
- index->last_ins_cur->block
- = cursor.page_cur.block;
- } else {
- index->last_ins_cur->release();
- }
- }
-
- break;
- }
-
- if (err != DB_SUCCESS) {
- index->last_ins_cur->release();
- }
-
- if (offsets_heap != NULL) {
- mem_heap_free(offsets_heap);
- }
-
- DBUG_RETURN(err);
-}
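
The function deleted above was the intrinsic-table fast path for sorted, auto-generated clustered index keys: it cached the position of the last successful insert and reused it for the next row, searching from the root only when the cache was invalid, caching was disabled, or the tree had to be modified pessimistically. A compact standalone sketch of that position-caching idea, using std::map as a stand-in ordered tree rather than anything from InnoDB:

#include <iterator>
#include <map>
#include <optional>

/* Hypothetical cache of the last successful insert position; the removed
code kept a similar record/block pair in index->last_ins_cur. */
struct CachedCursor {
        std::optional<std::map<int, int>::iterator> last;
};

/* Insert keys that arrive in sorted order, using the cached position as a
hint so the tree is not searched from the root for every row. */
static void insert_sorted(std::map<int, int>& tree, CachedCursor& cur,
                          int key, int val)
{
        auto hint = cur.last ? std::next(*cur.last) : tree.end();
        cur.last  = tree.insert(hint, {key, val});
}

int main()
{
        std::map<int, int> tree;
        CachedCursor cur;
        for (int k = 0; k < 5; ++k) {
                insert_sorted(tree, cur, k, k * 10);    /* sorted input */
        }
        return static_cast<int>(tree.size()) - 5;       /* 0 on success */
}
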
-
/** Start a mini-transaction and check if the index will be dropped.
@param[in,out] mtr mini-transaction
@param[in,out] index secondary index
@@ -2923,8 +2756,7 @@ row_ins_sec_index_entry_low(
cursor.thr = thr;
cursor.rtr_info = NULL;
- ut_ad(thr_get_trx(thr)->id != 0
- || dict_table_is_intrinsic(index->table));
+ ut_ad(thr_get_trx(thr)->id != 0);
mtr_start(&mtr);
mtr.set_named_space(index->space);
@@ -2936,9 +2768,6 @@ row_ins_sec_index_entry_low(
Disable locking as temp-tables are local to a connection. */
ut_ad(flags & BTR_NO_LOCKING_FLAG);
- ut_ad(!dict_table_is_intrinsic(index->table)
- || (flags & BTR_NO_UNDO_LOG_FLAG));
-
mtr.set_log_mode(MTR_LOG_NO_REDO);
} else if (!dict_index_is_spatial(index)) {
/* Enable insert buffering if it's neither temp-table
@@ -3008,18 +2837,10 @@ row_ins_sec_index_entry_low(
goto func_exit;});
} else {
- if (dict_table_is_intrinsic(index->table)) {
- err = btr_cur_search_to_nth_level_with_no_latch(
- index, 0, entry, PAGE_CUR_LE, &cursor,
- __FILE__, __LINE__, &mtr);
- ut_ad(cursor.page_cur.block != NULL);
- ut_ad(cursor.page_cur.block->made_dirty_with_no_latch);
- } else {
- err = btr_cur_search_to_nth_level(
- index, 0, entry, PAGE_CUR_LE,
- search_mode,
- &cursor, 0, __FILE__, __LINE__, &mtr);
- }
+ err = btr_cur_search_to_nth_level(
+ index, 0, entry, PAGE_CUR_LE,
+ search_mode,
+ &cursor, 0, __FILE__, __LINE__, &mtr);
}
if (err != DB_SUCCESS) {
@@ -3111,19 +2932,11 @@ row_ins_sec_index_entry_low(
prevent any insertion of a duplicate by another
transaction. Let us now reposition the cursor and
continue the insertion. */
- if (dict_table_is_intrinsic(index->table)) {
- err = btr_cur_search_to_nth_level_with_no_latch(
- index, 0, entry, PAGE_CUR_LE, &cursor,
- __FILE__, __LINE__, &mtr);
- ut_ad(cursor.page_cur.block != NULL);
- ut_ad(cursor.page_cur.block->made_dirty_with_no_latch);
- } else {
- btr_cur_search_to_nth_level(
- index, 0, entry, PAGE_CUR_LE,
- (search_mode
- & ~(BTR_INSERT | BTR_IGNORE_SEC_UNIQUE)),
- &cursor, 0, __FILE__, __LINE__, &mtr);
- }
+ btr_cur_search_to_nth_level(
+ index, 0, entry, PAGE_CUR_LE,
+ (search_mode
+ & ~(BTR_INSERT | BTR_IGNORE_SEC_UNIQUE)),
+ &cursor, 0, __FILE__, __LINE__, &mtr);
}
if (!(flags & BTR_NO_LOCKING_FLAG)
@@ -3169,12 +2982,6 @@ row_ins_sec_index_entry_low(
}
if (row_ins_must_modify_rec(&cursor)) {
- /* If the existing record is being modified and the new record
- is doesn't fit the provided slot then existing record is added
- to free list and new record is inserted. This also means
- cursor that we have cached for SELECT is now invalid. */
- index->last_sel_cur->invalid = true;
-
/* There is already an index entry with a long enough common
prefix, we must convert the insert into a modify of an
existing record */
@@ -3339,26 +3146,14 @@ row_ins_clust_index_entry(
n_uniq = dict_index_is_unique(index) ? index->n_uniq : 0;
/* Try first optimistic descent to the B-tree */
- ulint flags;
+ log_free_check();
+ const ulint flags = dict_table_is_temporary(index->table)
+ ? BTR_NO_LOCKING_FLAG
+ : 0;
- if (!dict_table_is_intrinsic(index->table)) {
- log_free_check();
- flags = dict_table_is_temporary(index->table)
- ? BTR_NO_LOCKING_FLAG
- : 0;
- } else {
- flags = BTR_NO_LOCKING_FLAG | BTR_NO_UNDO_LOG_FLAG;
- }
-
- if (dict_table_is_intrinsic(index->table)
- && dict_index_is_auto_gen_clust(index)) {
- err = row_ins_sorted_clust_index_entry(
- BTR_MODIFY_LEAF, index, entry, n_ext, thr);
- } else {
- err = row_ins_clust_index_entry_low(
- flags, BTR_MODIFY_LEAF, index, n_uniq, entry,
- n_ext, thr, dup_chk_only);
- }
+ err = row_ins_clust_index_entry_low(
+ flags, BTR_MODIFY_LEAF, index, n_uniq, entry,
+ n_ext, thr, dup_chk_only);
DEBUG_SYNC_C_IF_THD(thr_get_trx(thr)->mysql_thd,
@@ -3370,21 +3165,11 @@ row_ins_clust_index_entry(
}
/* Try then pessimistic descent to the B-tree */
- if (!dict_table_is_intrinsic(index->table)) {
- log_free_check();
- } else {
- index->last_sel_cur->invalid = true;
- }
+ log_free_check();
- if (dict_table_is_intrinsic(index->table)
- && dict_index_is_auto_gen_clust(index)) {
- err = row_ins_sorted_clust_index_entry(
- BTR_MODIFY_TREE, index, entry, n_ext, thr);
- } else {
- err = row_ins_clust_index_entry_low(
- flags, BTR_MODIFY_TREE, index, n_uniq, entry,
- n_ext, thr, dup_chk_only);
- }
+ err = row_ins_clust_index_entry_low(
+ flags, BTR_MODIFY_TREE, index, n_uniq, entry,
+ n_ext, thr, dup_chk_only);
DBUG_RETURN(err);
}
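
With the intrinsic-table branches removed, the two hunks above leave row_ins_clust_index_entry() with the usual two-phase shape: a first attempt with BTR_MODIFY_LEAF, and a BTR_MODIFY_TREE retry only if the optimistic call reports DB_FAIL (typically because the entry does not fit on the leaf page). A schematic standalone sketch of that retry pattern, with illustrative names rather than the real call signatures:

#include <cstdio>

enum class Mode { MODIFY_LEAF, MODIFY_TREE };
enum class Err  { SUCCESS, FAIL, DUPLICATE_KEY };

/* Toy stand-in: pretend the leaf page is full, so only the tree-mode
retry, which may split pages, succeeds. */
static Err insert_low(Mode mode)
{
        return mode == Mode::MODIFY_LEAF ? Err::FAIL : Err::SUCCESS;
}

static Err insert_entry()
{
        /* Optimistic descent first: leaf-only latching, no page splits. */
        Err err = insert_low(Mode::MODIFY_LEAF);
        if (err != Err::FAIL) {
                return err;     /* success, or a hard error such as a duplicate key */
        }
        /* Pessimistic retry: descend again, this time allowed to split pages. */
        return insert_low(Mode::MODIFY_TREE);
}

int main()
{
        std::printf("%s\n", insert_entry() == Err::SUCCESS ? "inserted" : "error");
        return 0;
}
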
@@ -3422,24 +3207,17 @@ row_ins_sec_index_entry(
}
}
- ut_ad(thr_get_trx(thr)->id != 0
- || dict_table_is_intrinsic(index->table));
+ ut_ad(thr_get_trx(thr)->id != 0);
offsets_heap = mem_heap_create(1024);
heap = mem_heap_create(1024);
/* Try first optimistic descent to the B-tree */
- ulint flags;
-
- if (!dict_table_is_intrinsic(index->table)) {
- log_free_check();
- flags = dict_table_is_temporary(index->table)
- ? BTR_NO_LOCKING_FLAG
- : 0;
- } else {
- flags = BTR_NO_LOCKING_FLAG | BTR_NO_UNDO_LOG_FLAG;
- }
+ log_free_check();
+ const ulint flags = dict_table_is_temporary(index->table)
+ ? BTR_NO_LOCKING_FLAG
+ : 0;
err = row_ins_sec_index_entry_low(
flags, BTR_MODIFY_LEAF, index, offsets_heap, heap, entry,
@@ -3448,12 +3226,7 @@ row_ins_sec_index_entry(
mem_heap_empty(heap);
/* Try then pessimistic descent to the B-tree */
-
- if (!dict_table_is_intrinsic(index->table)) {
- log_free_check();
- } else {
- index->last_sel_cur->invalid = true;
- }
+ log_free_check();
err = row_ins_sec_index_entry_low(
flags, BTR_MODIFY_TREE, index,
@@ -3892,7 +3665,6 @@ row_ins_step(
node = static_cast<ins_node_t*>(thr->run_node);
ut_ad(que_node_get_type(node) == QUE_NODE_INSERT);
- ut_ad(!dict_table_is_intrinsic(node->table));
parent = que_node_get_parent(node);
sel_node = node->select;