From b8f4b984f9f23b336491c91c70be448b6fa8f095 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Marko=20M=C3=A4kel=C3=A4?= Date: Fri, 16 Dec 2022 17:08:56 +0200 Subject: MDEV-24685 fixup: Remove srv_n_file_io_threads The variable was not really being used for anything. The parameters innodb_read_io_threads, innodb_write_io_threads have replaced innodb_file_io_threads. --- storage/innobase/include/srv0srv.h | 3 --- storage/innobase/srv/srv0start.cc | 12 +----------- 2 files changed, 1 insertion(+), 14 deletions(-) (limited to 'storage/innobase') diff --git a/storage/innobase/include/srv0srv.h b/storage/innobase/include/srv0srv.h index ea04e8be75d..077e68e2a16 100644 --- a/storage/innobase/include/srv0srv.h +++ b/storage/innobase/include/srv0srv.h @@ -313,7 +313,6 @@ extern ulong srv_buf_pool_load_pages_abort; /** Lock table size in bytes */ extern ulint srv_lock_table_size; -extern uint srv_n_file_io_threads; extern my_bool srv_random_read_ahead; extern ulong srv_read_ahead_threshold; extern uint srv_n_read_io_threads; @@ -434,8 +433,6 @@ extern bool srv_log_file_created; extern ulint srv_dml_needed_delay; -#define SRV_MAX_N_IO_THREADS 130 - /** innodb_purge_threads; the number of purge tasks to use */ extern uint srv_n_purge_threads; diff --git a/storage/innobase/srv/srv0start.cc b/storage/innobase/srv/srv0start.cc index ae4f831bf2d..311a01ed719 100644 --- a/storage/innobase/srv/srv0start.cc +++ b/storage/innobase/srv/srv0start.cc @@ -110,9 +110,6 @@ lsn_t srv_shutdown_lsn; /** TRUE if a raw partition is in use */ ibool srv_start_raw_disk_in_use; -/** Number of IO threads to use */ -uint srv_n_file_io_threads; - /** UNDO tablespaces starts with space id. */ ulint srv_undo_space_id_start; @@ -1208,18 +1205,11 @@ dberr_t srv_start(bool create_new_db) return(srv_init_abort(err)); } - srv_n_file_io_threads = srv_n_read_io_threads + srv_n_write_io_threads; - - if (!srv_read_only_mode) { - /* Add the log and ibuf IO threads. */ - srv_n_file_io_threads += 2; - } else { + if (srv_read_only_mode) { ib::info() << "Disabling background log and ibuf IO write" << " threads."; } - ut_a(srv_n_file_io_threads <= SRV_MAX_N_IO_THREADS); - if (os_aio_init()) { ib::error() << "Cannot initialize AIO sub-system"; -- cgit v1.2.1 From 3ddc00dc3bf29f7cf326268265e47fda61e6a83e Mon Sep 17 00:00:00 2001 From: Vlad Lesin Date: Tue, 13 Dec 2022 16:06:13 +0300 Subject: MDEV-30225 RR isolation violation with locking unique search Before the fix, a next-key lock was requested for a locking unique search in the RR isolation level only if the record was delete-marked. There can be several delete-marked records for the same unique key, which is why InnoDB scans the records until either a non-delete-marked record is reached or all delete-marked records with the same unique key have been scanned. For range scans, next-key locks are used in RR to protect the scanned range from insertion of new records by other transactions, and this is also why next-key locks are used for delete-marked records in unique searches. If a record was not delete-marked, the requested lock type was "not-gap". When a record is not delete-marked at the time trx 1 requests a lock on it, and some other transaction holds a conflicting lock, trx 1 creates a waiting not-gap lock on the record and suspends. While trx 1 is suspended, the record can become delete-marked. When the lock is granted on the conflicting transaction's commit or rollback, its type is still "not-gap". So trx 1 ends up holding a "not-gap" lock on a delete-marked record in RR.
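A sketch of the resulting anomaly (schema and connection numbering are illustrative, not taken from the original bug report):

  -- setup
  create table t1 (a int primary key) engine=innodb;
  insert into t1 values (1);
  -- trx 2:
  begin;
  delete from t1 where a = 1;              -- holds a conflicting record lock
  -- trx 1 (REPEATABLE READ):
  begin;
  select * from t1 where a = 1 for update; -- enqueues a waiting "not-gap" lock
  -- trx 2:
  commit;                                  -- the record is now delete-marked,
                                           -- yet trx 1 is granted "not-gap"
  -- trx 3, before the fix:
  insert into t1 values (1);               -- succeeds while trx 1 is open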
That is, some other transaction can insert a record with the same unique key while trx 1 is still not committed, which violates the isolation level. The fix is to request next-key locks both for delete-marked and non-delete-marked records in unique searches under RR. --- storage/innobase/row/row0sel.cc | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) (limited to 'storage/innobase') diff --git a/storage/innobase/row/row0sel.cc b/storage/innobase/row/row0sel.cc index ff0f9d47892..395739e4b4e 100644 --- a/storage/innobase/row/row0sel.cc +++ b/storage/innobase/row/row0sel.cc @@ -5114,8 +5114,10 @@ wrong_offs: goto no_gap_lock; } + /* Set next-key lock both for delete- and non-delete-marked + records for unique search, because non-delete-marked record can + be marked as deleted while transaction suspends. */ if (!set_also_gap_locks - || (unique_search && !rec_get_deleted_flag(rec, comp)) || dict_index_is_spatial(index)) { goto no_gap_lock; -- cgit v1.2.1 From 7c5609fb647b7b013694a16acae7c5c5739b394b Mon Sep 17 00:00:00 2001 From: musvaage Date: Tue, 20 Dec 2022 17:41:24 -0600 Subject: typos --- storage/innobase/row/row0row.cc | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'storage/innobase') diff --git a/storage/innobase/row/row0row.cc b/storage/innobase/row/row0row.cc index 6f34c9fcc9b..729baa369c8 100644 --- a/storage/innobase/row/row0row.cc +++ b/storage/innobase/row/row0row.cc @@ -1191,7 +1191,7 @@ row_raw_format_int( ulint buf_size, /*!< in: output buffer size in bytes */ ibool* format_in_hex) /*!< out: should the data be - formated in hex */ + formatted in hex */ { ulint ret; @@ -1239,7 +1239,7 @@ row_raw_format_str( ulint buf_size, /*!< in: output buffer size in bytes */ ibool* format_in_hex) /*!< out: should the data be - formated in hex */ + formatted in hex */ { ulint charset_coll; -- cgit v1.2.1 From 5d506ac201b2bce35448fe9fe714e068bd6be487 Mon Sep 17 00:00:00 2001 From: Aleksey Midenkov Date: Tue, 27 Dec 2022 00:02:01 +0300 Subject: MDEV-25004 vers_force_trx option to force transactional System Versioning Works like vers_force, but forces trx_id-based system-versioned tables if the storage engine supports them (currently InnoDB only). Otherwise it creates a timestamp-based system-versioned table. --- storage/innobase/row/row0mysql.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'storage/innobase') diff --git a/storage/innobase/row/row0mysql.cc b/storage/innobase/row/row0mysql.cc index e79264d3fb8..8eeaa62b29e 100644 --- a/storage/innobase/row/row0mysql.cc +++ b/storage/innobase/row/row0mysql.cc @@ -1672,7 +1672,7 @@ row_fts_update_or_delete( if (new_doc_id == 0) { ib::error() << "InnoDB FTS: Doc ID cannot be 0"; - return(DB_FTS_INVALID_DOCID); + DBUG_RETURN(DB_FTS_INVALID_DOCID); } row_fts_do_update(trx, table, old_doc_id, new_doc_id); } -- cgit v1.2.1 From e056efdd6cfa62cc4c978fce5730af0b8d4c3c6b Mon Sep 17 00:00:00 2001 From: Aleksey Midenkov Date: Tue, 27 Dec 2022 00:02:02 +0300 Subject: MDEV-25004 Missing row in FTS_DOC_ID_INDEX during DELETE HISTORY

1. In case of a system-versioned table, add row_end to the FTS_DOC_ID index in fts_create_common_tables() and innobase_create_key_defs(). fts_n_uniq() returns 1 or 2 depending on whether the table is system-versioned. After this patch, the FTS_DOC_ID index must be recreated for existing system-versioned tables.
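For illustration (hypothetical table; the index itself is built internally by InnoDB), a versioned table with a fulltext index such as

  create table t1 (x int, t text, fulltext (t))
  with system versioning engine=innodb;

now gets a hidden index equivalent to UNIQUE(FTS_DOC_ID, row_end) instead of UNIQUE(FTS_DOC_ID).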
If you see this message in the error log or server warnings: "InnoDB: Table db/t1 contains 2 indexes inside InnoDB, which is different from the number of indexes 1 defined in the MariaDB", use this command to fix the table:

  ALTER TABLE db.t1 FORCE;

2. Fix duplicate history for secondary unique indexes, like MDEV-23644 did for the clustered index (932ec586aad). If an existing history row conflicts with the row currently being inserted, we check in row_ins_scan_sec_index_for_duplicate() whether that history row was inserted as part of the current transaction. In that case we return DB_FOREIGN_DUPLICATE_KEY to indicate that the new history row is not needed and should be silently skipped.

3. Some parts of MDEV-21138 (7410ff436e9) are reverted. Skipping the FTS_DOC_ID index for history rows caused problems for the purge system. This is now fixed differently, by point 2.

4. wait_all_purged.inc checks that non-history rows were not affected, i.e. they are deleted and purged correctly.

Additional FTS fixes:

fts_init_get_doc_id(): exclude history rows from the max_doc_id calculation. The fts_init_get_doc_id() callback is used only for crash recovery.

fts_add_doc_by_id(): set the maximum value for the row_end field.

fts_read_stopword(): the stopword table can be system-versioned too. We now read stopwords only from current data.

row_insert_for_mysql(): exclude history rows from doc_id validation.

row_merge_read_clustered_index(): exclude history rows from doc_id processing.

fts_load_user_stopword(): for a versioned table, retrieve the row_end field and skip history rows. For a non-versioned table we retrieve the 'value' field twice (just for uniformity).

FTS tests for System Versioning now include maybe_versioning.inc, which adds 3 combinations:

'vers' (for debug builds) sets sysvers_force and sysvers_hide. sysvers_force makes every created table system-versioned; sysvers_hide hides WITH SYSTEM VERSIONING in SHOW CREATE. Note: basic.test, stopword.test and versioning.test do not require a debug build for the 'vers' combination. This is controlled by $modify_create_table in maybe_versioning.inc: these tests add WITH SYSTEM VERSIONING explicitly, which allows testing the 'vers' combination on non-debug builds.

'vers_trx' is like 'vers' but sets sysvers_force_trx and sysvers_hide. It tests FTS with trx_id-based System Versioning.

'orig' works like before: no System Versioning is added and no debug build is required.

The upgrade/downgrade test for System Versioning is innodb_fts.versioning. It has 2 combinations: 'prepare' creates binaries in std_data (requires an old server and OLD_BINDIR) and also tests upgrade/downgrade against the old server; 'upgrade' tests upgrade against the binaries in std_data. A minimal scenario of the kind these tests exercise is sketched below.
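A minimal scenario of this kind (table name and data are illustrative, not from the patch):

  create table t1 (x int, t text, fulltext (t))
  with system versioning engine=innodb;
  insert into t1 values (1, 'word');
  update t1 set t = 'other' where x = 1;  -- creates a history row
  delete history from t1;                 -- the history row must also be
                                          -- removed from FTS_DOC_ID_INDEX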
Cleanups: Removed innodb-fts-stopword.test as it duplicates stopword.test --- storage/innobase/dict/dict0mem.cc | 14 +++ storage/innobase/fts/fts0fts.cc | 147 ++++++++++++++++++++++++------ storage/innobase/handler/ha_innodb.cc | 48 +++++++--- storage/innobase/handler/handler0alter.cc | 32 +++++-- storage/innobase/include/dict0mem.h | 3 + storage/innobase/include/fts0fts.h | 17 ++-- storage/innobase/include/row0ins.h | 1 - storage/innobase/include/row0upd.h | 16 ++-- storage/innobase/row/row0ins.cc | 107 ++++++++++++++++------ storage/innobase/row/row0merge.cc | 33 ++++--- storage/innobase/row/row0mysql.cc | 9 +- storage/innobase/row/row0upd.cc | 13 +++ 12 files changed, 326 insertions(+), 114 deletions(-) (limited to 'storage/innobase') diff --git a/storage/innobase/dict/dict0mem.cc b/storage/innobase/dict/dict0mem.cc index 265642ef210..6366efd2248 100644 --- a/storage/innobase/dict/dict0mem.cc +++ b/storage/innobase/dict/dict0mem.cc @@ -1531,6 +1531,20 @@ dict_index_t::vers_history_row( { ut_ad(!is_primary()); + /* + Get row_end from clustered index + + TODO (optimization): row_end can be taken from unique secondary index + as well. For that dict_index_t::vers_end member should be added and + updated at index init (dict_index_build_internal_non_clust()). + + Test case: + + create or replace table t1 (x int unique, y int unique, + foreign key r (y) references t1 (x)) + with system versioning engine innodb; + insert into t1 values (1, 1); + */ bool error = false; mem_heap_t* heap = NULL; dict_index_t* clust_index = NULL; diff --git a/storage/innobase/fts/fts0fts.cc b/storage/innobase/fts/fts0fts.cc index dc8d529d79c..6a44b8882a7 100644 --- a/storage/innobase/fts/fts0fts.cc +++ b/storage/innobase/fts/fts0fts.cc @@ -398,8 +398,10 @@ fts_read_stopword( fts_string_t str; mem_heap_t* heap; ib_rbt_bound_t parent; + dict_table_t* table; sel_node = static_cast(row); + table = sel_node->table_list->table; stopword_info = static_cast(user_arg); stop_words = stopword_info->cached_stopword; @@ -414,6 +416,27 @@ fts_read_stopword( str.f_n_char = 0; str.f_str = static_cast(dfield_get_data(dfield)); str.f_len = dfield_get_len(dfield); + exp = que_node_get_next(exp); + ut_ad(exp); + + if (table->versioned()) { + dfield = que_node_get_val(exp); + ut_ad(dfield_get_type(dfield)->vers_sys_end()); + void* data = dfield_get_data(dfield); + ulint len = dfield_get_len(dfield); + if (table->versioned_by_id()) { + ut_ad(len == sizeof trx_id_max_bytes); + if (0 != memcmp(data, trx_id_max_bytes, len)) { + return true; + } + } else { + ut_ad(len == sizeof timestamp_max_bytes); + if (0 != memcmp(data, timestamp_max_bytes, len)) { + return true; + } + } + } + ut_ad(!que_node_get_next(exp)); /* Only create new node if it is a value not already existed */ if (str.f_len != UNIV_SQL_NULL @@ -457,7 +480,9 @@ fts_load_user_stopword( /* Validate the user table existence in the right format */ bool ret= false; - stopword_info->charset = fts_valid_stopword_table(stopword_table_name); + const char* row_end; + stopword_info->charset = fts_valid_stopword_table(stopword_table_name, + &row_end); if (!stopword_info->charset) { cleanup: if (!fts->dict_locked) { @@ -482,6 +507,7 @@ cleanup: pars_info_t* info = pars_info_create(); pars_info_bind_id(info, "table_stopword", stopword_table_name); + pars_info_bind_id(info, "row_end", row_end); pars_info_bind_function(info, "my_func", fts_read_stopword, stopword_info); @@ -490,7 +516,7 @@ cleanup: info, "DECLARE FUNCTION my_func;\n" "DECLARE CURSOR c IS" - " SELECT value" + " SELECT value, 
$row_end" " FROM $table_stopword;\n" "BEGIN\n" "\n" @@ -1925,9 +1951,16 @@ fts_create_common_tables( goto func_exit; } - index = dict_mem_index_create(table, FTS_DOC_ID_INDEX_NAME, - DICT_UNIQUE, 1); - dict_mem_index_add_field(index, FTS_DOC_ID_COL_NAME, 0); + if (table->versioned()) { + index = dict_mem_index_create(table, FTS_DOC_ID_INDEX_NAME, + DICT_UNIQUE, 2); + dict_mem_index_add_field(index, FTS_DOC_ID_COL_NAME, 0); + dict_mem_index_add_field(index, table->cols[table->vers_end].name(*table), 0); + } else { + index = dict_mem_index_create(table, FTS_DOC_ID_INDEX_NAME, + DICT_UNIQUE, 1); + dict_mem_index_add_field(index, FTS_DOC_ID_COL_NAME, 0); + } op = trx_get_dict_operation(trx); @@ -3427,7 +3460,8 @@ fts_add_doc_by_id( /* Search based on Doc ID. Here, we'll need to consider the case when there is no primary index on Doc ID */ - tuple = dtuple_create(heap, 1); + const ulint n_uniq = table->fts_n_uniq(); + tuple = dtuple_create(heap, n_uniq); dfield = dtuple_get_nth_field(tuple, 0); dfield->type.mtype = DATA_INT; dfield->type.prtype = DATA_NOT_NULL | DATA_UNSIGNED | DATA_BINARY_TYPE; @@ -3435,12 +3469,27 @@ fts_add_doc_by_id( mach_write_to_8((byte*) &temp_doc_id, doc_id); dfield_set_data(dfield, &temp_doc_id, sizeof(temp_doc_id)); + if (n_uniq == 2) { + ut_ad(table->versioned()); + ut_ad(fts_id_index->fields[1].col->vers_sys_end()); + dfield = dtuple_get_nth_field(tuple, 1); + dfield->type.mtype = fts_id_index->fields[1].col->mtype; + dfield->type.prtype = fts_id_index->fields[1].col->prtype; + if (table->versioned_by_id()) { + dfield_set_data(dfield, trx_id_max_bytes, + sizeof(trx_id_max_bytes)); + } else { + dfield_set_data(dfield, timestamp_max_bytes, + sizeof(timestamp_max_bytes)); + } + } + btr_pcur_open_with_no_init( fts_id_index, tuple, PAGE_CUR_LE, BTR_SEARCH_LEAF, &pcur, &mtr); /* If we have a match, add the data to doc structure */ - if (btr_pcur_get_low_match(&pcur) == 1) { + if (btr_pcur_get_low_match(&pcur) == n_uniq) { const rec_t* rec; btr_pcur_t* doc_pcur; const rec_t* clust_rec; @@ -3637,20 +3686,34 @@ fts_get_max_doc_id( if (!page_is_empty(btr_pcur_get_page(&pcur))) { const rec_t* rec = NULL; - rec_offs offsets_[REC_OFFS_NORMAL_SIZE]; - rec_offs* offsets = offsets_; - mem_heap_t* heap = NULL; - ulint len; - const void* data; - - rec_offs_init(offsets_); + const ulint doc_id_len= 8; do { rec = btr_pcur_get_rec(&pcur); - if (page_rec_is_user_rec(rec)) { + if (!page_rec_is_user_rec(rec)) { + continue; + } + + if (index->n_uniq == 1) { break; } + + ut_ad(table->versioned()); + ut_ad(index->n_uniq == 2); + + const byte *data = rec + doc_id_len; + if (table->versioned_by_id()) { + if (0 == memcmp(data, trx_id_max_bytes, + sizeof trx_id_max_bytes)) { + break; + } + } else { + if (0 == memcmp(data, timestamp_max_bytes, + sizeof timestamp_max_bytes)) { + break; + } + } } while (btr_pcur_move_to_prev(&pcur, &mtr)); if (!rec) { @@ -3658,14 +3721,8 @@ fts_get_max_doc_id( } ut_ad(!rec_is_metadata(rec, index)); - offsets = rec_get_offsets( - rec, index, offsets, index->n_core_fields, - ULINT_UNDEFINED, &heap); - data = rec_get_nth_field(rec, offsets, 0, &len); - - doc_id = static_cast(fts_read_doc_id( - static_cast(data))); + doc_id = fts_read_doc_id(rec); } func_exit: @@ -5967,12 +6024,16 @@ void fts_drop_orphaned_tables() /**********************************************************************//** Check whether user supplied stopword table is of the right format. Caller is responsible to hold dictionary locks. 
-@return the stopword column charset if qualifies */ +@param stopword_table_name table name +@param row_end name of the system-versioning end column, or "value" +@return the stopword column charset +@retval NULL if the table does not exist or qualify */ CHARSET_INFO* fts_valid_stopword_table( /*=====================*/ - const char* stopword_table_name) /*!< in: Stopword table + const char* stopword_table_name, /*!< in: Stopword table name */ + const char** row_end) /* row_end value of system-versioned table */ { dict_table_t* table; dict_col_t* col = NULL; @@ -6014,6 +6075,13 @@ fts_valid_stopword_table( } ut_ad(col); + ut_ad(!table->versioned() || col->ind != table->vers_end); + + if (row_end) { + *row_end = table->versioned() + ? dict_table_get_col_name(table, table->vers_end) + : "value"; /* for fts_load_user_stopword() */ + } return(fts_get_charset(col->prtype)); } @@ -6149,18 +6217,20 @@ cleanup: /**********************************************************************//** Callback function when we initialize the FTS at the start up time. It recovers the maximum Doc IDs presented in the current table. +Tested by innodb_fts.crash_recovery @return: always returns TRUE */ static ibool fts_init_get_doc_id( /*================*/ void* row, /*!< in: sel_node_t* */ - void* user_arg) /*!< in: fts cache */ + void* user_arg) /*!< in: table with fts */ { doc_id_t doc_id = FTS_NULL_DOC_ID; sel_node_t* node = static_cast(row); que_node_t* exp = node->select_list; - fts_cache_t* cache = static_cast(user_arg); + dict_table_t* table = static_cast(user_arg); + fts_cache_t* cache = table->fts->cache; ut_ad(ib_vector_is_empty(cache->get_docs)); @@ -6175,6 +6245,29 @@ fts_init_get_doc_id( doc_id = static_cast(mach_read_from_8( static_cast(data))); + exp = que_node_get_next(que_node_get_next(exp)); + if (exp) { + ut_ad(table->versioned()); + dfield = que_node_get_val(exp); + type = dfield_get_type(dfield); + ut_ad(type->vers_sys_end()); + data = dfield_get_data(dfield); + ulint len = dfield_get_len(dfield); + if (table->versioned_by_id()) { + ut_ad(len == sizeof trx_id_max_bytes); + if (0 != memcmp(data, trx_id_max_bytes, len)) { + return true; + } + } else { + ut_ad(len == sizeof timestamp_max_bytes); + if (0 != memcmp(data, timestamp_max_bytes, len)) { + return true; + } + } + ut_ad(!(exp = que_node_get_next(exp))); + } + ut_ad(!exp); + if (doc_id >= cache->next_doc_id) { cache->next_doc_id = doc_id + 1; } @@ -6340,7 +6433,7 @@ fts_init_index( fts_doc_fetch_by_doc_id(NULL, start_doc, index, FTS_FETCH_DOC_BY_ID_LARGE, - fts_init_get_doc_id, cache); + fts_init_get_doc_id, table); } else { if (table->fts->cache->stopword_info.status & STOPWORD_NOT_INIT) { diff --git a/storage/innobase/handler/ha_innodb.cc b/storage/innobase/handler/ha_innodb.cc index a1f58091a90..58b2068b56f 100644 --- a/storage/innobase/handler/ha_innodb.cc +++ b/storage/innobase/handler/ha_innodb.cc @@ -8967,6 +8967,9 @@ ha_innobase::update_row( innobase_srv_conc_enter_innodb(m_prebuilt); + if (m_prebuilt->upd_node->is_delete) { + trx->fts_next_doc_id = 0; + } error = row_update_for_mysql(m_prebuilt); if (error == DB_SUCCESS && vers_ins_row @@ -9084,6 +9087,7 @@ ha_innobase::delete_row( && trx->id != table->vers_start_id() ? VERSIONED_DELETE : PLAIN_DELETE; + trx->fts_next_doc_id = 0; innobase_srv_conc_enter_innodb(m_prebuilt); @@ -10079,9 +10083,12 @@ ha_innobase::ft_init_ext( /*****************************************************************//** Set up search tuple for a query through FTS_DOC_ID_INDEX on supplied Doc ID. 
This is used by MySQL to retrieve the documents -once the search result (Doc IDs) is available */ +once the search result (Doc IDs) is available + +@return DB_SUCCESS or DB_INDEX_CORRUPT +*/ static -void +dberr_t innobase_fts_create_doc_id_key( /*===========================*/ dtuple_t* tuple, /* in/out: m_prebuilt->search_tuple */ @@ -10093,8 +10100,10 @@ innobase_fts_create_doc_id_key( { doc_id_t temp_doc_id; dfield_t* dfield = dtuple_get_nth_field(tuple, 0); + const ulint n_uniq = index->table->fts_n_uniq(); - ut_a(dict_index_get_n_unique(index) == 1); + if (dict_index_get_n_unique(index) != n_uniq) + return DB_INDEX_CORRUPT; dtuple_set_n_fields(tuple, index->n_fields); dict_index_copy_types(tuple, index, index->n_fields); @@ -10112,12 +10121,25 @@ innobase_fts_create_doc_id_key( *doc_id = temp_doc_id; dfield_set_data(dfield, doc_id, sizeof(*doc_id)); - dtuple_set_n_fields_cmp(tuple, 1); + if (n_uniq == 2) { + ut_ad(index->table->versioned()); + dfield = dtuple_get_nth_field(tuple, 1); + if (index->table->versioned_by_id()) { + dfield_set_data(dfield, trx_id_max_bytes, + sizeof(trx_id_max_bytes)); + } else { + dfield_set_data(dfield, timestamp_max_bytes, + sizeof(timestamp_max_bytes)); + } + } + + dtuple_set_n_fields_cmp(tuple, n_uniq); - for (ulint i = 1; i < index->n_fields; i++) { + for (ulint i = n_uniq; i < index->n_fields; i++) { dfield = dtuple_get_nth_field(tuple, i); dfield_set_null(dfield); } + return DB_SUCCESS; } /**********************************************************************//** @@ -10199,14 +10221,18 @@ next_record: /* We pass a pointer of search_doc_id because it will be converted to storage byte order used in the search tuple. */ - innobase_fts_create_doc_id_key(tuple, index, &search_doc_id); + dberr_t ret = innobase_fts_create_doc_id_key( + tuple, index, &search_doc_id); - innobase_srv_conc_enter_innodb(m_prebuilt); + if (ret == DB_SUCCESS) { + innobase_srv_conc_enter_innodb(m_prebuilt); - dberr_t ret = row_search_for_mysql( - (byte*) buf, PAGE_CUR_GE, m_prebuilt, ROW_SEL_EXACT, 0); + ret = row_search_for_mysql( + (byte*) buf, PAGE_CUR_GE, m_prebuilt, + ROW_SEL_EXACT, 0); - innobase_srv_conc_exit_innodb(m_prebuilt); + innobase_srv_conc_exit_innodb(m_prebuilt); + } int error; @@ -17537,7 +17563,7 @@ innodb_stopword_table_validate( /* Validate the stopword table's (if supplied) existence and of the right format */ int ret = stopword_table_name && !fts_valid_stopword_table( - stopword_table_name); + stopword_table_name, NULL); row_mysql_unlock_data_dictionary(trx); diff --git a/storage/innobase/handler/handler0alter.cc b/storage/innobase/handler/handler0alter.cc index 19c90fed74c..fdfc347f8db 100644 --- a/storage/innobase/handler/handler0alter.cc +++ b/storage/innobase/handler/handler0alter.cc @@ -1341,6 +1341,11 @@ ha_innobase::check_if_supported_inplace_alter( < dict_table_get_n_user_cols(m_prebuilt->table))); if (fulltext_indexes && m_prebuilt->table->fts) { + /* FTS index of versioned table has row_end, need rebuild */ + if (table->versioned() != altered_table->versioned()) { + need_rebuild= true; + } + /* FULLTEXT indexes are supposed to remain. */ /* Disallow DROP INDEX FTS_DOC_ID_INDEX */ @@ -2762,6 +2767,8 @@ innobase_fts_check_doc_id_index( /* Check if a unique index with the name of FTS_DOC_ID_INDEX_NAME is being created. */ + const ulint fts_n_uniq= altered_table->versioned() ? 
2 : 1; + for (uint i = 0; i < altered_table->s->keys; i++) { const KEY& key = altered_table->key_info[i]; @@ -2771,7 +2778,7 @@ innobase_fts_check_doc_id_index( } if ((key.flags & HA_NOSAME) - && key.user_defined_key_parts == 1 + && key.user_defined_key_parts == fts_n_uniq && !strcmp(key.name.str, FTS_DOC_ID_INDEX_NAME) && !strcmp(key.key_part[0].field->field_name.str, FTS_DOC_ID_COL_NAME)) { @@ -2801,7 +2808,7 @@ innobase_fts_check_doc_id_index( } if (!dict_index_is_unique(index) - || dict_index_get_n_unique(index) > 1 + || dict_index_get_n_unique(index) != table->fts_n_uniq() || strcmp(index->name, FTS_DOC_ID_INDEX_NAME)) { return(FTS_INCORRECT_DOC_ID_INDEX); } @@ -2842,6 +2849,7 @@ innobase_fts_check_doc_id_index_in_def( { /* Check whether there is a "FTS_DOC_ID_INDEX" in the to be built index list */ + const uint fts_n_uniq= key_info->table->versioned() ? 2 : 1; for (ulint j = 0; j < n_key; j++) { const KEY* key = &key_info[j]; @@ -2852,7 +2860,7 @@ innobase_fts_check_doc_id_index_in_def( /* Do a check on FTS DOC ID_INDEX, it must be unique, named as "FTS_DOC_ID_INDEX" and on column "FTS_DOC_ID" */ if (!(key->flags & HA_NOSAME) - || key->user_defined_key_parts != 1 + || key->user_defined_key_parts != fts_n_uniq || strcmp(key->name.str, FTS_DOC_ID_INDEX_NAME) || strcmp(key->key_part[0].field->field_name.str, FTS_DOC_ID_COL_NAME)) { @@ -3050,13 +3058,21 @@ created_clustered: if (add_fts_doc_idx) { index_def_t* index = indexdef++; + uint nfields = 1; + if (altered_table->versioned()) + ++nfields; index->fields = static_cast( - mem_heap_alloc(heap, sizeof *index->fields)); - index->n_fields = 1; - index->fields->col_no = fts_doc_id_col; - index->fields->prefix_len = 0; - index->fields->is_v_col = false; + mem_heap_alloc(heap, sizeof(*index->fields) * nfields)); + index->n_fields = nfields; + index->fields[0].col_no = fts_doc_id_col; + index->fields[0].prefix_len = 0; + index->fields[0].is_v_col = false; + if (nfields == 2) { + index->fields[1].col_no = altered_table->s->row_end_field; + index->fields[1].prefix_len = 0; + index->fields[1].is_v_col = false; + } index->ind_type = DICT_UNIQUE; ut_ad(!rebuild || !add_fts_doc_id diff --git a/storage/innobase/include/dict0mem.h b/storage/innobase/include/dict0mem.h index bf7d9931bed..82d0b70b17c 100644 --- a/storage/innobase/include/dict0mem.h +++ b/storage/innobase/include/dict0mem.h @@ -2120,6 +2120,9 @@ public: or mysql/innodb_index_stats. @return true if the table name is same as stats table */ bool is_stats_table() const; + + /** @return number of unique columns in FTS_DOC_ID index */ + unsigned fts_n_uniq() const { return versioned() ? 2 : 1; } }; inline void dict_index_t::set_modified(mtr_t& mtr) const diff --git a/storage/innobase/include/fts0fts.h b/storage/innobase/include/fts0fts.h index 326734c84c9..2c75fdf66e7 100644 --- a/storage/innobase/include/fts0fts.h +++ b/storage/innobase/include/fts0fts.h @@ -832,15 +832,14 @@ fts_get_max_doc_id( /*===============*/ dict_table_t* table); /*!< in: user table */ -/******************************************************************//** -Check whether user supplied stopword table exists and is of -the right format. -@return the stopword column charset if qualifies */ -CHARSET_INFO* -fts_valid_stopword_table( -/*=====================*/ - const char* stopword_table_name); /*!< in: Stopword table - name */ +/** Check whether a stopword table is in the right format. 
+@param stopword_table_name table name +@param row_end name of the system-versioning end column, or "value" +@return the stopword column charset +@retval NULL if the table does not exist or qualify */ +CHARSET_INFO *fts_valid_stopword_table(const char *stopword_table_name, + const char **row_end= NULL); + /****************************************************************//** This function loads specified stopword into FTS cache @return true if success */ diff --git a/storage/innobase/include/row0ins.h b/storage/innobase/include/row0ins.h index 9a16394a052..34427dc6dc7 100644 --- a/storage/innobase/include/row0ins.h +++ b/storage/innobase/include/row0ins.h @@ -206,7 +206,6 @@ struct ins_node_t if this is NULL, entry list should be created and buffers for sys fields in row allocated */ void vers_update_end(row_prebuilt_t *prebuilt, bool history_row); - bool vers_history_row() const; /* true if 'row' is historical */ }; /** Create an insert object. diff --git a/storage/innobase/include/row0upd.h b/storage/innobase/include/row0upd.h index 9721e975a0a..8fa93467490 100644 --- a/storage/innobase/include/row0upd.h +++ b/storage/innobase/include/row0upd.h @@ -617,17 +617,13 @@ public: void vers_make_update(const trx_t *trx) { vers_update_fields(trx, table->vers_start); - } + } - /** Only set row_end = CURRENT_TIMESTAMP/trx->id. - Do not touch other fields at all. - @param[in] trx transaction */ - void vers_make_delete(const trx_t *trx) - { - update->n_fields = 0; - is_delete = VERSIONED_DELETE; - vers_update_fields(trx, table->vers_end); - } + /** Prepare update vector for versioned delete. + Set row_end to CURRENT_TIMESTAMP or trx->id. + Initialize fts_next_doc_id for versioned delete. + @param[in] trx transaction */ + void vers_make_delete(trx_t *trx); }; #define UPD_NODE_MAGIC_N 1579975 diff --git a/storage/innobase/row/row0ins.cc b/storage/innobase/row/row0ins.cc index 2683ad8251f..f4302a1eca3 100644 --- a/storage/innobase/row/row0ins.cc +++ b/storage/innobase/row/row0ins.cc @@ -2059,6 +2059,65 @@ row_ins_dupl_error_with_rec( return(!rec_get_deleted_flag(rec, rec_offs_comp(offsets))); } +/** @return true if history row was inserted by this transaction + (row TRX_ID is the same as current TRX_ID). 
*/ +static +dberr_t vers_row_same_trx(dict_index_t* index, const rec_t* rec, + que_thr_t* thr, bool *same_trx) +{ + mtr_t mtr; + dberr_t ret= DB_SUCCESS; + ulint trx_id_len; + const byte *trx_id_bytes; + trx_id_t trx_id; + dict_index_t *clust_index= dict_table_get_first_index(index->table); + ut_ad(index != clust_index); + + mtr.start(); + + rec_t *clust_rec= + row_get_clust_rec(BTR_SEARCH_LEAF, rec, index, &clust_index, &mtr); + rec_offs offsets_[REC_OFFS_NORMAL_SIZE]; + rec_offs *clust_offs= offsets_; + rec_offs_init(offsets_); + mem_heap_t *heap= NULL; + + if (clust_rec) + { + clust_offs= + rec_get_offsets(clust_rec, clust_index, clust_offs, + clust_index->n_core_fields, ULINT_UNDEFINED, &heap); + if (!clust_index->vers_history_row(clust_rec, clust_offs)) + { + *same_trx= false; + goto end; + } + } + else + { + ib::error() << "foreign constraints: secondary index " << index->name << + " of table " << index->table->name << " is out of sync"; + ut_ad("secondary index is out of sync" == 0); + ret= DB_TABLE_CORRUPT; + goto end; + } + + trx_id_bytes= rec_get_nth_field(clust_rec, clust_offs, + clust_index->n_uniq, &trx_id_len); + ut_ad(trx_id_len == DATA_TRX_ID_LEN); + + trx_id= trx_read_trx_id(trx_id_bytes); + + if (UNIV_LIKELY_NULL(heap)) + mem_heap_free(heap); + + *same_trx= thr_get_trx(thr)->id == trx_id; + +end: + mtr.commit(); + return ret; +} + /***************************************************************//** Scans a unique non-clustered index at a given index entry to determine whether a uniqueness violation has occurred for the key value of the entry. @@ -2082,6 +2141,8 @@ row_ins_scan_sec_index_for_duplicate( ulint n_fields_cmp; btr_pcur_t pcur; dberr_t err = DB_SUCCESS; + dberr_t err2; + bool same_trx; ulint allow_duplicates; rec_offs offsets_[REC_OFFS_SEC_INDEX_SIZE]; rec_offs* offsets = offsets_; @@ -2175,10 +2236,25 @@ row_ins_scan_sec_index_for_duplicate( if (cmp == 0) { if (row_ins_dupl_error_with_rec(rec, entry, index, offsets)) { + err = DB_DUPLICATE_KEY; thr_get_trx(thr)->error_info = index; + if (index->table->versioned()) { + err2 = vers_row_same_trx(index, rec, + thr, &same_trx); + if (err2 != DB_SUCCESS) { + err = err2; + goto end_scan; + } + + if (same_trx) { + err = DB_FOREIGN_DUPLICATE_KEY; + goto end_scan; + } + } + /* If the duplicate is on hidden FTS_DOC_ID, state so in the error log */ if (index == index->table->fts_doc_id_index @@ -3580,16 +3656,6 @@ row_ins_get_row_from_select( } } -inline -bool ins_node_t::vers_history_row() const -{ - if (!table->versioned()) - return false; - dfield_t* row_end = dtuple_get_nth_field(row, table->vers_end); - return row_end->vers_history_row(); -} - - /***********************************************************//** Inserts a row to a table. @return DB_SUCCESS if operation successfully completed, else error @@ -3628,31 +3694,12 @@ row_ins( ut_ad(node->state == INS_NODE_INSERT_ENTRIES); while (node->index != NULL) { - dict_index_t *index = node->index; - /* - We do not insert history rows into FTS_DOC_ID_INDEX because - it is unique by FTS_DOC_ID only and we do not want to add - row_end to unique key. Fulltext field works the way new - FTS_DOC_ID is created on every fulltext UPDATE, so holding only - FTS_DOC_ID for history is enough. 
- */ - const unsigned type = index->type; - if (index->type & DICT_FTS) { - } else if (!(type & DICT_UNIQUE) || index->n_uniq > 1 - || !node->vers_history_row()) { - + if (!(node->index->type & DICT_FTS)) { dberr_t err = row_ins_index_entry_step(node, thr); if (err != DB_SUCCESS) { DBUG_RETURN(err); } - } else { - /* Unique indexes with system versioning must contain - the version end column. The only exception is a hidden - FTS_DOC_ID_INDEX that InnoDB may create on a hidden or - user-created FTS_DOC_ID column. */ - ut_ad(!strcmp(index->name, FTS_DOC_ID_INDEX_NAME)); - ut_ad(!strcmp(index->fields[0].name, FTS_DOC_ID_COL_NAME)); } node->index = dict_table_get_next_index(node->index); diff --git a/storage/innobase/row/row0merge.cc b/storage/innobase/row/row0merge.cc index 7bd15177dad..415c93a9f6f 100644 --- a/storage/innobase/row/row0merge.cc +++ b/storage/innobase/row/row0merge.cc @@ -455,6 +455,7 @@ row_merge_buf_redundant_convert( @param[in] new_table new table @param[in,out] psort_info parallel sort info @param[in,out] row table row +@param[in] history_row row is historical in a system-versioned table @param[in] ext cache of externally stored column prefixes, or NULL @param[in,out] doc_id Doc ID if we are creating @@ -477,6 +478,7 @@ row_merge_buf_add( const dict_table_t* new_table, fts_psort_t* psort_info, dtuple_t* row, + const bool history_row, const row_ext_t* ext, doc_id_t* doc_id, mem_heap_t* conv_heap, @@ -540,7 +542,7 @@ error: : NULL; /* Process the Doc ID column */ - if (!v_col && *doc_id + if (!v_col && index->table->fts && (*doc_id || history_row) && col->ind == index->table->fts->doc_col) { fts_write_doc_id((byte*) &write_doc_id, *doc_id); @@ -592,7 +594,7 @@ error: /* Tokenize and process data for FTS */ - if (index->type & DICT_FTS) { + if (!history_row && (index->type & DICT_FTS)) { fts_doc_item_t* doc_item; byte* value; void* ptr; @@ -1705,6 +1707,7 @@ row_merge_read_clustered_index( char new_sys_trx_end[8]; byte any_autoinc_data[8] = {0}; bool vers_update_trt = false; + bool history_row = false; DBUG_ENTER("row_merge_read_clustered_index"); @@ -2132,6 +2135,12 @@ end_of_index: row_heap); ut_ad(row); + if (new_table->versioned()) { + const dfield_t* dfield = dtuple_get_nth_field( + row, new_table->vers_end); + history_row = dfield->vers_history_row(); + } + for (ulint i = 0; i < n_nonnull; i++) { dfield_t* field = &row->fields[nonnull[i]]; @@ -2161,7 +2170,7 @@ end_of_index: } /* Get the next Doc ID */ - if (add_doc_id) { + if (add_doc_id && !history_row) { doc_id++; } else { doc_id = 0; @@ -2197,13 +2206,6 @@ end_of_index: ut_ad(add_autoinc < dict_table_get_n_user_cols(new_table)); - bool history_row = false; - if (new_table->versioned()) { - const dfield_t* dfield = dtuple_get_nth_field( - row, new_table->vers_end); - history_row = dfield->vers_history_row(); - } - dfield_t* dfield = dtuple_get_nth_field(row, add_autoinc); @@ -2327,8 +2329,8 @@ write_buffers: if (UNIV_LIKELY (row && (rows_added = row_merge_buf_add( buf, fts_index, old_table, new_table, - psort_info, row, ext, &doc_id, - conv_heap, &err, + psort_info, row, history_row, ext, + &doc_id, conv_heap, &err, &v_heap, eval_table, trx)))) { /* Set the page flush observer for the @@ -2660,9 +2662,10 @@ write_buffers: if (UNIV_UNLIKELY (!(rows_added = row_merge_buf_add( buf, fts_index, old_table, - new_table, psort_info, row, ext, - &doc_id, conv_heap, - &err, &v_heap, eval_table, trx)))) { + new_table, psort_info, row, + history_row, ext, &doc_id, + conv_heap, &err, &v_heap, + eval_table, trx)))) { /* An empty 
buffer should have enough room for at least one record. */ ut_ad(err == DB_COMPUTE_VALUE_FAILED diff --git a/storage/innobase/row/row0mysql.cc b/storage/innobase/row/row0mysql.cc index 8eeaa62b29e..dfc5393a8b3 100644 --- a/storage/innobase/row/row0mysql.cc +++ b/storage/innobase/row/row0mysql.cc @@ -1449,7 +1449,10 @@ error_exit: return(err); } - if (dict_table_has_fts_index(table)) { + if (dict_table_has_fts_index(table) + && (!table->versioned() + || !node->row->fields[table->vers_end].vers_history_row())) { + doc_id_t doc_id; /* Extract the doc id from the hidden FTS column */ @@ -1663,7 +1666,7 @@ row_fts_update_or_delete( ut_a(dict_table_has_fts_index(prebuilt->table)); /* Deletes are simple; get them out of the way first. */ - if (node->is_delete == PLAIN_DELETE) { + if (node->is_delete) { /* A delete affects all FTS indexes, so we pass NULL */ fts_trx_add_op(trx, table, old_doc_id, FTS_DELETE, NULL); } else { @@ -2228,7 +2231,7 @@ row_update_cascade_for_mysql( return(DB_FOREIGN_EXCEED_MAX_CASCADE); } - const trx_t* trx = thr_get_trx(thr); + trx_t* trx = thr_get_trx(thr); if (table->versioned()) { if (node->is_delete == PLAIN_DELETE) { diff --git a/storage/innobase/row/row0upd.cc b/storage/innobase/row/row0upd.cc index 599aece23f5..ef780e7ccb0 100644 --- a/storage/innobase/row/row0upd.cc +++ b/storage/innobase/row/row0upd.cc @@ -3545,3 +3545,16 @@ skip_append: } } } + + +/** Prepare update vector for versioned delete. +Set row_end to CURRENT_TIMESTAMP or trx->id. +Initialize fts_next_doc_id for versioned delete. +@param[in] trx transaction */ +void upd_node_t::vers_make_delete(trx_t* trx) +{ + update->n_fields= 0; + is_delete= VERSIONED_DELETE; + vers_update_fields(trx, table->vers_end); + trx->fts_next_doc_id= table->fts ? UINT64_UNDEFINED : 0; +} -- cgit v1.2.1 From 72e2d1d2201c2da23777d8fb89e078475e0c1371 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Marko=20M=C3=A4kel=C3=A4?= Date: Tue, 27 Dec 2022 00:02:02 +0300 Subject: MDEV-25004 Refactorings * Avoid some pessimization * Slightly smaller upgrade dataset * Simplify vers_row_same_trx() and its caller --- storage/innobase/row/row0ins.cc | 91 +++++++++++++++++---------------------- storage/innobase/row/row0merge.cc | 28 ++++++------ 2 files changed, 53 insertions(+), 66 deletions(-) (limited to 'storage/innobase') diff --git a/storage/innobase/row/row0ins.cc b/storage/innobase/row/row0ins.cc index f4302a1eca3..f9fbefc9bc0 100644 --- a/storage/innobase/row/row0ins.cc +++ b/storage/innobase/row/row0ins.cc @@ -2059,39 +2059,48 @@ row_ins_dupl_error_with_rec( return(!rec_get_deleted_flag(rec, rec_offs_comp(offsets))); } -/** @return true if history row was inserted by this transaction - (row TRX_ID is the same as current TRX_ID). */ -static -dberr_t vers_row_same_trx(dict_index_t* index, const rec_t* rec, - que_thr_t* thr, bool *same_trx) +/** Determine whether a history row was inserted by this transaction +(row TRX_ID is the same as current TRX_ID). 
+@param index secondary index +@param rec secondary index record +@param trx transaction +@return error code +@retval DB_SUCCESS on success +@retval DB_FOREIGN_DUPLICATE_KEY if a history row was inserted by trx */ +static dberr_t vers_row_same_trx(dict_index_t* index, const rec_t* rec, + const trx_t& trx) { mtr_t mtr; dberr_t ret= DB_SUCCESS; - ulint trx_id_len; - const byte *trx_id_bytes; - trx_id_t trx_id; dict_index_t *clust_index= dict_table_get_first_index(index->table); ut_ad(index != clust_index); mtr.start(); - rec_t *clust_rec= - row_get_clust_rec(BTR_SEARCH_LEAF, rec, index, &clust_index, &mtr); - rec_offs offsets_[REC_OFFS_NORMAL_SIZE]; - rec_offs *clust_offs= offsets_; - rec_offs_init(offsets_); - mem_heap_t *heap= NULL; - - if (clust_rec) + if (const rec_t *clust_rec= + row_get_clust_rec(BTR_SEARCH_LEAF, rec, index, &clust_index, &mtr)) { + rec_offs offsets_[REC_OFFS_NORMAL_SIZE]; + rec_offs *clust_offs= offsets_; + rec_offs_init(offsets_); + mem_heap_t *heap= NULL; + clust_offs= - rec_get_offsets(clust_rec, clust_index, clust_offs, - clust_index->n_core_fields, ULINT_UNDEFINED, &heap); - if (!clust_index->vers_history_row(clust_rec, clust_offs)) + rec_get_offsets(clust_rec, clust_index, clust_offs, + clust_index->n_core_fields, ULINT_UNDEFINED, &heap); + if (clust_index->vers_history_row(clust_rec, clust_offs)) { - *same_trx= false; - goto end; + ulint trx_id_len; + const byte *trx_id= rec_get_nth_field(clust_rec, clust_offs, + clust_index->n_uniq, &trx_id_len); + ut_ad(trx_id_len == DATA_TRX_ID_LEN); + + if (trx.id == trx_read_trx_id(trx_id)) + ret= DB_FOREIGN_DUPLICATE_KEY; } + + if (UNIV_LIKELY_NULL(heap)) + mem_heap_free(heap); } else { @@ -2099,21 +2108,8 @@ dberr_t vers_row_same_trx(dict_index_t* index, const rec_t* rec, " of table " << index->table->name << " is out of sync"; ut_ad("secondary index is out of sync" == 0); ret= DB_TABLE_CORRUPT; - goto end; } - trx_id_bytes= rec_get_nth_field(clust_rec, clust_offs, - clust_index->n_uniq, &trx_id_len); - ut_ad(trx_id_len == DATA_TRX_ID_LEN); - - trx_id= trx_read_trx_id(trx_id_bytes); - - if (UNIV_LIKELY_NULL(heap)) - mem_heap_free(heap); - - *same_trx= thr_get_trx(thr)->id == trx_id; - -end: mtr.commit(); return ret; } @@ -2141,9 +2137,6 @@ row_ins_scan_sec_index_for_duplicate( ulint n_fields_cmp; btr_pcur_t pcur; dberr_t err = DB_SUCCESS; - dberr_t err2; - bool same_trx; - ulint allow_duplicates; rec_offs offsets_[REC_OFFS_SEC_INDEX_SIZE]; rec_offs* offsets = offsets_; DBUG_ENTER("row_ins_scan_sec_index_for_duplicate"); @@ -2181,7 +2174,7 @@ row_ins_scan_sec_index_for_duplicate( : BTR_SEARCH_LEAF, &pcur, mtr); - allow_duplicates = thr_get_trx(thr)->duplicates; + trx_t* const trx = thr_get_trx(thr); /* Scan index records and check if there is a duplicate */ @@ -2202,7 +2195,7 @@ row_ins_scan_sec_index_for_duplicate( if (flags & BTR_NO_LOCKING_FLAG) { /* Set no locks when applying log in online table rebuild. 
*/ - } else if (allow_duplicates) { + } else if (trx->duplicates) { /* If the SQL-query will update or replace duplicate key we will take X-lock for @@ -2239,20 +2232,14 @@ row_ins_scan_sec_index_for_duplicate( err = DB_DUPLICATE_KEY; - thr_get_trx(thr)->error_info = index; - - if (index->table->versioned()) { - err2 = vers_row_same_trx(index, rec, - thr, &same_trx); - if (err2 != DB_SUCCESS) { - err = err2; - goto end_scan; - } + trx->error_info = index; - if (same_trx) { - err = DB_FOREIGN_DUPLICATE_KEY; - goto end_scan; - } + if (!index->table->versioned()) { + } else if (dberr_t e = + vers_row_same_trx(index, rec, + *trx)) { + err = e; + goto end_scan; } /* If the duplicate is on hidden FTS_DOC_ID, diff --git a/storage/innobase/row/row0merge.cc b/storage/innobase/row/row0merge.cc index 415c93a9f6f..89c10320365 100644 --- a/storage/innobase/row/row0merge.cc +++ b/storage/innobase/row/row0merge.cc @@ -455,9 +455,10 @@ row_merge_buf_redundant_convert( @param[in] new_table new table @param[in,out] psort_info parallel sort info @param[in,out] row table row -@param[in] history_row row is historical in a system-versioned table @param[in] ext cache of externally stored column prefixes, or NULL +@param[in] history_fts row is historical in a system-versioned table + on which a FTS_DOC_ID_INDEX(FTS_DOC_ID) exists @param[in,out] doc_id Doc ID if we are creating FTS index @param[in,out] conv_heap memory heap where to allocate data when @@ -478,8 +479,8 @@ row_merge_buf_add( const dict_table_t* new_table, fts_psort_t* psort_info, dtuple_t* row, - const bool history_row, const row_ext_t* ext, + const bool history_fts, doc_id_t* doc_id, mem_heap_t* conv_heap, dberr_t* err, @@ -542,7 +543,7 @@ error: : NULL; /* Process the Doc ID column */ - if (!v_col && index->table->fts && (*doc_id || history_row) + if (!v_col && (history_fts || *doc_id) && col->ind == index->table->fts->doc_col) { fts_write_doc_id((byte*) &write_doc_id, *doc_id); @@ -594,7 +595,7 @@ error: /* Tokenize and process data for FTS */ - if (!history_row && (index->type & DICT_FTS)) { + if (!history_fts && (index->type & DICT_FTS)) { fts_doc_item_t* doc_item; byte* value; void* ptr; @@ -1707,7 +1708,6 @@ row_merge_read_clustered_index( char new_sys_trx_end[8]; byte any_autoinc_data[8] = {0}; bool vers_update_trt = false; - bool history_row = false; DBUG_ENTER("row_merge_read_clustered_index"); @@ -1897,6 +1897,7 @@ row_merge_read_clustered_index( dtuple_t* row; row_ext_t* ext; page_cur_t* cur = btr_pcur_get_page_cur(&pcur); + bool history_row, history_fts = false; page_cur_move_to_next(cur); @@ -2135,11 +2136,10 @@ end_of_index: row_heap); ut_ad(row); - if (new_table->versioned()) { - const dfield_t* dfield = dtuple_get_nth_field( - row, new_table->vers_end); - history_row = dfield->vers_history_row(); - } + history_row = new_table->versioned() + && dtuple_get_nth_field(row, new_table->vers_end) + ->vers_history_row(); + history_fts = history_row && new_table->fts; for (ulint i = 0; i < n_nonnull; i++) { dfield_t* field = &row->fields[nonnull[i]]; @@ -2170,7 +2170,7 @@ end_of_index: } /* Get the next Doc ID */ - if (add_doc_id && !history_row) { + if (add_doc_id && !history_fts) { doc_id++; } else { doc_id = 0; @@ -2329,7 +2329,7 @@ write_buffers: if (UNIV_LIKELY (row && (rows_added = row_merge_buf_add( buf, fts_index, old_table, new_table, - psort_info, row, history_row, ext, + psort_info, row, ext, history_fts, &doc_id, conv_heap, &err, &v_heap, eval_table, trx)))) { @@ -2662,8 +2662,8 @@ write_buffers: if (UNIV_UNLIKELY (!(rows_added = 
row_merge_buf_add( buf, fts_index, old_table, - new_table, psort_info, row, - history_row, ext, &doc_id, + new_table, psort_info, + row, ext, history_fts, &doc_id, conv_heap, &err, &v_heap, eval_table, trx)))) { /* An empty buffer should have enough -- cgit v1.2.1