path: root/storage/innobase/dict
Diffstat (limited to 'storage/innobase/dict')
-rw-r--r--  storage/innobase/dict/dict0boot.cc      |  54
-rw-r--r--  storage/innobase/dict/dict0crea.cc      |  68
-rw-r--r--  storage/innobase/dict/dict0dict.cc      | 273
-rw-r--r--  storage/innobase/dict/dict0load.cc      |   5
-rw-r--r--  storage/innobase/dict/dict0mem.cc       | 358
-rw-r--r--  storage/innobase/dict/dict0stats.cc     |  61
-rw-r--r--  storage/innobase/dict/dict0stats_bg.cc  |   4
7 files changed, 281 insertions, 542 deletions
diff --git a/storage/innobase/dict/dict0boot.cc b/storage/innobase/dict/dict0boot.cc
index e5bf33593ed..2a80def7335 100644
--- a/storage/innobase/dict/dict0boot.cc
+++ b/storage/innobase/dict/dict0boot.cc
@@ -1,7 +1,7 @@
/*****************************************************************************
Copyright (c) 1996, 2017, Oracle and/or its affiliates. All Rights Reserved.
-Copyright (c) 2016, MariaDB Corporation.
+Copyright (c) 2016, 2018, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -47,7 +47,7 @@ dict_hdr_get(
dict_hdr_t* header;
block = buf_page_get(page_id_t(DICT_HDR_SPACE, DICT_HDR_PAGE_NO),
- univ_page_size, RW_X_LATCH, mtr);
+ 0, RW_X_LATCH, mtr);
header = DICT_HDR + buf_block_get_frame(block);
buf_block_dbg_add_level(block, SYNC_DICT_HEADER);
@@ -64,52 +64,14 @@ dict_hdr_get_new_id(
(not assigned if NULL) */
index_id_t* index_id, /*!< out: index id
(not assigned if NULL) */
- ulint* space_id, /*!< out: space id
+ ulint* space_id) /*!< out: space id
(not assigned if NULL) */
- const dict_table_t* table, /*!< in: table */
- bool disable_redo) /*!< in: if true and table
- object is NULL
- then disable-redo */
{
dict_hdr_t* dict_hdr;
ib_id_t id;
mtr_t mtr;
mtr_start(&mtr);
- if (table) {
- if (table->is_temporary()) {
- mtr.set_log_mode(MTR_LOG_NO_REDO);
- }
- } else if (disable_redo) {
- /* In non-read-only mode we need to ensure that space-id header
- page is written to disk else if page is removed from buffer
- cache and re-loaded it would assign temporary tablespace id
- to another tablespace.
- This is not a case with read-only mode as there is no new object
- that is created except temporary tablespace. */
- mtr.set_log_mode(srv_read_only_mode
- ? MTR_LOG_NONE : MTR_LOG_NO_REDO);
- }
-
- /* Server started and let's say space-id = x
- - table created with file-per-table
- - space-id = x + 1
- - crash
- Case 1: If it was redo logged then we know that it will be
- restored to x + 1
- Case 2: if not redo-logged
- Header will have the old space-id = x
- This is OK because on restart there is no object with
- space id = x + 1
- Case 3:
- space-id = x (on start)
- space-id = x+1 (temp-table allocation) - no redo logging
- space-id = x+2 (non-temp-table allocation), this get's
- redo logged.
- If there is a crash there will be only 2 entries
- x (original) and x+2 (new) and disk hdr will be updated
- to reflect x + 2 entry.
- We cannot allocate the same space id to different objects. */
dict_hdr = dict_hdr_get(&mtr);
if (table_id) {
@@ -212,7 +174,7 @@ dict_hdr_create(
/*--------------------------*/
root_page_no = btr_create(DICT_CLUSTERED | DICT_UNIQUE,
fil_system.sys_space, DICT_TABLES_ID,
- dict_ind_redundant, NULL, mtr);
+ dict_ind_redundant, mtr);
if (root_page_no == FIL_NULL) {
return(FALSE);
@@ -223,7 +185,7 @@ dict_hdr_create(
/*--------------------------*/
root_page_no = btr_create(DICT_UNIQUE,
fil_system.sys_space, DICT_TABLE_IDS_ID,
- dict_ind_redundant, NULL, mtr);
+ dict_ind_redundant, mtr);
if (root_page_no == FIL_NULL) {
return(FALSE);
@@ -234,7 +196,7 @@ dict_hdr_create(
/*--------------------------*/
root_page_no = btr_create(DICT_CLUSTERED | DICT_UNIQUE,
fil_system.sys_space, DICT_COLUMNS_ID,
- dict_ind_redundant, NULL, mtr);
+ dict_ind_redundant, mtr);
if (root_page_no == FIL_NULL) {
return(FALSE);
@@ -245,7 +207,7 @@ dict_hdr_create(
/*--------------------------*/
root_page_no = btr_create(DICT_CLUSTERED | DICT_UNIQUE,
fil_system.sys_space, DICT_INDEXES_ID,
- dict_ind_redundant, NULL, mtr);
+ dict_ind_redundant, mtr);
if (root_page_no == FIL_NULL) {
return(FALSE);
@@ -256,7 +218,7 @@ dict_hdr_create(
/*--------------------------*/
root_page_no = btr_create(DICT_CLUSTERED | DICT_UNIQUE,
fil_system.sys_space, DICT_FIELDS_ID,
- dict_ind_redundant, NULL, mtr);
+ dict_ind_redundant, mtr);
if (root_page_no == FIL_NULL) {
return(FALSE);
diff --git a/storage/innobase/dict/dict0crea.cc b/storage/innobase/dict/dict0crea.cc
index 25a90342f78..d8b9bcdaf39 100644
--- a/storage/innobase/dict/dict0crea.cc
+++ b/storage/innobase/dict/dict0crea.cc
@@ -352,10 +352,12 @@ dict_build_table_def_step(
{
ut_ad(mutex_own(&dict_sys->mutex));
dict_table_t* table = node->table;
+ trx_t* trx = thr_get_trx(thr);
ut_ad(!table->is_temporary());
ut_ad(!table->space);
ut_ad(table->space_id == ULINT_UNDEFINED);
- dict_table_assign_new_id(table, thr_get_trx(thr));
+ dict_hdr_get_new_id(&table->id, NULL, NULL);
+ trx->table_id = table->id;
/* Always set this bit for all new created tables */
DICT_TF2_FLAG_SET(table, DICT_TF2_FTS_AUX_HEX_NAME);
@@ -368,8 +370,6 @@ dict_build_table_def_step(
ut_ad(DICT_TF_GET_ZIP_SSIZE(table->flags) == 0
|| dict_table_has_atomic_blobs(table));
- trx_t* trx = thr_get_trx(thr);
- ut_ad(trx->table_id);
mtr_t mtr;
trx_undo_t* undo = trx->rsegs.m_redo.undo;
if (undo && !undo->table_id
@@ -397,7 +397,7 @@ dict_build_table_def_step(
}
/* Get a new tablespace ID */
ulint space_id;
- dict_hdr_get_new_id(NULL, NULL, &space_id, table, false);
+ dict_hdr_get_new_id(NULL, NULL, &space_id);
DBUG_EXECUTE_IF(
"ib_create_table_fail_out_of_space_ids",
@@ -745,7 +745,7 @@ dict_build_index_def_step(
ut_ad((UT_LIST_GET_LEN(table->indexes) > 0)
|| dict_index_is_clust(index));
- dict_hdr_get_new_id(NULL, &index->id, NULL, table, false);
+ dict_hdr_get_new_id(NULL, &index->id, NULL);
/* Inherit the space id from the table; we store all indexes of a
table in the same tablespace */
@@ -785,7 +785,7 @@ dict_build_index_def(
ut_ad((UT_LIST_GET_LEN(table->indexes) > 0)
|| dict_index_is_clust(index));
- dict_hdr_get_new_id(NULL, &index->id, NULL, table, false);
+ dict_hdr_get_new_id(NULL, &index->id, NULL);
/* Note that the index was created by this transaction. */
index->trx_id = trx->id;
@@ -859,7 +859,7 @@ dict_create_index_tree_step(
node->page_no = btr_create(
index->type, index->table->space,
- index->id, index, NULL, &mtr);
+ index->id, index, &mtr);
if (node->page_no == FIL_NULL) {
err = DB_OUT_OF_FILE_SPACE;
@@ -905,7 +905,7 @@ dict_create_index_tree_in_mem(
ut_ad(!(index->table->flags2 & DICT_TF2_DISCARDED));
index->page = btr_create(index->type, index->table->space,
- index->id, index, NULL, &mtr);
+ index->id, index, &mtr);
mtr_commit(&mtr);
index->trx_id = trx->id;
@@ -960,28 +960,19 @@ dict_drop_index_tree(
ut_ad(len == 8);
- bool found;
- const page_size_t page_size(fil_space_get_page_size(space,
- &found));
-
- if (!found) {
- /* It is a single table tablespace and the .ibd file is
- missing: do nothing */
-
- return(false);
- }
-
- /* If tablespace is scheduled for truncate, do not try to drop
- the indexes in that tablespace. There is a truncate fixup action
- which will take care of it. */
- if (srv_is_tablespace_truncated(space)) {
- return(false);
+ if (fil_space_t* s = fil_space_acquire_silent(space)) {
+ /* Ensure that the tablespace file exists
+ in order to avoid a crash in buf_page_get_gen(). */
+ if (s->size || fil_space_get_size(space)) {
+ btr_free_if_exists(page_id_t(space, root_page_no),
+ s->zip_size(),
+ mach_read_from_8(ptr), mtr);
+ }
+ s->release();
+ return true;
}
- btr_free_if_exists(page_id_t(space, root_page_no), page_size,
- mach_read_from_8(ptr), mtr);
-
- return(true);
+ return false;
}
/*******************************************************************//**
@@ -1053,7 +1044,7 @@ dict_recreate_index_tree(
ulint root_page_no = (index->type & DICT_FTS)
? FIL_NULL
: btr_create(type, table->space,
- index_id, index, NULL, mtr);
+ index_id, index, mtr);
index->page = unsigned(root_page_no);
return root_page_no;
}
@@ -2134,6 +2125,8 @@ dict_create_add_foreigns_to_dictionary(
return(DB_ERROR);
}
+ error = DB_SUCCESS;
+
for (dict_foreign_set::const_iterator it = local_fk_set.begin();
it != local_fk_set.end();
++it) {
@@ -2145,12 +2138,11 @@ dict_create_add_foreigns_to_dictionary(
table->name.m_name, foreign, trx);
if (error != DB_SUCCESS) {
-
- return(error);
+ break;
}
}
- return(DB_SUCCESS);
+ return error;
}
/****************************************************************//**
@@ -2382,15 +2374,3 @@ dict_delete_tablespace_and_datafiles(
return(err);
}
-
-/** Assign a new table ID and put it into the table cache and the transaction.
-@param[in,out] table Table that needs an ID
-@param[in,out] trx Transaction */
-void
-dict_table_assign_new_id(
- dict_table_t* table,
- trx_t* trx)
-{
- dict_hdr_get_new_id(&table->id, NULL, NULL, table, false);
- trx->table_id = table->id;
-}
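
The last dict0crea.cc hunk changes dict_create_add_foreigns_to_dictionary() so that the loop no longer returns on the first failing foreign key; it breaks out and returns the accumulated status, which is DB_SUCCESS only when every constraint was added. A minimal standalone sketch of that control flow follows; db_err, add_one() and add_all() are illustrative stand-ins, not InnoDB functions.

#include <cstdio>
#include <vector>

enum db_err { DB_SUCCESS = 0, DB_ERROR = 1 };

/* Stand-in for dict_create_add_foreign_to_dictionary(): fail on negative ids. */
static db_err add_one(int id) { return id < 0 ? DB_ERROR : DB_SUCCESS; }

/* Mirrors the patched loop: start from DB_SUCCESS, stop at the first failure,
and return whatever the last attempt produced. */
static db_err add_all(const std::vector<int>& foreign_ids)
{
	db_err error = DB_SUCCESS;
	for (int id : foreign_ids) {
		error = add_one(id);
		if (error != DB_SUCCESS) {
			break;	/* previously: return(error); */
		}
	}
	return error;
}

int main()
{
	std::printf("%d\n", add_all({1, 2, 3}));	/* 0: all added */
	std::printf("%d\n", add_all({1, -2, 3}));	/* 1: stops at -2 */
	return 0;
}
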
diff --git a/storage/innobase/dict/dict0dict.cc b/storage/innobase/dict/dict0dict.cc
index e67f860f3e2..9c5293d0f9d 100644
--- a/storage/innobase/dict/dict0dict.cc
+++ b/storage/innobase/dict/dict0dict.cc
@@ -406,6 +406,27 @@ dict_table_stats_unlock(
}
}
+
+/** Open a persistent table.
+@param[in] table_id persistent table identifier
+@param[in] ignore_err errors to ignore
+@param[in] cached_only whether to skip loading
+@return persistent table
+@retval NULL if not found */
+static dict_table_t* dict_table_open_on_id_low(
+ table_id_t table_id,
+ dict_err_ignore_t ignore_err,
+ bool cached_only)
+{
+ dict_table_t* table = dict_sys->get_table(table_id);
+
+ if (!table && !cached_only) {
+ table = dict_load_table_on_id(table_id, ignore_err);
+ }
+
+ return table;
+}
+
/**********************************************************************//**
Try to drop any indexes after an aborted index creation.
This can also be after a server kill during DROP INDEX. */
@@ -416,7 +437,7 @@ dict_table_try_drop_aborted(
dict_table_t* table, /*!< in: table, or NULL if it
needs to be looked up again */
table_id_t table_id, /*!< in: table identifier */
- int32 ref_count) /*!< in: expected table->n_ref_count */
+ uint32_t ref_count) /*!< in: expected table->n_ref_count */
{
trx_t* trx;
@@ -884,47 +905,29 @@ dict_index_get_nth_col_or_prefix_pos(
return(ULINT_UNDEFINED);
}
-/** Returns TRUE if the index contains a column or a prefix of that column.
-@param[in] index index
+/** Check if the index contains a column or a prefix of that column.
@param[in] n column number
@param[in] is_virtual whether it is a virtual col
-@return TRUE if contains the column or its prefix */
-bool
-dict_index_contains_col_or_prefix(
- const dict_index_t* index,
- ulint n,
- bool is_virtual)
+@return whether the index contains the column or its prefix */
+bool dict_index_t::contains_col_or_prefix(ulint n, bool is_virtual) const
{
- const dict_field_t* field;
- const dict_col_t* col;
- ulint pos;
- ulint n_fields;
-
- ut_ad(index);
- ut_ad(index->magic_n == DICT_INDEX_MAGIC_N);
+ ut_ad(magic_n == DICT_INDEX_MAGIC_N);
- if (dict_index_is_clust(index)) {
+ if (is_primary()) {
return(!is_virtual);
}
- if (is_virtual) {
- col = &dict_table_get_nth_v_col(index->table, n)->m_col;
- } else {
- col = dict_table_get_nth_col(index->table, n);
- }
+ const dict_col_t* col = is_virtual
+ ? &dict_table_get_nth_v_col(table, n)->m_col
+ : dict_table_get_nth_col(table, n);
- n_fields = dict_index_get_n_fields(index);
-
- for (pos = 0; pos < n_fields; pos++) {
- field = dict_index_get_nth_field(index, pos);
-
- if (col == field->col) {
-
- return(true);
+ for (ulint pos = 0; pos < n_fields; pos++) {
+ if (col == fields[pos].col) {
+ return true;
}
}
- return(false);
+ return false;
}
/********************************************************************//**
@@ -1081,20 +1084,19 @@ dict_init(void)
dict_operation_lock = static_cast<rw_lock_t*>(
ut_zalloc_nokey(sizeof(*dict_operation_lock)));
- dict_sys = static_cast<dict_sys_t*>(ut_zalloc_nokey(sizeof(*dict_sys)));
+ dict_sys = new (ut_zalloc_nokey(sizeof(*dict_sys))) dict_sys_t();
UT_LIST_INIT(dict_sys->table_LRU, &dict_table_t::table_LRU);
UT_LIST_INIT(dict_sys->table_non_LRU, &dict_table_t::table_LRU);
mutex_create(LATCH_ID_DICT_SYS, &dict_sys->mutex);
- dict_sys->table_hash = hash_create(
- buf_pool_get_curr_size()
- / (DICT_POOL_PER_TABLE_HASH * UNIV_WORD_SIZE));
+ const ulint hash_size = buf_pool_get_curr_size()
+ / (DICT_POOL_PER_TABLE_HASH * UNIV_WORD_SIZE);
- dict_sys->table_id_hash = hash_create(
- buf_pool_get_curr_size()
- / (DICT_POOL_PER_TABLE_HASH * UNIV_WORD_SIZE));
+ dict_sys->table_hash = hash_create(hash_size);
+ dict_sys->table_id_hash = hash_create(hash_size);
+ dict_sys->temp_id_hash = hash_create(hash_size);
rw_lock_create(dict_operation_lock_key,
dict_operation_lock, SYNC_DICT_OPERATION);
@@ -1253,8 +1255,7 @@ dict_table_add_system_columns(
}
/** Add the table definition to the data dictionary cache */
-void
-dict_table_t::add_to_cache()
+void dict_table_t::add_to_cache()
{
ut_ad(dict_lru_validate());
ut_ad(mutex_own(&dict_sys->mutex));
@@ -1262,7 +1263,6 @@ dict_table_t::add_to_cache()
cached = TRUE;
ulint fold = ut_fold_string(name.m_name);
- ulint id_fold = ut_fold_ull(id);
/* Look for a table with the same name: error if such exists */
{
@@ -1280,31 +1280,30 @@ dict_table_t::add_to_cache()
ut_ad(table2 == NULL);
#endif /* UNIV_DEBUG */
}
+ HASH_INSERT(dict_table_t, name_hash, dict_sys->table_hash, fold,
+ this);
/* Look for a table with the same id: error if such exists */
+ hash_table_t* id_hash = is_temporary()
+ ? dict_sys->temp_id_hash : dict_sys->table_id_hash;
+ const ulint id_fold = ut_fold_ull(id);
{
dict_table_t* table2;
- HASH_SEARCH(id_hash, dict_sys->table_id_hash, id_fold,
+ HASH_SEARCH(id_hash, id_hash, id_fold,
dict_table_t*, table2, ut_ad(table2->cached),
table2->id == id);
ut_a(table2 == NULL);
#ifdef UNIV_DEBUG
/* Look for the same table pointer with a different id */
- HASH_SEARCH_ALL(id_hash, dict_sys->table_id_hash,
+ HASH_SEARCH_ALL(id_hash, id_hash,
dict_table_t*, table2, ut_ad(table2->cached),
table2 == this);
ut_ad(table2 == NULL);
#endif /* UNIV_DEBUG */
- }
- /* Add table to hash table of tables */
- HASH_INSERT(dict_table_t, name_hash, dict_sys->table_hash, fold,
- this);
-
- /* Add table to hash table of tables based on table id */
- HASH_INSERT(dict_table_t, id_hash, dict_sys->table_id_hash, id_fold,
- this);
+ HASH_INSERT(dict_table_t, id_hash, id_hash, id_fold, this);
+ }
if (can_be_evicted) {
UT_LIST_ADD_FIRST(dict_sys->table_LRU, this);
@@ -1429,7 +1428,7 @@ dict_make_room_in_cache(
ut_ad(0);
}
};);
- dict_table_remove_from_cache_low(table, TRUE);
+ dict_table_remove_from_cache(table, true);
++n_evicted;
}
@@ -1950,6 +1949,7 @@ dict_table_change_id_in_cache(
{
ut_ad(mutex_own(&dict_sys->mutex));
ut_ad(table->magic_n == DICT_TABLE_MAGIC_N);
+ ut_ad(!table->is_temporary());
/* Remove the table from the hash table of id's */
@@ -1962,14 +1962,11 @@ dict_table_change_id_in_cache(
ut_fold_ull(table->id), table);
}
-/**********************************************************************//**
-Removes a table object from the dictionary cache. */
-void
-dict_table_remove_from_cache_low(
-/*=============================*/
- dict_table_t* table, /*!< in, own: table */
- ibool lru_evict) /*!< in: TRUE if table being evicted
- to make room in the table LRU list */
+/** Evict a table definition from the InnoDB data dictionary cache.
+@param[in,out] table cached table definition to be evicted
+@param[in] lru whether this is part of least-recently-used eviction
+@param[in] keep whether to keep (not free) the object */
+void dict_table_remove_from_cache(dict_table_t* table, bool lru, bool keep)
{
dict_foreign_t* foreign;
dict_index_t* index;
@@ -2001,7 +1998,7 @@ dict_table_remove_from_cache_low(
index != NULL;
index = UT_LIST_GET_LAST(table->indexes)) {
- dict_index_remove_from_cache_low(table, index, lru_evict);
+ dict_index_remove_from_cache_low(table, index, lru);
}
/* Remove table from the hash tables of tables */
@@ -2009,8 +2006,10 @@ dict_table_remove_from_cache_low(
HASH_DELETE(dict_table_t, name_hash, dict_sys->table_hash,
ut_fold_string(table->name.m_name), table);
- HASH_DELETE(dict_table_t, id_hash, dict_sys->table_id_hash,
- ut_fold_ull(table->id), table);
+ hash_table_t* id_hash = table->is_temporary()
+ ? dict_sys->temp_id_hash : dict_sys->table_id_hash;
+ const ulint id_fold = ut_fold_ull(table->id);
+ HASH_DELETE(dict_table_t, id_hash, id_hash, id_fold, table);
/* Remove table from LRU or non-LRU list. */
if (table->can_be_evicted) {
@@ -2023,7 +2022,7 @@ dict_table_remove_from_cache_low(
ut_ad(dict_lru_validate());
- if (lru_evict && table->drop_aborted) {
+ if (lru && table->drop_aborted) {
/* When evicting the table definition,
drop the orphan indexes from the data dictionary
and free the index pages. */
@@ -2048,17 +2047,9 @@ dict_table_remove_from_cache_low(
UT_DELETE(table->vc_templ);
}
- dict_mem_table_free(table);
-}
-
-/**********************************************************************//**
-Removes a table object from the dictionary cache. */
-void
-dict_table_remove_from_cache(
-/*=========================*/
- dict_table_t* table) /*!< in, own: table */
-{
- dict_table_remove_from_cache_low(table, FALSE);
+ if (!keep) {
+ dict_mem_table_free(table);
+ }
}
/****************************************************************//**
@@ -2121,10 +2112,9 @@ dict_index_too_big_for_tree(
comp = dict_table_is_comp(table);
- const page_size_t page_size(dict_tf_get_page_size(table->flags));
+ const ulint zip_size = dict_tf_get_zip_size(table->flags);
- if (page_size.is_compressed()
- && page_size.physical() < srv_page_size) {
+ if (zip_size && zip_size < srv_page_size) {
/* On a compressed page, two records must fit in the
uncompressed page modification log. On compressed pages
with size.physical() == srv_page_size,
@@ -2135,7 +2125,7 @@ dict_index_too_big_for_tree(
number in the page modification log. The maximum
allowed node pointer size is half that. */
page_rec_max = page_zip_empty_size(new_index->n_fields,
- page_size.physical());
+ zip_size);
if (page_rec_max) {
page_rec_max--;
}
@@ -5436,46 +5426,6 @@ dict_index_build_node_ptr(
return(tuple);
}
-/**********************************************************************//**
-Copies an initial segment of a physical record, long enough to specify an
-index entry uniquely.
-@return pointer to the prefix record */
-rec_t*
-dict_index_copy_rec_order_prefix(
-/*=============================*/
- const dict_index_t* index, /*!< in: index */
- const rec_t* rec, /*!< in: record for which to
- copy prefix */
- ulint* n_fields,/*!< out: number of fields copied */
- byte** buf, /*!< in/out: memory buffer for the
- copied prefix, or NULL */
- ulint* buf_size)/*!< in/out: buffer size */
-{
- ulint n;
-
- UNIV_PREFETCH_R(rec);
-
- if (dict_index_is_ibuf(index)) {
- ut_ad(!dict_table_is_comp(index->table));
- n = rec_get_n_fields_old(rec);
- } else {
- if (page_rec_is_leaf(rec)) {
- n = dict_index_get_n_unique_in_tree(index);
- } else if (dict_index_is_spatial(index)) {
- ut_ad(dict_index_get_n_unique_in_tree_nonleaf(index)
- == DICT_INDEX_SPATIAL_NODEPTR_SIZE);
- /* For R-tree, we have to compare
- the child page numbers as well. */
- n = DICT_INDEX_SPATIAL_NODEPTR_SIZE + 1;
- } else {
- n = dict_index_get_n_unique_in_tree(index);
- }
- }
-
- *n_fields = n;
- return(rec_copy_prefix_to_buf(rec, index, n, buf, buf_size));
-}
-
/** Convert a physical record into a search tuple.
@param[in] rec index record (not necessarily in an index page)
@param[in] index index
@@ -6522,17 +6472,17 @@ dict_resize()
/* all table entries are in table_LRU and table_non_LRU lists */
hash_table_free(dict_sys->table_hash);
hash_table_free(dict_sys->table_id_hash);
+ hash_table_free(dict_sys->temp_id_hash);
- dict_sys->table_hash = hash_create(
- buf_pool_get_curr_size()
- / (DICT_POOL_PER_TABLE_HASH * UNIV_WORD_SIZE));
-
- dict_sys->table_id_hash = hash_create(
- buf_pool_get_curr_size()
- / (DICT_POOL_PER_TABLE_HASH * UNIV_WORD_SIZE));
+ const ulint hash_size = buf_pool_get_curr_size()
+ / (DICT_POOL_PER_TABLE_HASH * UNIV_WORD_SIZE);
+ dict_sys->table_hash = hash_create(hash_size);
+ dict_sys->table_id_hash = hash_create(hash_size);
+ dict_sys->temp_id_hash = hash_create(hash_size);
for (table = UT_LIST_GET_FIRST(dict_sys->table_LRU); table;
table = UT_LIST_GET_NEXT(table_LRU, table)) {
+ ut_ad(!table->is_temporary());
ulint fold = ut_fold_string(table->name.m_name);
ulint id_fold = ut_fold_ull(table->id);
@@ -6551,8 +6501,10 @@ dict_resize()
HASH_INSERT(dict_table_t, name_hash, dict_sys->table_hash,
fold, table);
- HASH_INSERT(dict_table_t, id_hash, dict_sys->table_id_hash,
- id_fold, table);
+ hash_table_t* id_hash = table->is_temporary()
+ ? dict_sys->temp_id_hash : dict_sys->table_id_hash;
+
+ HASH_INSERT(dict_table_t, id_hash, id_hash, id_fold, table);
}
mutex_exit(&dict_sys->mutex);
@@ -6575,7 +6527,7 @@ dict_close(void)
/* Free the hash elements. We don't remove them from the table
because we are going to destroy the table anyway. */
- for (ulint i = 0; i < hash_get_n_cells(dict_sys->table_id_hash); i++) {
+ for (ulint i = 0; i < hash_get_n_cells(dict_sys->table_hash); i++) {
dict_table_t* table;
table = static_cast<dict_table_t*>(
@@ -6596,6 +6548,7 @@ dict_close(void)
/* The elements are the same instance as in dict_sys->table_hash,
therefore we don't delete the individual elements. */
hash_table_free(dict_sys->table_id_hash);
+ hash_table_free(dict_sys->temp_id_hash);
mutex_exit(&dict_sys->mutex);
mutex_free(&dict_sys->mutex);
@@ -6825,6 +6778,7 @@ dict_index_zip_pad_update(
ulint fail_pct;
ut_ad(info);
+ ut_ad(info->pad % ZIP_PAD_INCR == 0);
total = info->success + info->failure;
@@ -6849,17 +6803,16 @@ dict_index_zip_pad_update(
if (fail_pct > zip_threshold) {
/* Compression failures are more then user defined
threshold. Increase the pad size to reduce chances of
- compression failures. */
- ut_ad(info->pad % ZIP_PAD_INCR == 0);
+ compression failures.
- /* Only do increment if it won't increase padding
+ Only do increment if it won't increase padding
beyond max pad size. */
if (info->pad + ZIP_PAD_INCR
< (srv_page_size * zip_pad_max) / 100) {
/* Use atomics even though we have the mutex.
This is to ensure that we are able to read
info->pad atomically. */
- my_atomic_addlint(&info->pad, ZIP_PAD_INCR);
+ info->pad += ZIP_PAD_INCR;
MONITOR_INC(MONITOR_PAD_INCREMENTS);
}
@@ -6877,11 +6830,10 @@ dict_index_zip_pad_update(
if (info->n_rounds >= ZIP_PAD_SUCCESSFUL_ROUND_LIMIT
&& info->pad > 0) {
- ut_ad(info->pad % ZIP_PAD_INCR == 0);
/* Use atomics even though we have the mutex.
This is to ensure that we are able to read
info->pad atomically. */
- my_atomic_addlint(&info->pad, ulint(-ZIP_PAD_INCR));
+ info->pad -= ZIP_PAD_INCR;
info->n_rounds = 0;
@@ -6948,7 +6900,7 @@ dict_index_zip_pad_optimal_page_size(
return(srv_page_size);
}
- pad = my_atomic_loadlint(&index->zip_pad.pad);
+ pad = index->zip_pad.pad;
ut_ad(pad < srv_page_size);
sz = srv_page_size - pad;
@@ -7090,52 +7042,3 @@ dict_space_get_id(
return(id);
}
-
-/** Determine the extent size (in pages) for the given table
-@param[in] table the table whose extent size is being
- calculated.
-@return extent size in pages (256, 128 or 64) */
-ulint
-dict_table_extent_size(
- const dict_table_t* table)
-{
- const ulint mb_1 = 1024 * 1024;
- const ulint mb_2 = 2 * mb_1;
- const ulint mb_4 = 4 * mb_1;
-
- page_size_t page_size = dict_table_page_size(table);
- ulint pages_in_extent = FSP_EXTENT_SIZE;
-
- if (page_size.is_compressed()) {
-
- ulint disk_page_size = page_size.physical();
-
- switch (disk_page_size) {
- case 1024:
- pages_in_extent = mb_1/1024;
- break;
- case 2048:
- pages_in_extent = mb_1/2048;
- break;
- case 4096:
- pages_in_extent = mb_1/4096;
- break;
- case 8192:
- pages_in_extent = mb_1/8192;
- break;
- case 16384:
- pages_in_extent = mb_1/16384;
- break;
- case 32768:
- pages_in_extent = mb_2/32768;
- break;
- case 65536:
- pages_in_extent = mb_4/65536;
- break;
- default:
- ut_ad(0);
- }
- }
-
- return(pages_in_extent);
-}
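
The dict0dict.cc hunks split table-ID lookups into two hash tables: dict_sys->table_id_hash for persistent tables and the new dict_sys->temp_id_hash for temporary tables, selected by dict_table_t::is_temporary(). Below is a minimal standalone sketch of that dispatch, using std::unordered_map as a stand-in for the InnoDB hash_table_t; the Table and Dictionary types are illustrative only.

#include <cassert>
#include <cstdint>
#include <string>
#include <unordered_map>

/* Illustrative stand-in for dict_table_t: only what the sketch needs. */
struct Table {
	uint64_t	id;
	std::string	name;
	bool		is_temporary;
};

/* Stand-in for dict_sys with its two table-ID hashes. */
struct Dictionary {
	std::unordered_map<uint64_t, Table*> table_id_hash;	/* persistent */
	std::unordered_map<uint64_t, Table*> temp_id_hash;	/* temporary */

	/* Like dict_table_t::add_to_cache(): pick the hash by is_temporary. */
	void add(Table* t) {
		auto& h = t->is_temporary ? temp_id_hash : table_id_hash;
		assert(h.count(t->id) == 0);	/* duplicate IDs are a bug */
		h.emplace(t->id, t);
	}

	/* Like dict_table_remove_from_cache(): delete from the matching hash. */
	void remove(Table* t) {
		(t->is_temporary ? temp_id_hash : table_id_hash).erase(t->id);
	}
};

int main()
{
	Dictionary dict;
	Table t1{10, "test/t1", false};
	Table tmp{11, "#sql-temporary", true};
	dict.add(&t1);
	dict.add(&tmp);
	assert(dict.table_id_hash.count(10) && dict.temp_id_hash.count(11));
	dict.remove(&tmp);
	assert(dict.temp_id_hash.empty());
	return 0;
}
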
diff --git a/storage/innobase/dict/dict0load.cc b/storage/innobase/dict/dict0load.cc
index 6cb62f7e256..02da027f7a4 100644
--- a/storage/innobase/dict/dict0load.cc
+++ b/storage/innobase/dict/dict0load.cc
@@ -424,6 +424,8 @@ dict_process_sys_indexes_rec(
const char* err_msg;
byte* buf;
+ ut_d(index->is_dummy = true);
+ ut_d(index->in_instant_init = false);
buf = static_cast<byte*>(mem_heap_alloc(heap, 8));
/* Parse the record, and get "dict_index_t" struct filled */
@@ -3009,8 +3011,7 @@ corrupted:
dict_table_get_first_index(table)->page);
mtr.start();
buf_block_t* block = buf_page_get(
- page_id,
- dict_table_page_size(table),
+ page_id, table->space->zip_size(),
RW_S_LATCH, &mtr);
const bool corrupted = !block
|| page_get_space_id(block->frame)
diff --git a/storage/innobase/dict/dict0mem.cc b/storage/innobase/dict/dict0mem.cc
index b55cb51aef6..e33a4819134 100644
--- a/storage/innobase/dict/dict0mem.cc
+++ b/storage/innobase/dict/dict0mem.cc
@@ -185,8 +185,6 @@ dict_mem_table_create(
|| DICT_TF2_FLAG_IS_SET(table, DICT_TF2_FTS_ADD_DOC_ID)) {
table->fts = fts_create(table);
table->fts->cache = fts_cache_create(table);
- } else {
- table->fts = NULL;
}
new(&table->foreign_set) dict_foreign_set();
@@ -526,6 +524,14 @@ dict_mem_table_col_rename_low(
= dict_index_get_nth_field(
index, i);
+ ut_ad(!field->name
+ == field->col->is_dropped());
+ if (!field->name) {
+ /* dropped columns lack a name */
+ ut_ad(index->is_instant());
+ continue;
+ }
+
/* if is_virtual and that in field->col does
not match, continue */
if ((!is_virtual) !=
@@ -710,6 +716,7 @@ dict_mem_fill_column_struct(
column->mbmaxlen = mbmaxlen;
column->def_val.data = NULL;
column->def_val.len = UNIV_SQL_DEFAULT;
+ ut_ad(!column->is_dropped());
}
/**********************************************************************//**
@@ -1159,293 +1166,122 @@ operator<< (std::ostream& out, const dict_foreign_set& fk_set)
return(out);
}
-/** Adjust clustered index metadata for instant ADD COLUMN.
-@param[in] clustered index definition after instant ADD COLUMN */
-inline void dict_index_t::instant_add_field(const dict_index_t& instant)
+/** Reconstruct the clustered index fields. */
+inline void dict_index_t::reconstruct_fields()
{
DBUG_ASSERT(is_primary());
- DBUG_ASSERT(instant.is_primary());
- DBUG_ASSERT(!instant.is_instant());
- DBUG_ASSERT(n_def == n_fields);
- DBUG_ASSERT(instant.n_def == instant.n_fields);
-
- DBUG_ASSERT(type == instant.type);
- DBUG_ASSERT(trx_id_offset == instant.trx_id_offset);
- DBUG_ASSERT(n_user_defined_cols == instant.n_user_defined_cols);
- DBUG_ASSERT(n_uniq == instant.n_uniq);
- DBUG_ASSERT(instant.n_fields > n_fields);
- DBUG_ASSERT(instant.n_def > n_def);
- DBUG_ASSERT(instant.n_nullable >= n_nullable);
- DBUG_ASSERT(instant.n_core_fields >= n_core_fields);
- DBUG_ASSERT(instant.n_core_null_bytes >= n_core_null_bytes);
-
- n_fields = instant.n_fields;
- n_def = instant.n_def;
- n_nullable = instant.n_nullable;
- fields = static_cast<dict_field_t*>(
- mem_heap_dup(heap, instant.fields, n_fields * sizeof *fields));
-
- ut_d(unsigned n_null = 0);
-
- for (unsigned i = 0; i < n_fields; i++) {
- DBUG_ASSERT(fields[i].same(instant.fields[i]));
- const dict_col_t* icol = instant.fields[i].col;
- DBUG_ASSERT(!icol->is_virtual());
- dict_col_t* col = fields[i].col = &table->cols[
- icol - instant.table->cols];
- fields[i].name = col->name(*table);
- ut_d(n_null += col->is_nullable());
- }
- ut_ad(n_null == n_nullable);
-}
+ n_fields += table->instant->n_dropped;
+ n_def += table->instant->n_dropped;
-/** Adjust metadata for instant ADD COLUMN.
-@param[in] table table definition after instant ADD COLUMN */
-void dict_table_t::instant_add_column(const dict_table_t& table)
-{
- DBUG_ASSERT(!table.cached);
- DBUG_ASSERT(table.n_def == table.n_cols);
- DBUG_ASSERT(table.n_t_def == table.n_t_cols);
- DBUG_ASSERT(n_def == n_cols);
- DBUG_ASSERT(n_t_def == n_t_cols);
- DBUG_ASSERT(table.n_cols > n_cols);
- ut_ad(mutex_own(&dict_sys->mutex));
-
- const char* end = table.col_names;
- for (unsigned i = table.n_cols; i--; ) end += strlen(end) + 1;
-
- col_names = static_cast<char*>(
- mem_heap_dup(heap, table.col_names,
- ulint(end - table.col_names)));
- const dict_col_t* const old_cols = cols;
- const dict_col_t* const old_cols_end = cols + n_cols;
- cols = static_cast<dict_col_t*>(mem_heap_dup(heap, table.cols,
- table.n_cols
- * sizeof *cols));
-
- /* Preserve the default values of previously instantly
- added columns. */
- for (unsigned i = unsigned(n_cols) - DATA_N_SYS_COLS; i--; ) {
- cols[i].def_val = old_cols[i].def_val;
- }
+ const unsigned n_first = first_user_field();
+
+ dict_field_t* tfields = static_cast<dict_field_t*>(
+ mem_heap_zalloc(heap, n_fields * sizeof *fields));
+
+ memcpy(tfields, fields, n_first * sizeof *fields);
- /* Copy the new default values to this->heap. */
- for (unsigned i = n_cols; i < table.n_cols; i++) {
- dict_col_t& c = cols[i - DATA_N_SYS_COLS];
- DBUG_ASSERT(c.is_instant());
- if (c.def_val.len == 0) {
- c.def_val.data = field_ref_zero;
- } else if (const void*& d = c.def_val.data) {
- d = mem_heap_dup(heap, d, c.def_val.len);
+ n_nullable = 0;
+ ulint n_core_null = 0;
+ const bool comp = dict_table_is_comp(table);
+ const auto* field_map_it = table->instant->field_map;
+ for (unsigned i = n_first, j = 0; i < n_fields; ) {
+ dict_field_t& f = tfields[i++];
+ auto c = *field_map_it++;
+ if (c.is_dropped()) {
+ f.col = &table->instant->dropped[j++];
+ DBUG_ASSERT(f.col->is_dropped());
+ f.fixed_len = dict_col_get_fixed_size(f.col, comp);
} else {
- DBUG_ASSERT(c.def_val.len == UNIV_SQL_NULL);
+ DBUG_ASSERT(!c.is_not_null());
+ const auto old = std::find_if(
+ fields + n_first, fields + n_fields,
+ [c](const dict_field_t& o)
+ { return o.col->ind == c.ind(); });
+ ut_ad(old >= &fields[n_first]);
+ ut_ad(old < &fields[n_fields]);
+ DBUG_ASSERT(!old->prefix_len);
+ DBUG_ASSERT(old->col == &table->cols[c.ind()]);
+ f = *old;
}
- }
- const unsigned old_n_cols = n_cols;
- const unsigned n_add = unsigned(table.n_cols - n_cols);
-
- n_t_def += n_add;
- n_t_cols += n_add;
- n_cols = table.n_cols;
- n_def = n_cols;
-
- for (unsigned i = n_v_def; i--; ) {
- const dict_v_col_t& v = v_cols[i];
- for (ulint n = v.num_base; n--; ) {
- dict_col_t*& base = v.base_col[n];
- if (!base->is_virtual()) {
- DBUG_ASSERT(base >= old_cols);
- size_t n = size_t(base - old_cols);
- DBUG_ASSERT(n + DATA_N_SYS_COLS < old_n_cols);
- base = &cols[n];
- }
+ f.col->clear_instant();
+ if (f.col->is_nullable()) {
+ n_nullable++;
+ n_core_null += i <= n_core_fields;
}
}
- dict_index_t* index = dict_table_get_first_index(this);
-
- index->instant_add_field(*dict_table_get_first_index(&table));
-
- while ((index = dict_table_get_next_index(index)) != NULL) {
- for (unsigned i = 0; i < index->n_fields; i++) {
- dict_field_t& field = index->fields[i];
- if (field.col < old_cols
- || field.col >= old_cols_end) {
- DBUG_ASSERT(field.col->is_virtual());
- } else {
- /* Secondary indexes may contain user
- columns and DB_ROW_ID (if there is
- GEN_CLUST_INDEX instead of PRIMARY KEY),
- but not DB_TRX_ID,DB_ROLL_PTR. */
- DBUG_ASSERT(field.col >= old_cols);
- size_t n = size_t(field.col - old_cols);
- DBUG_ASSERT(n + DATA_N_SYS_COLS <= old_n_cols);
- if (n + DATA_N_SYS_COLS >= old_n_cols) {
- /* Replace DB_ROW_ID */
- n += n_add;
- }
- field.col = &cols[n];
- DBUG_ASSERT(!field.col->is_virtual());
- field.name = field.col->name(*this);
- }
- }
- }
+ fields = tfields;
+ n_core_null_bytes = UT_BITS_IN_BYTES(n_core_null);
}
-/** Roll back instant_add_column().
-@param[in] old_n_cols original n_cols
-@param[in] old_cols original cols
-@param[in] old_col_names original col_names */
-void
-dict_table_t::rollback_instant(
- unsigned old_n_cols,
- dict_col_t* old_cols,
- const char* old_col_names)
+/** Reconstruct dropped or reordered columns.
+@param[in] metadata data from serialise_columns()
+@param[in] len length of the metadata, in bytes
+@return whether parsing the metadata failed */
+bool dict_table_t::deserialise_columns(const byte* metadata, ulint len)
{
- ut_ad(mutex_own(&dict_sys->mutex));
- dict_index_t* index = indexes.start;
- /* index->is_instant() does not necessarily hold here, because
- the table may have been emptied */
- DBUG_ASSERT(old_n_cols >= DATA_N_SYS_COLS);
- DBUG_ASSERT(n_cols >= old_n_cols);
- DBUG_ASSERT(n_cols == n_def);
- DBUG_ASSERT(index->n_def == index->n_fields);
-
- const unsigned n_remove = n_cols - old_n_cols;
-
- for (unsigned i = index->n_fields - n_remove; i < index->n_fields;
- i++) {
- if (index->fields[i].col->is_nullable()) {
- index->n_nullable--;
- }
- }
+ DBUG_ASSERT(!instant);
- index->n_fields -= n_remove;
- index->n_def = index->n_fields;
- if (index->n_core_fields > index->n_fields) {
- index->n_core_fields = index->n_fields;
- index->n_core_null_bytes
- = UT_BITS_IN_BYTES(unsigned(index->n_nullable));
- }
+ unsigned num_non_pk_fields = mach_read_from_4(metadata);
+ metadata += 4;
- const dict_col_t* const new_cols = cols;
- const dict_col_t* const new_cols_end = cols + n_cols;
-
- cols = old_cols;
- col_names = old_col_names;
- n_cols = old_n_cols;
- n_def = old_n_cols;
- n_t_def -= n_remove;
- n_t_cols -= n_remove;
-
- for (unsigned i = n_v_def; i--; ) {
- const dict_v_col_t& v = v_cols[i];
- for (ulint n = v.num_base; n--; ) {
- dict_col_t*& base = v.base_col[n];
- if (!base->is_virtual()) {
- base = &cols[base - new_cols];
- }
- }
+ if (num_non_pk_fields >= REC_MAX_N_FIELDS - 3) {
+ return true;
}
- do {
- for (unsigned i = 0; i < index->n_fields; i++) {
- dict_field_t& field = index->fields[i];
- if (field.col < new_cols
- || field.col >= new_cols_end) {
- DBUG_ASSERT(field.col->is_virtual());
- } else {
- DBUG_ASSERT(field.col >= new_cols);
- size_t n = size_t(field.col - new_cols);
- DBUG_ASSERT(n <= n_cols);
- if (n + DATA_N_SYS_COLS >= n_cols) {
- n -= n_remove;
- }
- field.col = &cols[n];
- DBUG_ASSERT(!field.col->is_virtual());
- field.name = field.col->name(*this);
- }
- }
- } while ((index = dict_table_get_next_index(index)) != NULL);
-}
+ dict_index_t* index = UT_LIST_GET_FIRST(indexes);
-/** Trim the instantly added columns when an insert into SYS_COLUMNS
-is rolled back during ALTER TABLE or recovery.
-@param[in] n number of surviving non-system columns */
-void dict_table_t::rollback_instant(unsigned n)
-{
- ut_ad(mutex_own(&dict_sys->mutex));
- dict_index_t* index = indexes.start;
- DBUG_ASSERT(index->is_instant());
- DBUG_ASSERT(index->n_def == index->n_fields);
- DBUG_ASSERT(n_cols == n_def);
- DBUG_ASSERT(n >= index->n_uniq);
- DBUG_ASSERT(n_cols > n + DATA_N_SYS_COLS);
- const unsigned n_remove = n_cols - n - DATA_N_SYS_COLS;
-
- char* names = const_cast<char*>(dict_table_get_col_name(this, n));
- const char* sys = names;
- for (unsigned i = n_remove; i--; ) {
- sys += strlen(sys) + 1;
+ if (num_non_pk_fields < unsigned(index->n_fields)
+ - index->first_user_field()) {
+ return true;
}
- static const char system[] = "DB_ROW_ID\0DB_TRX_ID\0DB_ROLL_PTR";
- DBUG_ASSERT(!memcmp(sys, system, sizeof system));
- for (unsigned i = index->n_fields - n_remove; i < index->n_fields;
- i++) {
- if (index->fields[i].col->is_nullable()) {
- index->n_nullable--;
+
+ field_map_element_t* field_map = static_cast<field_map_element_t*>(
+ mem_heap_alloc(heap,
+ num_non_pk_fields * sizeof *field_map));
+
+ unsigned n_dropped_cols = 0;
+
+ for (unsigned i = 0; i < num_non_pk_fields; i++) {
+ auto c = field_map[i] = mach_read_from_2(metadata);
+ metadata += 2;
+
+ if (field_map[i].is_dropped()) {
+ if (c.ind() > DICT_MAX_FIXED_COL_LEN + 1) {
+ return true;
+ }
+ n_dropped_cols++;
+ } else if (c >= n_cols) {
+ return true;
}
}
- index->n_fields -= n_remove;
- index->n_def = index->n_fields;
- memmove(names, sys, sizeof system);
- memmove(cols + n, cols + n_cols - DATA_N_SYS_COLS,
- DATA_N_SYS_COLS * sizeof *cols);
- n_cols -= n_remove;
- n_def = n_cols;
- n_t_cols -= n_remove;
- n_t_def -= n_remove;
-
- for (unsigned i = DATA_N_SYS_COLS; i--; ) {
- cols[n_cols - i].ind--;
- }
- if (dict_index_is_auto_gen_clust(index)) {
- DBUG_ASSERT(index->n_uniq == 1);
- dict_field_t* field = index->fields;
- field->name = sys;
- field->col = dict_table_get_sys_col(this, DATA_ROW_ID);
- field++;
- field->name = sys + sizeof "DB_ROW_ID";
- field->col = dict_table_get_sys_col(this, DATA_TRX_ID);
- field++;
- field->name = sys + sizeof "DB_ROW_ID\0DB_TRX_ID";
- field->col = dict_table_get_sys_col(this, DATA_ROLL_PTR);
-
- /* Replace the DB_ROW_ID column in secondary indexes. */
- while ((index = dict_table_get_next_index(index)) != NULL) {
- field = &index->fields[index->n_fields - 1];
- DBUG_ASSERT(field->col->mtype == DATA_SYS);
- DBUG_ASSERT(field->col->prtype
- == DATA_NOT_NULL + DATA_TRX_ID);
- field->col--;
- field->name = sys;
+ dict_col_t* dropped_cols = static_cast<dict_col_t*>(mem_heap_zalloc(
+ heap, n_dropped_cols * sizeof(dict_col_t)));
+ instant = new (mem_heap_alloc(heap, sizeof *instant)) dict_instant_t();
+ instant->n_dropped = n_dropped_cols;
+ instant->dropped = dropped_cols;
+ instant->field_map = field_map;
+
+ dict_col_t* col = dropped_cols;
+ for (unsigned i = 0; i < num_non_pk_fields; i++) {
+ if (field_map[i].is_dropped()) {
+ auto fixed_len = field_map[i].ind();
+ DBUG_ASSERT(fixed_len <= DICT_MAX_FIXED_COL_LEN + 1);
+ (col++)->set_dropped(field_map[i].is_not_null(),
+ fixed_len == 1,
+ fixed_len > 1 ? fixed_len - 1
+ : 0);
}
-
- return;
}
+ DBUG_ASSERT(col == &dropped_cols[n_dropped_cols]);
- dict_field_t* field = &index->fields[index->n_uniq];
- field->name = sys + sizeof "DB_ROW_ID";
- field->col = dict_table_get_sys_col(this, DATA_TRX_ID);
- field++;
- field->name = sys + sizeof "DB_ROW_ID\0DB_TRX_ID";
- field->col = dict_table_get_sys_col(this, DATA_ROLL_PTR);
+ UT_LIST_GET_FIRST(indexes)->reconstruct_fields();
+ return false;
}
-
/** Check if record in clustered index is historical row.
@param[in] rec clustered row
@param[in] offsets offsets
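
The new dict_table_t::deserialise_columns() in dict0mem.cc reads a 4-byte big-endian count followed by one 2-byte entry per field after the PRIMARY KEY, where each entry either refers to a surviving column or describes a dropped one. The standalone sketch below parses a blob of that shape; the flag-bit layout (high bit = dropped, next bit = NOT NULL, low bits = column index or fixed length) is an assumption made for illustration and is not necessarily the real field_map_element_t encoding.

#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <vector>

/* Big-endian readers, mirroring mach_read_from_4() / mach_read_from_2(). */
static uint32_t read4(const uint8_t* p)
{
	return uint32_t(p[0]) << 24 | uint32_t(p[1]) << 16
		| uint32_t(p[2]) << 8 | p[3];
}
static uint16_t read2(const uint8_t* p) { return uint16_t(p[0] << 8 | p[1]); }

/* Assumed layout for this sketch only. */
struct FieldMapEntry {
	uint16_t	raw;
	bool		is_dropped() const { return raw & 0x8000; }
	bool		is_not_null() const { return raw & 0x4000; }
	unsigned	ind() const { return raw & 0x3FFF; }
};

/* Parse the metadata blob; return false on malformed input
(deserialise_columns() returns true on failure, inverted here). */
static bool parse(const uint8_t* metadata, size_t len,
		  std::vector<FieldMapEntry>& out)
{
	if (len < 4) return false;
	const uint32_t n = read4(metadata);
	metadata += 4;
	if (len < 4 + 2 * size_t(n)) return false;
	for (uint32_t i = 0; i < n; i++, metadata += 2) {
		out.push_back(FieldMapEntry{read2(metadata)});
	}
	return true;
}

int main()
{
	/* Two entries: surviving column #3, then a dropped NOT NULL column. */
	const uint8_t blob[] = {0, 0, 0, 2, 0x00, 0x03, 0xC0, 0x05};
	std::vector<FieldMapEntry> map;
	if (parse(blob, sizeof blob, map)) {
		for (const FieldMapEntry& e : map) {
			std::printf("dropped=%d not_null=%d ind=%u\n",
				    e.is_dropped(), e.is_not_null(), e.ind());
		}
	}
	return 0;
}
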
diff --git a/storage/innobase/dict/dict0stats.cc b/storage/innobase/dict/dict0stats.cc
index b9185b99aa4..9492d9ed711 100644
--- a/storage/innobase/dict/dict0stats.cc
+++ b/storage/innobase/dict/dict0stats.cc
@@ -1505,7 +1505,7 @@ dict_stats_analyze_index_below_cur(
page_id_t page_id(index->table->space_id,
btr_node_ptr_get_child_page_no(
rec, offsets_rec));
- const page_size_t page_size(index->table->space->flags);
+ const ulint zip_size = index->table->space->zip_size();
/* assume no external pages by default - in case we quit from this
function without analyzing any leaf pages */
@@ -1518,7 +1518,7 @@ dict_stats_analyze_index_below_cur(
dberr_t err = DB_SUCCESS;
- block = buf_page_get_gen(page_id, page_size, RW_S_LATCH,
+ block = buf_page_get_gen(page_id, zip_size, RW_S_LATCH,
NULL /* no guessed block */,
BUF_GET, __FILE__, __LINE__, &mtr, &err);
@@ -3840,6 +3840,63 @@ dict_stats_rename_table(
return(ret);
}
+/*********************************************************************//**
+Renames an index in InnoDB persistent stats storage.
+This function creates its own transaction and commits it.
+@return DB_SUCCESS or error code. DB_STATS_DO_NOT_EXIST will be returned
+if the persistent stats do not exist. */
+dberr_t
+dict_stats_rename_index(
+/*====================*/
+ const dict_table_t* table, /*!< in: table whose index
+ is renamed */
+ const char* old_index_name, /*!< in: old index name */
+ const char* new_index_name) /*!< in: new index name */
+{
+ rw_lock_x_lock(dict_operation_lock);
+ mutex_enter(&dict_sys->mutex);
+
+ if (!dict_stats_persistent_storage_check(true)) {
+ mutex_exit(&dict_sys->mutex);
+ rw_lock_x_unlock(dict_operation_lock);
+ return(DB_STATS_DO_NOT_EXIST);
+ }
+
+ char dbname_utf8[MAX_DB_UTF8_LEN];
+ char tablename_utf8[MAX_TABLE_UTF8_LEN];
+
+ dict_fs2utf8(table->name.m_name, dbname_utf8, sizeof(dbname_utf8),
+ tablename_utf8, sizeof(tablename_utf8));
+
+ pars_info_t* pinfo;
+
+ pinfo = pars_info_create();
+
+ pars_info_add_str_literal(pinfo, "dbname_utf8", dbname_utf8);
+ pars_info_add_str_literal(pinfo, "tablename_utf8", tablename_utf8);
+ pars_info_add_str_literal(pinfo, "new_index_name", new_index_name);
+ pars_info_add_str_literal(pinfo, "old_index_name", old_index_name);
+
+ dberr_t ret;
+
+ ret = dict_stats_exec_sql(
+ pinfo,
+ "PROCEDURE RENAME_INDEX_IN_INDEX_STATS () IS\n"
+ "BEGIN\n"
+ "UPDATE \"" INDEX_STATS_NAME "\" SET\n"
+ "index_name = :new_index_name\n"
+ "WHERE\n"
+ "database_name = :dbname_utf8 AND\n"
+ "table_name = :tablename_utf8 AND\n"
+ "index_name = :old_index_name;\n"
+ "END;\n", NULL);
+
+ mutex_exit(&dict_sys->mutex);
+ rw_lock_x_unlock(dict_operation_lock);
+
+ return(ret);
+}
+
/* tests @{ */
#ifdef UNIV_ENABLE_UNIT_TEST_DICT_STATS
diff --git a/storage/innobase/dict/dict0stats_bg.cc b/storage/innobase/dict/dict0stats_bg.cc
index 7f4b12d302b..603122903ff 100644
--- a/storage/innobase/dict/dict0stats_bg.cc
+++ b/storage/innobase/dict/dict0stats_bg.cc
@@ -1,7 +1,7 @@
/*****************************************************************************
Copyright (c) 2012, 2017, Oracle and/or its affiliates. All Rights Reserved.
-Copyright (c) 2017, 2018, MariaDB Corporation.
+Copyright (c) 2017, 2019, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -180,7 +180,7 @@ void dict_stats_update_if_needed_func(dict_table_t* table)
lock waits to be enqueued at head of waiting
queue. */
if (thd
- && !wsrep_thd_is_applier(thd)
+ && !wsrep_thd_is_applying(thd)
&& wsrep_on(thd)
&& wsrep_thd_is_BF(thd, 0)) {
WSREP_DEBUG("Avoiding background statistics"