author     unknown <knielsen@knielsen-hq.org>   2010-08-03 12:54:05 +0200
committer  unknown <knielsen@knielsen-hq.org>   2010-08-03 12:54:05 +0200
commit     0c6afe17dceb926b5c757157308188e540caec1e (patch)
tree       f7b58e3be042609d1b4eab722e3910fca5c0d802 /handler
parent     3e4d2be619d1d67f6d7405514295d4faf21d39c5 (diff)
download   mariadb-git-0c6afe17dceb926b5c757157308188e540caec1e.tar.gz
updated with changes from Percona-Server-5.1.47-11.2
Diffstat (limited to 'handler')
-rw-r--r--  handler/ha_innodb.cc         1138
-rw-r--r--  handler/ha_innodb.h            37
-rw-r--r--  handler/handler0alter.cc       97
-rw-r--r--  handler/i_s.cc                 77
-rw-r--r--  handler/innodb_patch_info.h     2
5 files changed, 1003 insertions, 348 deletions
diff --git a/handler/ha_innodb.cc b/handler/ha_innodb.cc
index 5a84d853ad3..5c01abd2e0c 100644
--- a/handler/ha_innodb.cc
+++ b/handler/ha_innodb.cc
@@ -1,7 +1,8 @@
/*****************************************************************************
-Copyright (c) 2000, 2009, MySQL AB & Innobase Oy. All Rights Reserved.
+Copyright (c) 2000, 2010, MySQL AB & Innobase Oy. All Rights Reserved.
Copyright (c) 2008, 2009 Google Inc.
+Copyright (c) 2009, Percona Inc.
Portions of this file contain modifications contributed and copyrighted by
Google, Inc. Those modifications are gratefully acknowledged and are described
@@ -9,6 +10,13 @@ briefly in the InnoDB documentation. The contributions by Google are
incorporated with their permission, and subject to the conditions contained in
the file COPYING.Google.
+Portions of this file contain modifications contributed and copyrighted
+by Percona Inc.. Those modifications are
+gratefully acknowledged and are described briefly in the InnoDB
+documentation. The contributions by Percona Inc. are incorporated with
+their permission, and subject to the conditions contained in the file
+COPYING.Percona.
+
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
Foundation; version 2 of the License.
@@ -22,32 +30,6 @@ this program; if not, write to the Free Software Foundation, Inc., 59 Temple
Place, Suite 330, Boston, MA 02111-1307 USA
*****************************************************************************/
-/***********************************************************************
-
-Copyright (c) 1995, 2009, Innobase Oy. All Rights Reserved.
-Copyright (c) 2009, Percona Inc.
-
-Portions of this file contain modifications contributed and copyrighted
-by Percona Inc.. Those modifications are
-gratefully acknowledged and are described briefly in the InnoDB
-documentation. The contributions by Percona Inc. are incorporated with
-their permission, and subject to the conditions contained in the file
-COPYING.Percona.
-
-This program is free software; you can redistribute it and/or modify it
-under the terms of the GNU General Public License as published by the
-Free Software Foundation; version 2 of the License.
-
-This program is distributed in the hope that it will be useful, but
-WITHOUT ANY WARRANTY; without even the implied warranty of
-MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
-Public License for more details.
-
-You should have received a copy of the GNU General Public License along
-with this program; if not, write to the Free Software Foundation, Inc.,
-59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
-
-***********************************************************************/
/* TODO list for the InnoDB handler in 5.0:
- Remove the flag trx->active_trans and look at trx->conc_state
@@ -144,7 +126,6 @@ static ulong commit_threads = 0;
static pthread_mutex_t commit_threads_m;
static pthread_cond_t commit_cond;
static pthread_mutex_t commit_cond_m;
-static pthread_mutex_t analyze_mutex;
static bool innodb_inited = 0;
#define INSIDE_HA_INNOBASE_CC
@@ -187,6 +168,7 @@ static char* innobase_data_file_path = NULL;
static char* innobase_log_group_home_dir = NULL;
static char* innobase_file_format_name = NULL;
static char* innobase_change_buffering = NULL;
+static char* innobase_doublewrite_file = NULL;
/* Note: This variable can be set to on/off and any of the supported
file formats in the configuration file, but can only be set to any
@@ -360,7 +342,7 @@ static MYSQL_THDVAR_ULONG(lock_wait_timeout, PLUGIN_VAR_RQCMDARG,
static handler *innobase_create_handler(handlerton *hton,
- TABLE_SHARE *table,
+ TABLE_SHARE *table,
MEM_ROOT *mem_root)
{
return new (mem_root) ha_innobase(hton, table);
@@ -473,8 +455,9 @@ static
int
innobase_start_trx_and_assign_read_view(
/*====================================*/
- handlerton* hton, /*!< in: Innodb handlerton */
- THD* thd); /*!< in: MySQL thread handle of the user for whom
+ /* out: 0 */
+ handlerton* hton, /* in: Innodb handlerton */
+ THD* thd); /* in: MySQL thread handle of the user for whom
the transaction should be committed */
/****************************************************************//**
Flushes InnoDB logs to disk and makes a checkpoint. Really, a commit flushes
@@ -556,6 +539,8 @@ static SHOW_VAR innodb_status_variables[]= {
(char*) &export_vars.innodb_data_written, SHOW_LONG},
{"dblwr_pages_written",
(char*) &export_vars.innodb_dblwr_pages_written, SHOW_LONG},
+ {"deadlocks",
+ (char*) &export_vars.innodb_deadlocks, SHOW_LONG},
{"dblwr_writes",
(char*) &export_vars.innodb_dblwr_writes, SHOW_LONG},
{"dict_tables",
@@ -1897,6 +1882,19 @@ trx_is_interrupted(
return(trx && trx->mysql_thd && thd_killed((THD*) trx->mysql_thd));
}
+/**********************************************************************//**
+Determines if the currently running transaction is in strict mode.
+@return TRUE if strict */
+extern "C" UNIV_INTERN
+ibool
+trx_is_strict(
+/*==========*/
+ trx_t* trx) /*!< in: transaction */
+{
+ return(trx && trx->mysql_thd
+ && THDVAR((THD*) trx->mysql_thd, strict_mode));
+}
+
/**************************************************************//**
Resets some fields of a prebuilt struct. The template is used in fast
retrieval of just those column values MySQL needs in its processing. */
@@ -2233,6 +2231,8 @@ mem_free_and_error:
goto error;
}
+ srv_doublewrite_file = innobase_doublewrite_file;
+
srv_extra_undoslots = (ibool) innobase_extra_undoslots;
/* -------------- Log files ---------------------------*/
@@ -2331,7 +2331,7 @@ mem_free_and_error:
}
sql_print_error("InnoDB: invalid value "
- "innodb_file_format_check=%s",
+ "innodb_change_buffering=%s",
innobase_change_buffering);
goto mem_free_and_error;
}
@@ -2370,7 +2370,6 @@ innobase_change_buffering_inited_ok:
srv_force_recovery = (ulint) innobase_force_recovery;
- srv_fast_recovery = (ibool) innobase_fast_recovery;
srv_recovery_stats = (ibool) innobase_recovery_stats;
srv_use_doublewrite_buf = (ibool) innobase_use_doublewrite;
@@ -2500,7 +2499,6 @@ skip_overwrite:
pthread_mutex_init(&prepare_commit_mutex, MY_MUTEX_INIT_FAST);
pthread_mutex_init(&commit_threads_m, MY_MUTEX_INIT_FAST);
pthread_mutex_init(&commit_cond_m, MY_MUTEX_INIT_FAST);
- pthread_mutex_init(&analyze_mutex, MY_MUTEX_INIT_FAST);
pthread_cond_init(&commit_cond, NULL);
innodb_inited= 1;
#ifdef MYSQL_DYNAMIC_PLUGIN
@@ -2555,7 +2553,6 @@ innobase_end(
pthread_mutex_destroy(&prepare_commit_mutex);
pthread_mutex_destroy(&commit_threads_m);
pthread_mutex_destroy(&commit_cond_m);
- pthread_mutex_destroy(&analyze_mutex);
pthread_cond_destroy(&commit_cond);
}
@@ -2592,10 +2589,7 @@ innobase_alter_table_flags(
{
return(HA_ONLINE_ADD_INDEX_NO_WRITES
| HA_ONLINE_DROP_INDEX_NO_WRITES
- /* Current InnoDB doesn't sort unique indexes along mysqld's order
- It is dangerous to use index. So it is disabled until
- the bug http://bugs.mysql.com/47622 */
- /* | HA_ONLINE_ADD_UNIQUE_INDEX_NO_WRITES */
+ | HA_ONLINE_ADD_UNIQUE_INDEX_NO_WRITES
| HA_ONLINE_DROP_UNIQUE_INDEX_NO_WRITES
| HA_ONLINE_ADD_PK_INDEX_NO_WRITES);
}
@@ -3288,59 +3282,370 @@ normalize_table_name(
}
/********************************************************************//**
+Get the upper limit of the MySQL integral and floating-point type.
+@return maximum allowed value for the field */
+static
+ulonglong
+innobase_get_int_col_max_value(
+/*===========================*/
+ const Field* field) /*!< in: MySQL field */
+{
+ ulonglong max_value = 0;
+
+ switch(field->key_type()) {
+ /* TINY */
+ case HA_KEYTYPE_BINARY:
+ max_value = 0xFFULL;
+ break;
+ case HA_KEYTYPE_INT8:
+ max_value = 0x7FULL;
+ break;
+ /* SHORT */
+ case HA_KEYTYPE_USHORT_INT:
+ max_value = 0xFFFFULL;
+ break;
+ case HA_KEYTYPE_SHORT_INT:
+ max_value = 0x7FFFULL;
+ break;
+ /* MEDIUM */
+ case HA_KEYTYPE_UINT24:
+ max_value = 0xFFFFFFULL;
+ break;
+ case HA_KEYTYPE_INT24:
+ max_value = 0x7FFFFFULL;
+ break;
+ /* LONG */
+ case HA_KEYTYPE_ULONG_INT:
+ max_value = 0xFFFFFFFFULL;
+ break;
+ case HA_KEYTYPE_LONG_INT:
+ max_value = 0x7FFFFFFFULL;
+ break;
+ /* BIG */
+ case HA_KEYTYPE_ULONGLONG:
+ max_value = 0xFFFFFFFFFFFFFFFFULL;
+ break;
+ case HA_KEYTYPE_LONGLONG:
+ max_value = 0x7FFFFFFFFFFFFFFFULL;
+ break;
+ case HA_KEYTYPE_FLOAT:
+ /* We use the maximum as per IEEE754-2008 standard, 2^24 */
+ max_value = 0x1000000ULL;
+ break;
+ case HA_KEYTYPE_DOUBLE:
+ /* We use the maximum as per IEEE754-2008 standard, 2^53 */
+ max_value = 0x20000000000000ULL;
+ break;
+ default:
+ ut_error;
+ }
+
+ return(max_value);
+}
+
+/*******************************************************************//**
+This function checks whether the index column information
+is consistent between KEY info from mysql and that from innodb index.
+@return TRUE if all column types match. */
+static
+ibool
+innobase_match_index_columns(
+/*=========================*/
+ const KEY* key_info, /*!< in: Index info
+ from mysql */
+ const dict_index_t* index_info) /*!< in: Index info
+ from Innodb */
+{
+ const KEY_PART_INFO* key_part;
+ const KEY_PART_INFO* key_end;
+ const dict_field_t* innodb_idx_fld;
+ const dict_field_t* innodb_idx_fld_end;
+
+ DBUG_ENTER("innobase_match_index_columns");
+
+ /* Check whether user defined index column count matches */
+ if (key_info->key_parts != index_info->n_user_defined_cols) {
+ DBUG_RETURN(FALSE);
+ }
+
+ key_part = key_info->key_part;
+ key_end = key_part + key_info->key_parts;
+ innodb_idx_fld = index_info->fields;
+ innodb_idx_fld_end = index_info->fields + index_info->n_fields;
+
+ /* Check each index column's datatype. We do not check
+ the column name, because there are cases where an index
+ column name was modified in MySQL but the change did not
+ propagate to InnoDB.
+ One hidden assumption here is that the index column sequences
+ match up between MySQL and InnoDB. */
+ for (; key_part != key_end; ++key_part) {
+ ulint col_type;
+ ibool is_unsigned;
+ ulint mtype = innodb_idx_fld->col->mtype;
+
+ /* Need to translate to InnoDB column type before
+ comparison. */
+ col_type = get_innobase_type_from_mysql_type(&is_unsigned,
+ key_part->field);
+
+ /* Ignore Innodb specific system columns. */
+ while (mtype == DATA_SYS) {
+ innodb_idx_fld++;
+
+ if (innodb_idx_fld >= innodb_idx_fld_end) {
+ DBUG_RETURN(FALSE);
+ }
+ }
+
+ if (col_type != mtype) {
+ /* Column Type mismatches */
+ DBUG_RETURN(FALSE);
+ }
+
+ innodb_idx_fld++;
+ }
+
+ DBUG_RETURN(TRUE);
+}
+
+/*******************************************************************//**
+This function builds a translation table in INNOBASE_SHARE
+structure for fast index location with mysql array number from its
+table->key_info structure. This also provides the necessary translation
+between the key order in mysql key_info and Innodb ib_table->indexes if
+they are not fully matched with each other.
+Note we do not have any mutex protecting the translation table
+building, based on the assumption that there is no concurrent
+index creation/drop and no DMLs that require index lookup. All table
+handles will be closed before the index creation/drop.
+@return TRUE if the index translation table was built successfully */
+static
+ibool
+innobase_build_index_translation(
+/*=============================*/
+ const TABLE* table, /*!< in: table in MySQL data
+ dictionary */
+ dict_table_t* ib_table, /*!< in: table in Innodb data
+ dictionary */
+ INNOBASE_SHARE* share) /*!< in/out: share structure
+ where index translation table
+ will be constructed in. */
+{
+ ulint mysql_num_index;
+ ulint ib_num_index;
+ dict_index_t** index_mapping;
+ ibool ret = TRUE;
+
+ DBUG_ENTER("innobase_build_index_translation");
+
+ mysql_num_index = table->s->keys;
+ ib_num_index = UT_LIST_GET_LEN(ib_table->indexes);
+
+ index_mapping = share->idx_trans_tbl.index_mapping;
+
+ /* If there is any inconsistency between the MySQL and InnoDB
+ dictionary (metadata) information, the number of indexes defined
+ in MySQL could exceed that in InnoDB; do not build the index
+ translation table in that case */
+ if (UNIV_UNLIKELY(ib_num_index < mysql_num_index)) {
+ ret = FALSE;
+ goto func_exit;
+ }
+
+ /* If index entry count is non-zero, nothing has
+ changed since last update, directly return TRUE */
+ if (share->idx_trans_tbl.index_count) {
+ /* Index entry count should still match mysql_num_index */
+ ut_a(share->idx_trans_tbl.index_count == mysql_num_index);
+ goto func_exit;
+ }
+
+ /* The number of indexes has increased; rebuild the mapping table */
+ if (mysql_num_index > share->idx_trans_tbl.array_size) {
+ index_mapping = (dict_index_t**) my_realloc(index_mapping,
+ mysql_num_index *
+ sizeof(*index_mapping),
+ MYF(MY_ALLOW_ZERO_PTR));
+
+ if (!index_mapping) {
+ ret = FALSE;
+ goto func_exit;
+ }
+
+ share->idx_trans_tbl.array_size = mysql_num_index;
+ }
+
+
+ /* For each index in the mysql key_info array, fetch its
+ corresponding InnoDB index pointer into index_mapping
+ array. */
+ for (ulint count = 0; count < mysql_num_index; count++) {
+
+ /* Fetch index pointers into index_mapping according to mysql
+ index sequence */
+ index_mapping[count] = dict_table_get_index_on_name(
+ ib_table, table->key_info[count].name);
+
+ if (!index_mapping[count]) {
+ sql_print_error("Cannot find index %s in InnoDB "
+ "index dictionary.",
+ table->key_info[count].name);
+ ret = FALSE;
+ goto func_exit;
+ }
+
+ /* Double check fetched index has the same
+ column info as those in mysql key_info. */
+ if (!innobase_match_index_columns(&table->key_info[count],
+ index_mapping[count])) {
+ sql_print_error("Found index %s whose column info "
+ "does not match that of MySQL.",
+ table->key_info[count].name);
+ ret = FALSE;
+ goto func_exit;
+ }
+ }
+
+ /* Successfully built the translation table */
+ share->idx_trans_tbl.index_count = mysql_num_index;
+
+func_exit:
+ if (!ret) {
+ /* Building the translation table failed. */
+ my_free(index_mapping, MYF(MY_ALLOW_ZERO_PTR));
+
+ share->idx_trans_tbl.array_size = 0;
+ share->idx_trans_tbl.index_count = 0;
+ index_mapping = NULL;
+ }
+
+ share->idx_trans_tbl.index_mapping = index_mapping;
+
+ DBUG_RETURN(ret);
+}
+
+/*******************************************************************//**
+This function uses index translation table to quickly locate the
+requested index structure.
+Note we do not have mutex protection for the index translation table
+access, it is based on the assumption that there is no concurrent
+translation table rebuild (after create/drop index) and DMLs that
+require index lookup.
+@return dict_index_t structure for the requested index, or NULL if
+the index structure cannot be located. */
+static
+dict_index_t*
+innobase_index_lookup(
+/*==================*/
+ INNOBASE_SHARE* share, /*!< in: share structure for index
+ translation table. */
+ uint keynr) /*!< in: index number for the requested
+ index */
+{
+ if (!share->idx_trans_tbl.index_mapping
+ || keynr >= share->idx_trans_tbl.index_count) {
+ return(NULL);
+ }
+
+ return(share->idx_trans_tbl.index_mapping[keynr]);
+}
+
+/************************************************************************
Set the autoinc column max value. This should only be called once from
-ha_innobase::open(). Therefore there's no need for a covering lock.
-@return DB_SUCCESS or error code */
+ha_innobase::open(). Therefore there's no need for a covering lock. */
UNIV_INTERN
-ulint
+void
ha_innobase::innobase_initialize_autoinc()
/*======================================*/
{
- dict_index_t* index;
ulonglong auto_inc;
- const char* col_name;
- ulint error;
+ const Field* field = table->found_next_number_field;
- col_name = table->found_next_number_field->field_name;
- index = innobase_get_index(table->s->next_number_index);
+ if (field != NULL) {
+ auto_inc = innobase_get_int_col_max_value(field);
+ } else {
+ /* We have no idea what's been passed in to us as the
+ autoinc column. We set it to 0, effectively disabling
+ updates to the table. */
+ auto_inc = 0;
- /* Execute SELECT MAX(col_name) FROM TABLE; */
- error = row_search_max_autoinc(index, col_name, &auto_inc);
+ ut_print_timestamp(stderr);
+ fprintf(stderr, " InnoDB: Unable to determine the AUTOINC "
+ "column name\n");
+ }
- switch (error) {
- case DB_SUCCESS:
+ if (srv_force_recovery >= SRV_FORCE_NO_IBUF_MERGE) {
+ /* If the recovery level is set so high that writes
+ are disabled we force the AUTOINC counter to 0
+ value effectively disabling writes to the table.
+ Secondly, we avoid reading the table in case the read
+ results in failure due to a corrupted table/index.
+
+ We will not return an error to the client, so that the
+ tables can be dumped with minimal hassle. If an error
+ were returned in this case, the first attempt to read
+ the table would fail and subsequent SELECTs would succeed. */
+ auto_inc = 0;
+ } else if (field == NULL) {
+ /* This is a far more serious error, best to avoid
+ opening the table and return failure. */
+ my_error(ER_AUTOINC_READ_FAILED, MYF(0));
+ } else {
+ dict_index_t* index;
+ const char* col_name;
+ ulonglong read_auto_inc;
+ ulint err;
- /* At the this stage we don't know the increment
- or the offset, so use default inrement of 1. */
- ++auto_inc;
- break;
+ update_thd(ha_thd());
- case DB_RECORD_NOT_FOUND:
- ut_print_timestamp(stderr);
- fprintf(stderr, " InnoDB: MySQL and InnoDB data "
- "dictionaries are out of sync.\n"
- "InnoDB: Unable to find the AUTOINC column %s in the "
- "InnoDB table %s.\n"
- "InnoDB: We set the next AUTOINC column value to the "
- "maximum possible value,\n"
- "InnoDB: in effect disabling the AUTOINC next value "
- "generation.\n"
- "InnoDB: You can either set the next AUTOINC value "
- "explicitly using ALTER TABLE\n"
- "InnoDB: or fix the data dictionary by recreating "
- "the table.\n",
- col_name, index->table->name);
-
- auto_inc = 0xFFFFFFFFFFFFFFFFULL;
- break;
+ ut_a(prebuilt->trx == thd_to_trx(user_thd));
- default:
- return(error);
+ col_name = field->field_name;
+ index = innobase_get_index(table->s->next_number_index);
+
+ /* Execute SELECT MAX(col_name) FROM TABLE; */
+ err = row_search_max_autoinc(index, col_name, &read_auto_inc);
+
+ switch (err) {
+ case DB_SUCCESS:
+ /* At this stage we do not know the increment
+ or the offset, so use a default increment of 1. */
+ auto_inc = read_auto_inc + 1;
+ break;
+
+ case DB_RECORD_NOT_FOUND:
+ ut_print_timestamp(stderr);
+ fprintf(stderr, " InnoDB: MySQL and InnoDB data "
+ "dictionaries are out of sync.\n"
+ "InnoDB: Unable to find the AUTOINC column "
+ "%s in the InnoDB table %s.\n"
+ "InnoDB: We set the next AUTOINC column "
+ "value to 0,\n"
+ "InnoDB: in effect disabling the AUTOINC "
+ "next value generation.\n"
+ "InnoDB: You can either set the next "
+ "AUTOINC value explicitly using ALTER TABLE\n"
+ "InnoDB: or fix the data dictionary by "
+ "recreating the table.\n",
+ col_name, index->table->name);
+
+ /* This will disable the AUTOINC generation. */
+ auto_inc = 0;
+
+ /* We want the open to succeed, so that the user can
+ take corrective action. ie. reads should succeed but
+ updates should fail. */
+ err = DB_SUCCESS;
+ break;
+ default:
+ /* row_search_max_autoinc() should only return
+ one of DB_SUCCESS or DB_RECORD_NOT_FOUND. */
+ ut_error;
+ }
}
dict_table_autoinc_initialize(prebuilt->table, auto_inc);
-
- return(DB_SUCCESS);
}
/*****************************************************************//**
@@ -3493,6 +3798,11 @@ retry:
primary_key = table->s->primary_key;
key_used_on_scan = primary_key;
+ if (!innobase_build_index_translation(table, ib_table, share)) {
+ sql_print_error("Build InnoDB index translation table for"
+ " Table %s failed", name);
+ }
+
/* Allocate a buffer for a 'row reference'. A row reference is
a string of bytes of length ref_length which uniquely specifies
a row in our table. Note that MySQL may also compare two row
@@ -3500,31 +3810,86 @@ retry:
of length ref_length! */
if (!row_table_got_default_clust_index(ib_table)) {
- if (primary_key >= MAX_KEY) {
- sql_print_error("Table %s has a primary key in InnoDB data "
- "dictionary, but not in MySQL!", name);
- }
prebuilt->clust_index_was_generated = FALSE;
- /* MySQL allocates the buffer for ref. key_info->key_length
- includes space for all key columns + one byte for each column
- that may be NULL. ref_length must be as exact as possible to
- save space, because all row reference buffers are allocated
- based on ref_length. */
+ if (UNIV_UNLIKELY(primary_key >= MAX_KEY)) {
+ sql_print_error("Table %s has a primary key in "
+ "InnoDB data dictionary, but not "
+ "in MySQL!", name);
- ref_length = table->key_info[primary_key].key_length;
+ /* This mismatch could cause further problems
+ if not attended to; bring this to the user's attention
+ by printing a warning in addition to logging a message
+ in the error log */
+ push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_WARN,
+ ER_NO_SUCH_INDEX,
+ "InnoDB: Table %s has a "
+ "primary key in InnoDB data "
+ "dictionary, but not in "
+ "MySQL!", name);
+
+ /* If primary_key >= MAX_KEY, its value could be out
+ of bounds if we continue to index into the
+ key_info[] array. Find the InnoDB primary index,
+ and assign its key_length to ref_length.
+ In addition, since MySQL indexes are sorted starting
+ with the primary index, unique indexes etc., initialize
+ ref_length to the first index key length in
+ case we fail to find the InnoDB clustered index.
+
+ Please note, this will not resolve the primary
+ index mismatch problem; other side effects are
+ possible if users continue to use the table.
+ However, we allow this table to be opened so
+ that the user can take the necessary measures for
+ the mismatch while the table data remains
+ accessible. */
+ ref_length = table->key_info[0].key_length;
+
+ /* Find the corresponding clustered index
+ key length in MySQL's key_info[] array */
+ for (ulint i = 0; i < table->s->keys; i++) {
+ dict_index_t* index;
+ index = innobase_get_index(i);
+ if (dict_index_is_clust(index)) {
+ ref_length =
+ table->key_info[i].key_length;
+ }
+ }
+ } else {
+ /* MySQL allocates the buffer for ref.
+ key_info->key_length includes space for all key
+ columns + one byte for each column that may be
+ NULL. ref_length must be as exact as possible to
+ save space, because all row reference buffers are
+ allocated based on ref_length. */
+
+ ref_length = table->key_info[primary_key].key_length;
+ }
} else {
if (primary_key != MAX_KEY) {
- sql_print_error("Table %s has no primary key in InnoDB data "
- "dictionary, but has one in MySQL! If you "
- "created the table with a MySQL version < "
- "3.23.54 and did not define a primary key, "
- "but defined a unique key with all non-NULL "
- "columns, then MySQL internally treats that "
- "key as the primary key. You can fix this "
- "error by dump + DROP + CREATE + reimport "
- "of the table.", name);
+ sql_print_error(
+ "Table %s has no primary key in InnoDB data "
+ "dictionary, but has one in MySQL! If you "
+ "created the table with a MySQL version < "
+ "3.23.54 and did not define a primary key, "
+ "but defined a unique key with all non-NULL "
+ "columns, then MySQL internally treats that "
+ "key as the primary key. You can fix this "
+ "error by dump + DROP + CREATE + reimport "
+ "of the table.", name);
+
+ /* This mismatch could cause further problems
+ if not attended to; bring this to the user's attention
+ by printing a warning in addition to logging a message
+ in the error log */
+ push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_WARN,
+ ER_NO_SUCH_INDEX,
+ "InnoDB: Table %s has no "
+ "primary key in InnoDB data "
+ "dictionary, but has one in "
+ "MySQL!", name);
}
prebuilt->clust_index_was_generated = TRUE;
@@ -3566,8 +3931,6 @@ retry:
/* Only if the table has an AUTOINC column. */
if (prebuilt->table != NULL && table->found_next_number_field != NULL) {
- ulint error;
-
dict_table_autoinc_lock(prebuilt->table);
/* Since a table can already be "open" in InnoDB's internal
@@ -3576,8 +3939,7 @@ retry:
autoinc value from a previous MySQL open. */
if (dict_table_autoinc_read(prebuilt->table) == 0) {
- error = innobase_initialize_autoinc();
- ut_a(error == DB_SUCCESS);
+ innobase_initialize_autoinc();
}
dict_table_autoinc_unlock(prebuilt->table);
@@ -4394,67 +4756,6 @@ skip_field:
}
/********************************************************************//**
-Get the upper limit of the MySQL integral and floating-point type. */
-UNIV_INTERN
-ulonglong
-ha_innobase::innobase_get_int_col_max_value(
-/*========================================*/
- const Field* field)
-{
- ulonglong max_value = 0;
-
- switch(field->key_type()) {
- /* TINY */
- case HA_KEYTYPE_BINARY:
- max_value = 0xFFULL;
- break;
- case HA_KEYTYPE_INT8:
- max_value = 0x7FULL;
- break;
- /* SHORT */
- case HA_KEYTYPE_USHORT_INT:
- max_value = 0xFFFFULL;
- break;
- case HA_KEYTYPE_SHORT_INT:
- max_value = 0x7FFFULL;
- break;
- /* MEDIUM */
- case HA_KEYTYPE_UINT24:
- max_value = 0xFFFFFFULL;
- break;
- case HA_KEYTYPE_INT24:
- max_value = 0x7FFFFFULL;
- break;
- /* LONG */
- case HA_KEYTYPE_ULONG_INT:
- max_value = 0xFFFFFFFFULL;
- break;
- case HA_KEYTYPE_LONG_INT:
- max_value = 0x7FFFFFFFULL;
- break;
- /* BIG */
- case HA_KEYTYPE_ULONGLONG:
- max_value = 0xFFFFFFFFFFFFFFFFULL;
- break;
- case HA_KEYTYPE_LONGLONG:
- max_value = 0x7FFFFFFFFFFFFFFFULL;
- break;
- case HA_KEYTYPE_FLOAT:
- /* We use the maximum as per IEEE754-2008 standard, 2^24 */
- max_value = 0x1000000ULL;
- break;
- case HA_KEYTYPE_DOUBLE:
- /* We use the maximum as per IEEE754-2008 standard, 2^53 */
- max_value = 0x20000000000000ULL;
- break;
- default:
- ut_error;
- }
-
- return(max_value);
-}
-
-/********************************************************************//**
This special handling is really to overcome the limitations of MySQL's
binlogging. We need to eliminate the non-determinism that will arise in
INSERT ... SELECT type of statements, since MySQL binlog only stores the
@@ -4683,11 +4984,17 @@ no_commit:
prebuilt->autoinc_error = DB_SUCCESS;
if ((error = update_auto_increment())) {
-
/* We don't want to mask autoinc overflow errors. */
- if (prebuilt->autoinc_error != DB_SUCCESS) {
- error = (int) prebuilt->autoinc_error;
+ /* Handle the case where the AUTOINC sub-system
+ failed during initialization. */
+ if (prebuilt->autoinc_error == DB_UNSUPPORTED) {
+ error_result = ER_AUTOINC_READ_FAILED;
+ /* Set the error message to report too. */
+ my_error(ER_AUTOINC_READ_FAILED, MYF(0));
+ goto func_exit;
+ } else if (prebuilt->autoinc_error != DB_SUCCESS) {
+ error = (int) prebuilt->autoinc_error;
goto report_error;
}
@@ -5168,7 +5475,7 @@ ha_innobase::unlock_row(void)
case ROW_READ_WITH_LOCKS:
if (!srv_locks_unsafe_for_binlog
&& prebuilt->trx->isolation_level
- != TRX_ISO_READ_COMMITTED) {
+ > TRX_ISO_READ_COMMITTED) {
break;
}
/* fall through */
@@ -5207,7 +5514,7 @@ ha_innobase::try_semi_consistent_read(bool yes)
if (yes
&& (srv_locks_unsafe_for_binlog
- || prebuilt->trx->isolation_level == TRX_ISO_READ_COMMITTED)) {
+ || prebuilt->trx->isolation_level <= TRX_ISO_READ_COMMITTED)) {
prebuilt->row_read_type = ROW_READ_TRY_SEMI_CONSISTENT;
} else {
prebuilt->row_read_type = ROW_READ_WITH_LOCKS;
@@ -5510,14 +5817,30 @@ ha_innobase::innobase_get_index(
DBUG_ENTER("innobase_get_index");
ha_statistic_increment(&SSV::ha_read_key_count);
- ut_ad(user_thd == ha_thd());
- ut_a(prebuilt->trx == thd_to_trx(user_thd));
-
if (keynr != MAX_KEY && table->s->keys > 0) {
key = table->key_info + keynr;
- index = dict_table_get_index_on_name(prebuilt->table,
- key->name);
+ index = innobase_index_lookup(share, keynr);
+
+ if (index) {
+ ut_a(ut_strcmp(index->name, key->name) == 0);
+ } else {
+ /* Can't find index with keynr in the translation
+ table. Only print message if the index translation
+ table exists */
+ if (share->idx_trans_tbl.index_mapping) {
+ sql_print_error("InnoDB could not find "
+ "index %s key no %u for "
+ "table %s through its "
+ "index translation table",
+ key ? key->name : "NULL",
+ keynr,
+ prebuilt->table->name);
+ }
+
+ index = dict_table_get_index_on_name(prebuilt->table,
+ key->name);
+ }
} else {
index = dict_table_get_first_index(prebuilt->table);
}
@@ -5582,7 +5905,7 @@ ha_innobase::change_active_index(
dtuple_set_n_fields(prebuilt->search_tuple, prebuilt->index->n_fields);
dict_index_copy_types(prebuilt->search_tuple, prebuilt->index,
- prebuilt->index->n_fields);
+ prebuilt->index->n_fields);
/* MySQL changes the active index for a handle also during some
queries, for example SELECT MAX(a), SUM(a) first retrieves the MAX()
@@ -6096,9 +6419,11 @@ create_table_def(
if (error == DB_DUPLICATE_KEY) {
char buf[100];
- innobase_convert_identifier(buf, sizeof buf,
- table_name, strlen(table_name),
- trx->mysql_thd, TRUE);
+ char* buf_end = innobase_convert_identifier(
+ buf, sizeof buf - 1, table_name, strlen(table_name),
+ trx->mysql_thd, TRUE);
+
+ *buf_end = '\0';
my_error(ER_TABLE_EXISTS_ERROR, MYF(0), buf);
}
@@ -6700,6 +7025,10 @@ ha_innobase::create(
goto cleanup;
}
+ if (create_info->options & HA_LEX_CREATE_TMP_TABLE) {
+ flags |= DICT_TF2_TEMPORARY << DICT_TF2_SHIFT;
+ }
+
error = create_table_def(trx, form, norm_name,
create_info->options & HA_LEX_CREATE_TMP_TABLE ? name2 : NULL,
flags);
@@ -7229,10 +7558,15 @@ ha_innobase::records_in_range(
key = table->key_info + active_index;
- index = dict_table_get_index_on_name(prebuilt->table, key->name);
+ index = innobase_get_index(keynr);
- /* MySQL knows about this index and so we must be able to find it.*/
- ut_a(index);
+ /* There is a possibility of not being able to find the requested
+ index due to inconsistency between MySQL and InnoDB dictionary info.
+ The necessary message should have been printed in innobase_get_index() */
+ if (UNIV_UNLIKELY(!index)) {
+ n_rows = HA_POS_ERROR;
+ goto func_exit;
+ }
heap = mem_heap_create(2 * (key->key_parts * sizeof(dfield_t)
+ sizeof(dtuple_t)));
@@ -7277,6 +7611,7 @@ ha_innobase::records_in_range(
mem_heap_free(heap);
+func_exit:
my_free(key_val_buff2, MYF(0));
prebuilt->trx->op_info = (char*)"";
@@ -7428,6 +7763,7 @@ ha_innobase::info(
char path[FN_REFLEN];
os_file_stat_t stat_info;
+
DBUG_ENTER("info");
/* If we are forcing recovery at a high level, we will suppress
@@ -7590,13 +7926,29 @@ ha_innobase::info(
}
if (flag & HA_STATUS_CONST) {
- index = dict_table_get_first_index(ib_table);
+ /* Verify that the number of indexes in InnoDB and MySQL
+ matches up. If prebuilt->clust_index_was_generated
+ holds, InnoDB defines GEN_CLUST_INDEX internally */
+ ulint num_innodb_index = UT_LIST_GET_LEN(ib_table->indexes)
+ - prebuilt->clust_index_was_generated;
- if (prebuilt->clust_index_was_generated) {
- index = dict_table_get_next_index(index);
+ if (table->s->keys != num_innodb_index) {
+ sql_print_error("Table %s contains %lu "
+ "indexes inside InnoDB, which "
+ "is different from the number of "
+ "indexes %u defined in the MySQL ",
+ ib_table->name, num_innodb_index,
+ table->s->keys);
}
for (i = 0; i < table->s->keys; i++) {
+ /* We can get the index quickly through the internal
+ index mapping with the index translation table.
+ The identity of the index (matching the index name with
+ that of table->key_info[i]) is already verified in
+ innobase_get_index(). */
+ index = innobase_get_index(i);
+
if (index == NULL) {
sql_print_error("Table %s contains fewer "
"indexes inside InnoDB than "
@@ -7625,6 +7977,8 @@ ha_innobase::info(
break;
}
+ dict_index_stat_mutex_enter(index);
+
if (index->stat_n_diff_key_vals[j + 1] == 0) {
rec_per_key = stats.records;
@@ -7633,6 +7987,8 @@ ha_innobase::info(
index->stat_n_diff_key_vals[j + 1]);
}
+ dict_index_stat_mutex_exit(index);
+
/* Since MySQL seems to favor table scans
too much over index searches, we pretend
index selectivity is 2 times better than
@@ -7648,8 +8004,6 @@ ha_innobase::info(
rec_per_key >= ~(ulong) 0 ? ~(ulong) 0 :
(ulong) rec_per_key;
}
-
- index = dict_table_get_next_index(index);
}
}
@@ -7693,15 +8047,9 @@ ha_innobase::analyze(
return(HA_ADMIN_CORRUPT);
}
- /* Serialize ANALYZE TABLE inside InnoDB, see
- Bug#38996 Race condition in ANALYZE TABLE */
- pthread_mutex_lock(&analyze_mutex);
-
/* Simply call ::info() with all the flags */
info(HA_STATUS_TIME | HA_STATUS_CONST | HA_STATUS_VARIABLE);
- pthread_mutex_unlock(&analyze_mutex);
-
if (share->ib_table->is_corrupt) {
return(HA_ADMIN_CORRUPT);
}
@@ -7735,8 +8083,13 @@ ha_innobase::check(
HA_CHECK_OPT* check_opt) /*!< in: check options, currently
ignored */
{
- ulint ret;
+ dict_index_t* index;
+ ulint n_rows;
+ ulint n_rows_in_table = ULINT_UNDEFINED;
+ ibool is_ok = TRUE;
+ ulint old_isolation_level;
+ DBUG_ENTER("ha_innobase::check");
DBUG_ASSERT(thd == ha_thd());
ut_a(prebuilt->trx);
ut_a(prebuilt->trx->magic_n == TRX_MAGIC_N);
@@ -7749,21 +8102,144 @@ ha_innobase::check(
build_template(prebuilt, NULL, table, ROW_MYSQL_WHOLE_ROW);
}
- ret = row_check_table_for_mysql(prebuilt);
+ if (prebuilt->table->ibd_file_missing) {
+ sql_print_error("InnoDB: Error:\n"
+ "InnoDB: MySQL is trying to use a table handle"
+ " but the .ibd file for\n"
+ "InnoDB: table %s does not exist.\n"
+ "InnoDB: Have you deleted the .ibd file"
+ " from the database directory under\n"
+ "InnoDB: the MySQL datadir, or have you"
+ " used DISCARD TABLESPACE?\n"
+ "InnoDB: Please refer to\n"
+ "InnoDB: " REFMAN "innodb-troubleshooting.html\n"
+ "InnoDB: how you can resolve the problem.\n",
+ prebuilt->table->name);
+ DBUG_RETURN(HA_ADMIN_CORRUPT);
+ }
- if (ret != DB_INTERRUPTED && share->ib_table->is_corrupt) {
- return(HA_ADMIN_CORRUPT);
+ prebuilt->trx->op_info = "checking table";
+
+ old_isolation_level = prebuilt->trx->isolation_level;
+
+ /* We must run the index record counts at an isolation level
+ >= READ COMMITTED, because a dirty read can see a wrong number
+ of records in some index; to play it safe, we always use
+ REPEATABLE READ here */
+
+ prebuilt->trx->isolation_level = TRX_ISO_REPEATABLE_READ;
+
+ /* Enlarge the fatal lock wait timeout during CHECK TABLE. */
+ mutex_enter(&kernel_mutex);
+ srv_fatal_semaphore_wait_threshold += 7200; /* 2 hours */
+ mutex_exit(&kernel_mutex);
+
+ for (index = dict_table_get_first_index(prebuilt->table);
+ index != NULL;
+ index = dict_table_get_next_index(index)) {
+#if 0
+ fputs("Validating index ", stderr);
+ ut_print_name(stderr, trx, FALSE, index->name);
+ putc('\n', stderr);
+#endif
+
+ if (!btr_validate_index(index, prebuilt->trx)) {
+ is_ok = FALSE;
+ push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_WARN,
+ ER_NOT_KEYFILE,
+ "InnoDB: The B-tree of"
+ " index '%-.200s' is corrupted.",
+ index->name);
+ continue;
+ }
+
+ /* Instead of invoking change_active_index(), set up
+ a dummy template for non-locking reads, disabling
+ access to the clustered index. */
+ prebuilt->index = index;
+
+ prebuilt->index_usable = row_merge_is_index_usable(
+ prebuilt->trx, prebuilt->index);
+
+ if (UNIV_UNLIKELY(!prebuilt->index_usable)) {
+ push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_WARN,
+ HA_ERR_TABLE_DEF_CHANGED,
+ "InnoDB: Insufficient history for"
+ " index '%-.200s'",
+ index->name);
+ continue;
+ }
+
+ prebuilt->sql_stat_start = TRUE;
+ prebuilt->template_type = ROW_MYSQL_DUMMY_TEMPLATE;
+ prebuilt->n_template = 0;
+ prebuilt->need_to_access_clustered = FALSE;
+
+ dtuple_set_n_fields(prebuilt->search_tuple, 0);
+
+ prebuilt->select_lock_type = LOCK_NONE;
+
+ if (!row_check_index_for_mysql(prebuilt, index, &n_rows)) {
+ push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_WARN,
+ ER_NOT_KEYFILE,
+ "InnoDB: The B-tree of"
+ " index '%-.200s' is corrupted.",
+ index->name);
+ is_ok = FALSE;
+ }
+
+ if (thd_killed(user_thd)) {
+ break;
+ }
+
+#if 0
+ fprintf(stderr, "%lu entries in index %s\n", n_rows,
+ index->name);
+#endif
+
+ if (index == dict_table_get_first_index(prebuilt->table)) {
+ n_rows_in_table = n_rows;
+ } else if (n_rows != n_rows_in_table) {
+ push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_WARN,
+ ER_NOT_KEYFILE,
+ "InnoDB: Index '%-.200s'"
+ " contains %lu entries,"
+ " should be %lu.",
+ index->name,
+ (ulong) n_rows,
+ (ulong) n_rows_in_table);
+ is_ok = FALSE;
+ }
}
- switch (ret) {
- case DB_SUCCESS:
- return(HA_ADMIN_OK);
- case DB_INTERRUPTED:
+ /* Restore the original isolation level */
+ prebuilt->trx->isolation_level = old_isolation_level;
+
+ /* We also validate the whole adaptive hash index for all tables
+ at every CHECK TABLE */
+
+ if (!btr_search_validate()) {
+ push_warning(thd, MYSQL_ERROR::WARN_LEVEL_WARN,
+ ER_NOT_KEYFILE,
+ "InnoDB: The adaptive hash index is corrupted.");
+ is_ok = FALSE;
+ }
+
+ /* Restore the fatal lock wait timeout after CHECK TABLE. */
+ mutex_enter(&kernel_mutex);
+ srv_fatal_semaphore_wait_threshold -= 7200; /* 2 hours */
+ mutex_exit(&kernel_mutex);
+
+ prebuilt->trx->op_info = "";
+ if (thd_killed(user_thd)) {
my_error(ER_QUERY_INTERRUPTED, MYF(0));
- return(-1);
- default:
+ }
+
+ if (share->ib_table->is_corrupt) {
return(HA_ADMIN_CORRUPT);
}
+
+ DBUG_RETURN(is_ok ? HA_ADMIN_OK : HA_ADMIN_CORRUPT);
}
/*************************************************************//**
@@ -8427,7 +8903,9 @@ ha_innobase::external_lock(
if (trx->n_mysql_tables_in_use == 0) {
#ifdef EXTENDED_SLOWLOG
- increment_thd_innodb_stats(thd, trx->io_reads,
+ increment_thd_innodb_stats(thd,
+ (unsigned long long) ut_conv_dulint_to_longlong(trx->id),
+ trx->io_reads,
trx->io_read,
trx->io_reads_wait_timer,
trx->lock_que_wait_timer,
@@ -8607,8 +9085,8 @@ innodb_show_status(
mutex_enter(&srv_monitor_file_mutex);
rewind(srv_monitor_file);
- srv_printf_innodb_monitor(srv_monitor_file,
- &trx_list_start, &trx_list_end);
+ srv_printf_innodb_monitor(srv_monitor_file, FALSE,
+ &trx_list_start, &trx_list_end);
flen = ftell(srv_monitor_file);
os_file_set_eof(srv_monitor_file);
@@ -8665,19 +9143,25 @@ innodb_show_status(
}
/************************************************************************//**
-Implements the SHOW MUTEX STATUS command. . */
+Implements the SHOW MUTEX STATUS command.
+@return TRUE on failure, FALSE on success. */
static
bool
innodb_mutex_show_status(
/*=====================*/
- handlerton* hton, /*!< in: the innodb handlerton */
+ handlerton* hton, /*!< in: the innodb handlerton */
THD* thd, /*!< in: the MySQL query thread of the
caller */
- stat_print_fn* stat_print)
+ stat_print_fn* stat_print) /*!< in: function for printing
+ statistics */
{
char buf1[IO_SIZE], buf2[IO_SIZE];
mutex_t* mutex;
rw_lock_t* lock;
+ ulint block_mutex_oswait_count = 0;
+ ulint block_lock_oswait_count = 0;
+ mutex_t* block_mutex = NULL;
+ rw_lock_t* block_lock = NULL;
#ifdef UNIV_DEBUG
ulint rw_lock_count= 0;
ulint rw_lock_count_spin_loop= 0;
@@ -8692,12 +9176,16 @@ innodb_mutex_show_status(
mutex_enter(&mutex_list_mutex);
- mutex = UT_LIST_GET_FIRST(mutex_list);
+ for (mutex = UT_LIST_GET_FIRST(mutex_list); mutex != NULL;
+ mutex = UT_LIST_GET_NEXT(list, mutex)) {
+ if (mutex->count_os_wait == 0) {
+ continue;
+ }
- while (mutex != NULL) {
- if (mutex->count_os_wait == 0
- || buf_pool_is_block_mutex(mutex)) {
- goto next_mutex;
+ if (buf_pool_is_block_mutex(mutex)) {
+ block_mutex = mutex;
+ block_mutex_oswait_count += mutex->count_os_wait;
+ continue;
}
#ifdef UNIV_DEBUG
if (mutex->mutex_type != 1) {
@@ -8724,8 +9212,7 @@ innodb_mutex_show_status(
DBUG_RETURN(1);
}
}
- }
- else {
+ } else {
rw_lock_count += mutex->count_using;
rw_lock_count_spin_loop += mutex->count_spin_loop;
rw_lock_count_spin_rounds += mutex->count_spin_rounds;
@@ -8737,7 +9224,7 @@ innodb_mutex_show_status(
buf1len= (uint) my_snprintf(buf1, sizeof(buf1), "%s",
mutex->cmutex_name);
buf2len= (uint) my_snprintf(buf2, sizeof(buf2), "os_waits=%lu",
- mutex->count_os_wait);
+ (ulong) mutex->count_os_wait);
if (stat_print(thd, innobase_hton_name,
hton_name_len, buf1, buf1len,
@@ -8746,45 +9233,81 @@ innodb_mutex_show_status(
DBUG_RETURN(1);
}
#endif /* UNIV_DEBUG */
+ }
+
+ if (block_mutex) {
+ buf1len = (uint) my_snprintf(buf1, sizeof buf1,
+ "combined %s",
+ block_mutex->cmutex_name);
+ buf2len = (uint) my_snprintf(buf2, sizeof buf2,
+ "os_waits=%lu",
+ (ulong) block_mutex_oswait_count);
-next_mutex:
- mutex = UT_LIST_GET_NEXT(list, mutex);
+ if (stat_print(thd, innobase_hton_name,
+ hton_name_len, buf1, buf1len,
+ buf2, buf2len)) {
+ mutex_exit(&mutex_list_mutex);
+ DBUG_RETURN(1);
+ }
}
mutex_exit(&mutex_list_mutex);
mutex_enter(&rw_lock_list_mutex);
- lock = UT_LIST_GET_FIRST(rw_lock_list);
-
- while (lock != NULL) {
- if (lock->count_os_wait
- && !buf_pool_is_block_lock(lock)) {
- buf1len= my_snprintf(buf1, sizeof(buf1), "%s",
- lock->lock_name);
- buf2len= my_snprintf(buf2, sizeof(buf2),
- "os_waits=%lu", lock->count_os_wait);
-
- if (stat_print(thd, innobase_hton_name,
- hton_name_len, buf1, buf1len,
- buf2, buf2len)) {
- mutex_exit(&rw_lock_list_mutex);
- DBUG_RETURN(1);
- }
+ for (lock = UT_LIST_GET_FIRST(rw_lock_list); lock != NULL;
+ lock = UT_LIST_GET_NEXT(list, lock)) {
+ if (lock->count_os_wait == 0) {
+ continue;
+ }
+
+ if (buf_pool_is_block_lock(lock)) {
+ block_lock = lock;
+ block_lock_oswait_count += lock->count_os_wait;
+ continue;
+ }
+
+ buf1len = my_snprintf(buf1, sizeof buf1, "%s",
+ lock->lock_name);
+ buf2len = my_snprintf(buf2, sizeof buf2, "os_waits=%lu",
+ (ulong) lock->count_os_wait);
+
+ if (stat_print(thd, innobase_hton_name,
+ hton_name_len, buf1, buf1len,
+ buf2, buf2len)) {
+ mutex_exit(&rw_lock_list_mutex);
+ DBUG_RETURN(1);
+ }
+ }
+
+ if (block_lock) {
+ buf1len = (uint) my_snprintf(buf1, sizeof buf1,
+ "combined %s",
+ block_lock->lock_name);
+ buf2len = (uint) my_snprintf(buf2, sizeof buf2,
+ "os_waits=%lu",
+ (ulong) block_lock_oswait_count);
+
+ if (stat_print(thd, innobase_hton_name,
+ hton_name_len, buf1, buf1len,
+ buf2, buf2len)) {
+ mutex_exit(&rw_lock_list_mutex);
+ DBUG_RETURN(1);
}
- lock = UT_LIST_GET_NEXT(list, lock);
}
mutex_exit(&rw_lock_list_mutex);
#ifdef UNIV_DEBUG
- buf2len= my_snprintf(buf2, sizeof(buf2),
- "count=%lu, spin_waits=%lu, spin_rounds=%lu, "
- "os_waits=%lu, os_yields=%lu, os_wait_times=%lu",
- rw_lock_count, rw_lock_count_spin_loop,
- rw_lock_count_spin_rounds,
- rw_lock_count_os_wait, rw_lock_count_os_yield,
- (ulong) (rw_lock_wait_time/1000));
+ buf2len = my_snprintf(buf2, sizeof buf2,
+ "count=%lu, spin_waits=%lu, spin_rounds=%lu, "
+ "os_waits=%lu, os_yields=%lu, os_wait_times=%lu",
+ (ulong) rw_lock_count,
+ (ulong) rw_lock_count_spin_loop,
+ (ulong) rw_lock_count_spin_rounds,
+ (ulong) rw_lock_count_os_wait,
+ (ulong) rw_lock_count_os_yield,
+ (ulong) (rw_lock_wait_time / 1000));
if (stat_print(thd, innobase_hton_name, hton_name_len,
STRING_WITH_LEN("rw_lock_mutexes"), buf2, buf2len)) {
@@ -8846,6 +9369,11 @@ static INNOBASE_SHARE* get_share(const char* table_name)
innobase_open_tables, fold, share);
thr_lock_init(&share->lock);
+
+ /* Index translation table initialization */
+ share->idx_trans_tbl.index_mapping = NULL;
+ share->idx_trans_tbl.index_count = 0;
+ share->idx_trans_tbl.array_size = 0;
}
share->use_count++;
@@ -8876,6 +9404,11 @@ static void free_share(INNOBASE_SHARE* share)
HASH_DELETE(INNOBASE_SHARE, table_name_hash,
innobase_open_tables, fold, share);
thr_lock_delete(&share->lock);
+
+ /* Free any memory from index translation table */
+ my_free(share->idx_trans_tbl.index_mapping,
+ MYF(MY_ALLOW_ZERO_PTR));
+
my_free(share, MYF(0));
/* TODO: invoke HASH_MIGRATE if innobase_open_tables
@@ -8978,7 +9511,7 @@ ha_innobase::store_lock(
isolation_level = trx->isolation_level;
if ((srv_locks_unsafe_for_binlog
- || isolation_level == TRX_ISO_READ_COMMITTED)
+ || isolation_level <= TRX_ISO_READ_COMMITTED)
&& isolation_level != TRX_ISO_SERIALIZABLE
&& (lock_type == TL_READ || lock_type == TL_READ_NO_INSERT)
&& (sql_command == SQLCOM_INSERT_SELECT
@@ -9110,7 +9643,10 @@ ha_innobase::innobase_get_autoinc(
*value = dict_table_autoinc_read(prebuilt->table);
/* It should have been initialized during open. */
- ut_a(*value != 0);
+ if (*value == 0) {
+ prebuilt->autoinc_error = DB_UNSUPPORTED;
+ dict_table_autoinc_unlock(prebuilt->table);
+ }
}
return(prebuilt->autoinc_error);
@@ -9190,6 +9726,11 @@ ha_innobase::get_auto_increment(
invoking this method. So we are not sure if it's guaranteed to
be 0 or not. */
+ /* We need the upper limit of the col type to check
+ whether we update the table autoinc counter or not. */
+ ulonglong col_max_value = innobase_get_int_col_max_value(
+ table->next_number_field);
+
/* Called for the first time ? */
if (trx->n_autoinc_rows == 0) {
@@ -9206,6 +9747,11 @@ ha_innobase::get_auto_increment(
/* Not in the middle of a mult-row INSERT. */
} else if (prebuilt->autoinc_last_value == 0) {
set_if_bigger(*first_value, autoinc);
+ /* Check for negative values. */
+ } else if (*first_value > col_max_value && trx->n_autoinc_rows > 0) {
+ /* Set to next logical value. */
+ ut_a(autoinc > trx->n_autoinc_rows);
+ *first_value = (autoinc - trx->n_autoinc_rows) - 1;
}
*nb_reserved_values = trx->n_autoinc_rows;
@@ -9216,12 +9762,6 @@ ha_innobase::get_auto_increment(
ulonglong need;
ulonglong current;
ulonglong next_value;
- ulonglong col_max_value;
-
- /* We need the upper limit of the col type to check for
- whether we update the table autoinc counter or not. */
- col_max_value = innobase_get_int_col_max_value(
- table->next_number_field);
current = *first_value > col_max_value ? autoinc : *first_value;
need = *nb_reserved_values * increment;
@@ -9722,33 +10262,60 @@ innobase_set_cursor_view(
(cursor_view_t*) curview);
}
+/*******************************************************************//**
+If col_name is not NULL, check whether the named column is being
+renamed in the table. If col_name is not provided, check
+whether any one of the columns in the table is being renamed.
+@return true if the column is being renamed */
+static
+bool
+check_column_being_renamed(
+/*=======================*/
+ const TABLE* table, /*!< in: MySQL table */
+ const char* col_name) /*!< in: name of the column */
+{
+ uint k;
+ Field* field;
+
+ for (k = 0; k < table->s->fields; k++) {
+ field = table->field[k];
-/***********************************************************************
-Check whether any of the given columns is being renamed in the table. */
+ if (field->flags & FIELD_IS_RENAMED) {
+
+ /* If col_name is not provided, return
+ if the field is marked as being renamed. */
+ if (!col_name) {
+ return(true);
+ }
+
+ /* If col_name is provided, return only
+ if names match */
+ if (innobase_strcasecmp(field->field_name,
+ col_name) == 0) {
+ return(true);
+ }
+ }
+ }
+
+ return(false);
+}
+
+/*******************************************************************//**
+Check whether any of the given columns is being renamed in the table.
+@return true if any of col_names is being renamed in table */
static
bool
column_is_being_renamed(
/*====================*/
- /* out: true if any of col_names is
- being renamed in table */
- TABLE* table, /* in: MySQL table */
- uint n_cols, /* in: number of columns */
- const char** col_names) /* in: names of the columns */
+ TABLE* table, /*!< in: MySQL table */
+ uint n_cols, /*!< in: number of columns */
+ const char** col_names) /*!< in: names of the columns */
{
uint j;
- uint k;
- Field* field;
- const char* col_name;
for (j = 0; j < n_cols; j++) {
- col_name = col_names[j];
- for (k = 0; k < table->s->fields; k++) {
- field = table->field[k];
- if ((field->flags & FIELD_IS_RENAMED)
- && innobase_strcasecmp(field->field_name,
- col_name) == 0) {
- return(true);
- }
+ if (check_column_being_renamed(table, col_names[j])) {
+ return(true);
}
}
@@ -9832,18 +10399,13 @@ ha_innobase::check_if_incompatible_data(
return(COMPATIBLE_DATA_NO);
}
- /* Renaming column asynchronizes dictionary between mysqld and InnoDB...
- If not synchronized, treat as COMPATIBLE_DATA_NO
- until the bug http://bugs.mysql.com/47621 is fixed officialily */
- {
- uint i;
- for (i = 0; i < table->s->fields; i++) {
- if (table->field[i]->flags & FIELD_IN_ADD_INDEX
- && innobase_strcasecmp(table->field[i]->field_name,
- dict_table_get_col_name(prebuilt->table, i))) {
- return(COMPATIBLE_DATA_NO);
- }
- }
+ /* For a column rename operation, MySQL does not supply enough
+ information (the new column name etc.) for InnoDB to make the
+ appropriate system metadata changes. To avoid system metadata
+ inconsistency, currently we can only request a table rebuild/copy
+ by returning COMPATIBLE_DATA_NO */
+ if (check_column_being_renamed(table, NULL)) {
+ return COMPATIBLE_DATA_NO;
}
/* Check if a column participating in a foreign key is being renamed.
@@ -10362,8 +10924,8 @@ static MYSQL_SYSVAR_BOOL(extra_undoslots, innobase_extra_undoslots,
static MYSQL_SYSVAR_BOOL(fast_recovery, innobase_fast_recovery,
PLUGIN_VAR_NOCMDARG | PLUGIN_VAR_READONLY,
- "Enable to use speed hack of recovery avoiding flush list sorting.",
- NULL, NULL, TRUE);
+ "obsolete option. affects nothing.",
+ NULL, NULL, FALSE);
static MYSQL_SYSVAR_BOOL(recovery_stats, innobase_recovery_stats,
PLUGIN_VAR_NOCMDARG | PLUGIN_VAR_READONLY,
@@ -10656,6 +11218,11 @@ static MYSQL_SYSVAR_STR(data_file_path, innobase_data_file_path,
"Path to individual files and their sizes.",
NULL, NULL, NULL);
+static MYSQL_SYSVAR_STR(doublewrite_file, innobase_doublewrite_file,
+ PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY,
+ "Path to special datafile for doublewrite buffer. (default is "": not used) ### ONLY FOR EXPERTS!!! ###",
+ NULL, NULL, NULL);
+
static MYSQL_SYSVAR_LONG(autoinc_lock_mode, innobase_autoinc_lock_mode,
PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY,
"The AUTOINC lock modes supported by InnoDB: "
@@ -10680,13 +11247,13 @@ static MYSQL_SYSVAR_BOOL(use_sys_malloc, srv_use_sys_malloc,
static MYSQL_SYSVAR_STR(change_buffering, innobase_change_buffering,
PLUGIN_VAR_RQCMDARG,
"Buffer changes to reduce random access: "
- "OFF, ON, inserting, deleting, changing, or purging.",
+ "OFF, ON, none, inserts.",
innodb_change_buffering_validate,
innodb_change_buffering_update, NULL);
static MYSQL_SYSVAR_ULONG(read_ahead_threshold, srv_read_ahead_threshold,
PLUGIN_VAR_RQCMDARG,
- "Number of pages that must be accessed sequentially for InnoDB to"
+ "Number of pages that must be accessed sequentially for InnoDB to "
"trigger a readahead.",
NULL, NULL, 56, 0, 64, 0);
@@ -10799,11 +11366,6 @@ static MYSQL_SYSVAR_ULONG(dict_size_limit, srv_dict_size_limit,
"Limit the allocated memory for dictionary cache. (0: unlimited)",
NULL, NULL, 0, 0, LONG_MAX, 0);
-static MYSQL_SYSVAR_ULONG(relax_table_creation, srv_relax_table_creation,
- PLUGIN_VAR_RQCMDARG,
- "Relax limitation of column size at table creation as builtin InnoDB.",
- NULL, NULL, 0, 0, 1, 0);
-
static MYSQL_SYSVAR_ULONG(pass_corrupt_table, srv_pass_corrupt_table,
PLUGIN_VAR_RQCMDARG,
"Pass corruptions of user tables as 'corrupt table' instead of not crashing itself, "
@@ -10822,6 +11384,7 @@ static struct st_mysql_sys_var* innobase_system_variables[]= {
MYSQL_SYSVAR(commit_concurrency),
MYSQL_SYSVAR(concurrency_tickets),
MYSQL_SYSVAR(data_file_path),
+ MYSQL_SYSVAR(doublewrite_file),
MYSQL_SYSVAR(data_home_dir),
MYSQL_SYSVAR(doublewrite),
MYSQL_SYSVAR(extra_undoslots),
@@ -10892,12 +11455,11 @@ static struct st_mysql_sys_var* innobase_system_variables[]= {
MYSQL_SYSVAR(read_ahead_threshold),
MYSQL_SYSVAR(io_capacity),
MYSQL_SYSVAR(use_purge_thread),
- MYSQL_SYSVAR(relax_table_creation),
MYSQL_SYSVAR(pass_corrupt_table),
NULL
};
-mysql_declare_plugin(innobase)
+mysql_declare_plugin(innodb_plugin)
{
MYSQL_STORAGE_ENGINE_PLUGIN,
&innobase_storage_engine,
diff --git a/handler/ha_innodb.h b/handler/ha_innodb.h
index 52fc54fde8c..729abcbcd0a 100644
--- a/handler/ha_innodb.h
+++ b/handler/ha_innodb.h
@@ -1,6 +1,6 @@
/*****************************************************************************
-Copyright (c) 2000, 2009, MySQL AB & Innobase Oy. All Rights Reserved.
+Copyright (c) 2000, 2010, MySQL AB & Innobase Oy. All Rights Reserved.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -27,16 +27,32 @@ Place, Suite 330, Boston, MA 02111-1307 USA
#pragma interface /* gcc class implementation */
#endif
+/* This structure defines the translation table between MySQL
+indexes and InnoDB index structures */
+typedef struct innodb_idx_translate_struct {
+ ulint index_count; /*!< number of valid index entries
+ in the index_mapping array */
+ ulint array_size; /*!< array size of index_mapping */
+ dict_index_t** index_mapping; /*!< index pointer array that
+ directly maps a MySQL array
+ index to an InnoDB index */
+} innodb_idx_translate_t;
+
+
/** InnoDB table share */
typedef struct st_innobase_share {
- THR_LOCK lock; /*!< MySQL lock protecting
- this structure */
- const char* table_name; /*!< InnoDB table name */
- uint use_count; /*!< reference count,
- incremented in get_share()
- and decremented in free_share() */
- void* table_name_hash;/*!< hash table chain node */
- dict_table_t* ib_table;
+ THR_LOCK lock; /*!< MySQL lock protecting
+ this structure */
+ const char* table_name; /*!< InnoDB table name */
+ uint use_count; /*!< reference count,
+ incremented in get_share()
+ and decremented in
+ free_share() */
+ void* table_name_hash;/*!< hash table chain node */
+ innodb_idx_translate_t idx_trans_tbl; /*!< index translation
+ table between MySQL and
+ Innodb */
+ dict_table_t* ib_table;
} INNOBASE_SHARE;
@@ -92,9 +108,8 @@ class ha_innobase: public handler
ulint innobase_reset_autoinc(ulonglong auto_inc);
ulint innobase_get_autoinc(ulonglong* value);
ulint innobase_update_autoinc(ulonglong auto_inc);
- ulint innobase_initialize_autoinc();
+ void innobase_initialize_autoinc();
dict_index_t* innobase_get_index(uint keynr);
- ulonglong innobase_get_int_col_max_value(const Field* field);
/* Init values for the class: */
public:
diff --git a/handler/handler0alter.cc b/handler/handler0alter.cc
index a5008991400..e474c318c58 100644
--- a/handler/handler0alter.cc
+++ b/handler/handler0alter.cc
@@ -1,6 +1,6 @@
/*****************************************************************************
-Copyright (c) 2005, 2009, Innobase Oy. All Rights Reserved.
+Copyright (c) 2005, 2010, Innobase Oy. All Rights Reserved.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -229,9 +229,11 @@ static
int
innobase_check_index_keys(
/*======================*/
- const KEY* key_info, /*!< in: Indexes to be created */
- ulint num_of_keys) /*!< in: Number of indexes to
- be created */
+ const KEY* key_info, /*!< in: Indexes to be
+ created */
+ ulint num_of_keys, /*!< in: Number of
+ indexes to be created */
+ const dict_table_t* table) /*!< in: Existing indexes */
{
ulint key_num;
@@ -248,9 +250,22 @@ innobase_check_index_keys(
const KEY& key2 = key_info[i];
if (0 == strcmp(key.name, key2.name)) {
- sql_print_error("InnoDB: key name `%s` appears"
- " twice in CREATE INDEX\n",
- key.name);
+ my_error(ER_WRONG_NAME_FOR_INDEX, MYF(0),
+ key.name);
+
+ return(ER_WRONG_NAME_FOR_INDEX);
+ }
+ }
+
+ /* Check that the same index name does not already exist. */
+
+ for (const dict_index_t* index
+ = dict_table_get_first_index(table);
+ index; index = dict_table_get_next_index(index)) {
+
+ if (0 == strcmp(key.name, index->name)) {
+ my_error(ER_WRONG_NAME_FOR_INDEX, MYF(0),
+ key.name);
return(ER_WRONG_NAME_FOR_INDEX);
}
@@ -258,7 +273,7 @@ innobase_check_index_keys(
/* Check that MySQL does not try to create a column
prefix index field on an inappropriate data type and
- that the same colum does not appear twice in the index. */
+ that the same column does not appear twice in the index. */
for (ulint i = 0; i < key.key_parts; i++) {
const KEY_PART_INFO& key_part1
@@ -289,14 +304,8 @@ innobase_check_index_keys(
}
}
- sql_print_error("InnoDB: MySQL is trying to"
- " create a column prefix"
- " index field on an"
- " inappropriate data type."
- " column `%s`,"
- " index `%s`.\n",
- field->field_name,
- key.name);
+ my_error(ER_WRONG_KEY_COLUMN, MYF(0),
+ field->field_name);
return(ER_WRONG_KEY_COLUMN);
}
@@ -309,11 +318,8 @@ innobase_check_index_keys(
continue;
}
- sql_print_error("InnoDB: column `%s`"
- " is not allowed to occur"
- " twice in index `%s`.\n",
- key_part1.field->field_name,
- key.name);
+ my_error(ER_WRONG_KEY_COLUMN, MYF(0),
+ key_part1.field->field_name);
return(ER_WRONG_KEY_COLUMN);
}
}
@@ -522,12 +528,14 @@ innobase_create_key_def(
key_info->name, "PRIMARY");
/* If there is a UNIQUE INDEX consisting entirely of NOT NULL
- columns, MySQL will treat it as a PRIMARY KEY unless the
- table already has one. */
+ columns and if the index does not contain column prefix(es)
+ (only prefix/part of the column is indexed), MySQL will treat the
+ index as a PRIMARY KEY unless the table already has one. */
if (!new_primary && (key_info->flags & HA_NOSAME)
+ && (!(key_info->flags & HA_KEY_HAS_PART_KEY_SEG))
&& row_table_got_default_clust_index(table)) {
- uint key_part = key_info->key_parts;
+ uint key_part = key_info->key_parts;
new_primary = TRUE;
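
The new HA_KEY_HAS_PART_KEY_SEG test narrows the promotion rule: a UNIQUE key is only considered as a replacement for the hidden clustered index when none of its parts is a column prefix (the NOT NULL requirement on every part is enforced separately, after this condition). A sketch of the eligibility test pulled out as a predicate (the helper name is an illustration, not patch code):

	/* Sketch: may this UNIQUE key become the table's clustered index?
	Mirrors the condition in innobase_create_key_def(); nullability of
	the individual key parts is checked elsewhere. */
	static
	ibool
	key_eligible_as_primary(
		const KEY*		key_info,	/* in: candidate key */
		const dict_table_t*	table)		/* in: InnoDB table */
	{
		return((key_info->flags & HA_NOSAME)	/* declared UNIQUE */
		       && !(key_info->flags & HA_KEY_HAS_PART_KEY_SEG)
							/* no prefix parts */
		       && row_table_got_default_clust_index(table));
							/* still on the hidden
							GEN_CLUST_INDEX */
	}
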
@@ -656,12 +664,18 @@ ha_innobase::add_index(
innodb_table = indexed_table
= dict_table_get(prebuilt->table->name, FALSE);
+ if (UNIV_UNLIKELY(!innodb_table)) {
+ error = HA_ERR_NO_SUCH_TABLE;
+ goto err_exit;
+ }
+
/* Check if the index name is reserved. */
if (innobase_index_name_is_reserved(trx, key_info, num_of_keys)) {
error = -1;
} else {
/* Check that index keys are sensible */
- error = innobase_check_index_keys(key_info, num_of_keys);
+ error = innobase_check_index_keys(key_info, num_of_keys,
+ innodb_table);
}
if (UNIV_UNLIKELY(error)) {
@@ -708,6 +722,8 @@ err_exit:
row_mysql_lock_data_dictionary(trx);
dict_locked = TRUE;
+ ut_d(dict_table_check_for_dup_indexes(innodb_table, FALSE));
+
/* If a new primary key is defined for the table we need
to drop the original table and rebuild all indexes. */
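
All of the dict_table_check_for_dup_indexes() calls added in this patch are wrapped in ut_d(), so they cost nothing outside debug builds. For reference, ut_d() from ut0dbg.h amounts to roughly the following (a paraphrase, not the verbatim header):

	#ifdef UNIV_DEBUG
	# define ut_d(EXPR)	do { EXPR; } while (0)	/* runs in debug builds */
	#else
	# define ut_d(EXPR)				/* compiled out otherwise */
	#endif
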
@@ -740,6 +756,8 @@ err_exit:
user_thd);
}
+ ut_d(dict_table_check_for_dup_indexes(innodb_table,
+ FALSE));
row_mysql_unlock_data_dictionary(trx);
goto err_exit;
}
@@ -764,6 +782,10 @@ err_exit:
ut_ad(error == DB_SUCCESS);
+	/* We will need to rebuild the index translation table. Set the
+	valid index entry count in the translation table to zero */
+ share->idx_trans_tbl.index_count = 0;
+
/* Commit the data dictionary transaction in order to release
the table locks on the system tables. This means that if
MySQL crashes while creating a new primary key inside
@@ -799,18 +821,6 @@ err_exit:
index, num_of_idx, table);
error_handling:
-#ifdef UNIV_DEBUG
- /* TODO: At the moment we can't handle the following statement
- in our debugging code below:
-
- alter table t drop index b, add index (b);
-
- The fix will have to parse the SQL and note that the index
- being added has the same name as the one being dropped and
- ignore that in the dup index check.*/
- //dict_table_check_for_dup_indexes(prebuilt->table);
-#endif
-
/* After an error, remove all those index definitions from the
dictionary which were defined. */
@@ -822,6 +832,8 @@ error_handling:
row_mysql_lock_data_dictionary(trx);
dict_locked = TRUE;
+ ut_d(dict_table_check_for_dup_indexes(prebuilt->table, TRUE));
+
if (!new_primary) {
error = row_merge_rename_indexes(trx, indexed_table);
@@ -909,6 +921,7 @@ convert_error:
}
if (dict_locked) {
+ ut_d(dict_table_check_for_dup_indexes(innodb_table, FALSE));
row_mysql_unlock_data_dictionary(trx);
}
@@ -951,6 +964,7 @@ ha_innobase::prepare_drop_index(
/* Test and mark all the indexes to be dropped */
row_mysql_lock_data_dictionary(trx);
+ ut_d(dict_table_check_for_dup_indexes(prebuilt->table, FALSE));
/* Check that none of the indexes have previously been flagged
for deletion. */
@@ -1116,6 +1130,7 @@ func_exit:
} while (index);
}
+ ut_d(dict_table_check_for_dup_indexes(prebuilt->table, FALSE));
row_mysql_unlock_data_dictionary(trx);
DBUG_RETURN(err);
@@ -1162,6 +1177,7 @@ ha_innobase::final_drop_index(
prebuilt->table->flags, user_thd);
row_mysql_lock_data_dictionary(trx);
+ ut_d(dict_table_check_for_dup_indexes(prebuilt->table, FALSE));
if (UNIV_UNLIKELY(err)) {
@@ -1198,11 +1214,12 @@ ha_innobase::final_drop_index(
ut_a(!index->to_be_dropped);
}
-#ifdef UNIV_DEBUG
- dict_table_check_for_dup_indexes(prebuilt->table);
-#endif
+	/* We will need to rebuild the index translation table. Set the
+	valid index entry count in the translation table to zero */
+ share->idx_trans_tbl.index_count = 0;
func_exit:
+ ut_d(dict_table_check_for_dup_indexes(prebuilt->table, FALSE));
trx_commit_for_mysql(trx);
trx_commit_for_mysql(prebuilt->trx);
row_mysql_unlock_data_dictionary(trx);
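
Both add_index() and final_drop_index() invalidate the cached index translation table simply by setting share->idx_trans_tbl.index_count to zero; any later lookup sees that the count no longer matches the MySQL table definition and rebuilds the mapping, or falls back to a name-based dictionary lookup. A hypothetical lookup helper illustrating the pattern, assuming the member names sketched earlier:

	/* Hypothetical sketch -- the real lookup lives in ha_innodb.cc and may
	differ in detail. */
	static
	dict_index_t*
	share_lookup_index(
		INNOBASE_SHARE*	share,	/* in: share with the cached mapping */
		TABLE*		table,	/* in: MySQL table definition */
		uint		keynr)	/* in: MySQL key number */
	{
		if (share->idx_trans_tbl.index_count != table->s->keys
		    || keynr >= share->idx_trans_tbl.index_count) {

			/* Cache was invalidated (index_count == 0 after an
			ALTER) or is stale: the caller must rebuild it or use
			dict_table_get_index_on_name() instead. */
			return(NULL);
		}

		return(share->idx_trans_tbl.index_mapping[keynr]);
	}
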
diff --git a/handler/i_s.cc b/handler/i_s.cc
index cd00da93d77..3165ce663ca 100644
--- a/handler/i_s.cc
+++ b/handler/i_s.cc
@@ -2900,6 +2900,9 @@ i_s_innodb_index_stats_fill(
i_s_table->field[3]->store(index->n_uniq);
row_per_keys[0] = '\0';
+
+	/* For now this is still left as an optimistic (unlocked) read */
+ //dict_index_stat_mutex_enter(index);
if (index->stat_n_diff_key_vals) {
for (i = 1; i <= index->n_uniq; i++) {
ib_int64_t rec_per_key;
@@ -2913,6 +2916,8 @@ i_s_innodb_index_stats_fill(
strncat(row_per_keys, buff, 256 - strlen(row_per_keys));
}
}
+ //dict_index_stat_mutex_exit(index);
+
field_store_string(i_s_table->field[4], row_per_keys);
i_s_table->field[5]->store(index->stat_index_size);
@@ -3169,6 +3174,14 @@ UNIV_INTERN struct st_mysql_plugin i_s_innodb_admin_command =
static ST_FIELD_INFO i_s_innodb_sys_tables_info[] =
{
+ {STRUCT_FLD(field_name, "SCHEMA"),
+ STRUCT_FLD(field_length, NAME_LEN),
+ STRUCT_FLD(field_type, MYSQL_TYPE_STRING),
+ STRUCT_FLD(value, 0),
+ STRUCT_FLD(field_flags, 0),
+ STRUCT_FLD(old_name, ""),
+ STRUCT_FLD(open_method, SKIP_OPEN_TABLE)},
+
{STRUCT_FLD(field_name, "NAME"),
STRUCT_FLD(field_length, NAME_LEN),
STRUCT_FLD(field_type, MYSQL_TYPE_STRING),
@@ -3327,6 +3340,54 @@ copy_string_field(
static
int
+copy_name_fields(
+/*=============*/
+ TABLE* table,
+ int table_field_1,
+ const rec_t* rec,
+ int rec_field)
+{
+ int status;
+ const byte* data;
+ ulint len;
+
+ data = rec_get_nth_field_old(rec, rec_field, &len);
+ if (len == UNIV_SQL_NULL) {
+ table->field[table_field_1]->set_null();
+ table->field[table_field_1 + 1]->set_null();
+ status = 0; /* success */
+ } else {
+ char buf[NAME_LEN * 2 + 2];
+ char* ptr;
+
+ if (len > NAME_LEN * 2 + 1) {
+ table->field[table_field_1]->set_null();
+ status = field_store_string(table->field[table_field_1 + 1],
+ "###TOO LONG NAME###");
+ goto end_func;
+ }
+
+ strncpy(buf, (char*)data, len);
+ buf[len] = '\0';
+ ptr = strchr(buf, '/');
+ if (ptr) {
+ *ptr = '\0';
+ ++ptr;
+
+ status = field_store_string(table->field[table_field_1], buf);
+ status |= field_store_string(table->field[table_field_1 + 1], ptr);
+ } else {
+ table->field[table_field_1]->set_null();
+ status = field_store_string(table->field[table_field_1 + 1], buf);
+ }
+ }
+
+end_func:
+ return status;
+}
+
+static
+int
copy_int_field(
/*===========*/
TABLE* table,
@@ -3395,49 +3456,49 @@ copy_sys_tables_rec(
/* NAME */
field = dict_index_get_nth_col_pos(index, 0);
- status = copy_string_field(table, 0, rec, field);
+ status = copy_name_fields(table, 0, rec, field);
if (status) {
return status;
}
/* ID */
field = dict_index_get_nth_col_pos(index, 1);
- status = copy_id_field(table, 1, rec, field);
+ status = copy_id_field(table, 2, rec, field);
if (status) {
return status;
}
/* N_COLS */
field = dict_index_get_nth_col_pos(index, 2);
- status = copy_int_field(table, 2, rec, field);
+ status = copy_int_field(table, 3, rec, field);
if (status) {
return status;
}
/* TYPE */
field = dict_index_get_nth_col_pos(index, 3);
- status = copy_int_field(table, 3, rec, field);
+ status = copy_int_field(table, 4, rec, field);
if (status) {
return status;
}
/* MIX_ID */
field = dict_index_get_nth_col_pos(index, 4);
- status = copy_id_field(table, 4, rec, field);
+ status = copy_id_field(table, 5, rec, field);
if (status) {
return status;
}
/* MIX_LEN */
field = dict_index_get_nth_col_pos(index, 5);
- status = copy_int_field(table, 5, rec, field);
+ status = copy_int_field(table, 6, rec, field);
if (status) {
return status;
}
/* CLUSTER_NAME */
field = dict_index_get_nth_col_pos(index, 6);
- status = copy_string_field(table, 6, rec, field);
+ status = copy_string_field(table, 7, rec, field);
if (status) {
return status;
}
/* SPACE */
field = dict_index_get_nth_col_pos(index, 7);
- status = copy_int_field(table, 7, rec, field);
+ status = copy_int_field(table, 8, rec, field);
if (status) {
return status;
}
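
copy_name_fields() feeds the new SCHEMA column: InnoDB stores table names internally as 'dbname/tablename', so the function splits on the first '/', stores the two halves in consecutive fields, leaves SCHEMA NULL when there is no '/', and stores '###TOO LONG NAME###' when the value exceeds the buffer. Because SCHEMA was inserted at position 0, every other INNODB_SYS_TABLES column (NAME, ID, N_COLS, TYPE, MIX_ID, MIX_LEN, CLUSTER_NAME, SPACE) shifts one position to the right, which is all the renumbering in the hunk above amounts to. A small standalone sketch of the same splitting rule, useful for checking the behaviour outside the server (names here are mine, not server code):

	#include <cstdio>
	#include <cstring>

	/* Standalone illustration of the SCHEMA/NAME split done by
	copy_name_fields(); not server code. */
	static void
	split_innodb_name(const char* internal_name,
			  char* schema, char* name, size_t bufsize)
	{
		const char* slash = strchr(internal_name, '/');

		if (slash != NULL) {
			snprintf(schema, bufsize, "%.*s",
				 (int) (slash - internal_name), internal_name);
			snprintf(name, bufsize, "%s", slash + 1);
		} else {
			schema[0] = '\0';	/* SCHEMA column would be NULL */
			snprintf(name, bufsize, "%s", internal_name);
		}
	}

	int main()
	{
		char schema[512];
		char name[512];

		split_innodb_name("test/t1", schema, name, sizeof(name));
		printf("SCHEMA='%s' NAME='%s'\n", schema, name);   /* test, t1 */

		split_innodb_name("SYS_FOREIGN", schema, name, sizeof(name));
		printf("SCHEMA='%s' NAME='%s'\n", schema, name);   /* '', SYS_FOREIGN */

		return 0;
	}
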
diff --git a/handler/innodb_patch_info.h b/handler/innodb_patch_info.h
index 5fa50129a04..38b97411340 100644
--- a/handler/innodb_patch_info.h
+++ b/handler/innodb_patch_info.h
@@ -41,8 +41,8 @@ struct innodb_enhancement {
{"innodb_admin_command_base","XtraDB specific command interface through i_s","","http://www.percona.com/docs/wiki/percona-xtradb"},
{"innodb_show_lock_name","Show mutex/lock name instead of crated file/line","","http://www.percona.com/docs/wiki/percona-xtradb"},
{"innodb_extend_slow","Extended statistics in slow.log","It is InnoDB-part only. It needs to patch also to mysqld.","http://www.percona.com/docs/wiki/percona-xtradb"},
-{"innodb_relax_table_creation","Relax limitation of column size at table creation as builtin InnoDB.","","http://www.percona.com/docs/wiki/percona-xtradb"},
{"innodb_lru_dump_restore","Dump and restore command for content of buffer pool","","http://www.percona.com/docs/wiki/percona-xtradb"},
+{"innodb_separate_doublewrite","Add option 'innodb_doublewrite_file' to separate doublewrite dedicated tablespace","","http://www.percona.com/docs/wiki/percona-xtradb"},
{"innodb_pass_corrupt_table","Treat tables as corrupt instead of crash, when meet corrupt blocks","","http://www.percona.com/docs/wiki/percona-xtradb"},
{"innodb_fast_checksum","Using the checksum on 32bit-unit calculation","incompatible for unpatched ver.","http://www.percona.com/docs/wiki/percona-xtradb"},
{"innodb_files_extend","allow >4GB transaction log files, and can vary universal page size of datafiles","incompatible for unpatched ver.","http://www.percona.com/docs/wiki/percona-xtradb"},