Diffstat (limited to 'storage')
-rw-r--r-- storage/archive/azio.c | 2
-rw-r--r-- storage/archive/azlib.h | 3
-rw-r--r-- storage/blackhole/ha_blackhole.cc | 2
-rw-r--r-- storage/blackhole/ha_blackhole.h | 2
-rw-r--r-- storage/cassandra/ha_cassandra.cc | 42
-rw-r--r-- storage/connect/ha_connect.cc | 78
-rw-r--r-- storage/connect/ha_connect.h | 4
-rw-r--r-- storage/connect/jsonudf.cpp | 9
-rw-r--r-- storage/connect/mysql-test/connect/r/infoschema-9739.result | 2
-rw-r--r-- storage/connect/mysql-test/connect/r/infoschema2-9739.result | 2
-rw-r--r-- storage/connect/tabext.cpp | 2
-rw-r--r-- storage/connect/tabtbl.cpp | 2
-rw-r--r-- storage/connect/tabutil.cpp | 4
-rw-r--r-- storage/csv/ha_tina.cc | 8
-rw-r--r-- storage/csv/ha_tina.h | 4
-rw-r--r-- storage/example/ha_example.cc | 6
-rw-r--r-- storage/example/ha_example.h | 2
-rw-r--r-- storage/federated/ha_federated.cc | 52
-rw-r--r-- storage/federated/ha_federated.h | 2
-rw-r--r-- storage/federatedx/ha_federatedx.cc | 38
-rw-r--r-- storage/federatedx/ha_federatedx.h | 2
-rw-r--r-- storage/heap/ha_heap.cc | 50
-rw-r--r-- storage/heap/ha_heap.h | 3
-rw-r--r-- storage/heap/hp_block.c | 4
-rw-r--r-- storage/innobase/dict/dict0dict.cc | 3
-rw-r--r-- storage/innobase/dict/dict0load.cc | 16
-rw-r--r-- storage/innobase/dict/dict0mem.cc | 8
-rw-r--r-- storage/innobase/fil/fil0fil.cc | 114
-rw-r--r-- storage/innobase/fts/fts0fts.cc | 58
-rw-r--r-- storage/innobase/handler/ha_innodb.cc | 122
-rw-r--r-- storage/innobase/handler/ha_innodb.h | 4
-rw-r--r-- storage/innobase/handler/handler0alter.cc | 83
-rw-r--r-- storage/innobase/include/btr0cur.h | 5
-rw-r--r-- storage/innobase/include/dict0dict.h | 2
-rw-r--r-- storage/innobase/include/dict0dict.ic | 5
-rw-r--r-- storage/innobase/include/dict0mem.h | 57
-rw-r--r-- storage/innobase/include/fil0fil.h | 57
-rw-r--r-- storage/innobase/include/os0once.h | 3
-rw-r--r-- storage/innobase/include/row0mysql.h | 4
-rw-r--r-- storage/innobase/include/trx0rec.h | 8
-rw-r--r-- storage/innobase/include/trx0rec.ic | 5
-rw-r--r-- storage/innobase/include/ut0dbg.h | 2
-rw-r--r-- storage/innobase/include/ut0ut.h | 38
-rw-r--r-- storage/innobase/log/log0recv.cc | 2
-rw-r--r-- storage/innobase/que/que0que.cc | 2
-rw-r--r-- storage/innobase/row/row0ins.cc | 31
-rw-r--r-- storage/innobase/row/row0mysql.cc | 179
-rw-r--r-- storage/innobase/row/row0sel.cc | 26
-rw-r--r-- storage/innobase/row/row0uins.cc | 26
-rw-r--r-- storage/innobase/row/row0upd.cc | 23
-rw-r--r-- storage/innobase/trx/trx0rec.cc | 113
-rw-r--r-- storage/innobase/trx/trx0roll.cc | 10
-rw-r--r-- storage/innobase/ut/ut0ut.cc | 7
-rw-r--r-- storage/maria/ha_maria.cc | 50
-rw-r--r-- storage/maria/ha_maria.h | 6
-rw-r--r-- storage/maria/lockman.c | 8
-rw-r--r-- storage/maria/ma_check.c | 5
-rw-r--r-- storage/maria/ma_ft_boolean_search.c | 2
-rw-r--r-- storage/maria/ma_ft_parser.c | 3
-rw-r--r-- storage/maria/ma_ft_update.c | 2
-rw-r--r-- storage/maria/ma_open.c | 53
-rw-r--r-- storage/maria/ma_unique.c | 6
-rw-r--r-- storage/maria/ma_update.c | 3
-rw-r--r-- storage/maria/maria_def.h | 2
-rw-r--r-- storage/mroonga/ha_mroonga.cpp | 131
-rw-r--r-- storage/mroonga/ha_mroonga.hpp | 29
-rw-r--r-- storage/mroonga/lib/mrn_column_name.cpp | 14
-rw-r--r-- storage/mroonga/lib/mrn_column_name.hpp | 3
-rw-r--r-- storage/mroonga/lib/mrn_condition_converter.cpp | 4
-rw-r--r-- storage/mroonga/lib/mrn_count_skip_checker.cpp | 6
-rw-r--r-- storage/mroonga/mrn_mysql_compat.h | 13
-rw-r--r-- storage/mroonga/mrn_table.cpp | 6
-rw-r--r-- storage/myisam/ft_boolean_search.c | 2
-rw-r--r-- storage/myisam/ft_parser.c | 3
-rw-r--r-- storage/myisam/ft_update.c | 2
-rw-r--r-- storage/myisam/ha_myisam.cc | 9
-rw-r--r-- storage/myisam/ha_myisam.h | 4
-rw-r--r-- storage/myisam/mi_check.c | 5
-rw-r--r-- storage/myisam/mi_create.c | 2
-rw-r--r-- storage/myisam/mi_unique.c | 5
-rw-r--r-- storage/myisam/mi_update.c | 3
-rw-r--r-- storage/myisam/myisamdef.h | 2
-rw-r--r-- storage/myisammrg/ha_myisammrg.cc | 8
-rw-r--r-- storage/myisammrg/ha_myisammrg.h | 2
-rw-r--r-- storage/myisammrg/myrg_update.c | 3
-rw-r--r-- storage/myisammrg/mysql-test/storage_engine/autoincrement.rdiff | 30
-rw-r--r-- storage/myisammrg/mysql-test/storage_engine/create_table.rdiff | 4
-rw-r--r-- storage/myisammrg/mysql-test/storage_engine/truncate_table.rdiff | 24
-rw-r--r-- storage/oqgraph/ha_oqgraph.cc | 25
-rw-r--r-- storage/oqgraph/ha_oqgraph.h | 4
-rw-r--r-- storage/perfschema/ha_perfschema.cc | 2
-rw-r--r-- storage/perfschema/ha_perfschema.h | 5
-rw-r--r-- storage/perfschema/pfs.cc | 2
-rw-r--r-- storage/perfschema/pfs_autosize.cc | 6
-rw-r--r-- storage/perfschema/pfs_column_values.cc | 36
-rw-r--r-- storage/perfschema/pfs_column_values.h | 18
-rw-r--r-- storage/perfschema/pfs_engine_table.cc | 8
-rw-r--r-- storage/perfschema/pfs_engine_table.h | 6
-rw-r--r-- storage/perfschema/pfs_server.cc | 2
-rw-r--r-- storage/perfschema/pfs_server.h | 2
-rw-r--r-- storage/perfschema/table_setup_actors.cc | 2
-rw-r--r-- storage/perfschema/table_setup_actors.h | 2
-rw-r--r-- storage/perfschema/table_setup_consumers.cc | 2
-rw-r--r-- storage/perfschema/table_setup_consumers.h | 2
-rw-r--r-- storage/perfschema/table_setup_instruments.cc | 2
-rw-r--r-- storage/perfschema/table_setup_instruments.h | 2
-rw-r--r-- storage/perfschema/table_setup_objects.cc | 2
-rw-r--r-- storage/perfschema/table_setup_objects.h | 2
-rw-r--r-- storage/perfschema/table_setup_timers.cc | 2
-rw-r--r-- storage/perfschema/table_setup_timers.h | 2
-rw-r--r-- storage/perfschema/table_threads.cc | 2
-rw-r--r-- storage/perfschema/table_threads.h | 2
-rw-r--r-- storage/rocksdb/ha_rocksdb.cc | 9
-rw-r--r-- storage/rocksdb/ha_rocksdb.h | 4
-rw-r--r-- storage/rocksdb/mysql-test/rocksdb/r/2pc_group_commit.result | 30
-rw-r--r-- storage/rocksdb/mysql-test/rocksdb/r/bulk_load.result | 16
-rw-r--r-- storage/rocksdb/mysql-test/rocksdb/r/bulk_load_rev_cf.result | 16
-rw-r--r-- storage/rocksdb/mysql-test/rocksdb/r/bulk_load_rev_cf_and_data.result | 16
-rw-r--r-- storage/rocksdb/mysql-test/rocksdb/r/bulk_load_rev_data.result | 16
-rw-r--r-- storage/rocksdb/mysql-test/rocksdb/r/bulk_load_unsorted.result | 16
-rw-r--r-- storage/rocksdb/mysql-test/rocksdb/r/corrupted_data_reads_debug.result | 10
-rw-r--r-- storage/rocksdb/mysql-test/rocksdb/r/issue255.result | 12
-rw-r--r-- storage/rocksdb/mysql-test/rocksdb/r/rocksdb.result | 12
-rw-r--r-- storage/rocksdb/mysql-test/rocksdb/r/rocksdb_icp.result | 2
-rw-r--r-- storage/rocksdb/mysql-test/rocksdb/r/rocksdb_icp_rev.result | 2
-rw-r--r-- storage/rocksdb/mysql-test/rocksdb/r/show_table_status.result | 12
-rw-r--r-- storage/rocksdb/mysql-test/rocksdb/r/tbl_opt_data_index_dir.result | 4
-rw-r--r-- storage/rocksdb/mysql-test/rocksdb/r/truncate_table.result | 16
-rw-r--r-- storage/rocksdb/mysql-test/rocksdb/t/2pc_group_commit.test | 24
-rw-r--r-- storage/rocksdb/mysql-test/rocksdb/t/disabled.def | 2
-rw-r--r-- storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_flush_memtable_on_analyze_basic.result | 8
-rw-r--r-- storage/rocksdb/rdb_datadic.cc | 12
-rw-r--r-- storage/sequence/sequence.cc | 4
-rw-r--r-- storage/sphinx/ha_sphinx.cc | 18
-rw-r--r-- storage/sphinx/ha_sphinx.h | 2
-rw-r--r-- storage/spider/ha_spider.cc | 9
-rw-r--r-- storage/spider/ha_spider.h | 6
-rw-r--r-- storage/spider/hs_client/hs_compat.h | 7
-rw-r--r-- storage/spider/spd_db_conn.cc | 39
-rw-r--r-- storage/spider/spd_db_handlersocket.cc | 2
-rw-r--r-- storage/spider/spd_db_mysql.cc | 17
-rw-r--r-- storage/spider/spd_db_oracle.cc | 14
-rw-r--r-- storage/spider/spd_sys_table.cc | 12
-rw-r--r-- storage/tokudb/ha_tokudb.cc | 15
-rw-r--r-- storage/tokudb/ha_tokudb.h | 8
-rw-r--r-- storage/tokudb/ha_tokudb_alter_56.cc | 10
-rw-r--r-- storage/tokudb/ha_tokudb_alter_common.cc | 4
-rw-r--r-- storage/tokudb/ha_tokudb_update.cc | 18
-rw-r--r-- storage/tokudb/hatoku_cmp.cc | 2
-rw-r--r-- storage/tokudb/hatoku_hton.cc | 4
-rw-r--r-- storage/tokudb/mysql-test/tokudb/r/ext_key_1_innodb.result | 2
-rw-r--r-- storage/tokudb/mysql-test/tokudb/r/ext_key_1_tokudb.result | 2
-rw-r--r-- storage/tokudb/mysql-test/tokudb/r/ext_key_2_innodb.result | 2
-rw-r--r-- storage/tokudb/mysql-test/tokudb/r/ext_key_2_tokudb.result | 2
-rw-r--r-- storage/tokudb/mysql-test/tokudb/r/type_float.result | 6
-rw-r--r-- storage/tokudb/mysql-test/tokudb/r/type_ranges.result | 2
-rw-r--r-- storage/tokudb/mysql-test/tokudb_bugs/r/checkpoint_lock.result | 2
-rw-r--r-- storage/tokudb/mysql-test/tokudb_parts/r/part_supported_sql_func_tokudb.result | 2
-rw-r--r-- storage/tokudb/mysql-test/tokudb_parts/r/partition_alter1_1_2_tokudb.result | 42
-rw-r--r-- storage/tokudb/mysql-test/tokudb_parts/r/partition_alter1_1_tokudb.result | 24
-rw-r--r-- storage/tokudb/mysql-test/tokudb_parts/r/partition_alter1_2_tokudb.result | 60
-rw-r--r-- storage/tokudb/mysql-test/tokudb_parts/r/partition_alter2_1_1_tokudb.result | 30
-rw-r--r-- storage/tokudb/mysql-test/tokudb_parts/r/partition_alter2_1_2_tokudb.result | 30
-rw-r--r-- storage/tokudb/mysql-test/tokudb_parts/r/partition_alter2_2_1_tokudb.result | 30
-rw-r--r-- storage/tokudb/mysql-test/tokudb_parts/r/partition_alter2_2_2_tokudb.result | 30
-rw-r--r-- storage/tokudb/mysql-test/tokudb_parts/r/partition_alter4_tokudb.result | 138
-rw-r--r-- storage/tokudb/mysql-test/tokudb_parts/r/partition_basic_tokudb.result | 48
-rw-r--r-- storage/tokudb/mysql-test/tokudb_parts/r/partition_syntax_tokudb.result | 4
-rw-r--r-- storage/tokudb/tokudb_dir_cmd.cc | 6
-rw-r--r-- storage/xtradb/handler/ha_innodb.cc | 38
-rw-r--r-- storage/xtradb/handler/ha_innodb.h | 4
-rw-r--r-- storage/xtradb/handler/handler0alter.cc | 50
172 files changed, 1622 insertions, 1294 deletions
diff --git a/storage/archive/azio.c b/storage/archive/azio.c
index cc2140e838e..8bf90e700d4 100644
--- a/storage/archive/azio.c
+++ b/storage/archive/azio.c
@@ -913,7 +913,7 @@ int azread_frm(azio_stream *s, uchar *blob)
/*
Simple comment field
*/
-int azwrite_comment(azio_stream *s, char *blob, unsigned int length)
+int azwrite_comment(azio_stream *s, const char *blob, unsigned int length)
{
if (s->mode == 'r')
return 1;
diff --git a/storage/archive/azlib.h b/storage/archive/azlib.h
index 2971705b2f1..d9318002901 100644
--- a/storage/archive/azlib.h
+++ b/storage/archive/azlib.h
@@ -336,7 +336,8 @@ extern int azclose(azio_stream *file);
extern int azwrite_frm (azio_stream *s, const uchar *blob, unsigned int length);
extern int azread_frm (azio_stream *s, uchar *blob);
-extern int azwrite_comment (azio_stream *s, char *blob, unsigned int length);
+extern int azwrite_comment (azio_stream *s, const char *blob,
+ unsigned int length);
extern int azread_comment (azio_stream *s, char *blob);
#ifdef __cplusplus
diff --git a/storage/blackhole/ha_blackhole.cc b/storage/blackhole/ha_blackhole.cc
index 56d8000d64d..ed9b03dc82b 100644
--- a/storage/blackhole/ha_blackhole.cc
+++ b/storage/blackhole/ha_blackhole.cc
@@ -105,7 +105,7 @@ int ha_blackhole::write_row(uchar * buf)
DBUG_RETURN(table->next_number_field ? update_auto_increment() : 0);
}
-int ha_blackhole::update_row(const uchar *old_data, uchar *new_data)
+int ha_blackhole::update_row(const uchar *old_data, const uchar *new_data)
{
DBUG_ENTER("ha_blackhole::update_row");
THD *thd= ha_thd();
diff --git a/storage/blackhole/ha_blackhole.h b/storage/blackhole/ha_blackhole.h
index b70320848d7..9a4b34809f8 100644
--- a/storage/blackhole/ha_blackhole.h
+++ b/storage/blackhole/ha_blackhole.h
@@ -97,6 +97,6 @@ public:
enum thr_lock_type lock_type);
private:
virtual int write_row(uchar *buf);
- virtual int update_row(const uchar *old_data, uchar *new_data);
+ virtual int update_row(const uchar *old_data, const uchar *new_data);
virtual int delete_row(const uchar *buf);
};
diff --git a/storage/cassandra/ha_cassandra.cc b/storage/cassandra/ha_cassandra.cc
index f0ad2e39081..c55e9976ede 100644
--- a/storage/cassandra/ha_cassandra.cc
+++ b/storage/cassandra/ha_cassandra.cc
@@ -411,7 +411,7 @@ int ha_cassandra::check_field_options(Field **fields)
{
if (dyncol_set || (*field)->type() != MYSQL_TYPE_BLOB)
{
- my_error(ER_WRONG_FIELD_SPEC, MYF(0), (*field)->field_name);
+ my_error(ER_WRONG_FIELD_SPEC, MYF(0), (*field)->field_name.str);
DBUG_RETURN(HA_WRONG_CREATE_OPTION);
}
dyncol_set= 1;
@@ -866,7 +866,7 @@ static void alloc_strings_memroot(MEM_ROOT *mem_root)
The mem_root used to allocate UUID (of length 36 + \0) so make
appropriate allocated size
*/
- init_alloc_root(mem_root,
+ init_alloc_root(mem_root, "cassandra",
(36 + 1 + ALIGN_SIZE(sizeof(USED_MEM))) * 10 +
ALLOC_ROOT_MIN_BLOCK_SIZE,
(36 + 1 + ALIGN_SIZE(sizeof(USED_MEM))) * 10 +
@@ -1497,14 +1497,14 @@ bool ha_cassandra::setup_field_converters(Field **field_arg, uint n_fields)
for (field= field_arg + 1, i= 1; *field; field++, i++)
{
if ((!dyncol_set || dyncol_field != i) &&
- !strcmp((*field)->field_name, col_name))
+ !strcmp((*field)->field_name.str, col_name))
{
n_mapped++;
ColumnDataConverter **conv= field_converters + (*field)->field_index;
if (!(*conv= map_field_to_validator(*field, col_type)))
{
se->print_error("Failed to map column %s to datatype %s",
- (*field)->field_name, col_type);
+ (*field)->field_name.str, col_type);
my_error(ER_INTERNAL_ERROR, MYF(0), se->error_str());
DBUG_RETURN(true);
}
@@ -1543,7 +1543,7 @@ bool ha_cassandra::setup_field_converters(Field **field_arg, uint n_fields)
DBUG_ASSERT(first_unmapped);
se->print_error("Field `%s` could not be mapped to any field in Cassandra",
- first_unmapped->field_name);
+ first_unmapped->field_name.str);
my_error(ER_INTERNAL_ERROR, MYF(0), se->error_str());
DBUG_RETURN(true);
}
@@ -1552,14 +1552,14 @@ bool ha_cassandra::setup_field_converters(Field **field_arg, uint n_fields)
Setup type conversion for row_key.
*/
se->get_rowkey_type(&col_name, &col_type);
- if (col_name && strcmp(col_name, (*field_arg)->field_name))
+ if (col_name && strcmp(col_name, (*field_arg)->field_name.str))
{
se->print_error("PRIMARY KEY column must match Cassandra's name '%s'",
col_name);
my_error(ER_INTERNAL_ERROR, MYF(0), se->error_str());
DBUG_RETURN(true);
}
- if (!col_name && strcmp("rowkey", (*field_arg)->field_name))
+ if (!col_name && strcmp("rowkey", (*field_arg)->field_name.str))
{
se->print_error("target column family has no key_alias defined, "
"PRIMARY KEY column must be named 'rowkey'");
@@ -1742,14 +1742,14 @@ int ha_cassandra::read_cassandra_columns(bool unpack_pk)
{
uint fieldnr= (*field)->field_index;
if ((!dyncol_set || dyncol_field != fieldnr) &&
- !strcmp((*field)->field_name, cass_name))
+ !strcmp((*field)->field_name.str, cass_name))
{
found= 1;
(*field)->set_notnull();
if (field_converters[fieldnr]->cassandra_to_mariadb(cass_value,
cass_value_len))
{
- print_conversion_error((*field)->field_name, cass_value,
+ print_conversion_error((*field)->field_name.str, cass_value,
cass_value_len);
res=1;
goto err;
@@ -1770,7 +1770,7 @@ int ha_cassandra::read_cassandra_columns(bool unpack_pk)
se->print_error("Unable to convert value for field `%s`"
" from Cassandra's data format. Name"
" length exceed limit of %u: '%s'",
- table->field[dyncol_field]->field_name,
+ table->field[dyncol_field]->field_name.str,
(uint)MAX_NAME_LENGTH, cass_name);
my_error(ER_INTERNAL_ERROR, MYF(0), se->error_str());
res=1;
@@ -1782,7 +1782,7 @@ int ha_cassandra::read_cassandra_columns(bool unpack_pk)
se->print_error("Unable to convert value for field `%s`"
" from Cassandra's data format. Sum of all names"
" length exceed limit of %lu",
- table->field[dyncol_field]->field_name,
+ table->field[dyncol_field]->field_name.str,
cass_name, (uint)MAX_TOTAL_NAME_LENGTH);
my_error(ER_INTERNAL_ERROR, MYF(0), se->error_str());
res=1;
@@ -1841,7 +1841,7 @@ int ha_cassandra::read_cassandra_columns(bool unpack_pk)
se->get_read_rowkey(&cass_value, &cass_value_len);
if (rowkey_converter->cassandra_to_mariadb(cass_value, cass_value_len))
{
- print_conversion_error((*field)->field_name, cass_value, cass_value_len);
+ print_conversion_error((*field)->field_name.str, cass_value, cass_value_len);
res=1;
goto err;
}
@@ -1953,7 +1953,7 @@ int ha_cassandra::write_row(uchar *buf)
if (rowkey_converter->mariadb_to_cassandra(&cass_key, &cass_key_len))
{
my_error(ER_WARN_DATA_OUT_OF_RANGE, MYF(0),
- rowkey_converter->field->field_name, insert_lineno);
+ rowkey_converter->field->field_name.str, insert_lineno);
dbug_tmp_restore_column_map(table->read_set, old_map);
DBUG_RETURN(HA_ERR_INTERNAL_ERROR);
}
@@ -1987,11 +1987,11 @@ int ha_cassandra::write_row(uchar *buf)
&cass_data_len))
{
my_error(ER_WARN_DATA_OUT_OF_RANGE, MYF(0),
- field_converters[i]->field->field_name, insert_lineno);
+ field_converters[i]->field->field_name.str, insert_lineno);
dbug_tmp_restore_column_map(table->read_set, old_map);
DBUG_RETURN(HA_ERR_INTERNAL_ERROR);
}
- se->add_insert_column(field_converters[i]->field->field_name, 0,
+ se->add_insert_column(field_converters[i]->field->field_name.str, 0,
cass_data, cass_data_len);
}
}
@@ -2074,7 +2074,7 @@ int ha_cassandra::rnd_init(bool scan)
{
se->clear_read_columns();
for (uint i= 1; i < table->s->fields; i++)
- se->add_read_column(table->field[i]->field_name);
+ se->add_read_column(table->field[i]->field_name.str);
}
se->read_batch_size= THDVAR(table->in_use, rnd_batch_size);
@@ -2171,7 +2171,7 @@ int ha_cassandra::info(uint flag)
}
-void key_copy(uchar *to_key, uchar *from_record, KEY *key_info,
+void key_copy(uchar *to_key, const uchar *from_record, KEY *key_info,
uint key_length, bool with_zerofill);
@@ -2355,7 +2355,7 @@ public:
if (idx == obj->table->s->fields)
return NULL;
else
- return obj->table->field[idx++]->field_name;
+ return obj->table->field[idx++]->field_name.str;
}
};
@@ -2386,7 +2386,7 @@ int ha_cassandra::update_row(const uchar *old_data, uchar *new_data)
if (rowkey_converter->mariadb_to_cassandra(&new_key, &new_key_len))
{
my_error(ER_WARN_DATA_OUT_OF_RANGE, MYF(0),
- rowkey_converter->field->field_name, insert_lineno);
+ rowkey_converter->field->field_name.str, insert_lineno);
dbug_tmp_restore_column_map(table->read_set, old_map);
DBUG_RETURN(HA_ERR_INTERNAL_ERROR);
}
@@ -2449,11 +2449,11 @@ int ha_cassandra::update_row(const uchar *old_data, uchar *new_data)
if (field_converters[i]->mariadb_to_cassandra(&cass_data, &cass_data_len))
{
my_error(ER_WARN_DATA_OUT_OF_RANGE, MYF(0),
- field_converters[i]->field->field_name, insert_lineno);
+ field_converters[i]->field->field_name.str, insert_lineno);
dbug_tmp_restore_column_map(table->read_set, old_map);
DBUG_RETURN(HA_ERR_INTERNAL_ERROR);
}
- se->add_insert_column(field_converters[i]->field->field_name, 0,
+ se->add_insert_column(field_converters[i]->field->field_name.str, 0,
cass_data, cass_data_len);
}
}
diff --git a/storage/connect/ha_connect.cc b/storage/connect/ha_connect.cc
index ae7335d2904..b9ff1d478fe 100644
--- a/storage/connect/ha_connect.cc
+++ b/storage/connect/ha_connect.cc
@@ -254,7 +254,7 @@ static int connect_assisted_discovery(handlerton *hton, THD* thd,
/****************************************************************************/
/* Return str as a zero terminated string. */
/****************************************************************************/
-static char *strz(PGLOBAL g, LEX_STRING &ls)
+static char *strz(PGLOBAL g, LEX_CSTRING &ls)
{
char *str= (char*)PlugSubAlloc(g, NULL, ls.length + 1);
@@ -1293,7 +1293,7 @@ PCSZ ha_connect::GetStringOption(PCSZ opname, PCSZ sdef)
PTOS options= GetTableOptionStruct();
if (!stricmp(opname, "Connect")) {
- LEX_STRING cnc= (tshp) ? tshp->connect_string
+ LEX_CSTRING cnc= (tshp) ? tshp->connect_string
: table->s->connect_string;
if (cnc.length)
@@ -1478,7 +1478,7 @@ void *ha_connect::GetColumnOption(PGLOBAL g, void *field, PCOLINFO pcf)
pcf->Flags= 0;
// Now get column information
- pcf->Name= (char*)fp->field_name;
+ pcf->Name= (char*)fp->field_name.str;
if (fop && fop->special) {
pcf->Fieldfmt= (char*)fop->special;
@@ -1675,7 +1675,7 @@ PIXDEF ha_connect::GetIndexInfo(TABLE_SHARE *s)
// Get the the key parts info
for (int k= 0; (unsigned)k < kp.user_defined_key_parts; k++) {
- pn= (char*)kp.key_part[k].field->field_name;
+ pn= (char*)kp.key_part[k].field->field_name.str;
name= PlugDup(g, pn);
// Allocate the key part description block
@@ -1791,7 +1791,7 @@ int ha_connect::GetColNameLen(Field *fp)
if (fop && fop->special)
n= strlen(fop->special) + 1;
else
- n= strlen(fp->field_name);
+ n= fp->field_name.length;
return n;
} // end of GetColNameLen
@@ -1803,7 +1803,7 @@ char *ha_connect::GetColName(Field *fp)
{
PFOS fop= GetFieldOptionStruct(fp);
- return (fop && fop->special) ? fop->special : (char*)fp->field_name;
+ return (fop && fop->special) ? fop->special : (char*)fp->field_name.str;
} // end of GetColName
/****************************************************************************/
@@ -1818,7 +1818,7 @@ void ha_connect::AddColName(char *cp, Field *fp)
// The prefix * mark the column as "special"
strcat(strcpy(cp, "*"), strupr(fop->special));
else
- strcpy(cp, (char*)fp->field_name);
+ strcpy(cp, fp->field_name.str);
} // end of AddColName
#endif // 0
@@ -1905,12 +1905,12 @@ int ha_connect::OpenTable(PGLOBAL g, bool del)
for (field= table->field; fp= *field; field++) {
if (bitmap_is_set(map, fp->field_index)) {
- n1+= (strlen(fp->field_name) + 1);
+ n1+= (fp->field_name.length + 1);
k1++;
} // endif
if (ump && bitmap_is_set(ump, fp->field_index)) {
- n2+= (strlen(fp->field_name) + 1);
+ n2+= (fp->field_name.length + 1);
k2++;
} // endif
@@ -1921,8 +1921,8 @@ int ha_connect::OpenTable(PGLOBAL g, bool del)
for (field= table->field; fp= *field; field++)
if (bitmap_is_set(map, fp->field_index)) {
- strcpy(p, (char*)fp->field_name);
- p+= (strlen(p) + 1);
+ strcpy(p, fp->field_name.str);
+ p+= (fp->field_name.length + 1);
} // endif used field
*p= '\0'; // mark end of list
@@ -1933,7 +1933,7 @@ int ha_connect::OpenTable(PGLOBAL g, bool del)
for (field= table->field; fp= *field; field++)
if (bitmap_is_set(ump, fp->field_index)) {
- strcpy(p, (char*)fp->field_name);
+ strcpy(p, fp->field_name.str);
if (part_id && bitmap_is_set(part_id, fp->field_index)) {
// Trying to update a column used for partitioning
@@ -1991,9 +1991,9 @@ bool ha_connect::CheckColumnList(PGLOBAL g)
try {
for (field= table->field; fp= *field; field++)
if (bitmap_is_set(map, fp->field_index)) {
- if (!(colp= tdbp->ColDB(g, (PSZ)fp->field_name, 0))) {
+ if (!(colp= tdbp->ColDB(g, (PSZ)fp->field_name.str, 0))) {
sprintf(g->Message, "Column %s not found in %s",
- fp->field_name, tdbp->GetName());
+ fp->field_name.str, tdbp->GetName());
throw 1;
} // endif colp
@@ -2087,14 +2087,14 @@ int ha_connect::MakeRecord(char *buf)
// This is a used field, fill the buffer with value
for (colp= tdbp->GetColumns(); colp; colp= colp->GetNext())
if ((!mrr || colp->GetKcol()) &&
- !stricmp(colp->GetName(), (char*)fp->field_name))
+ !stricmp(colp->GetName(), fp->field_name.str))
break;
if (!colp) {
if (mrr)
continue;
- htrc("Column %s not found\n", fp->field_name);
+ htrc("Column %s not found\n", fp->field_name.str);
dbug_tmp_restore_column_map(table->write_set, org_bitmap);
DBUG_RETURN(HA_ERR_WRONG_IN_RECORD);
} // endif colp
@@ -2155,7 +2155,7 @@ int ha_connect::MakeRecord(char *buf)
sprintf(buf, "Out of range value %.140s for column '%s' at row %ld",
value->GetCharString(val),
- fp->field_name,
+ fp->field_name.str,
thd->get_stmt_da()->current_row_for_warning());
push_warning(thd, Sql_condition::WARN_LEVEL_WARN, 0, buf);
@@ -2215,11 +2215,11 @@ int ha_connect::ScanRecord(PGLOBAL g, const uchar *)
&& tdbp->GetAmType() != TYPE_AM_JDBC) ||
bitmap_is_set(table->write_set, fp->field_index)) {
for (colp= tdbp->GetSetCols(); colp; colp= colp->GetNext())
- if (!stricmp(colp->GetName(), fp->field_name))
+ if (!stricmp(colp->GetName(), fp->field_name.str))
break;
if (!colp) {
- htrc("Column %s not found\n", fp->field_name);
+ htrc("Column %s not found\n", fp->field_name.str);
rc= HA_ERR_WRONG_IN_RECORD;
goto err;
} else
@@ -2411,10 +2411,10 @@ bool ha_connect::MakeKeyWhere(PGLOBAL g, PSTRG qry, OPVAL vop, char q,
if (q) {
qry->Append(q);
- qry->Append((PSZ)fp->field_name);
+ qry->Append((PSZ)fp->field_name.str);
qry->Append(q);
} else
- qry->Append((PSZ)fp->field_name);
+ qry->Append((PSZ)fp->field_name.str);
switch (ranges[i]->flag) {
case HA_READ_KEY_EXACT:
@@ -2673,7 +2673,7 @@ PFIL ha_connect::CondFilter(PGLOBAL g, Item *cond)
return NULL;
if (pField->field->table != table ||
- !(colp[i]= tdbp->ColDB(g, (PSZ)pField->field->field_name, 0)))
+ !(colp[i]= tdbp->ColDB(g, (PSZ)pField->field->field_name.str, 0)))
return NULL; // Column does not belong to this table
// These types are not yet implemented (buggy)
@@ -2691,7 +2691,7 @@ PFIL ha_connect::CondFilter(PGLOBAL g, Item *cond)
if (trace) {
htrc("Field index=%d\n", pField->field->field_index);
- htrc("Field name=%s\n", pField->field->field_name);
+ htrc("Field name=%s\n", pField->field->field_name.str);
} // endif trace
} else {
@@ -2784,7 +2784,7 @@ PCFIL ha_connect::CheckCond(PGLOBAL g, PCFIL filp, const Item *cond)
htrc("Cond type=%d\n", cond->type());
if (cond->type() == COND::COND_ITEM) {
- char *pb0, *pb1, *pb2, *ph0, *ph1, *ph2;
+ char *pb0, *pb1, *pb2, *ph0= 0, *ph1= 0, *ph2= 0;
bool bb = false, bh = false;
Item_cond *cond_item= (Item_cond *)cond;
@@ -2954,7 +2954,7 @@ PCFIL ha_connect::CheckCond(PGLOBAL g, PCFIL filp, const Item *cond)
} else {
bool h;
- fnm = filp->Chk(pField->field->field_name, &h);
+ fnm = filp->Chk(pField->field->field_name.str, &h);
if (h && i && !ishav)
return NULL; // Having should be col VOP arg
@@ -2965,7 +2965,7 @@ PCFIL ha_connect::CheckCond(PGLOBAL g, PCFIL filp, const Item *cond)
if (trace) {
htrc("Field index=%d\n", pField->field->field_index);
- htrc("Field name=%s\n", pField->field->field_name);
+ htrc("Field name=%s\n", pField->field->field_name.str);
htrc("Field type=%d\n", pField->field->type());
htrc("Field_type=%d\n", args[i]->field_type());
} // endif trace
@@ -3557,7 +3557,7 @@ int ha_connect::write_row(uchar *buf)
@see
sql_select.cc, sql_acl.cc, sql_update.cc and sql_insert.cc
*/
-int ha_connect::update_row(const uchar *old_data, uchar *new_data)
+int ha_connect::update_row(const uchar *old_data, const uchar *new_data)
{
int rc= 0;
PGLOBAL& g= xp->g;
@@ -4324,7 +4324,7 @@ int ha_connect::delete_all_rows()
} // end of delete_all_rows
-bool ha_connect::check_privileges(THD *thd, PTOS options, char *dbn, bool quick)
+bool ha_connect::check_privileges(THD *thd, PTOS options, const char *dbn, bool quick)
{
const char *db= (dbn && *dbn) ? dbn : NULL;
TABTYPE type=GetRealType(options);
@@ -5536,7 +5536,7 @@ static int connect_assisted_discovery(handlerton *, THD* thd,
} // endif p
} else if (ttp != TAB_ODBC || !(fnc & (FNC_TABLE | FNC_COL)))
- tab = table_s->table_name.str; // Default value
+ tab = (char *) table_s->table_name.str; // Default value
} // endif tab
@@ -6164,7 +6164,7 @@ int ha_connect::create(const char *name, TABLE *table_arg,
TABTYPE type;
TABLE *st= table; // Probably unuseful
THD *thd= ha_thd();
- LEX_STRING cnc = table_arg->s->connect_string;
+ LEX_CSTRING cnc = table_arg->s->connect_string;
#if defined(WITH_PARTITION_STORAGE_ENGINE)
partition_info *part_info= table_arg->part_info;
#endif // WITH_PARTITION_STORAGE_ENGINE
@@ -6401,7 +6401,7 @@ int ha_connect::create(const char *name, TABLE *table_arg,
if (fp->flags & (BLOB_FLAG | ENUM_FLAG | SET_FLAG)) {
sprintf(g->Message, "Unsupported type for column %s",
- fp->field_name);
+ fp->field_name.str);
my_message(ER_UNKNOWN_ERROR, g->Message, MYF(0));
rc= HA_ERR_INTERNAL_ERROR;
DBUG_RETURN(rc);
@@ -6438,11 +6438,11 @@ int ha_connect::create(const char *name, TABLE *table_arg,
#if 0
if (!fp->field_length) {
sprintf(g->Message, "Unsupported 0 length for column %s",
- fp->field_name);
+ fp->field_name.str);
rc= HA_ERR_INTERNAL_ERROR;
my_printf_error(ER_UNKNOWN_ERROR,
"Unsupported 0 length for column %s",
- MYF(0), fp->field_name);
+ MYF(0), fp->field_name.str);
DBUG_RETURN(rc);
} // endif fp
#endif // 0
@@ -6457,12 +6457,12 @@ int ha_connect::create(const char *name, TABLE *table_arg,
case MYSQL_TYPE_BLOB:
case MYSQL_TYPE_GEOMETRY:
default:
-// fprintf(stderr, "Unsupported type column %s\n", fp->field_name);
+// fprintf(stderr, "Unsupported type column %s\n", fp->field_name.str);
sprintf(g->Message, "Unsupported type for column %s",
- fp->field_name);
+ fp->field_name.str);
rc= HA_ERR_INTERNAL_ERROR;
my_printf_error(ER_UNKNOWN_ERROR, "Unsupported type for column %s",
- MYF(0), fp->field_name);
+ MYF(0), fp->field_name.str);
DBUG_RETURN(rc);
break;
} // endswitch type
@@ -6477,12 +6477,12 @@ int ha_connect::create(const char *name, TABLE *table_arg,
if (dbf) {
bool b= false;
- if ((b= strlen(fp->field_name) > 10))
+ if ((b= fp->field_name.length > 10))
sprintf(g->Message, "DBF: Column name '%s' is too long (max=10)",
- fp->field_name);
+ fp->field_name.str);
else if ((b= fp->field_length > 255))
sprintf(g->Message, "DBF: Column length too big for '%s' (max=255)",
- fp->field_name);
+ fp->field_name.str);
if (b) {
my_message(ER_UNKNOWN_ERROR, g->Message, MYF(0));
diff --git a/storage/connect/ha_connect.h b/storage/connect/ha_connect.h
index 4c5bf5856cc..f0fa9b70513 100644
--- a/storage/connect/ha_connect.h
+++ b/storage/connect/ha_connect.h
@@ -394,7 +394,7 @@ virtual int check(THD* thd, HA_CHECK_OPT* check_opt)
We implement this in ha_connect.cc. It's not an obligatory method;
skip it and MySQL will treat it as not implemented.
*/
- int update_row(const uchar *old_data, uchar *new_data);
+ int update_row(const uchar *old_data, const uchar *new_data);
/** @brief
We implement this in ha_connect.cc. It's not an obligatory method;
@@ -509,7 +509,7 @@ private:
DsMrr_impl ds_mrr;
protected:
- bool check_privileges(THD *thd, PTOS options, char *dbn, bool quick=false);
+ bool check_privileges(THD *thd, PTOS options, const char *dbn, bool quick=false);
MODE CheckMode(PGLOBAL g, THD *thd, MODE newmode, bool *chk, bool *cras);
char *GetDBfromName(const char *name);
diff --git a/storage/connect/jsonudf.cpp b/storage/connect/jsonudf.cpp
index 8ddf32e1285..91917b48a23 100644
--- a/storage/connect/jsonudf.cpp
+++ b/storage/connect/jsonudf.cpp
@@ -1554,7 +1554,8 @@ static PCSZ MakeKey(PGLOBAL g, UDF_ARGS *args, int i)
if (args->arg_count > (unsigned)i) {
int j = 0, n = args->attribute_lengths[i];
my_bool b; // true if attribute is zero terminated
- PSZ p, s = args->attributes[i];
+ PSZ p;
+ const char *s = args->attributes[i];
if (s && *s && (n || *s == '\'')) {
if ((b = (!n || !s[n])))
@@ -1573,7 +1574,7 @@ static PCSZ MakeKey(PGLOBAL g, UDF_ARGS *args, int i)
} // endif *s
if (n < 1)
- return "Key";
+ return (char*) "Key";
if (!b) {
p = (PSZ)PlugSubAlloc(g, NULL, n + 1);
@@ -1584,10 +1585,10 @@ static PCSZ MakeKey(PGLOBAL g, UDF_ARGS *args, int i)
} // endif s
- return s;
+ return (char*) s;
} // endif count
- return "Key";
+ return (char*) "Key";
} // end of MakeKey
/*********************************************************************************/
diff --git a/storage/connect/mysql-test/connect/r/infoschema-9739.result b/storage/connect/mysql-test/connect/r/infoschema-9739.result
index 992f4ed0d58..f9cb2976877 100644
--- a/storage/connect/mysql-test/connect/r/infoschema-9739.result
+++ b/storage/connect/mysql-test/connect/r/infoschema-9739.result
@@ -2,7 +2,7 @@ create table t1 (i int) engine=Connect table_type=XML option_list='xmlsup=domdoc
Warnings:
Warning 1105 No file name. Table will use t1.xml
select * from information_schema.tables where create_options like '%table_type=XML%';
-TABLE_CATALOG TABLE_SCHEMA TABLE_NAME TABLE_TYPE ENGINE VERSION ROW_FORMAT TABLE_ROWS AVG_ROW_LENGTH DATA_LENGTH MAX_DATA_LENGTH INDEX_LENGTH DATA_FREE AUTO_INCREMENT CREATE_TIME UPDATE_TIME CHECK_TIME TABLE_COLLATION CHECKSUM CREATE_OPTIONS TABLE_COMMENT
+TABLE_CATALOG TABLE_SCHEMA TABLE_NAME TABLE_TYPE ENGINE VERSION ROW_FORMAT TABLE_ROWS AVG_ROW_LENGTH DATA_LENGTH MAX_DATA_LENGTH INDEX_LENGTH DATA_FREE AUTO_INCREMENT CREATE_TIME UPDATE_TIME CHECK_TIME TABLE_COLLATION CHECKSUM CREATE_OPTIONS TABLE_COMMENT MAX_INDEX_LENGTH TEMPORARY
Warnings:
Warning 1286 Unknown storage engine 'InnoDB'
Warning 1286 Unknown storage engine 'InnoDB'
diff --git a/storage/connect/mysql-test/connect/r/infoschema2-9739.result b/storage/connect/mysql-test/connect/r/infoschema2-9739.result
index 7d8a6839ea5..4c38ce36753 100644
--- a/storage/connect/mysql-test/connect/r/infoschema2-9739.result
+++ b/storage/connect/mysql-test/connect/r/infoschema2-9739.result
@@ -4,7 +4,7 @@ create table t1 (i int) engine=Connect table_type=XML option_list='xmlsup=libxml
Warnings:
Warning 1105 No file name. Table will use t1.xml
select * from information_schema.tables where create_options like '%table_type=XML%';
-TABLE_CATALOG TABLE_SCHEMA TABLE_NAME TABLE_TYPE ENGINE VERSION ROW_FORMAT TABLE_ROWS AVG_ROW_LENGTH DATA_LENGTH MAX_DATA_LENGTH INDEX_LENGTH DATA_FREE AUTO_INCREMENT CREATE_TIME UPDATE_TIME CHECK_TIME TABLE_COLLATION CHECKSUM CREATE_OPTIONS TABLE_COMMENT
+TABLE_CATALOG TABLE_SCHEMA TABLE_NAME TABLE_TYPE ENGINE VERSION ROW_FORMAT TABLE_ROWS AVG_ROW_LENGTH DATA_LENGTH MAX_DATA_LENGTH INDEX_LENGTH DATA_FREE AUTO_INCREMENT CREATE_TIME UPDATE_TIME CHECK_TIME TABLE_COLLATION CHECKSUM CREATE_OPTIONS TABLE_COMMENT MAX_INDEX_LENGTH TEMPORARY
Warnings:
Warning 1286 Unknown storage engine 'InnoDB'
Warning 1286 Unknown storage engine 'InnoDB'
diff --git a/storage/connect/tabext.cpp b/storage/connect/tabext.cpp
index a75b373b564..207c8401c7b 100644
--- a/storage/connect/tabext.cpp
+++ b/storage/connect/tabext.cpp
@@ -286,7 +286,7 @@ bool TDBEXT::MakeSrcdef(PGLOBAL g)
char *catp = strstr(Srcdef, "%s");
if (catp) {
- char *fil1, *fil2;
+ char *fil1= 0, *fil2;
PCSZ ph = ((EXTDEF*)To_Def)->Phpos;
if (!ph)
diff --git a/storage/connect/tabtbl.cpp b/storage/connect/tabtbl.cpp
index 7925e8f29a8..aa7cf4e41b4 100644
--- a/storage/connect/tabtbl.cpp
+++ b/storage/connect/tabtbl.cpp
@@ -232,7 +232,7 @@ bool TDBTBL::InitTableList(PGLOBAL g)
{
int n;
uint sln;
- char *scs;
+ const char *scs;
PTABLE tp, tabp;
PCOL colp;
PTBLDEF tdp = (PTBLDEF)To_Def;
diff --git a/storage/connect/tabutil.cpp b/storage/connect/tabutil.cpp
index 5d8d7c1b9f8..948ea24aa6b 100644
--- a/storage/connect/tabutil.cpp
+++ b/storage/connect/tabutil.cpp
@@ -182,7 +182,7 @@ PQRYRES TabColumns(PGLOBAL g, THD *thd, const char *db,
// Get column name
crp = qrp->Colresp; // Column_Name
- colname = (char *)fp->field_name;
+ colname = (char *)fp->field_name.str;
crp->Kdata->SetValue(colname, i);
// chset = (char *)fp->charset()->name;
@@ -264,7 +264,7 @@ PQRYRES TabColumns(PGLOBAL g, THD *thd, const char *db,
crp = crp->Next; // Remark
// For Valgrind
- if (fp->comment.length > 0 && (fld = fp->comment.str))
+ if (fp->comment.length > 0 && (fld = (char*) fp->comment.str))
crp->Kdata->SetValue(fld, fp->comment.length, i);
else
crp->Kdata->Reset(i);
diff --git a/storage/csv/ha_tina.cc b/storage/csv/ha_tina.cc
index 7e0c61ff634..f465ee2e947 100644
--- a/storage/csv/ha_tina.cc
+++ b/storage/csv/ha_tina.cc
@@ -514,7 +514,7 @@ ha_tina::ha_tina(handlerton *hton, TABLE_SHARE *table_arg)
buffer.set((char*)byte_buffer, IO_SIZE, &my_charset_bin);
chain= chain_buffer;
file_buff= new Transparent_file();
- init_alloc_root(&blobroot, BLOB_MEMROOT_ALLOC_SIZE, 0, MYF(0));
+ init_alloc_root(&blobroot, "ha_tina", BLOB_MEMROOT_ALLOC_SIZE, 0, MYF(0));
}
@@ -522,7 +522,7 @@ ha_tina::ha_tina(handlerton *hton, TABLE_SHARE *table_arg)
Encode a buffer into the quoted format.
*/
-int ha_tina::encode_quote(uchar *buf)
+int ha_tina::encode_quote(const uchar *buf)
{
char attribute_buffer[1024];
String attribute(attribute_buffer, sizeof(attribute_buffer),
@@ -976,7 +976,7 @@ int ha_tina::open(const char *name, int mode, uint open_options)
*/
thr_lock_data_init(&share->lock, &lock, (void*) this);
ref_length= sizeof(my_off_t);
- init_alloc_root(&blobroot, BLOB_MEMROOT_ALLOC_SIZE, 0, MYF(0));
+ init_alloc_root(&blobroot, "ha_tina", BLOB_MEMROOT_ALLOC_SIZE, 0, MYF(0));
share->lock.get_status= tina_get_status;
share->lock.update_status= tina_update_status;
@@ -1066,7 +1066,7 @@ int ha_tina::open_update_temp_file_if_needed()
This will be called in a table scan right before the previous ::rnd_next()
call.
*/
-int ha_tina::update_row(const uchar * old_data, uchar * new_data)
+int ha_tina::update_row(const uchar * old_data, const uchar * new_data)
{
int size;
int rc= -1;
diff --git a/storage/csv/ha_tina.h b/storage/csv/ha_tina.h
index 127c6053a53..c75a64faa52 100644
--- a/storage/csv/ha_tina.h
+++ b/storage/csv/ha_tina.h
@@ -137,7 +137,7 @@ public:
int open(const char *name, int mode, uint open_options);
int close(void);
int write_row(uchar * buf);
- int update_row(const uchar * old_data, uchar * new_data);
+ int update_row(const uchar * old_data, const uchar * new_data);
int delete_row(const uchar * buf);
int rnd_init(bool scan=1);
int rnd_next(uchar *buf);
@@ -173,7 +173,7 @@ public:
void update_status();
/* The following methods were added just for TINA */
- int encode_quote(uchar *buf);
+ int encode_quote(const uchar *buf);
int find_current_row(uchar *buf);
int chain_append();
};
diff --git a/storage/example/ha_example.cc b/storage/example/ha_example.cc
index 3a5b269e79f..657f5cb9d01 100644
--- a/storage/example/ha_example.cc
+++ b/storage/example/ha_example.cc
@@ -431,7 +431,7 @@ int ha_example::write_row(uchar *buf)
@see
sql_select.cc, sql_acl.cc, sql_update.cc and sql_insert.cc
*/
-int ha_example::update_row(const uchar *old_data, uchar *new_data)
+int ha_example::update_row(const uchar *old_data, const uchar *new_data)
{
DBUG_ENTER("ha_example::update_row");
@@ -896,7 +896,7 @@ int ha_example::create(const char *name, TABLE *table_arg,
ha_field_option_struct *field_options= (*field)->option_struct;
DBUG_ASSERT(field_options);
DBUG_PRINT("info", ("field: %s complex: '%-.64s'",
- (*field)->field_name,
+ (*field)->field_name.str,
(field_options->complex_param_to_parse_it_in_engine ?
field_options->complex_param_to_parse_it_in_engine :
"<NULL>")));
@@ -975,7 +975,7 @@ ha_example::check_if_supported_inplace_alter(TABLE* altered_table,
{
push_warning_printf(ha_thd(), Sql_condition::WARN_LEVEL_NOTE,
ER_UNKNOWN_ERROR, "EXAMPLE DEBUG: Field %`s COMPLEX '%s' -> '%s'",
- table->s->field[i]->field_name,
+ table->s->field[i]->field_name.str,
f_old->complex_param_to_parse_it_in_engine,
f_new->complex_param_to_parse_it_in_engine);
}
diff --git a/storage/example/ha_example.h b/storage/example/ha_example.h
index 2d3d0c81ed9..3a9654bbb7b 100644
--- a/storage/example/ha_example.h
+++ b/storage/example/ha_example.h
@@ -186,7 +186,7 @@ public:
We implement this in ha_example.cc. It's not an obligatory method;
skip it and MySQL will treat it as not implemented.
*/
- int update_row(const uchar *old_data, uchar *new_data);
+ int update_row(const uchar *old_data, const uchar *new_data);
/** @brief
We implement this in ha_example.cc. It's not an obligatory method;
diff --git a/storage/federated/ha_federated.cc b/storage/federated/ha_federated.cc
index ec05127c475..98d4ef4d7f9 100644
--- a/storage/federated/ha_federated.cc
+++ b/storage/federated/ha_federated.cc
@@ -420,7 +420,7 @@ static int federated_rollback(handlerton *hton, THD *thd, bool all);
/* Federated storage engine handlerton */
-static handler *federated_create_handler(handlerton *hton,
+static handler *federated_create_handler(handlerton *hton,
TABLE_SHARE *table,
MEM_ROOT *mem_root)
{
@@ -754,9 +754,9 @@ static int parse_url(MEM_ROOT *mem_root, FEDERATED_SHARE *share, TABLE *table,
share->table_name++;
share->table_name_length= (uint) strlen(share->table_name);
- DBUG_PRINT("info",
+ DBUG_PRINT("info",
("internal format, parsed table_name share->connection_string \
- %s share->table_name %s",
+ %s share->table_name %s",
share->connection_string, share->table_name));
/*
@@ -778,9 +778,9 @@ static int parse_url(MEM_ROOT *mem_root, FEDERATED_SHARE *share, TABLE *table,
*/
share->table_name= strmake_root(mem_root, table->s->table_name.str,
(share->table_name_length= table->s->table_name.length));
- DBUG_PRINT("info",
+ DBUG_PRINT("info",
("internal format, default table_name share->connection_string \
- %s share->table_name %s",
+ %s share->table_name %s",
share->connection_string, share->table_name));
}
@@ -972,8 +972,8 @@ uint ha_federated::convert_row_to_internal_format(uchar *record,
static bool emit_key_part_name(String *to, KEY_PART_INFO *part)
{
DBUG_ENTER("emit_key_part_name");
- if (append_ident(to, part->field->field_name,
- strlen(part->field->field_name), ident_quote_char))
+ if (append_ident(to, part->field->field_name.str,
+ part->field->field_name.length, ident_quote_char))
DBUG_RETURN(1); // Out of memory
DBUG_RETURN(0);
}
@@ -1235,7 +1235,7 @@ read_range_first: start_key 3 end_key 3
Summary:
-* If the start key flag is 0 the max key flag shouldn't even be set,
+* If the start key flag is 0 the max key flag shouldn't even be set,
and if it is, the query produced would be invalid.
* Multipart keys, even if containing some or all numeric columns,
are treated the same as non-numeric keys
@@ -1517,7 +1517,7 @@ static FEDERATED_SHARE *get_share(const char *table_name, TABLE *table)
*/
query.length(0);
- init_alloc_root(&mem_root, 256, 0, MYF(0));
+ init_alloc_root(&mem_root, "federated_share", 256, 0, MYF(0));
mysql_mutex_lock(&federated_mutex);
@@ -1536,8 +1536,8 @@ static FEDERATED_SHARE *get_share(const char *table_name, TABLE *table)
query.append(STRING_WITH_LEN("SELECT "));
for (field= table->field; *field; field++)
{
- append_ident(&query, (*field)->field_name,
- strlen((*field)->field_name), ident_quote_char);
+ append_ident(&query, (*field)->field_name.str,
+ (*field)->field_name.length, ident_quote_char);
query.append(STRING_WITH_LEN(", "));
}
/* chops off trailing comma */
@@ -1545,7 +1545,7 @@ static FEDERATED_SHARE *get_share(const char *table_name, TABLE *table)
query.append(STRING_WITH_LEN(" FROM "));
- append_ident(&query, tmp_share.table_name,
+ append_ident(&query, tmp_share.table_name,
tmp_share.table_name_length, ident_quote_char);
if (!(share= (FEDERATED_SHARE *) memdup_root(&mem_root, (char*)&tmp_share, sizeof(*share))) ||
@@ -1765,7 +1765,7 @@ bool ha_federated::append_stmt_insert(String *query)
insert_string.append(STRING_WITH_LEN("INSERT IGNORE INTO "));
else
insert_string.append(STRING_WITH_LEN("INSERT INTO "));
- append_ident(&insert_string, share->table_name, share->table_name_length,
+ append_ident(&insert_string, share->table_name, share->table_name_length,
ident_quote_char);
tmp_length= insert_string.length();
insert_string.append(STRING_WITH_LEN(" ("));
@@ -1779,8 +1779,8 @@ bool ha_federated::append_stmt_insert(String *query)
if (bitmap_is_set(table->write_set, (*field)->field_index))
{
/* append the field name */
- append_ident(&insert_string, (*field)->field_name,
- strlen((*field)->field_name), ident_quote_char);
+ append_ident(&insert_string, (*field)->field_name.str,
+ (*field)->field_name.length, ident_quote_char);
/* append commas between both fields and fieldnames */
/*
@@ -1929,11 +1929,11 @@ int ha_federated::write_row(uchar *buf)
if (bulk_insert.length == 0)
{
char insert_buffer[FEDERATED_QUERY_BUFFER_SIZE];
- String insert_string(insert_buffer, sizeof(insert_buffer),
+ String insert_string(insert_buffer, sizeof(insert_buffer),
&my_charset_bin);
insert_string.length(0);
append_stmt_insert(&insert_string);
- dynstr_append_mem(&bulk_insert, insert_string.ptr(),
+ dynstr_append_mem(&bulk_insert, insert_string.ptr(),
insert_string.length());
}
else
@@ -2071,7 +2071,7 @@ int ha_federated::optimize(THD* thd, HA_CHECK_OPT* check_opt)
query.set_charset(system_charset_info);
query.append(STRING_WITH_LEN("OPTIMIZE TABLE "));
- append_ident(&query, share->table_name, share->table_name_length,
+ append_ident(&query, share->table_name, share->table_name_length,
ident_quote_char);
if (real_query(query.ptr(), query.length()))
@@ -2093,7 +2093,7 @@ int ha_federated::repair(THD* thd, HA_CHECK_OPT* check_opt)
query.set_charset(system_charset_info);
query.append(STRING_WITH_LEN("REPAIR TABLE "));
- append_ident(&query, share->table_name, share->table_name_length,
+ append_ident(&query, share->table_name, share->table_name_length,
ident_quote_char);
if (check_opt->flags & T_QUICK)
query.append(STRING_WITH_LEN(" QUICK"));
@@ -2128,7 +2128,7 @@ int ha_federated::repair(THD* thd, HA_CHECK_OPT* check_opt)
Called from sql_select.cc, sql_acl.cc, sql_update.cc, and sql_insert.cc.
*/
-int ha_federated::update_row(const uchar *old_data, uchar *new_data)
+int ha_federated::update_row(const uchar *old_data, const uchar *new_data)
{
/*
This used to control how the query was built. If there was a
@@ -2193,8 +2193,8 @@ int ha_federated::update_row(const uchar *old_data, uchar *new_data)
{
if (bitmap_is_set(table->write_set, (*field)->field_index))
{
- size_t field_name_length= strlen((*field)->field_name);
- append_ident(&update_string, (*field)->field_name, field_name_length,
+ append_ident(&update_string, (*field)->field_name.str,
+ (*field)->field_name.length,
ident_quote_char);
update_string.append(STRING_WITH_LEN(" = "));
@@ -2219,8 +2219,8 @@ int ha_federated::update_row(const uchar *old_data, uchar *new_data)
if (bitmap_is_set(table->read_set, (*field)->field_index))
{
- size_t field_name_length= strlen((*field)->field_name);
- append_ident(&where_string, (*field)->field_name, field_name_length,
+ append_ident(&where_string, (*field)->field_name.str,
+ (*field)->field_name.length,
ident_quote_char);
if (field_in_record_is_null(table, *field, (char*) old_data))
where_string.append(STRING_WITH_LEN(" IS NULL "));
@@ -2302,8 +2302,8 @@ int ha_federated::delete_row(const uchar *buf)
found++;
if (bitmap_is_set(table->read_set, cur_field->field_index))
{
- append_ident(&delete_string, (*field)->field_name,
- strlen((*field)->field_name), ident_quote_char);
+ append_ident(&delete_string, (*field)->field_name.str,
+ (*field)->field_name.length, ident_quote_char);
data_string.length(0);
if (cur_field->is_null())
{
diff --git a/storage/federated/ha_federated.h b/storage/federated/ha_federated.h
index a23375cbe58..e264258a63a 100644
--- a/storage/federated/ha_federated.h
+++ b/storage/federated/ha_federated.h
@@ -210,7 +210,7 @@ public:
void start_bulk_insert(ha_rows rows, uint flags);
int end_bulk_insert();
int write_row(uchar *buf);
- int update_row(const uchar *old_data, uchar *new_data);
+ int update_row(const uchar *old_data, const uchar *new_data);
int delete_row(const uchar *buf);
int index_init(uint keynr, bool sorted);
ha_rows estimate_rows_upper_bound();
diff --git a/storage/federatedx/ha_federatedx.cc b/storage/federatedx/ha_federatedx.cc
index 8f9b499c611..8d5eee99476 100644
--- a/storage/federatedx/ha_federatedx.cc
+++ b/storage/federatedx/ha_federatedx.cc
@@ -892,8 +892,8 @@ uint ha_federatedx::convert_row_to_internal_format(uchar *record,
static bool emit_key_part_name(String *to, KEY_PART_INFO *part)
{
DBUG_ENTER("emit_key_part_name");
- if (append_ident(to, part->field->field_name,
- strlen(part->field->field_name), ident_quote_char))
+ if (append_ident(to, part->field->field_name.str,
+ part->field->field_name.length, ident_quote_char))
DBUG_RETURN(1); // Out of memory
DBUG_RETURN(0);
}
@@ -1520,7 +1520,7 @@ static FEDERATEDX_SERVER *get_server(FEDERATEDX_SHARE *share, TABLE *table)
mysql_mutex_assert_owner(&federatedx_mutex);
- init_alloc_root(&mem_root, 4096, 4096, MYF(0));
+ init_alloc_root(&mem_root, "federated", 4096, 4096, MYF(0));
fill_server(&mem_root, &tmp_server, share, table ? table->s->table_charset : 0);
@@ -1578,7 +1578,7 @@ static FEDERATEDX_SHARE *get_share(const char *table_name, TABLE *table)
query.length(0);
bzero(&tmp_share, sizeof(tmp_share));
- init_alloc_root(&mem_root, 256, 0, MYF(0));
+ init_alloc_root(&mem_root, "federated", 256, 0, MYF(0));
mysql_mutex_lock(&federatedx_mutex);
@@ -1597,8 +1597,8 @@ static FEDERATEDX_SHARE *get_share(const char *table_name, TABLE *table)
query.append(STRING_WITH_LEN("SELECT "));
for (field= table->field; *field; field++)
{
- append_ident(&query, (*field)->field_name,
- strlen((*field)->field_name), ident_quote_char);
+ append_ident(&query, (*field)->field_name.str,
+ (*field)->field_name.length, ident_quote_char);
query.append(STRING_WITH_LEN(", "));
}
/* chops off trailing comma */
@@ -1606,7 +1606,7 @@ static FEDERATEDX_SHARE *get_share(const char *table_name, TABLE *table)
query.append(STRING_WITH_LEN(" FROM "));
- append_ident(&query, tmp_share.table_name,
+ append_ident(&query, tmp_share.table_name,
tmp_share.table_name_length, ident_quote_char);
if (!(share= (FEDERATEDX_SHARE *) memdup_root(&mem_root, (char*)&tmp_share, sizeof(*share))) ||
@@ -1902,7 +1902,7 @@ bool ha_federatedx::append_stmt_insert(String *query)
insert_string.append(STRING_WITH_LEN("INSERT IGNORE INTO "));
else
insert_string.append(STRING_WITH_LEN("INSERT INTO "));
- append_ident(&insert_string, share->table_name, share->table_name_length,
+ append_ident(&insert_string, share->table_name, share->table_name_length,
ident_quote_char);
tmp_length= insert_string.length();
insert_string.append(STRING_WITH_LEN(" ("));
@@ -1916,8 +1916,8 @@ bool ha_federatedx::append_stmt_insert(String *query)
if (bitmap_is_set(table->write_set, (*field)->field_index))
{
/* append the field name */
- append_ident(&insert_string, (*field)->field_name,
- strlen((*field)->field_name), ident_quote_char);
+ append_ident(&insert_string, (*field)->field_name.str,
+ (*field)->field_name.length, ident_quote_char);
/* append commas between both fields and fieldnames */
/*
@@ -2214,7 +2214,7 @@ int ha_federatedx::optimize(THD* thd, HA_CHECK_OPT* check_opt)
query.set_charset(system_charset_info);
query.append(STRING_WITH_LEN("OPTIMIZE TABLE "));
- append_ident(&query, share->table_name, share->table_name_length,
+ append_ident(&query, share->table_name, share->table_name_length,
ident_quote_char);
DBUG_ASSERT(txn == get_txn(thd));
@@ -2240,7 +2240,7 @@ int ha_federatedx::repair(THD* thd, HA_CHECK_OPT* check_opt)
query.set_charset(system_charset_info);
query.append(STRING_WITH_LEN("REPAIR TABLE "));
- append_ident(&query, share->table_name, share->table_name_length,
+ append_ident(&query, share->table_name, share->table_name_length,
ident_quote_char);
if (check_opt->flags & T_QUICK)
query.append(STRING_WITH_LEN(" QUICK"));
@@ -2278,7 +2278,7 @@ int ha_federatedx::repair(THD* thd, HA_CHECK_OPT* check_opt)
Called from sql_select.cc, sql_acl.cc, sql_update.cc, and sql_insert.cc.
*/
-int ha_federatedx::update_row(const uchar *old_data, uchar *new_data)
+int ha_federatedx::update_row(const uchar *old_data, const uchar *new_data)
{
/*
This used to control how the query was built. If there was a
@@ -2344,8 +2344,8 @@ int ha_federatedx::update_row(const uchar *old_data, uchar *new_data)
{
if (bitmap_is_set(table->write_set, (*field)->field_index))
{
- uint field_name_length= strlen((*field)->field_name);
- append_ident(&update_string, (*field)->field_name, field_name_length,
+ append_ident(&update_string, (*field)->field_name.str,
+ (*field)->field_name.length,
ident_quote_char);
update_string.append(STRING_WITH_LEN(" = "));
@@ -2370,8 +2370,8 @@ int ha_federatedx::update_row(const uchar *old_data, uchar *new_data)
if (bitmap_is_set(table->read_set, (*field)->field_index))
{
- uint field_name_length= strlen((*field)->field_name);
- append_ident(&where_string, (*field)->field_name, field_name_length,
+ append_ident(&where_string, (*field)->field_name.str,
+ (*field)->field_name.length,
ident_quote_char);
if (field_in_record_is_null(table, *field, (char*) old_data))
where_string.append(STRING_WITH_LEN(" IS NULL "));
@@ -2457,8 +2457,8 @@ int ha_federatedx::delete_row(const uchar *buf)
found++;
if (bitmap_is_set(table->read_set, cur_field->field_index))
{
- append_ident(&delete_string, (*field)->field_name,
- strlen((*field)->field_name), ident_quote_char);
+ append_ident(&delete_string, (*field)->field_name.str,
+ (*field)->field_name.length, ident_quote_char);
data_string.length(0);
if (cur_field->is_null())
{
diff --git a/storage/federatedx/ha_federatedx.h b/storage/federatedx/ha_federatedx.h
index f3af7258623..a6bc84bf560 100644
--- a/storage/federatedx/ha_federatedx.h
+++ b/storage/federatedx/ha_federatedx.h
@@ -395,7 +395,7 @@ public:
void start_bulk_insert(ha_rows rows, uint flags);
int end_bulk_insert();
int write_row(uchar *buf);
- int update_row(const uchar *old_data, uchar *new_data);
+ int update_row(const uchar *old_data, const uchar *new_data);
int delete_row(const uchar *buf);
int index_init(uint keynr, bool sorted);
ha_rows estimate_rows_upper_bound();
diff --git a/storage/heap/ha_heap.cc b/storage/heap/ha_heap.cc
index 162943cca17..cb210f4394d 100644
--- a/storage/heap/ha_heap.cc
+++ b/storage/heap/ha_heap.cc
@@ -260,7 +260,7 @@ int ha_heap::write_row(uchar * buf)
return res;
}
-int ha_heap::update_row(const uchar * old_data, uchar * new_data)
+int ha_heap::update_row(const uchar * old_data, const uchar * new_data)
{
int res;
res= heap_update(file,old_data,new_data);
@@ -790,6 +790,54 @@ bool ha_heap::check_if_incompatible_data(HA_CREATE_INFO *info,
return COMPATIBLE_DATA_YES;
}
+
+/**
+ Find record by unique index (used in temporary tables with the index)
+
+ @param record (IN|OUT) the record to find
+ @param unique_idx (IN) number of index (for this engine)
+
+ @note It is like hp_search, but it uses the functions for raw records
+ where hp_search uses the functions for the index.
+
+ @retval 0 OK
+ @retval 1 Not found
+ @retval -1 Error
+*/
+
+int ha_heap::find_unique_row(uchar *record, uint unique_idx)
+{
+ DBUG_ENTER("ha_heap::find_unique_row");
+ HP_SHARE *share= file->s;
+ DBUG_ASSERT(inited==NONE);
+ HP_KEYDEF *keyinfo= share->keydef + unique_idx;
+ DBUG_ASSERT(keyinfo->algorithm == HA_KEY_ALG_HASH);
+ DBUG_ASSERT(keyinfo->flag & HA_NOSAME);
+ if (!share->records)
+ DBUG_RETURN(1); // not found
+ HASH_INFO *pos= hp_find_hash(&keyinfo->block,
+ hp_mask(hp_rec_hashnr(keyinfo, record),
+ share->blength, share->records));
+ do
+ {
+ if (!hp_rec_key_cmp(keyinfo, pos->ptr_to_rec, record))
+ {
+ file->current_hash_ptr= pos;
+ file->current_ptr= pos->ptr_to_rec;
+ file->update = HA_STATE_AKTIV;
+ /*
+ We compare it only by record in the index, so better to read all
+ records.
+ */
+ memcpy(record, file->current_ptr, (size_t) share->reclength);
+
+ DBUG_RETURN(0); // found and position set
+ }
+ }
+ while ((pos= pos->next_key));
+ DBUG_RETURN(1); // not found
+}
+
struct st_mysql_storage_engine heap_storage_engine=
{ MYSQL_HANDLERTON_INTERFACE_VERSION };
diff --git a/storage/heap/ha_heap.h b/storage/heap/ha_heap.h
index 503d3b896ac..e17c18c8b14 100644
--- a/storage/heap/ha_heap.h
+++ b/storage/heap/ha_heap.h
@@ -71,7 +71,7 @@ public:
int close(void);
void set_keys_for_scanning(void);
int write_row(uchar * buf);
- int update_row(const uchar * old_data, uchar * new_data);
+ int update_row(const uchar * old_data, const uchar * new_data);
int delete_row(const uchar * buf);
virtual void get_auto_increment(ulonglong offset, ulonglong increment,
ulonglong nb_desired_values,
@@ -115,6 +115,7 @@ public:
return memcmp(ref1, ref2, sizeof(HEAP_PTR));
}
bool check_if_incompatible_data(HA_CREATE_INFO *info, uint table_changes);
+ int find_unique_row(uchar *record, uint unique_idx);
private:
void update_key_stats();
};
diff --git a/storage/heap/hp_block.c b/storage/heap/hp_block.c
index aa5343a0717..6ecab0d08c4 100644
--- a/storage/heap/hp_block.c
+++ b/storage/heap/hp_block.c
@@ -71,9 +71,9 @@ int hp_get_new_block(HP_SHARE *info, HP_BLOCK *block, size_t *alloc_length)
lower levels.
For example, for level 0, we allocate data for X rows.
- When level 0 is full, we allocate data for HPTRS_IN_NODE + X rows.
+ When level 0 is full, we allocate data for HP_PTRS_IN_NOD + X rows.
Next time we allocate data for X rows.
- When level 1 is full, we allocate data for HPTRS_IN_NODE at level 2 and 1
+ When level 1 is full, we allocate data for HP_PTRS_IN_NOD at level 2 and 1
+ X rows at level 0.
*/
*alloc_length= (sizeof(HP_PTRS) * ((i == block->levels) ? i : i - 1) +
diff --git a/storage/innobase/dict/dict0dict.cc b/storage/innobase/dict/dict0dict.cc
index b93eb4d6b17..2b049eaec23 100644
--- a/storage/innobase/dict/dict0dict.cc
+++ b/storage/innobase/dict/dict0dict.cc
@@ -1688,6 +1688,8 @@ dict_table_rename_in_cache(
return(err);
}
+ fil_name_write_rename(table->space, old_path, new_path);
+
bool success = fil_rename_tablespace(
table->space, old_path, new_name, new_path);
@@ -2454,6 +2456,7 @@ dict_index_add_to_cache_w_vcol(
ut_d(mem_heap_validate(index->heap));
ut_a(!dict_index_is_clust(index)
|| UT_LIST_GET_LEN(table->indexes) == 0);
+ ut_ad(dict_index_is_clust(index) || !table->no_rollback());
if (!dict_index_find_cols(table, index, add_v)) {
diff --git a/storage/innobase/dict/dict0load.cc b/storage/innobase/dict/dict0load.cc
index 3606b902510..0ac5594fbc4 100644
--- a/storage/innobase/dict/dict0load.cc
+++ b/storage/innobase/dict/dict0load.cc
@@ -1162,11 +1162,6 @@ dict_sys_tables_type_valid(ulint type, bool not_redundant)
return(false);
}
- /* ATOMIC_WRITES cannot be 3; it is the 10.3 NO_ROLLBACK flag. */
- if (!(~type & DICT_TF_MASK_ATOMIC_WRITES)) {
- return(false);
- }
-
return(dict_tf_is_valid_not_redundant(type));
}
@@ -1187,7 +1182,8 @@ dict_sys_tables_type_to_tf(ulint type, bool not_redundant)
| DICT_TF_MASK_ATOMIC_BLOBS
| DICT_TF_MASK_DATA_DIR
| DICT_TF_MASK_PAGE_COMPRESSION
- | DICT_TF_MASK_PAGE_COMPRESSION_LEVEL);
+ | DICT_TF_MASK_PAGE_COMPRESSION_LEVEL
+ | DICT_TF_MASK_NO_ROLLBACK);
ut_ad(dict_tf_is_valid(flags));
return(flags);
@@ -1239,7 +1235,8 @@ dict_sys_tables_rec_read(
MariaDB 10.2.2 introduced the SHARED_SPACE flag from MySQL 5.7,
shifting the flags PAGE_COMPRESSION, PAGE_COMPRESSION_LEVEL,
- ATOMIC_WRITES by one bit. The SHARED_SPACE flag would always
+ ATOMIC_WRITES (repurposed to NO_ROLLBACK in 10.3.1) by one bit.
+ The SHARED_SPACE flag would always
be written as 0 by MariaDB, because MariaDB does not support
CREATE TABLESPACE or CREATE TABLE...TABLESPACE for InnoDB.
@@ -1447,7 +1444,7 @@ dict_check_sys_tables(
look to see if it is already in the tablespace cache. */
if (fil_space_for_table_exists_in_mem(
space_id, table_name.m_name,
- false, true, NULL, 0, flags)) {
+ false, NULL, flags)) {
/* Recovery can open a datafile that does not
match SYS_DATAFILES. If they don't match, update
SYS_DATAFILES. */
@@ -2853,8 +2850,7 @@ dict_load_tablespace(
/* The tablespace may already be open. */
if (fil_space_for_table_exists_in_mem(
- table->space, space_name, false,
- true, heap, table->id, table->flags)) {
+ table->space, space_name, false, heap, table->flags)) {
return;
}
diff --git a/storage/innobase/dict/dict0mem.cc b/storage/innobase/dict/dict0mem.cc
index 7ade7735048..ba3fa177572 100644
--- a/storage/innobase/dict/dict0mem.cc
+++ b/storage/innobase/dict/dict0mem.cc
@@ -50,6 +50,14 @@ static const char* innobase_system_databases[] = {
NullS
};
+/** The start of the table basename suffix for partitioned tables */
+const char table_name_t::part_suffix[4]
+#ifdef _WIN32
+= "#p#";
+#else
+= "#P#";
+#endif
+
/** An integer randomly initialized at startup, used to make a temporary
table name as unique as possible. */
static ib_uint32_t dict_temp_file_num;
diff --git a/storage/innobase/fil/fil0fil.cc b/storage/innobase/fil/fil0fil.cc
index abcdb90c375..9ba61404b10 100644
--- a/storage/innobase/fil/fil0fil.cc
+++ b/storage/innobase/fil/fil0fil.cc
@@ -2330,7 +2330,7 @@ fil_op_write_log(
@param[in,out] mtr mini-transaction */
static
void
-fil_name_write_rename(
+fil_name_write_rename_low(
ulint space_id,
ulint first_page_no,
const char* old_name,
@@ -2344,6 +2344,23 @@ fil_name_write_rename(
space_id, first_page_no, old_name, new_name, 0, mtr);
}
+/** Write redo log for renaming a file.
+@param[in] space_id tablespace id
+@param[in] old_name tablespace file name
+@param[in] new_name tablespace file name after renaming */
+void
+fil_name_write_rename(
+ ulint space_id,
+ const char* old_name,
+ const char* new_name)
+{
+ mtr_t mtr;
+ mtr.start();
+ fil_name_write_rename_low(space_id, 0, old_name, new_name, &mtr);
+ mtr.commit();
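+ /* Flush the redo log up to the commit LSN, so that the rename
+ record is durable before the file is renamed. */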
+ log_write_up_to(mtr.commit_lsn(), true);
+}
+
/** Write MLOG_FILE_NAME for a file.
@param[in] space_id tablespace id
@param[in] first_page_no first page number in the file
@@ -3594,12 +3611,7 @@ func_exit:
ut_ad(strchr(new_file_name, OS_PATH_SEPARATOR) != NULL);
if (!recv_recovery_on) {
- mtr_t mtr;
-
- mtr.start();
- fil_name_write_rename(
- id, 0, old_file_name, new_file_name, &mtr);
- mtr.commit();
+ fil_name_write_rename(id, old_file_name, new_file_name);
log_mutex_enter();
}
@@ -4665,9 +4677,7 @@ startup, there may be many tablespaces which are not yet in the memory cache.
@param[in] print_error_if_does_not_exist
Print detailed error information to the
error log if a matching tablespace is not found from memory.
-@param[in] adjust_space Whether to adjust space id on mismatch
@param[in] heap Heap memory
-@param[in] table_id table id
@param[in] table_flags table flags
@return true if a matching tablespace exists in the memory cache */
bool
@@ -4675,9 +4685,7 @@ fil_space_for_table_exists_in_mem(
ulint id,
const char* name,
bool print_error_if_does_not_exist,
- bool adjust_space,
mem_heap_t* heap,
- table_id_t table_id,
ulint table_flags)
{
fil_space_t* fnamespace;
@@ -4702,41 +4710,6 @@ fil_space_for_table_exists_in_mem(
} else if (!valid || space == fnamespace) {
/* Found with the same file name, or got a flag mismatch. */
goto func_exit;
- } else if (adjust_space
- && row_is_mysql_tmp_table_name(space->name)
- && !row_is_mysql_tmp_table_name(name)) {
- /* Info from fnamespace comes from the ibd file
- itself, it can be different from data obtained from
- System tables since renaming files is not
- transactional. We shall adjust the ibd file name
- according to system table info. */
- mutex_exit(&fil_system->mutex);
-
- DBUG_EXECUTE_IF("ib_crash_before_adjust_fil_space",
- DBUG_SUICIDE(););
-
- const char* tmp_name = dict_mem_create_temporary_tablename(
- heap, name, table_id);
-
- fil_rename_tablespace(
- fnamespace->id,
- UT_LIST_GET_FIRST(fnamespace->chain)->name,
- tmp_name, NULL);
-
- DBUG_EXECUTE_IF("ib_crash_after_adjust_one_fil_space",
- DBUG_SUICIDE(););
-
- fil_rename_tablespace(
- id, UT_LIST_GET_FIRST(space->chain)->name,
- name, NULL);
-
- DBUG_EXECUTE_IF("ib_crash_after_adjust_fil_space",
- DBUG_SUICIDE(););
-
- mutex_enter(&fil_system->mutex);
- fnamespace = fil_space_get_by_name(name);
- ut_ad(space == fnamespace);
- goto func_exit;
}
if (!print_error_if_does_not_exist) {
@@ -6174,51 +6147,6 @@ fil_delete_file(
}
}
-/**
-Iterate over all the spaces in the space list and fetch the
-tablespace names. It will return a copy of the name that must be
-freed by the caller using: delete[].
-@return DB_SUCCESS if all OK. */
-dberr_t
-fil_get_space_names(
-/*================*/
- space_name_list_t& space_name_list)
- /*!< in/out: List to append to */
-{
- fil_space_t* space;
- dberr_t err = DB_SUCCESS;
-
- mutex_enter(&fil_system->mutex);
-
- for (space = UT_LIST_GET_FIRST(fil_system->space_list);
- space != NULL;
- space = UT_LIST_GET_NEXT(space_list, space)) {
-
- if (space->purpose == FIL_TYPE_TABLESPACE) {
- ulint len;
- char* name;
-
- len = ::strlen(space->name);
- name = UT_NEW_ARRAY_NOKEY(char, len + 1);
-
- if (name == 0) {
- /* Caller to free elements allocated so far. */
- err = DB_OUT_OF_MEMORY;
- break;
- }
-
- memcpy(name, space->name, len);
- name[len] = 0;
-
- space_name_list.push_back(name);
- }
- }
-
- mutex_exit(&fil_system->mutex);
-
- return(err);
-}
-
/** Generate redo log for swapping two .ibd files
@param[in] old_table old table
@param[in] new_table new table
@@ -6274,7 +6202,7 @@ fil_mtr_rename_log(
return(err);
}
- fil_name_write_rename(
+ fil_name_write_rename_low(
old_table->space, 0, old_path, tmp_path, mtr);
ut_free(tmp_path);
@@ -6305,7 +6233,7 @@ fil_mtr_rename_log(
}
}
- fil_name_write_rename(
+ fil_name_write_rename_low(
new_table->space, 0, new_path, old_path, mtr);
ut_free(new_path);
diff --git a/storage/innobase/fts/fts0fts.cc b/storage/innobase/fts/fts0fts.cc
index c2628d609fa..cf21d87fc85 100644
--- a/storage/innobase/fts/fts0fts.cc
+++ b/storage/innobase/fts/fts0fts.cc
@@ -7149,15 +7149,6 @@ fts_drop_orphaned_tables(void)
que_t* graph;
ib_vector_t* tables;
ib_alloc_t* heap_alloc;
- space_name_list_t space_name_list;
- dberr_t error = DB_SUCCESS;
-
- /* Note: We have to free the memory after we are done with the list. */
- error = fil_get_space_names(space_name_list);
-
- if (error == DB_OUT_OF_MEMORY) {
- ib::fatal() << "Out of memory";
- }
heap = mem_heap_create(1024);
heap_alloc = ib_heap_allocator_create(heap);
@@ -7170,35 +7161,32 @@ fts_drop_orphaned_tables(void)
users can't map them back to table names and this will create
unnecessary clutter. */
- for (space_name_list_t::iterator it = space_name_list.begin();
- it != space_name_list.end();
- ++it) {
-
- fts_aux_table_t* fts_aux_table;
+ mutex_enter(&fil_system->mutex);
- fts_aux_table = static_cast<fts_aux_table_t*>(
- ib_vector_push(tables, NULL));
+ for (fil_space_t* space = UT_LIST_GET_FIRST(fil_system->space_list);
+ space != NULL;
+ space = UT_LIST_GET_NEXT(space_list, space)) {
- memset(fts_aux_table, 0x0, sizeof(*fts_aux_table));
-
- if (!fts_is_aux_table_name(fts_aux_table, *it, strlen(*it))) {
- ib_vector_pop(tables);
- } else {
- ulint len = strlen(*it);
-
- fts_aux_table->id = fil_space_get_id_by_name(*it);
+ if (space->purpose != FIL_TYPE_TABLESPACE) {
+ continue;
+ }
- /* We got this list from fil0fil.cc. The tablespace
- with this name must exist. */
- ut_a(fts_aux_table->id != ULINT_UNDEFINED);
+ fts_aux_table_t fts_aux_table;
+ memset(&fts_aux_table, 0x0, sizeof fts_aux_table);
- fts_aux_table->name = static_cast<char*>(
- mem_heap_dup(heap, *it, len + 1));
+ size_t len = strlen(space->name);
- fts_aux_table->name[len] = 0;
+ if (!fts_is_aux_table_name(&fts_aux_table, space->name, len)) {
+ continue;
}
+
+ fts_aux_table.id = space->id;
+ fts_aux_table.name = mem_heap_strdupl(heap, space->name, len);
+ ib_vector_push(tables, &fts_aux_table);
}
+ mutex_exit(&fil_system->mutex);
+
trx = trx_allocate_for_background();
trx->op_info = "dropping orphaned FTS tables";
row_mysql_lock_data_dictionary(trx);
@@ -7226,7 +7214,7 @@ fts_drop_orphaned_tables(void)
"CLOSE c;");
for (;;) {
- error = fts_eval_sql(trx, graph);
+ dberr_t error = fts_eval_sql(trx, graph);
if (error == DB_SUCCESS) {
fts_check_and_drop_orphaned_tables(trx, tables);
@@ -7259,14 +7247,6 @@ fts_drop_orphaned_tables(void)
if (heap != NULL) {
mem_heap_free(heap);
}
-
- /** Free the memory allocated to store the .ibd names. */
- for (space_name_list_t::iterator it = space_name_list.begin();
- it != space_name_list.end();
- ++it) {
-
- UT_DELETE_ARRAY(*it);
- }
}
/**********************************************************************//**
diff --git a/storage/innobase/handler/ha_innodb.cc b/storage/innobase/handler/ha_innodb.cc
index 1b230c4b91d..6fb88500c2e 100644
--- a/storage/innobase/handler/ha_innodb.cc
+++ b/storage/innobase/handler/ha_innodb.cc
@@ -296,11 +296,7 @@ is_partition(
{
/* We look for the pattern #P# to see if the table is a partitioned
MariaDB table. */
-#ifdef _WIN32
- return strstr(file_name, "#p#");
-#else
- return strstr(file_name, "#P#");
-#endif /* _WIN32 */
+ return strstr(file_name, table_name_t::part_suffix);
}
/** Signal to shut down InnoDB (NULL if shutdown was signaled, or if
@@ -2973,7 +2969,8 @@ innobase_copy_frm_flags_from_create_info(
ibool ps_on;
ibool ps_off;
- if (dict_table_is_temporary(innodb_table)) {
+ if (dict_table_is_temporary(innodb_table)
+ || innodb_table->no_rollback()) {
/* Temp tables do not use persistent stats. */
ps_on = FALSE;
ps_off = TRUE;
@@ -3059,6 +3056,7 @@ ha_innobase::ha_innobase(
*/
| HA_CAN_EXPORT
| HA_CAN_RTREEKEYS
+ | HA_CAN_TABLES_WITHOUT_ROLLBACK
| HA_CONCURRENT_OPTIMIZE
| (srv_force_primary_key ? HA_REQUIRE_PRIMARY_KEY : 0)
),
@@ -3232,7 +3230,7 @@ innobase_query_caching_of_table_permitted(
THD* thd, /*!< in: thd of the user who is trying to
store a result to the query cache or
retrieve it */
- char* full_name, /*!< in: normalized path to the table */
+ const char* full_name, /*!< in: normalized path to the table */
uint full_name_len, /*!< in: length of the normalized path
to the table */
ulonglong *unused) /*!< unused for this engine */
@@ -6062,7 +6060,7 @@ innobase_build_v_templ(
name = dict_table_get_v_col_name(ib_table, z);
}
- ut_ad(!ut_strcmp(name, field->field_name));
+ ut_ad(!ut_strcmp(name, field->field_name.str));
#endif
const dict_v_col_t* vcol;
@@ -6097,7 +6095,7 @@ innobase_build_v_templ(
const char* name = dict_table_get_col_name(
ib_table, j);
- ut_ad(!ut_strcmp(name, field->field_name));
+ ut_ad(!ut_strcmp(name, field->field_name.str));
#endif
s_templ->vtempl[j] = static_cast<
@@ -7772,7 +7770,7 @@ build_template_field(
ib::info() << "MySQL table "
<< table->s->table_name.str
<< " field " << j << " name "
- << table->field[j]->field_name;
+ << table->field[j]->field_name.str;
}
ib::error() << "Clustered record field for column " << i
@@ -7899,10 +7897,11 @@ ha_innobase::build_template(
ibool fetch_primary_key_cols = FALSE;
ulint i;
- if (m_prebuilt->select_lock_type == LOCK_X) {
+ if (m_prebuilt->select_lock_type == LOCK_X || m_prebuilt->table->no_rollback()) {
/* We always retrieve the whole clustered index record if we
use exclusive row level locks, for example, if the read is
- done in an UPDATE statement. */
+ done in an UPDATE statement or if we are using a no-rollback
+ table. */
whole_row = true;
} else if (!whole_row) {
@@ -8658,7 +8657,7 @@ dberr_t
calc_row_difference(
upd_t* uvect,
const uchar* old_row,
- uchar* new_row,
+ const uchar* new_row,
TABLE* table,
uchar* upd_buff,
ulint buff_len,
@@ -8763,7 +8762,7 @@ calc_row_difference(
if (field_mysql_type == MYSQL_TYPE_LONGLONG
&& prebuilt->table->fts
&& innobase_strcasecmp(
- field->field_name, FTS_DOC_ID_COL_NAME) == 0) {
+ field->field_name.str, FTS_DOC_ID_COL_NAME) == 0) {
doc_id = (doc_id_t) mach_read_from_n_little_endian(
n_ptr, 8);
if (doc_id == 0) {
@@ -9148,7 +9147,7 @@ if its index columns are updated!
int
ha_innobase::update_row(
const uchar* old_row,
- uchar* new_row)
+ const uchar* new_row)
{
int err;
@@ -11157,7 +11156,7 @@ create_table_check_doc_id_col(
col_len = field->pack_length();
- if (innobase_strcasecmp(field->field_name,
+ if (innobase_strcasecmp(field->field_name.str,
FTS_DOC_ID_COL_NAME) == 0) {
/* Note the name is case sensitive due to
@@ -11165,7 +11164,7 @@ create_table_check_doc_id_col(
if (col_type == DATA_INT
&& !field->real_maybe_null()
&& col_len == sizeof(doc_id_t)
- && (strcmp(field->field_name,
+ && (strcmp(field->field_name.str,
FTS_DOC_ID_COL_NAME) == 0)) {
*doc_id_col = i;
} else {
@@ -11177,7 +11176,7 @@ create_table_check_doc_id_col(
" of BIGINT NOT NULL type, and named"
" in all capitalized characters");
my_error(ER_WRONG_COLUMN_NAME, MYF(0),
- field->field_name);
+ field->field_name.str);
*doc_id_col = ULINT_UNDEFINED;
}
@@ -11248,7 +11247,7 @@ innodb_base_col_setup(
for (z = 0; z < table->n_cols; z++) {
const char* name = dict_table_get_col_name(table, z);
if (!innobase_strcasecmp(name,
- base_field->field_name)) {
+ base_field->field_name.str)) {
break;
}
}
@@ -11289,7 +11288,7 @@ innodb_base_col_setup_for_stored(
const char* name = dict_table_get_col_name(
table, z);
if (!innobase_strcasecmp(
- name, base_field->field_name)) {
+ name, base_field->field_name.str)) {
break;
}
}
@@ -11433,7 +11432,7 @@ create_table_info_t::create_table_def()
" column type and try to re-create"
" the table with an appropriate"
" column type.",
- table->name.m_name, field->field_name);
+ table->name.m_name, field->field_name.str);
goto err_col;
}
@@ -11492,9 +11491,9 @@ create_table_info_t::create_table_def()
/* First check whether the column to be added has a
system reserved name. */
- if (dict_col_name_is_reserved(field->field_name)){
+ if (dict_col_name_is_reserved(field->field_name.str)){
my_error(ER_WRONG_COLUMN_NAME, MYF(0),
- field->field_name);
+ field->field_name.str);
err_col:
dict_mem_table_free(table);
mem_heap_free(heap);
@@ -11506,7 +11505,7 @@ err_col:
if (!is_virtual) {
dict_mem_table_add_col(table, heap,
- field->field_name, col_type,
+ field->field_name.str, col_type,
dtype_form_prtype(
(ulint) field->type()
| nulls_allowed | unsigned_type
@@ -11515,7 +11514,7 @@ err_col:
col_len);
} else {
dict_mem_table_add_v_col(table, heap,
- field->field_name, col_type,
+ field->field_name.str, col_type,
dtype_form_prtype(
(ulint) field->type()
| nulls_allowed | unsigned_type
@@ -11697,7 +11696,7 @@ create_index(
}
dict_mem_index_add_field(
- index, key_part->field->field_name, 0);
+ index, key_part->field->field_name.str, 0);
}
DBUG_RETURN(convert_error_code_to_mysql(
@@ -11749,7 +11748,7 @@ create_index(
if (field == NULL)
ut_error;
- const char* field_name = key_part->field->field_name;
+ const char* field_name = key_part->field->field_name.str;
col_type = get_innobase_type_from_mysql_type(
&is_unsigned, key_part->field);
@@ -11775,7 +11774,7 @@ create_index(
" inappropriate data type. Table"
" name %s, column name %s.",
table_name,
- key_part->field->field_name);
+ key_part->field->field_name.str);
prefix_len = 0;
}
@@ -12407,7 +12406,7 @@ create_table_info_t::innobase_table_flags()
/* Do a pre-check on FTS DOC ID index */
if (!(key->flags & HA_NOSAME)
|| strcmp(key->name, FTS_DOC_ID_INDEX_NAME)
- || strcmp(key->key_part[0].field->field_name,
+ || strcmp(key->key_part[0].field->field_name.str,
FTS_DOC_ID_COL_NAME)) {
fts_doc_id_index_bad = key->name;
}
@@ -12588,6 +12587,10 @@ index_bad:
default_compression_level : static_cast<ulint>(options->page_compression_level),
0);
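+ /* A SEQUENCE is stored as a no-rollback table: its writes bypass
+ undo logging and row locking. */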
+ if (m_form->s->table_type == TABLE_TYPE_SEQUENCE) {
+ m_flags |= DICT_TF_MASK_NO_ROLLBACK;
+ }
+
/* Set the flags2 when create table or alter tables */
m_flags2 |= DICT_TF2_FTS_AUX_HEX_NAME;
DBUG_EXECUTE_IF("innodb_test_wrong_fts_aux_table_name",
@@ -13212,6 +13215,10 @@ ha_innobase::create(
trx_t* trx;
DBUG_ENTER("ha_innobase::create");
+ DBUG_ASSERT(form->s == table_share);
+ DBUG_ASSERT(table_share->table_type == TABLE_TYPE_SEQUENCE
+ || table_share->table_type == TABLE_TYPE_NORMAL);
+
create_table_info_t info(ha_thd(),
form,
create_info,
@@ -13741,17 +13748,13 @@ innobase_rename_table(
TrxInInnoDB trx_in_innodb(trx);
trx_start_if_not_started(trx, true);
+ ut_ad(trx->will_lock > 0);
/* Serialize data dictionary operations with dictionary mutex:
no deadlocks can occur then in these operations. */
row_mysql_lock_data_dictionary(trx);
- /* Transaction must be flagged as a locking transaction or it hasn't
- been started yet. */
-
- ut_a(trx->will_lock > 0);
-
error = row_rename_table_for_mysql(norm_from, norm_to, trx, TRUE);
if (error != DB_SUCCESS) {
@@ -15328,8 +15331,8 @@ get_foreign_key_info(
char tmp_buff[NAME_LEN+1];
char name_buff[NAME_LEN+1];
const char* ptr;
- LEX_STRING* referenced_key_name;
- LEX_STRING* name = NULL;
+ LEX_CSTRING* referenced_key_name;
+ LEX_CSTRING* name = NULL;
ptr = dict_remove_db_name(foreign->id);
f_key_info.foreign_id = thd_make_lex_string(
@@ -16063,24 +16066,24 @@ ha_innobase::external_lock(
}
/* Check for UPDATEs in read-only mode. */
- if (srv_read_only_mode
- && (thd_sql_command(thd) == SQLCOM_UPDATE
- || thd_sql_command(thd) == SQLCOM_INSERT
- || thd_sql_command(thd) == SQLCOM_REPLACE
- || thd_sql_command(thd) == SQLCOM_DROP_TABLE
- || thd_sql_command(thd) == SQLCOM_ALTER_TABLE
- || thd_sql_command(thd) == SQLCOM_OPTIMIZE
- || (thd_sql_command(thd) == SQLCOM_CREATE_TABLE
- && lock_type == F_WRLCK)
- || thd_sql_command(thd) == SQLCOM_CREATE_INDEX
- || thd_sql_command(thd) == SQLCOM_DROP_INDEX
- || thd_sql_command(thd) == SQLCOM_DELETE)) {
-
- if (thd_sql_command(thd) == SQLCOM_CREATE_TABLE) {
- ib_senderrf(thd, IB_LOG_LEVEL_WARN,
- ER_READ_ONLY_MODE);
- DBUG_RETURN(HA_ERR_TABLE_READONLY);
- } else {
+ if (srv_read_only_mode) {
+ switch (thd_sql_command(thd)) {
+ case SQLCOM_CREATE_TABLE:
+ if (lock_type != F_WRLCK) {
+ break;
+ }
+ /* fall through */
+ case SQLCOM_UPDATE:
+ case SQLCOM_INSERT:
+ case SQLCOM_REPLACE:
+ case SQLCOM_DROP_TABLE:
+ case SQLCOM_ALTER_TABLE:
+ case SQLCOM_OPTIMIZE:
+ case SQLCOM_CREATE_INDEX:
+ case SQLCOM_DROP_INDEX:
+ case SQLCOM_CREATE_SEQUENCE:
+ case SQLCOM_DROP_SEQUENCE:
+ case SQLCOM_DELETE:
ib_senderrf(thd, IB_LOG_LEVEL_WARN,
ER_READ_ONLY_MODE);
DBUG_RETURN(HA_ERR_TABLE_READONLY);
@@ -16939,6 +16942,8 @@ ha_innobase::store_lock(
&& lock_type <= TL_WRITE))
|| sql_command == SQLCOM_CREATE_INDEX
|| sql_command == SQLCOM_DROP_INDEX
+ || sql_command == SQLCOM_CREATE_SEQUENCE
+ || sql_command == SQLCOM_DROP_SEQUENCE
|| sql_command == SQLCOM_DELETE)) {
ib_senderrf(trx->mysql_thd,
@@ -16968,7 +16973,8 @@ ha_innobase::store_lock(
}
/* Check for DROP TABLE */
- } else if (sql_command == SQLCOM_DROP_TABLE) {
+ } else if (sql_command == SQLCOM_DROP_TABLE ||
+ sql_command == SQLCOM_DROP_SEQUENCE) {
/* MySQL calls this function in DROP TABLE though this table
handle may belong to another thd that is running a query. Let
@@ -17003,7 +17009,8 @@ ha_innobase::store_lock(
/* Use consistent read for checksum table */
if (sql_command == SQLCOM_CHECKSUM
- || (sql_command == SQLCOM_ANALYZE && lock_type == TL_READ)
+ || sql_command == SQLCOM_CREATE_SEQUENCE
+ || (sql_command == SQLCOM_ANALYZE && lock_type == TL_READ)
|| ((srv_locks_unsafe_for_binlog
|| trx->isolation_level <= TRX_ISO_READ_COMMITTED)
&& trx->isolation_level != TRX_ISO_SERIALIZABLE
@@ -17012,6 +17019,7 @@ ha_innobase::store_lock(
&& (sql_command == SQLCOM_INSERT_SELECT
|| sql_command == SQLCOM_REPLACE_SELECT
|| sql_command == SQLCOM_UPDATE
+ || sql_command == SQLCOM_CREATE_SEQUENCE
|| sql_command == SQLCOM_CREATE_TABLE))) {
/* If we either have innobase_locks_unsafe_for_binlog
@@ -17507,7 +17515,7 @@ my_bool
ha_innobase::register_query_cache_table(
/*====================================*/
THD* thd, /*!< in: user thread handle */
- char* table_key, /*!< in: normalized path to the
+ const char* table_key, /*!< in: normalized path to the
table */
uint key_length, /*!< in: length of the normalized
path to the table */
diff --git a/storage/innobase/handler/ha_innodb.h b/storage/innobase/handler/ha_innodb.h
index 9dccfa38016..34c01929a56 100644
--- a/storage/innobase/handler/ha_innodb.h
+++ b/storage/innobase/handler/ha_innodb.h
@@ -138,7 +138,7 @@ public:
int write_row(uchar * buf);
- int update_row(const uchar * old_data, uchar * new_data);
+ int update_row(const uchar * old_data, const uchar * new_data);
int delete_row(const uchar * buf);
@@ -289,7 +289,7 @@ public:
*/
my_bool register_query_cache_table(
THD* thd,
- char* table_key,
+ const char* table_key,
uint key_length,
qc_engine_callback* call_back,
ulonglong* engine_data);
diff --git a/storage/innobase/handler/handler0alter.cc b/storage/innobase/handler/handler0alter.cc
index c7368a43192..0f0534853a8 100644
--- a/storage/innobase/handler/handler0alter.cc
+++ b/storage/innobase/handler/handler0alter.cc
@@ -529,8 +529,8 @@ check_v_col_in_order(
}
if (my_strcasecmp(system_charset_info,
- field->field_name,
- new_field->field_name) != 0) {
+ field->field_name.str,
+ new_field->field_name.str) != 0) {
/* different column */
return(false);
} else {
@@ -831,7 +831,7 @@ ha_innobase::check_if_supported_inplace_alter(
&& innobase_fulltext_exist(altered_table)
&& !my_strcasecmp(
system_charset_info,
- key_part->field->field_name,
+ key_part->field->field_name.str,
FTS_DOC_ID_COL_NAME)) {
ha_alter_info->unsupported_reason = innobase_get_err_msg(
ER_ALTER_OPERATION_NOT_SUPPORTED_REASON_HIDDEN_FTS);
@@ -905,7 +905,7 @@ ha_innobase::check_if_supported_inplace_alter(
if (!my_strcasecmp(
system_charset_info,
- (*fp)->field_name,
+ (*fp)->field_name.str,
FTS_DOC_ID_COL_NAME)) {
ha_alter_info->unsupported_reason = innobase_get_err_msg(
ER_ALTER_OPERATION_NOT_SUPPORTED_REASON_CHANGE_FTS);
@@ -1094,7 +1094,7 @@ innobase_init_foreign(
/*==================*/
dict_foreign_t* foreign, /*!< in/out: structure to
initialize */
- char* constraint_name, /*!< in/out: constraint name if
+ const char* constraint_name, /*!< in/out: constraint name if
exists */
dict_table_t* table, /*!< in: foreign table */
dict_index_t* index, /*!< in: foreign key index */
@@ -1302,7 +1302,7 @@ no_match:
}
if (innobase_strcasecmp(col_names[j],
- key_part.field->field_name)) {
+ key_part.field->field_name.str)) {
/* Name mismatch */
goto no_match;
}
@@ -2090,7 +2090,7 @@ name_ok:
}
my_error(ER_WRONG_KEY_COLUMN, MYF(0), "InnoDB",
- field->field_name);
+ field->field_name.str);
return(ER_WRONG_KEY_COLUMN);
}
@@ -2106,7 +2106,7 @@ name_ok:
}
my_error(ER_WRONG_KEY_COLUMN, MYF(0), "InnoDB",
- field->field_name);
+ field->field_name.str);
return(ER_WRONG_KEY_COLUMN);
}
}
@@ -2329,20 +2329,20 @@ innobase_fts_check_doc_id_col(
}
if (my_strcasecmp(system_charset_info,
- field->field_name, FTS_DOC_ID_COL_NAME)) {
+ field->field_name.str, FTS_DOC_ID_COL_NAME)) {
continue;
}
- if (strcmp(field->field_name, FTS_DOC_ID_COL_NAME)) {
+ if (strcmp(field->field_name.str, FTS_DOC_ID_COL_NAME)) {
my_error(ER_WRONG_COLUMN_NAME, MYF(0),
- field->field_name);
+ field->field_name.str);
} else if (field->type() != MYSQL_TYPE_LONGLONG
|| field->pack_length() != 8
|| field->real_maybe_null()
|| !(field->flags & UNSIGNED_FLAG)
|| innobase_is_v_fld(field)) {
my_error(ER_INNODB_FT_WRONG_DOCID_COLUMN, MYF(0),
- field->field_name);
+ field->field_name.str);
} else {
*fts_doc_col_no = i - *num_v;
}
@@ -2415,7 +2415,7 @@ innobase_fts_check_doc_id_index(
if ((key.flags & HA_NOSAME)
&& key.user_defined_key_parts == 1
&& !strcmp(key.name, FTS_DOC_ID_INDEX_NAME)
- && !strcmp(key.key_part[0].field->field_name,
+ && !strcmp(key.key_part[0].field->field_name.str,
FTS_DOC_ID_COL_NAME)) {
if (fts_doc_col_no) {
*fts_doc_col_no = ULINT_UNDEFINED;
@@ -2494,7 +2494,7 @@ innobase_fts_check_doc_id_index_in_def(
if (!(key->flags & HA_NOSAME)
|| key->user_defined_key_parts != 1
|| strcmp(key->name, FTS_DOC_ID_INDEX_NAME)
- || strcmp(key->key_part[0].field->field_name,
+ || strcmp(key->key_part[0].field->field_name.str,
FTS_DOC_ID_COL_NAME)) {
return(FTS_INCORRECT_DOC_ID_INDEX);
}
@@ -2985,7 +2985,7 @@ innobase_check_foreigns(
if (!new_field || (new_field->flags & NOT_NULL_FLAG)) {
if (innobase_check_foreigns_low(
user_table, drop_fk, n_drop_fk,
- (*fp)->field_name, !new_field)) {
+ (*fp)->field_name.str, !new_field)) {
return(true);
}
}
@@ -3247,7 +3247,7 @@ innobase_get_col_names(
}
if (new_field->field == table->field[old_i]) {
- cols[old_i - num_v] = new_field->field_name;
+ cols[old_i - num_v] = new_field->field_name.str;
break;
}
}
@@ -3535,7 +3535,7 @@ innobase_check_gis_columns(
ulint col_nr = dict_table_has_column(
table,
- key_part.field->field_name,
+ key_part.field->field_name.str,
key_part.fieldnr);
ut_ad(col_nr != table->n_def);
dict_col_t* col = &table->cols[col_nr];
@@ -3658,7 +3658,7 @@ prepare_inplace_add_virtual(
if (charset_no > MAX_CHAR_COLL_NUM) {
my_error(ER_WRONG_KEY_COLUMN, MYF(0), "InnoDB",
- field->field_name);
+ field->field_name.str);
return(true);
}
} else {
@@ -3689,7 +3689,7 @@ prepare_inplace_add_virtual(
ctx->add_vcol[j].m_col.ind = i - 1;
ctx->add_vcol[j].num_base = 0;
- ctx->add_vcol_name[j] = field->field_name;
+ ctx->add_vcol_name[j] = field->field_name.str;
ctx->add_vcol[j].base_col = NULL;
ctx->add_vcol[j].v_pos = ctx->old_table->n_v_cols
- ctx->num_to_drop_vcol + j;
@@ -3777,7 +3777,7 @@ prepare_inplace_drop_virtual(
if (charset_no > MAX_CHAR_COLL_NUM) {
my_error(ER_WRONG_KEY_COLUMN, MYF(0), "InnoDB",
- field->field_name);
+ field->field_name.str);
return(true);
}
} else {
@@ -3808,7 +3808,7 @@ prepare_inplace_drop_virtual(
ctx->drop_vcol[j].m_col.ind = i;
- ctx->drop_vcol_name[j] = field->field_name;
+ ctx->drop_vcol_name[j] = field->field_name.str;
dict_v_col_t* v_col = dict_table_get_nth_v_col_mysql(
ctx->old_table, i);
@@ -4514,11 +4514,18 @@ prepare_inplace_alter_table_dict(
to rebuild the table with a temporary name. */
if (new_clustered) {
- const char* new_table_name
- = dict_mem_create_temporary_tablename(
- ctx->heap,
- ctx->new_table->name.m_name,
- ctx->new_table->id);
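+ /* Compose the new table name from the old database name,
+ the altered table name and any partition suffix. */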
+ size_t dblen = ctx->old_table->name.dblen() + 1;
+ size_t tablen = altered_table->s->table_name.length;
+ const char* part = ctx->old_table->name.part();
+ size_t partlen = part ? strlen(part) : 0;
+ char* new_table_name = static_cast<char*>(
+ mem_heap_alloc(ctx->heap,
+ dblen + tablen + partlen + 1));
+ memcpy(new_table_name, ctx->old_table->name.m_name, dblen);
+ memcpy(new_table_name + dblen,
+ altered_table->s->table_name.str, tablen);
+ memcpy(new_table_name + dblen + tablen,
+ part ? part : "", partlen + 1);
ulint n_cols = 0;
ulint n_v_cols = 0;
dtuple_t* add_cols;
@@ -4641,7 +4648,7 @@ prepare_inplace_alter_table_dict(
dict_mem_table_free(
ctx->new_table);
my_error(ER_WRONG_KEY_COLUMN, MYF(0), "InnoDB",
- field->field_name);
+ field->field_name.str);
goto new_clustered_failed;
}
} else {
@@ -4669,17 +4676,17 @@ prepare_inplace_alter_table_dict(
}
- if (dict_col_name_is_reserved(field->field_name)) {
+ if (dict_col_name_is_reserved(field->field_name.str)) {
dict_mem_table_free(ctx->new_table);
my_error(ER_WRONG_COLUMN_NAME, MYF(0),
- field->field_name);
+ field->field_name.str);
goto new_clustered_failed;
}
if (is_virtual) {
dict_mem_table_add_v_col(
ctx->new_table, ctx->heap,
- field->field_name,
+ field->field_name.str,
col_type,
dtype_form_prtype(
field_type, charset_no)
@@ -4688,7 +4695,7 @@ prepare_inplace_alter_table_dict(
} else {
dict_mem_table_add_col(
ctx->new_table, ctx->heap,
- field->field_name,
+ field->field_name.str,
col_type,
dtype_form_prtype(
field_type, charset_no),
@@ -5669,7 +5676,7 @@ err_exit_no_heap:
cf_it.rewind();
while (Create_field* cf = cf_it++) {
if (cf->field == *fp) {
- name = cf->field_name;
+ name = cf->field_name.str;
goto check_if_ok_to_rename;
}
}
@@ -5679,7 +5686,7 @@ check_if_ok_to_rename:
/* Prohibit renaming a column from FTS_DOC_ID
if full-text indexes exist. */
if (!my_strcasecmp(system_charset_info,
- (*fp)->field_name,
+ (*fp)->field_name.str,
FTS_DOC_ID_COL_NAME)
&& innobase_fulltext_exist(altered_table)) {
my_error(ER_INNODB_FT_WRONG_DOCID_COLUMN,
@@ -7138,8 +7145,8 @@ innobase_rename_columns_try(
if (innobase_rename_column_try(
ctx->old_table, trx, table_name,
col_n,
- cf->field->field_name,
- cf->field_name,
+ cf->field->field_name.str,
+ cf->field_name.str,
ctx->need_rebuild(),
is_virtual)) {
return(true);
@@ -7363,8 +7370,8 @@ innobase_rename_or_enlarge_columns_cache(
if ((*fp)->flags & FIELD_IS_RENAMED) {
dict_mem_table_col_rename(
user_table, col_n,
- cf->field->field_name,
- cf->field_name, is_virtual);
+ cf->field->field_name.str,
+ cf->field_name.str, is_virtual);
}
break;
@@ -7415,7 +7422,7 @@ commit_set_autoinc(
const Field* ai = old_table->found_next_number_field;
ut_ad(!strcmp(dict_table_get_col_name(ctx->old_table,
innodb_col_no(ai)),
- ai->field_name));
+ ai->field_name.str));
ib_uint64_t autoinc
= ha_alter_info->create_info->auto_increment_value;
diff --git a/storage/innobase/include/btr0cur.h b/storage/innobase/include/btr0cur.h
index d346a047eac..f0948fdaebf 100644
--- a/storage/innobase/include/btr0cur.h
+++ b/storage/innobase/include/btr0cur.h
@@ -42,6 +42,11 @@ enum {
/** sys fields will be found in the update vector or inserted
entry */
BTR_KEEP_SYS_FLAG = 4,
+
+ /** no rollback */
+ BTR_NO_ROLLBACK = BTR_NO_UNDO_LOG_FLAG
+ | BTR_NO_LOCKING_FLAG | BTR_KEEP_SYS_FLAG,
+
/** btr_cur_pessimistic_update() must keep cursor position
when moving columns to big_rec */
BTR_KEEP_POS_FLAG = 8,
diff --git a/storage/innobase/include/dict0dict.h b/storage/innobase/include/dict0dict.h
index 82f3cf403ba..b22e38d4bd0 100644
--- a/storage/innobase/include/dict0dict.h
+++ b/storage/innobase/include/dict0dict.h
@@ -411,7 +411,7 @@ dict_table_rename_in_cache(
/*!< in: in ALTER TABLE we want
to preserve the original table name
in constraints which reference it */
- MY_ATTRIBUTE((nonnull, warn_unused_result));
+ MY_ATTRIBUTE((nonnull));
/** Removes an index from the dictionary cache.
@param[in,out] table table whose index to remove
diff --git a/storage/innobase/include/dict0dict.ic b/storage/innobase/include/dict0dict.ic
index 15b6ed0ad86..fd54f772958 100644
--- a/storage/innobase/include/dict0dict.ic
+++ b/storage/innobase/include/dict0dict.ic
@@ -651,7 +651,7 @@ dict_tf_is_valid(
bit. For ROW_FORMAT=REDUNDANT, only the DATA_DIR flag
(which we cleared above) can be set. If any other flags
are set, the flags are invalid. */
- return(flags == 0);
+ return(flags == 0 || flags == DICT_TF_MASK_NO_ROLLBACK);
}
return(dict_tf_is_valid_not_redundant(flags));
@@ -872,7 +872,8 @@ dict_tf_to_sys_tables_type(
| DICT_TF_MASK_ATOMIC_BLOBS
| DICT_TF_MASK_DATA_DIR
| DICT_TF_MASK_PAGE_COMPRESSION
- | DICT_TF_MASK_PAGE_COMPRESSION_LEVEL);
+ | DICT_TF_MASK_PAGE_COMPRESSION_LEVEL
+ | DICT_TF_MASK_NO_ROLLBACK);
return(type);
}
diff --git a/storage/innobase/include/dict0mem.h b/storage/innobase/include/dict0mem.h
index 3c8366ae28e..909605b99a4 100644
--- a/storage/innobase/include/dict0mem.h
+++ b/storage/innobase/include/dict0mem.h
@@ -142,10 +142,10 @@ Width of the page compression flag
#define DICT_TF_WIDTH_PAGE_COMPRESSION_LEVEL 4
/**
-Width of atomic writes flag
-DEFAULT=0, ON = 1, OFF = 2
+The NO_ROLLBACK flag (3=yes; the values 1 and 2 used to stand for
+ATOMIC_WRITES=ON and ATOMIC_WRITES=OFF between MariaDB 10.1.0 and 10.2.3)
*/
-#define DICT_TF_WIDTH_ATOMIC_WRITES 2
+#define DICT_TF_WIDTH_NO_ROLLBACK 2
/** Width of all the currently known table flags */
#define DICT_TF_BITS (DICT_TF_WIDTH_COMPACT \
@@ -153,7 +153,8 @@ DEFAULT=0, ON = 1, OFF = 2
+ DICT_TF_WIDTH_ATOMIC_BLOBS \
+ DICT_TF_WIDTH_DATA_DIR \
+ DICT_TF_WIDTH_PAGE_COMPRESSION \
- + DICT_TF_WIDTH_PAGE_COMPRESSION_LEVEL)
+ + DICT_TF_WIDTH_PAGE_COMPRESSION_LEVEL \
+ + DICT_TF_WIDTH_NO_ROLLBACK)
/** Zero relative shift position of the COMPACT field */
#define DICT_TF_POS_COMPACT 0
@@ -172,11 +173,11 @@ DEFAULT=0, ON = 1, OFF = 2
/** Zero relative shift position of the PAGE_COMPRESSION_LEVEL field */
#define DICT_TF_POS_PAGE_COMPRESSION_LEVEL (DICT_TF_POS_PAGE_COMPRESSION \
+ DICT_TF_WIDTH_PAGE_COMPRESSION)
-/** Zero relative shift position of the ATOMIC_WRITES field */
-#define DICT_TF_POS_ATOMIC_WRITES (DICT_TF_POS_PAGE_COMPRESSION_LEVEL \
+/** Zero relative shift position of the NO_ROLLBACK field */
+#define DICT_TF_POS_NO_ROLLBACK (DICT_TF_POS_PAGE_COMPRESSION_LEVEL \
+ DICT_TF_WIDTH_PAGE_COMPRESSION_LEVEL)
-#define DICT_TF_POS_UNUSED (DICT_TF_POS_ATOMIC_WRITES \
- + DICT_TF_WIDTH_ATOMIC_WRITES)
+#define DICT_TF_POS_UNUSED (DICT_TF_POS_NO_ROLLBACK \
+ + DICT_TF_WIDTH_NO_ROLLBACK)
/** Bit mask of the COMPACT field */
#define DICT_TF_MASK_COMPACT \
@@ -202,10 +203,10 @@ DEFAULT=0, ON = 1, OFF = 2
#define DICT_TF_MASK_PAGE_COMPRESSION_LEVEL \
((~(~0U << DICT_TF_WIDTH_PAGE_COMPRESSION_LEVEL)) \
<< DICT_TF_POS_PAGE_COMPRESSION_LEVEL)
-/** Bit mask of the ATOMIC_WRITES field */
-#define DICT_TF_MASK_ATOMIC_WRITES \
- ((~(~0U << DICT_TF_WIDTH_ATOMIC_WRITES)) \
- << DICT_TF_POS_ATOMIC_WRITES)
+/** Bit mask of the NO_ROLLBACK field */
+#define DICT_TF_MASK_NO_ROLLBACK \
+ ((~(~0U << DICT_TF_WIDTH_NO_ROLLBACK)) \
+ << DICT_TF_POS_NO_ROLLBACK)
/** Return the value of the COMPACT field */
#define DICT_TF_GET_COMPACT(flags) \
@@ -231,10 +232,6 @@ DEFAULT=0, ON = 1, OFF = 2
#define DICT_TF_GET_PAGE_COMPRESSION_LEVEL(flags) \
((flags & DICT_TF_MASK_PAGE_COMPRESSION_LEVEL) \
>> DICT_TF_POS_PAGE_COMPRESSION_LEVEL)
-/** Return the value of the ATOMIC_WRITES field */
-#define DICT_TF_GET_ATOMIC_WRITES(flags) \
- ((flags & DICT_TF_MASK_ATOMIC_WRITES) \
- >> DICT_TF_POS_ATOMIC_WRITES)
/* @} */
@@ -568,6 +565,29 @@ struct table_name_t
{
/** The name in internal representation */
char* m_name;
+
+ /** @return the end of the schema name */
+ const char* dbend() const
+ {
+ const char* sep = strchr(m_name, '/');
+ ut_ad(sep);
+ return sep;
+ }
+
+ /** @return the length of the schema name, in bytes */
+ size_t dblen() const { return dbend() - m_name; }
+
+ /** Determine the filename-safe encoded table name.
+ @return the filename-safe encoded table name */
+ const char* basename() const { return dbend() + 1; }
+
+ /** The start of the table basename suffix for partitioned tables */
+ static const char part_suffix[4];
+
+ /** Determine the partition or subpartition name suffix.
+ @return the partition name
+ @retval NULL if the table is not partitioned */
+ const char* part() const { return strstr(basename(), part_suffix); }
};
/** Data structure for a column in a table */
@@ -1321,6 +1341,11 @@ struct dict_table_t {
@return whether the last handle was released */
inline bool release();
+ /** @return whether rollback is disabled for this table */
+ bool no_rollback() const
+ {
+ return !(~flags & DICT_TF_MASK_NO_ROLLBACK);
+ }
/** @return whether this is a temporary table */
bool is_temporary() const
{
diff --git a/storage/innobase/include/fil0fil.h b/storage/innobase/include/fil0fil.h
index c2152ce11d0..924c15ca14e 100644
--- a/storage/innobase/include/fil0fil.h
+++ b/storage/innobase/include/fil0fil.h
@@ -35,16 +35,11 @@ Created 10/25/1995 Heikki Tuuri
#include "page0size.h"
#include "ibuf0types.h"
-#include <list>
-#include <vector>
-
// Forward declaration
struct trx_t;
class page_id_t;
class truncate_t;
-typedef std::list<char*, ut_allocator<char*> > space_name_list_t;
-
/** Structure containing encryption specification */
struct fil_space_crypt_t;
@@ -874,6 +869,15 @@ fil_create_directory_for_tablename(
/*===============================*/
const char* name); /*!< in: name in the standard
'databasename/tablename' format */
+/** Write redo log for renaming a file.
+@param[in] space_id tablespace id
+@param[in] old_name tablespace file name
+@param[in] new_name tablespace file name after renaming */
+void
+fil_name_write_rename(
+ ulint space_id,
+ const char* old_name,
+ const char* new_name);
/********************************************************//**
Recreates table indexes by applying
TRUNCATE log record during recovery.
@@ -1153,27 +1157,24 @@ fil_file_readdir_next_file(
os_file_dir_t dir, /*!< in: directory stream */
os_file_stat_t* info); /*!< in/out: buffer where the
info is returned */
-/*******************************************************************//**
-Returns true if a matching tablespace exists in the InnoDB tablespace memory
-cache. Note that if we have not done a crash recovery at the database startup,
-there may be many tablespaces which are not yet in the memory cache.
+/** Determine if a matching tablespace exists in the InnoDB tablespace
+memory cache. Note that if we have not done a crash recovery at the database
+startup, there may be many tablespaces which are not yet in the memory cache.
+@param[in] id Tablespace ID
+@param[in] name Tablespace name used in fil_space_create().
+@param[in] print_error_if_does_not_exist
+ Print detailed error information to the
+error log if a matching tablespace is not found from memory.
+@param[in] heap Heap memory
+@param[in] table_flags table flags
@return true if a matching tablespace exists in the memory cache */
bool
fil_space_for_table_exists_in_mem(
-/*==============================*/
- ulint id, /*!< in: space id */
- const char* name, /*!< in: table name in the standard
- 'databasename/tablename' format */
+ ulint id,
+ const char* name,
bool print_error_if_does_not_exist,
- /*!< in: print detailed error
- information to the .err log if a
- matching tablespace is not found from
- memory */
- bool adjust_space, /*!< in: whether to adjust space id
- when find table space mismatch */
- mem_heap_t* heap, /*!< in: heap memory */
- table_id_t table_id, /*!< in: table id */
- ulint table_flags); /*!< in: table flags */
+ mem_heap_t* heap,
+ ulint table_flags);
/** Try to extend a tablespace if it is smaller than the specified size.
@param[in,out] space tablespace
@@ -1496,18 +1497,6 @@ ulint
fil_space_get_id_by_name(
const char* tablespace);
-/**
-Iterate over all the spaces in the space list and fetch the
-tablespace names. It will return a copy of the name that must be
-freed by the caller using: delete[].
-@return DB_SUCCESS if all OK. */
-dberr_t
-fil_get_space_names(
-/*================*/
- space_name_list_t& space_name_list)
- /*!< in/out: Vector for collecting the names. */
- MY_ATTRIBUTE((warn_unused_result));
-
/** Generate redo log for swapping two .ibd files
@param[in] old_table old table
@param[in] new_table new table
diff --git a/storage/innobase/include/os0once.h b/storage/innobase/include/os0once.h
index 05a45a69f33..551e78d24ba 100644
--- a/storage/innobase/include/os0once.h
+++ b/storage/innobase/include/os0once.h
@@ -30,6 +30,7 @@ Created Feb 20, 2014 Vasil Dimov
#include "univ.i"
#include "ut0ut.h"
+#include "my_cpu.h"
/** Execute a given function exactly once in a multi-threaded environment
or wait for the function to be executed by another thread.
@@ -110,7 +111,7 @@ public:
ut_error;
}
- UT_RELAX_CPU();
+ MY_RELAX_CPU();
}
}
}
diff --git a/storage/innobase/include/row0mysql.h b/storage/innobase/include/row0mysql.h
index 7a34c025dab..bc537dac4c9 100644
--- a/storage/innobase/include/row0mysql.h
+++ b/storage/innobase/include/row0mysql.h
@@ -422,6 +422,10 @@ ulint
row_get_background_drop_list_len_low(void);
/*======================================*/
+/** Drop garbage tables during recovery. */
+void
+row_mysql_drop_garbage_tables();
+
/*********************************************************************//**
Sets an exclusive lock on a table.
@return error code or DB_SUCCESS */
diff --git a/storage/innobase/include/trx0rec.h b/storage/innobase/include/trx0rec.h
index be859fa9450..2551d5759ae 100644
--- a/storage/innobase/include/trx0rec.h
+++ b/storage/innobase/include/trx0rec.h
@@ -179,6 +179,13 @@ trx_undo_rec_get_partial_row(
mem_heap_t* heap) /*!< in: memory heap from which the memory
needed is allocated */
MY_ATTRIBUTE((nonnull, warn_unused_result));
+/** Report a RENAME TABLE operation.
+@param[in,out] trx transaction
+@param[in] table table that is being renamed
+@return DB_SUCCESS or error code */
+dberr_t
+trx_undo_report_rename(trx_t* trx, const dict_table_t* table)
+ MY_ATTRIBUTE((nonnull, warn_unused_result));
/***********************************************************************//**
Writes information to an undo log about an insert, update, or a delete marking
of a clustered index record. This information is used in a rollback of the
@@ -322,6 +329,7 @@ trx_undo_read_v_idx(
compilation info multiplied by 16 is ORed to this value in an undo log
record */
+#define TRX_UNDO_RENAME_TABLE 9 /*!< RENAME TABLE */
#define TRX_UNDO_INSERT_REC 11 /* fresh insert into clustered index */
#define TRX_UNDO_UPD_EXIST_REC 12 /* update of a non-delete-marked
record */
diff --git a/storage/innobase/include/trx0rec.ic b/storage/innobase/include/trx0rec.ic
index c2c756484b2..d0771a94b05 100644
--- a/storage/innobase/include/trx0rec.ic
+++ b/storage/innobase/include/trx0rec.ic
@@ -95,5 +95,8 @@ trx_undo_rec_copy(
len = mach_read_from_2(undo_rec)
- ut_align_offset(undo_rec, UNIV_PAGE_SIZE);
ut_ad(len < UNIV_PAGE_SIZE);
- return((trx_undo_rec_t*) mem_heap_dup(heap, undo_rec, len));
+ trx_undo_rec_t* rec = static_cast<trx_undo_rec_t*>(
+ mem_heap_dup(heap, undo_rec, len));
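+ /* In the copy, replace the next-record offset (the first two
+ bytes of an undo record) with the record length, so that the
+ length of the copy can be read from the copy itself. */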
+ mach_write_to_2(rec, len);
+ return rec;
}
diff --git a/storage/innobase/include/ut0dbg.h b/storage/innobase/include/ut0dbg.h
index fd9a064ba35..13228ad852f 100644
--- a/storage/innobase/include/ut0dbg.h
+++ b/storage/innobase/include/ut0dbg.h
@@ -62,7 +62,7 @@ ut_dbg_assertion_failed(
/** Debug assertion */
#define ut_ad DBUG_ASSERT
-#ifdef UNIV_DEBUG
+#if defined(UNIV_DEBUG) || !defined(DBUG_OFF)
/** Debug statement. Does nothing unless UNIV_DEBUG is defined. */
#define ut_d(EXPR) EXPR
#else
diff --git a/storage/innobase/include/ut0ut.h b/storage/innobase/include/ut0ut.h
index 4e9c2599933..c91e24ad314 100644
--- a/storage/innobase/include/ut0ut.h
+++ b/storage/innobase/include/ut0ut.h
@@ -52,35 +52,6 @@ Created 1/20/1994 Heikki Tuuri
/** Time stamp */
typedef time_t ib_time_t;
-#ifdef HAVE_PAUSE_INSTRUCTION
- /* According to the gcc info page, asm volatile means that the
- instruction has important side-effects and must not be removed.
- Also asm volatile may trigger a memory barrier (spilling all registers
- to memory). */
-# ifdef __SUNPRO_CC
-# define UT_RELAX_CPU() asm ("pause" )
-# else
-# define UT_RELAX_CPU() __asm__ __volatile__ ("pause")
-# endif /* __SUNPRO_CC */
-
-#elif defined(HAVE_FAKE_PAUSE_INSTRUCTION)
-# define UT_RELAX_CPU() __asm__ __volatile__ ("rep; nop")
-#elif defined _WIN32
- /* In the Win32 API, the x86 PAUSE instruction is executed by calling
- the YieldProcessor macro defined in WinNT.h. It is a CPU architecture-
- independent way by using YieldProcessor. */
-# define UT_RELAX_CPU() YieldProcessor()
-#elif defined(__powerpc__) && defined __GLIBC__
-# include <sys/platform/ppc.h>
-# define UT_RELAX_CPU() __ppc_get_timebase()
-#else
-# define UT_RELAX_CPU() do { \
- volatile int32 volatile_var; \
- int32 oldval= 0; \
- my_atomic_cas32(&volatile_var, &oldval, 1); \
- } while (0)
-#endif
-
#if defined (__GNUC__)
# define UT_COMPILER_BARRIER() __asm__ __volatile__ ("":::"memory")
#elif defined (_MSC_VER)
@@ -89,15 +60,6 @@ typedef time_t ib_time_t;
# define UT_COMPILER_BARRIER()
#endif
-#if defined(HAVE_HMT_PRIORITY_INSTRUCTION)
-# include <sys/platform/ppc.h>
-# define UT_LOW_PRIORITY_CPU() __ppc_set_ppr_low()
-# define UT_RESUME_PRIORITY_CPU() __ppc_set_ppr_med()
-#else
-# define UT_LOW_PRIORITY_CPU() ((void)0)
-# define UT_RESUME_PRIORITY_CPU() ((void)0)
-#endif
-
/*********************************************************************//**
Delays execution for at most max_wait_us microseconds or returns earlier
if cond becomes true.
diff --git a/storage/innobase/log/log0recv.cc b/storage/innobase/log/log0recv.cc
index e55c01aca3a..03304ed4dca 100644
--- a/storage/innobase/log/log0recv.cc
+++ b/storage/innobase/log/log0recv.cc
@@ -3486,6 +3486,8 @@ recv_recovery_rollback_active(void)
/* Drop partially created indexes. */
row_merge_drop_temp_indexes();
+ /* Drop garbage tables. */
+ row_mysql_drop_garbage_tables();
/* Drop any auxiliary tables that were not dropped when the
parent table was dropped. This can happen if the parent table
diff --git a/storage/innobase/que/que0que.cc b/storage/innobase/que/que0que.cc
index 87d37e347f1..937f215dc39 100644
--- a/storage/innobase/que/que0que.cc
+++ b/storage/innobase/que/que0que.cc
@@ -1026,8 +1026,10 @@ que_thr_step(
} else if (type == QUE_NODE_SELECT) {
thr = row_sel_step(thr);
} else if (type == QUE_NODE_INSERT) {
+ trx_start_if_not_started_xa(thr_get_trx(thr), true);
thr = row_ins_step(thr);
} else if (type == QUE_NODE_UPDATE) {
+ trx_start_if_not_started_xa(thr_get_trx(thr), true);
thr = row_upd_step(thr);
} else if (type == QUE_NODE_FETCH) {
thr = fetch_step(thr);
diff --git a/storage/innobase/row/row0ins.cc b/storage/innobase/row/row0ins.cc
index 98e94e06464..195f61ba3f2 100644
--- a/storage/innobase/row/row0ins.cc
+++ b/storage/innobase/row/row0ins.cc
@@ -3148,9 +3148,9 @@ row_ins_clust_index_entry(
n_uniq = dict_index_is_unique(index) ? index->n_uniq : 0;
- ulint flags = dict_table_is_temporary(index->table)
- ? BTR_NO_LOCKING_FLAG
- : 0;
+ ulint flags = index->table->no_rollback() ? BTR_NO_ROLLBACK
+ : dict_table_is_temporary(index->table)
+ ? BTR_NO_LOCKING_FLAG : 0;
/* For intermediate table during copy alter table,
skip the undo log and record lock checking for
@@ -3280,7 +3280,7 @@ row_ins_index_entry(
dtuple_t* entry, /*!< in/out: index entry to insert */
que_thr_t* thr) /*!< in: query thread */
{
- ut_ad(thr_get_trx(thr)->id != 0);
+ ut_ad(thr_get_trx(thr)->id || index->table->no_rollback());
DBUG_EXECUTE_IF("row_ins_index_entry_timeout", {
DBUG_SET("-d,row_ins_index_entry_timeout");
@@ -3740,8 +3740,6 @@ row_ins_step(
trx = thr_get_trx(thr);
- trx_start_if_not_started_xa(trx, true);
-
node = static_cast<ins_node_t*>(thr->run_node);
ut_ad(que_node_get_type(node) == QUE_NODE_INSERT);
@@ -3763,6 +3761,25 @@ row_ins_step(
table during the search operation, and there is no need to set
it again here. But we must write trx->id to node->sys_buf. */
+ if (node->table->no_rollback()) {
+ /* No-rollback tables should only be written to by a
+ single thread at a time, but there can be multiple
+ concurrent readers. We must hold an open table handle. */
+ DBUG_ASSERT(node->table->n_ref_count > 0);
+ DBUG_ASSERT(node->ins_type == INS_DIRECT);
+ /* No-rollback tables can consist only of a single index. */
+ DBUG_ASSERT(UT_LIST_GET_LEN(node->entry_list) == 1);
+ DBUG_ASSERT(UT_LIST_GET_LEN(node->table->indexes) == 1);
+ /* There should be no possibility for interruption and
+ restarting here. In theory, we could allow resumption
+ from the INS_NODE_INSERT_ENTRIES state here. */
+ DBUG_ASSERT(node->state == INS_NODE_SET_IX_LOCK);
+ node->index = dict_table_get_first_index(node->table);
+ node->entry = UT_LIST_GET_FIRST(node->entry_list);
+ node->state = INS_NODE_INSERT_ENTRIES;
+ goto do_insert;
+ }
+
trx_write_trx_id(&node->sys_buf[DATA_ROW_ID_LEN], trx->id);
if (node->state == INS_NODE_SET_IX_LOCK) {
@@ -3812,7 +3829,7 @@ same_trx:
return(thr);
}
-
+do_insert:
/* DO THE CHECKS OF THE CONSISTENCY CONSTRAINTS HERE */
err = row_ins(node, thr);
diff --git a/storage/innobase/row/row0mysql.cc b/storage/innobase/row/row0mysql.cc
index c7414d9c766..d367e5446a3 100644
--- a/storage/innobase/row/row0mysql.cc
+++ b/storage/innobase/row/row0mysql.cc
@@ -64,6 +64,7 @@ Created 9/17/2000 Heikki Tuuri
#include "trx0roll.h"
#include "trx0undo.h"
#include "row0ext.h"
+#include "srv0start.h"
#include "ut0new.h"
#include <algorithm>
@@ -1379,7 +1380,9 @@ row_insert_for_mysql(
row_mysql_delay_if_needed();
- trx_start_if_not_started_xa(trx, true);
+ if (!table->no_rollback()) {
+ trx_start_if_not_started_xa(trx, true);
+ }
row_get_prebuilt_insert_row(prebuilt);
node = prebuilt->ins_node;
@@ -1771,7 +1774,9 @@ row_update_for_mysql(row_prebuilt_t* prebuilt)
init_fts_doc_id_for_ref(table, &fk_depth);
- trx_start_if_not_started_xa(trx, true);
+ if (!table->no_rollback()) {
+ trx_start_if_not_started_xa(trx, true);
+ }
if (dict_table_is_referenced_by_foreign_key(table)) {
/* Share lock the data dictionary to prevent any
@@ -2628,12 +2633,6 @@ row_drop_table_for_mysql_in_background(
error = row_drop_table_for_mysql(name, trx, FALSE, FALSE);
- /* Flush the log to reduce probability that the .frm files and
- the InnoDB data dictionary get out-of-sync if the user runs
- with innodb_flush_log_at_trx_commit = 0 */
-
- log_buffer_flush_to_disk();
-
trx_commit_for_mysql(trx);
trx_free_for_background(trx);
@@ -2671,8 +2670,11 @@ next:
return(n_tables + n_tables_dropped);
}
- table = dict_table_open_on_id(drop->table_id, FALSE,
- DICT_TABLE_OP_OPEN_ONLY_IF_CACHED);
+ /* On fast shutdown, just empty the list without dropping tables. */
+ table = srv_shutdown_state == SRV_SHUTDOWN_NONE || !srv_fast_shutdown
+ ? dict_table_open_on_id(drop->table_id, FALSE,
+ DICT_TABLE_OP_OPEN_ONLY_IF_CACHED)
+ : NULL;
if (!table) {
n_tables_dropped++;
@@ -2727,6 +2729,74 @@ row_get_background_drop_list_len_low(void)
return(len);
}
+/** Drop garbage tables during recovery. */
+void
+row_mysql_drop_garbage_tables()
+{
+ mem_heap_t* heap = mem_heap_create(FN_REFLEN);
+ btr_pcur_t pcur;
+ mtr_t mtr;
+ trx_t* trx = trx_allocate_for_background();
+ trx->op_info = "dropping garbage tables";
+ row_mysql_lock_data_dictionary(trx);
+
+ mtr.start();
+ btr_pcur_open_at_index_side(
+ true, dict_table_get_first_index(dict_sys->sys_tables),
+ BTR_SEARCH_LEAF, &pcur, true, 0, &mtr);
+
+ for (;;) {
+ const rec_t* rec;
+ const byte* field;
+ ulint len;
+ const char* table_name;
+
+ btr_pcur_move_to_next_user_rec(&pcur, &mtr);
+
+ if (!btr_pcur_is_on_user_rec(&pcur)) {
+ break;
+ }
+
+ rec = btr_pcur_get_rec(&pcur);
+ if (rec_get_deleted_flag(rec, 0)) {
+ continue;
+ }
+
+ field = rec_get_nth_field_old(rec, 0/*NAME*/, &len);
+ if (len == UNIV_SQL_NULL || len == 0) {
+ /* Corrupted SYS_TABLES.NAME */
+ continue;
+ }
+
+ table_name = mem_heap_strdupl(
+ heap,
+ reinterpret_cast<const char*>(field), len);
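+ /* Garbage tables carry the temporary-file prefix (#sql) in
+ their basename; they are left behind by an interrupted
+ ALTER TABLE or by a deferred DROP TABLE. */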
+ if (strstr(table_name, "/" TEMP_FILE_PREFIX "-")) {
+ btr_pcur_store_position(&pcur, &mtr);
+ btr_pcur_commit_specify_mtr(&pcur, &mtr);
+
+ if (dict_load_table(table_name, true,
+ DICT_ERR_IGNORE_ALL)) {
+ row_drop_table_for_mysql(
+ table_name, trx, FALSE, FALSE);
+ trx_commit_for_mysql(trx);
+ }
+
+ mtr.start();
+ btr_pcur_restore_position(BTR_SEARCH_LEAF,
+ &pcur, &mtr);
+ }
+
+ mem_heap_empty(heap);
+ }
+
+ btr_pcur_close(&pcur);
+ mtr.commit();
+ row_mysql_unlock_data_dictionary(trx);
+ trx_free_for_background(trx);
+ mem_heap_free(heap);
+}
+
/*********************************************************************//**
If a table is not yet in the drop list, adds the table to the list of tables
which the master thread drops in background. We need this on Unix because in
@@ -3317,7 +3387,7 @@ row_drop_single_table_tablespace(
/* If the tablespace is not in the cache, just delete the file. */
if (!fil_space_for_table_exists_in_mem(
- space_id, tablename, true, false, NULL, 0, table_flags)) {
+ space_id, tablename, true, NULL, table_flags)) {
/* Force a delete of any discarded or temporary files. */
fil_delete_file(filepath);
@@ -3454,7 +3524,7 @@ row_drop_table_for_mysql(
RemoteDatafile::delete_link_file(name);
}
- if (!dict_table_is_temporary(table)) {
+ if (!dict_table_is_temporary(table) && !table->no_rollback()) {
dict_stats_recalc_pool_del(table);
dict_stats_defrag_pool_del(table, NULL);
@@ -3531,11 +3601,7 @@ row_drop_table_for_mysql(
}
- DBUG_EXECUTE_IF("row_drop_table_add_to_background",
- row_add_table_to_background_drop_list(table->id);
- err = DB_SUCCESS;
- goto funct_exit;
- );
+ DBUG_EXECUTE_IF("row_drop_table_add_to_background", goto defer;);
/* TODO: could we replace the counter n_foreign_key_checks_running
with lock checks on the table? Acquire here an exclusive lock on the
@@ -3544,17 +3610,22 @@ row_drop_table_for_mysql(
checks take an IS or IX lock on the table. */
if (table->n_foreign_key_checks_running > 0) {
- if (row_add_table_to_background_drop_list(table->id)) {
- ib::info() << "You are trying to drop table "
- << table->name
- << " though there is a foreign key check"
- " running on it. Adding the table to the"
- " background drop queue.";
+defer:
+ if (!strstr(table->name.m_name, "/" TEMP_FILE_PREFIX)) {
+ heap = mem_heap_create(FN_REFLEN);
+ const char* tmp_name
+ = dict_mem_create_temporary_tablename(
+ heap, table->name.m_name, table->id);
+ ib::info() << "Deferring DROP TABLE " << table->name
+ << "; renaming to " << tmp_name;
+ err = row_rename_table_for_mysql(
+ table->name.m_name, tmp_name, trx, false);
+ } else {
+ err = DB_SUCCESS;
+ }
+ if (err == DB_SUCCESS) {
+ row_add_table_to_background_drop_list(table->id);
}
-
- /* We return DB_SUCCESS to MySQL though the drop will
- happen lazily later */
- err = DB_SUCCESS;
goto funct_exit;
}
@@ -3575,26 +3646,9 @@ row_drop_table_for_mysql(
/* Wait on background threads to stop using table */
fil_wait_crypt_bg_threads(table);
- if (table->get_ref_count() == 0) {
- lock_remove_all_on_table(table, TRUE);
- ut_a(table->n_rec_locks == 0);
- } else if (table->get_ref_count() > 0 || table->n_rec_locks > 0) {
- if (row_add_table_to_background_drop_list(table->id)) {
- ib::info() << "MySQL is trying to drop table "
- << table->name
- << " though there are still open handles to"
- " it. Adding the table to the background drop"
- " queue.";
-
- /* We return DB_SUCCESS to MySQL though the drop will
- happen lazily later */
- err = DB_SUCCESS;
- } else {
- /* The table is already in the background drop list */
- err = DB_ERROR;
- }
-
- goto funct_exit;
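+ /* If the table is still referenced or locked, rename it out of
+ the way and defer the drop to the background thread. */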
+ if (table->get_ref_count() > 0 || table->n_rec_locks > 0
+ || lock_table_has_locks(table)) {
+ goto defer;
}
/* The "to_be_dropped" marks table that is to be dropped, but
@@ -3604,11 +3658,6 @@ row_drop_table_for_mysql(
and it is free to be dropped */
table->to_be_dropped = false;
- /* If we get this far then the table to be dropped must not have
- any table or record locks on it. */
-
- ut_a(!lock_table_has_locks(table));
-
switch (trx_get_dict_operation(trx)) {
case TRX_DICT_OP_NONE:
trx_set_dict_operation(trx, TRX_DICT_OP_TABLE);
@@ -3620,8 +3669,8 @@ row_drop_table_for_mysql(
TRX_DICT_OP_INDEX, we should be dropping auxiliary
tables for full-text indexes or temp tables. */
ut_ad(strstr(table->name.m_name, "/FTS_") != NULL
- || strstr(table->name.m_name, TEMP_FILE_PREFIX_INNODB)
- != NULL);
+ || strstr(table->name.m_name,
+ TEMP_TABLE_PATH_PREFIX) != NULL);
}
/* Mark all indexes unavailable in the data dictionary cache
@@ -3793,9 +3842,11 @@ row_drop_table_for_mysql(
table_flags = table->flags;
ut_ad(!dict_table_is_temporary(table));
- err = row_drop_ancillary_fts_tables(table, trx);
- if (err != DB_SUCCESS) {
- break;
+ if (!table->no_rollback()) {
+ err = row_drop_ancillary_fts_tables(table, trx);
+ if (err != DB_SUCCESS) {
+ break;
+ }
}
/* Determine the tablespace filename before we drop
@@ -4319,7 +4370,7 @@ row_rename_table_for_mysql(
goto funct_exit;
- } else if (new_is_tmp) {
+ } else if (!old_is_tmp && new_is_tmp) {
/* MySQL is doing an ALTER TABLE command and it renames the
original table to a temporary table name. We want to preserve
the original foreign key constraint definitions despite the
@@ -4354,6 +4405,14 @@ row_rename_table_for_mysql(
goto funct_exit;
}
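+ /* Write a TRX_UNDO_RENAME_TABLE undo record, so that rolling
+ back the transaction will restore the old table name. */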
+ if (!table->is_temporary()) {
+ err = trx_undo_report_rename(trx, table);
+
+ if (err != DB_SUCCESS) {
+ goto funct_exit;
+ }
+ }
+
/* We use the private SQL parser of Innobase to generate the query
graphs needed in updating the dictionary data from system tables. */
@@ -4539,7 +4598,8 @@ row_rename_table_for_mysql(
}
}
- if (dict_table_has_fts_index(table)
+ if (err == DB_SUCCESS
+ && dict_table_has_fts_index(table)
&& !dict_tables_have_same_db(old_name, new_name)) {
err = fts_rename_aux_tables(table, new_name, trx);
if (err != DB_TABLE_NOT_FOUND) {
@@ -4696,6 +4756,7 @@ funct_exit:
}
if (commit) {
+ DEBUG_SYNC(trx->mysql_thd, "before_rename_table_commit");
trx_commit_for_mysql(trx);
}
diff --git a/storage/innobase/row/row0sel.cc b/storage/innobase/row/row0sel.cc
index 46cd1e00021..157c2ab64da 100644
--- a/storage/innobase/row/row0sel.cc
+++ b/storage/innobase/row/row0sel.cc
@@ -4089,9 +4089,10 @@ row_search_mvcc(
ulint direction)
{
DBUG_ENTER("row_search_mvcc");
+ DBUG_ASSERT(prebuilt->index->table == prebuilt->table);
dict_index_t* index = prebuilt->index;
- ibool comp = dict_table_is_comp(index->table);
+ ibool comp = dict_table_is_comp(prebuilt->table);
const dtuple_t* search_tuple = prebuilt->search_tuple;
btr_pcur_t* pcur = prebuilt->pcur;
trx_t* trx = prebuilt->trx;
@@ -4412,17 +4413,19 @@ row_search_mvcc(
thread that is currently serving the transaction. Because we
are that thread, we can read trx->state without holding any
mutex. */
- ut_ad(prebuilt->sql_stat_start || trx->state == TRX_STATE_ACTIVE);
+ ut_ad(prebuilt->sql_stat_start
+ || trx->state == TRX_STATE_ACTIVE
+ || (prebuilt->table->no_rollback()
+ && trx->state == TRX_STATE_NOT_STARTED));
ut_ad(!trx_is_started(trx) || trx->state == TRX_STATE_ACTIVE);
ut_ad(prebuilt->sql_stat_start
|| prebuilt->select_lock_type != LOCK_NONE
|| MVCC::is_view_active(trx->read_view)
+ || prebuilt->table->no_rollback()
|| srv_read_only_mode);
- trx_start_if_not_started(trx, false);
-
if (trx->isolation_level <= TRX_ISO_READ_COMMITTED
&& prebuilt->select_lock_type != LOCK_NONE
&& trx->mysql_thd != NULL
@@ -4455,11 +4458,15 @@ row_search_mvcc(
que_thr_move_to_run_state_for_mysql(thr, trx);
- clust_index = dict_table_get_first_index(index->table);
+ clust_index = dict_table_get_first_index(prebuilt->table);
/* Do some start-of-statement preparations */
- if (!prebuilt->sql_stat_start) {
+ if (prebuilt->table->no_rollback()) {
+ /* NO_ROLLBACK tables do not support MVCC or locking. */
+ prebuilt->select_lock_type = LOCK_NONE;
+ prebuilt->sql_stat_start = FALSE;
+ } else if (!prebuilt->sql_stat_start) {
/* No need to set an intention lock or assign a read view */
if (!MVCC::is_view_active(trx->read_view)
@@ -4476,6 +4483,7 @@ row_search_mvcc(
} else if (prebuilt->select_lock_type == LOCK_NONE) {
/* This is a consistent read */
/* Assign a read view for the query */
+ trx_start_if_not_started(trx, false);
if (!srv_read_only_mode) {
trx_assign_read_view(trx);
@@ -4483,8 +4491,9 @@ row_search_mvcc(
prebuilt->sql_stat_start = FALSE;
} else {
+ trx_start_if_not_started(trx, false);
wait_table_again:
- err = lock_table(0, index->table,
+ err = lock_table(0, prebuilt->table,
prebuilt->select_lock_type == LOCK_S
? LOCK_IS : LOCK_IX, thr);
@@ -5055,7 +5064,8 @@ no_gap_lock:
/* This is a non-locking consistent read: if necessary, fetch
a previous version of the record */
- if (trx->isolation_level == TRX_ISO_READ_UNCOMMITTED) {
+ if (trx->isolation_level == TRX_ISO_READ_UNCOMMITTED
+ || prebuilt->table->no_rollback()) {
/* Do nothing: we let a non-locking SELECT read the
latest version of the record */
diff --git a/storage/innobase/row/row0uins.cc b/storage/innobase/row/row0uins.cc
index f4cd07dc53a..bdb0f86ee81 100644
--- a/storage/innobase/row/row0uins.cc
+++ b/storage/innobase/row/row0uins.cc
@@ -332,16 +332,13 @@ row_undo_ins_parse_undo_rec(
byte* ptr;
undo_no_t undo_no;
table_id_t table_id;
- ulint type;
ulint dummy;
bool dummy_extern;
ut_ad(node);
- ptr = trx_undo_rec_get_pars(node->undo_rec, &type, &dummy,
+ ptr = trx_undo_rec_get_pars(node->undo_rec, &node->rec_type, &dummy,
&dummy_extern, &undo_no, &table_id);
- ut_ad(type == TRX_UNDO_INSERT_REC);
- node->rec_type = type;
node->update = NULL;
node->table = dict_table_open_on_id(
@@ -352,6 +349,27 @@ row_undo_ins_parse_undo_rec(
return;
}
+ switch (node->rec_type) {
+ default:
+ ut_ad(!"wrong undo record type");
+ goto close_table;
+ case TRX_UNDO_INSERT_REC:
+ break;
+ case TRX_UNDO_RENAME_TABLE:
+ dict_table_t* table = node->table;
+ ut_ad(!table->is_temporary());
+ ut_ad(dict_table_is_file_per_table(table)
+ == (table->space != TRX_SYS_SPACE));
+ size_t len = mach_read_from_2(node->undo_rec)
+ + node->undo_rec - ptr - 2;
+ ptr[len] = 0;
+ const char* name = reinterpret_cast<char*>(ptr);
+ if (strcmp(table->name.m_name, name)) {
+ dict_table_rename_in_cache(table, name, false);
+ }
+ goto close_table;
+ }
+
if (UNIV_UNLIKELY(!fil_table_accessible(node->table))) {
close_table:
/* Normally, tables should not disappear or become
diff --git a/storage/innobase/row/row0upd.cc b/storage/innobase/row/row0upd.cc
index 5009ac02408..aefe7e6dc96 100644
--- a/storage/innobase/row/row0upd.cc
+++ b/storage/innobase/row/row0upd.cc
@@ -2305,7 +2305,7 @@ row_upd_sec_index_entry(
mtr.set_named_space(index->space);
/* fall through */
case IBUF_SPACE_ID:
- flags = 0;
+ flags = index->table->no_rollback() ? BTR_NO_ROLLBACK : 0;
break;
}
@@ -3025,6 +3025,7 @@ row_upd_clust_step(
ulint offsets_[REC_OFFS_NORMAL_SIZE];
ulint* offsets;
ibool referenced;
+ ulint flags;
trx_t* trx = thr_get_trx(thr);
rec_offs_init(offsets_);
@@ -3043,11 +3044,16 @@ row_upd_clust_step(
mtr.start();
- const ulint flags = index->table->is_temporary()
- ? BTR_NO_LOCKING_FLAG : 0;
- if (flags) {
+ if (dict_table_is_temporary(node->table)) {
+ /* Disable locking, because temporary tables are
+ private to the connection (no concurrent access). */
+ flags = node->table->no_rollback()
+ ? BTR_NO_ROLLBACK
+ : BTR_NO_LOCKING_FLAG;
+ /* Redo logging only matters for persistent tables. */
mtr.set_log_mode(MTR_LOG_NO_REDO);
} else {
+ flags = node->table->no_rollback() ? BTR_NO_ROLLBACK : 0;
mtr.set_named_space(index->space);
}
@@ -3126,9 +3132,10 @@ row_upd_clust_step(
}
}
- ut_ad(lock_trx_has_rec_x_lock(thr_get_trx(thr), index->table,
- btr_pcur_get_block(pcur),
- page_rec_get_heap_no(rec)));
+ ut_ad(index->table->no_rollback()
+ || lock_trx_has_rec_x_lock(thr_get_trx(thr), index->table,
+ btr_pcur_get_block(pcur),
+ page_rec_get_heap_no(rec)));
/* NOTE: the following function calls will also commit mtr */
@@ -3336,8 +3343,6 @@ row_upd_step(
trx = thr_get_trx(thr);
- trx_start_if_not_started_xa(trx, true);
-
node = static_cast<upd_node_t*>(thr->run_node);
sel_node = node->select;
diff --git a/storage/innobase/trx/trx0rec.cc b/storage/innobase/trx/trx0rec.cc
index 358bd458912..e4801c4c5ca 100644
--- a/storage/innobase/trx/trx0rec.cc
+++ b/storage/innobase/trx/trx0rec.cc
@@ -1880,6 +1880,119 @@ trx_undo_parse_erase_page_end(
return(ptr);
}
+/** Report a RENAME TABLE operation.
+@param[in,out] trx transaction
+@param[in] table table that is being renamed
+@param[in,out] block undo page
+@param[in,out] mtr mini-transaction
+@return byte offset of the undo log record
+@retval 0 in case of failure */
+static
+ulint
+trx_undo_page_report_rename(trx_t* trx, const dict_table_t* table,
+ buf_block_t* block, mtr_t* mtr)
+{
+ byte* ptr_first_free = TRX_UNDO_PAGE_HDR + TRX_UNDO_PAGE_FREE
+ + block->frame;
+ ulint first_free = mach_read_from_2(ptr_first_free);
+ ut_ad(first_free >= TRX_UNDO_PAGE_HDR + TRX_UNDO_PAGE_HDR_SIZE);
+ ut_ad(first_free <= UNIV_PAGE_SIZE);
+ byte* start = block->frame + first_free;
+ size_t len = strlen(table->name.m_name);
+ const size_t fixed = 2 + 1 + 11 + 11 + 2;
+ ut_ad(len <= NAME_LEN * 2 + 1);
+ /* The -10 is used in trx_undo_left() */
+ compile_time_assert((NAME_LEN * 1) * 2 + fixed
+ + TRX_UNDO_PAGE_HDR + TRX_UNDO_PAGE_HDR_SIZE
+ < UNIV_PAGE_SIZE_MIN - 10 - FIL_PAGE_DATA_END);
+
+ if (trx_undo_left(block->frame, start) < fixed + len) {
+ ut_ad(first_free > TRX_UNDO_PAGE_HDR
+ + TRX_UNDO_PAGE_HDR_SIZE);
+ return 0;
+ }
+
+ byte* ptr = start + 2;
+ *ptr++ = TRX_UNDO_RENAME_TABLE;
+ ptr += mach_u64_write_much_compressed(ptr, trx->undo_no);
+ ptr += mach_u64_write_much_compressed(ptr, table->id);
+ memcpy(ptr, table->name.m_name, len);
+ ptr += len;
+ mach_write_to_2(ptr, first_free);
+ ptr += 2;
+ ulint offset = page_offset(ptr);
+ mach_write_to_2(start, offset);
+ mach_write_to_2(ptr_first_free, offset);
+
+ trx_undof_page_add_undo_rec_log(block->frame, first_free, offset, mtr);
+ return first_free;
+}
+
+/** Report a RENAME TABLE operation.
+@param[in,out] trx transaction
+@param[in] table table that is being renamed
+@return DB_SUCCESS or error code */
+dberr_t
+trx_undo_report_rename(trx_t* trx, const dict_table_t* table)
+{
+ ut_ad(!trx->read_only);
+ ut_ad(trx->id);
+ ut_ad(!table->is_temporary());
+
+ trx_rseg_t* rseg = trx->rsegs.m_redo.rseg;
+ trx_undo_t** pundo = &trx->rsegs.m_redo.insert_undo;
+ mutex_enter(&trx->undo_mutex);
+ dberr_t err = *pundo
+ ? DB_SUCCESS
+ : trx_undo_assign_undo(trx, rseg, pundo, TRX_UNDO_INSERT);
+ ut_ad((err == DB_SUCCESS) == (*pundo != NULL));
+ if (trx_undo_t* undo = *pundo) {
+ mtr_t mtr;
+ mtr.start();
+
+ buf_block_t* block = buf_page_get_gen(
+ page_id_t(undo->space, undo->last_page_no),
+ univ_page_size, RW_X_LATCH,
+ buf_pool_is_obsolete(undo->withdraw_clock)
+ ? NULL : undo->guess_block,
+ BUF_GET, __FILE__, __LINE__, &mtr, &err);
+ ut_ad((err == DB_SUCCESS) == !!block);
+
+ for (ut_d(int loop_count = 0); block;) {
+ ut_ad(++loop_count < 2);
+ buf_block_dbg_add_level(block, SYNC_TRX_UNDO_PAGE);
+ ut_ad(undo->last_page_no == block->page.id.page_no());
+
+ if (ulint offset = trx_undo_page_report_rename(
+ trx, table, block, &mtr)) {
+ undo->withdraw_clock = buf_withdraw_clock;
+ undo->empty = FALSE;
+ undo->top_page_no = undo->last_page_no;
+ undo->top_offset = offset;
+ undo->top_undo_no = trx->undo_no++;
+ undo->guess_block = block;
+
+ trx->undo_rseg_space = rseg->space;
+ err = DB_SUCCESS;
+ break;
+ } else {
+ mtr.commit();
+ mtr.start();
+ block = trx_undo_add_page(trx, undo, &mtr);
+ if (!block) {
+ err = DB_OUT_OF_FILE_SPACE;
+ break;
+ }
+ }
+ }
+
+ mtr.commit();
+ }
+
+ mutex_exit(&trx->undo_mutex);
+ return err;
+}
+
/***********************************************************************//**
Writes information to an undo log about an insert, update, or a delete marking
of a clustered index record. This information is used in a rollback of the
diff --git a/storage/innobase/trx/trx0roll.cc b/storage/innobase/trx/trx0roll.cc
index 5c2258f25be..15f6749c529 100644
--- a/storage/innobase/trx/trx0roll.cc
+++ b/storage/innobase/trx/trx0roll.cc
@@ -1076,11 +1076,17 @@ trx_roll_pop_top_rec_of_trx(trx_t* trx, roll_ptr_t* roll_ptr, mem_heap_t* heap)
trx_undo_rec_t* undo_rec = trx_roll_pop_top_rec(trx, undo, &mtr);
const undo_no_t undo_no = trx_undo_rec_get_undo_no(undo_rec);
- if (trx_undo_rec_get_type(undo_rec) == TRX_UNDO_INSERT_REC) {
+ switch (trx_undo_rec_get_type(undo_rec)) {
+ case TRX_UNDO_RENAME_TABLE:
+ ut_ad(undo == insert);
+ /* fall through */
+ case TRX_UNDO_INSERT_REC:
ut_ad(undo == insert || undo == temp);
*roll_ptr |= 1ULL << ROLL_PTR_INSERT_FLAG_POS;
- } else {
+ break;
+ default:
ut_ad(undo == update || undo == temp);
+ break;
}
ut_ad(trx_roll_check_undo_rec_ordering(
diff --git a/storage/innobase/ut/ut0ut.cc b/storage/innobase/ut/ut0ut.cc
index 2a64d77da6f..a8ff700847a 100644
--- a/storage/innobase/ut/ut0ut.cc
+++ b/storage/innobase/ut/ut0ut.cc
@@ -37,6 +37,7 @@ Created 5/11/1994 Heikki Tuuri
#include "trx0trx.h"
#include <string>
#include "log.h"
+#include "my_cpu.h"
#ifdef _WIN32
typedef VOID(WINAPI *time_fn)(LPFILETIME);
@@ -293,14 +294,14 @@ ut_delay(
{
ulint i;
- UT_LOW_PRIORITY_CPU();
+ HMT_low();
for (i = 0; i < delay * 50; i++) {
- UT_RELAX_CPU();
+ MY_RELAX_CPU();
UT_COMPILER_BARRIER();
}
- UT_RESUME_PRIORITY_CPU();
+ HMT_medium();
}
/*************************************************************//**
diff --git a/storage/maria/ha_maria.cc b/storage/maria/ha_maria.cc
index d45da38ab79..4aa207902a9 100644
--- a/storage/maria/ha_maria.cc
+++ b/storage/maria/ha_maria.cc
@@ -990,7 +990,8 @@ int_table_flags(HA_NULL_IN_KEY | HA_CAN_FULLTEXT | HA_CAN_SQL_HANDLER |
HA_FILE_BASED | HA_CAN_GEOMETRY | CANNOT_ROLLBACK_FLAG |
HA_CAN_BIT_FIELD | HA_CAN_RTREEKEYS | HA_CAN_REPAIR |
HA_CAN_VIRTUAL_COLUMNS | HA_CAN_EXPORT |
- HA_HAS_RECORDS | HA_STATS_RECORDS_IS_EXACT),
+ HA_HAS_RECORDS | HA_STATS_RECORDS_IS_EXACT |
+ HA_CAN_TABLES_WITHOUT_ROLLBACK),
can_enable_indexes(1), bulk_insert_single_undo(BULK_INSERT_NONE)
{}
@@ -2278,7 +2279,7 @@ bool ha_maria::check_and_repair(THD *thd)
if (!file->state->del && (maria_recover_options & HA_RECOVER_QUICK))
check_opt.flags |= T_QUICK;
- thd->set_query(table->s->table_name.str,
+ thd->set_query((char*) table->s->table_name.str,
(uint) table->s->table_name.length, system_charset_info);
if (!(crashed= maria_is_crashed(file)))
@@ -2314,14 +2315,14 @@ bool ha_maria::is_crashed() const
#define CHECK_UNTIL_WE_FULLY_IMPLEMENTED_VERSIONING(msg) \
do { \
- if (file->lock.type == TL_WRITE_CONCURRENT_INSERT) \
+ if (file->lock.type == TL_WRITE_CONCURRENT_INSERT && !table->s->sequence) \
{ \
my_error(ER_CHECK_NOT_IMPLEMENTED, MYF(0), msg); \
return 1; \
} \
} while(0)
-int ha_maria::update_row(const uchar * old_data, uchar * new_data)
+int ha_maria::update_row(const uchar * old_data, const uchar * new_data)
{
CHECK_UNTIL_WE_FULLY_IMPLEMENTED_VERSIONING("UPDATE in WRITE CONCURRENT");
return maria_update(file, old_data, new_data);
@@ -3110,6 +3111,13 @@ int ha_maria::create(const char *name, register TABLE *table_arg,
ER_ILLEGAL_HA_CREATE_OPTION,
"Row format set to PAGE because of TRANSACTIONAL=1 option");
+ if (share->table_type == TABLE_TYPE_SEQUENCE)
+ {
+    /* For sequences, the simplest record type is appropriate */
+ row_type= STATIC_RECORD;
+ ha_create_info->transactional= HA_CHOICE_NO;
+ }
+
bzero((char*) &create_info, sizeof(create_info));
if ((error= table2maria(table_arg, row_type, &keydef, &recinfo,
&record_count, &create_info)))
@@ -3406,7 +3414,7 @@ bool maria_show_status(handlerton *hton,
stat_print_fn *print,
enum ha_stat_type stat)
{
- const LEX_STRING *engine_name= hton_name(hton);
+ const LEX_CSTRING *engine_name= hton_name(hton);
switch (stat) {
case HA_ENGINE_LOGS:
{
@@ -3646,7 +3654,7 @@ static int ha_maria_init(void *p)
@retval FALSE An error occurred
*/
-my_bool ha_maria::register_query_cache_table(THD *thd, char *table_name,
+my_bool ha_maria::register_query_cache_table(THD *thd, const char *table_name,
uint table_name_len,
qc_engine_callback
*engine_callback,
@@ -3931,6 +3939,36 @@ Item *ha_maria::idx_cond_push(uint keyno_arg, Item* idx_cond_arg)
return NULL;
}
+/**
+  Find a record by a unique constraint (used in temporary tables)
+
+ @param record (IN|OUT) the record to find
+  @param constrain_no    (IN) number of the constraint (for this engine)
+
+  @note It is like hp_search, but it uses row functions where hp_search
+  uses index functions.
+
+ @retval 0 OK
+ @retval 1 Not found
+ @retval -1 Error
+*/
+
+int ha_maria::find_unique_row(uchar *record, uint constrain_no)
+{
+ MARIA_UNIQUEDEF *def= file->s->uniqueinfo + constrain_no;
+ ha_checksum unique_hash= _ma_unique_hash(def, record);
+ int rc= _ma_check_unique(file, def, record, unique_hash, HA_OFFSET_ERROR);
+ if (rc)
+ {
+ file->cur_row.lastpos= file->dup_key_pos;
+ if ((*file->read_record)(file, record, file->cur_row.lastpos))
+ return -1;
+ file->update|= HA_STATE_AKTIV; /* Record is read */
+ }
+  // _ma_check_unique() returns TRUE when the row exists; invert so that 0 means found
+ return (rc ? 0 : 1);
+}
+
struct st_mysql_storage_engine maria_storage_engine=
{ MYSQL_HANDLERTON_INTERFACE_VERSION };
diff --git a/storage/maria/ha_maria.h b/storage/maria/ha_maria.h
index 65fe5f545d1..51438462787 100644
--- a/storage/maria/ha_maria.h
+++ b/storage/maria/ha_maria.h
@@ -75,7 +75,7 @@ public:
int open(const char *name, int mode, uint test_if_locked);
int close(void);
int write_row(uchar * buf);
- int update_row(const uchar * old_data, uchar * new_data);
+ int update_row(const uchar * old_data, const uchar * new_data);
int delete_row(const uchar * buf);
int index_read_map(uchar * buf, const uchar * key, key_part_map keypart_map,
enum ha_rkey_function find_flag);
@@ -162,7 +162,7 @@ public:
int net_read_dump(NET * net);
#endif
#ifdef HAVE_QUERY_CACHE
- my_bool register_query_cache_table(THD *thd, char *table_key,
+ my_bool register_query_cache_table(THD *thd, const char *table_key,
uint key_length,
qc_engine_callback
*engine_callback,
@@ -190,6 +190,8 @@ public:
/* Index condition pushdown implementation */
Item *idx_cond_push(uint keyno, Item* idx_cond);
+
+ int find_unique_row(uchar *record, uint unique_idx);
private:
DsMrr_impl ds_mrr;
friend ICP_RESULT index_cond_func_maria(void *arg);
diff --git a/storage/maria/lockman.c b/storage/maria/lockman.c
index efdf7e1c4b8..fa0a3289106 100644
--- a/storage/maria/lockman.c
+++ b/storage/maria/lockman.c
@@ -268,7 +268,7 @@ retry:
do {
cursor->curr= PTR(*cursor->prev);
lf_pin(pins, 1, cursor->curr);
- } while(*cursor->prev != (intptr)cursor->curr && LF_BACKOFF);
+ } while(*cursor->prev != (intptr)cursor->curr && LF_BACKOFF());
for (;;)
{
if (!cursor->curr)
@@ -277,7 +277,7 @@ retry:
cur_link= cursor->curr->link;
cursor->next= PTR(cur_link);
lf_pin(pins, 0, cursor->next);
- } while (cur_link != cursor->curr->link && LF_BACKOFF);
+ } while (cur_link != cursor->curr->link && LF_BACKOFF());
cur_hashnr= cursor->curr->hashnr;
cur_resource= cursor->curr->resource;
cur_lock= cursor->curr->lock;
@@ -285,7 +285,7 @@ retry:
cur_flags= cursor->curr->flags;
if (*cursor->prev != (intptr)cursor->curr)
{
- (void)LF_BACKOFF;
+ (void)LF_BACKOFF();
goto retry;
}
if (!DELETED(cur_link))
@@ -362,7 +362,7 @@ retry:
lf_alloc_free(pins, cursor->curr);
else
{
- (void)LF_BACKOFF;
+ (void)LF_BACKOFF();
goto retry;
}
}
diff --git a/storage/maria/ma_check.c b/storage/maria/ma_check.c
index 7caec1fd834..0e7777ee855 100644
--- a/storage/maria/ma_check.c
+++ b/storage/maria/ma_check.c
@@ -3783,7 +3783,7 @@ int maria_repair_by_sort(HA_CHECK *param, register MARIA_HA *info,
param->read_cache.end_of_file= sort_info.filelength;
sort_param.wordlist=NULL;
- init_alloc_root(&sort_param.wordroot, FTPARSER_MEMROOT_ALLOC_SIZE, 0,
+ init_alloc_root(&sort_param.wordroot, "sort", FTPARSER_MEMROOT_ALLOC_SIZE, 0,
MYF(param->malloc_flags));
sort_param.key_cmp=sort_key_cmp;
@@ -4431,7 +4431,8 @@ int maria_repair_parallel(HA_CHECK *param, register MARIA_HA *info,
(FT_MAX_WORD_LEN_FOR_SORT *
sort_param[i].keyinfo->seg->charset->mbmaxlen);
sort_param[i].key_length+=ft_max_word_len_for_sort-HA_FT_MAXBYTELEN;
- init_alloc_root(&sort_param[i].wordroot, FTPARSER_MEMROOT_ALLOC_SIZE, 0,
+ init_alloc_root(&sort_param[i].wordroot, "sort",
+ FTPARSER_MEMROOT_ALLOC_SIZE, 0,
MYF(param->malloc_flags));
}
}
diff --git a/storage/maria/ma_ft_boolean_search.c b/storage/maria/ma_ft_boolean_search.c
index a37a1322ad0..03f84e086b1 100644
--- a/storage/maria/ma_ft_boolean_search.c
+++ b/storage/maria/ma_ft_boolean_search.c
@@ -574,7 +574,7 @@ FT_INFO * maria_ft_init_boolean_search(MARIA_HA *info, uint keynr,
bzero(& ftb->no_dupes, sizeof(TREE));
ftb->last_word= 0;
- init_alloc_root(&ftb->mem_root, 1024, 1024, 0);
+ init_alloc_root(&ftb->mem_root, "fulltext", 1024, 1024, 0);
ftb->queue.max_elements= 0;
if (!(ftbe=(FTB_EXPR *)alloc_root(&ftb->mem_root, sizeof(FTB_EXPR))))
goto err;
diff --git a/storage/maria/ma_ft_parser.c b/storage/maria/ma_ft_parser.c
index f0a2e1e1425..d868b66dfbe 100644
--- a/storage/maria/ma_ft_parser.c
+++ b/storage/maria/ma_ft_parser.c
@@ -348,7 +348,8 @@ MYSQL_FTPARSER_PARAM* maria_ftparser_alloc_param(MARIA_HA *info)
info->ftparser_param= (MYSQL_FTPARSER_PARAM *)
my_malloc(MAX_PARAM_NR * sizeof(MYSQL_FTPARSER_PARAM) *
info->s->ftkeys, MYF(MY_WME | MY_ZEROFILL));
- init_alloc_root(&info->ft_memroot, FTPARSER_MEMROOT_ALLOC_SIZE, 0, MYF(0));
+ init_alloc_root(&info->ft_memroot, "fulltext_parser",
+ FTPARSER_MEMROOT_ALLOC_SIZE, 0, MYF(0));
}
return info->ftparser_param;
}
diff --git a/storage/maria/ma_ft_update.c b/storage/maria/ma_ft_update.c
index 51f27520dc1..ddf2a7251ab 100644
--- a/storage/maria/ma_ft_update.c
+++ b/storage/maria/ma_ft_update.c
@@ -28,6 +28,8 @@ void _ma_ft_segiterator_init(MARIA_HA *info, uint keynr, const uchar *record,
ftsi->num=info->s->keyinfo[keynr].keysegs;
ftsi->seg=info->s->keyinfo[keynr].seg;
ftsi->rec=record;
+ ftsi->pos= 0; /* Avoid warnings from gcc */
+ ftsi->len= 0; /* Avoid warnings from gcc */
DBUG_VOID_RETURN;
}
diff --git a/storage/maria/ma_open.c b/storage/maria/ma_open.c
index 8b3fa921cf1..3febf879ec6 100644
--- a/storage/maria/ma_open.c
+++ b/storage/maria/ma_open.c
@@ -274,7 +274,7 @@ MARIA_HA *maria_open(const char *name, int mode, uint open_flags)
{
int kfile,open_mode,save_errno;
uint i,j,len,errpos,head_length,base_pos,keys, realpath_err,
- key_parts,unique_key_parts,fulltext_keys,uniques;
+ key_parts,base_key_parts,unique_key_parts,fulltext_keys,uniques;
uint internal_table= MY_TEST(open_flags & HA_OPEN_INTERNAL_TABLE);
uint file_version;
size_t info_length;
@@ -404,21 +404,11 @@ MARIA_HA *maria_open(const char *name, int mode, uint open_flags)
Allocate space for header information and for data that is too
big to keep on stack
*/
- if (!my_multi_malloc(MY_WME,
- &disk_cache, info_length+128,
- &rec_per_key_part,
- (sizeof(*rec_per_key_part) * HA_MAX_POSSIBLE_KEY *
- HA_MAX_KEY_SEG),
- &nulls_per_key_part,
- (sizeof(*nulls_per_key_part) * HA_MAX_POSSIBLE_KEY *
- HA_MAX_KEY_SEG),
- NullS))
+ if (!(disk_cache= my_malloc(info_length+128, MYF(MY_WME))))
{
my_errno=ENOMEM;
goto err;
}
- share_buff.state.rec_per_key_part= rec_per_key_part;
- share_buff.state.nulls_per_key_part= nulls_per_key_part;
end_pos=disk_cache+info_length;
errpos= 3;
@@ -431,7 +421,7 @@ MARIA_HA *maria_open(const char *name, int mode, uint open_flags)
keys= (uint) share->state.header.keys;
uniques= (uint) share->state.header.uniques;
fulltext_keys= (uint) share->state.header.fulltext_keys;
- key_parts= mi_uint2korr(share->state.header.key_parts);
+ base_key_parts= key_parts= mi_uint2korr(share->state.header.key_parts);
unique_key_parts= mi_uint2korr(share->state.header.unique_key_parts);
if (len != MARIA_STATE_INFO_SIZE)
{
@@ -441,7 +431,8 @@ MARIA_HA *maria_open(const char *name, int mode, uint open_flags)
}
share->state_diff_length=len-MARIA_STATE_INFO_SIZE;
- _ma_state_info_read(disk_cache, &share->state);
+ if (!_ma_state_info_read(disk_cache, &share->state))
+ goto err;
len= mi_uint2korr(share->state.header.base_info_length);
if (len != MARIA_BASE_INFO_SIZE)
{
@@ -582,9 +573,9 @@ MARIA_HA *maria_open(const char *name, int mode, uint open_flags)
share->open_file_name.length= strlen(name);
if (!my_multi_malloc(MY_WME,
&share,sizeof(*share),
- &share->state.rec_per_key_part,
+ &rec_per_key_part,
sizeof(double) * key_parts,
- &share->state.nulls_per_key_part,
+ &nulls_per_key_part,
sizeof(long)* key_parts,
&share->keyinfo,keys*sizeof(MARIA_KEYDEF),
&share->uniqueinfo,uniques*sizeof(MARIA_UNIQUEDEF),
@@ -609,11 +600,16 @@ MARIA_HA *maria_open(const char *name, int mode, uint open_flags)
goto err;
errpos= 4;
- *share=share_buff;
- memcpy((char*) share->state.rec_per_key_part,
- (char*) rec_per_key_part, sizeof(double)*key_parts);
- memcpy((char*) share->state.nulls_per_key_part,
- (char*) nulls_per_key_part, sizeof(long)*key_parts);
+ *share= share_buff;
+ share->state.rec_per_key_part= rec_per_key_part;
+ share->state.nulls_per_key_part= nulls_per_key_part;
+
+ memcpy((char*) rec_per_key_part,
+ (char*) share_buff.state.rec_per_key_part,
+ sizeof(double)*base_key_parts);
+ memcpy((char*) nulls_per_key_part,
+ (char*) share_buff.state.nulls_per_key_part,
+ sizeof(long)*base_key_parts);
memcpy((char*) share->state.key_root,
(char*) key_root, sizeof(my_off_t)*keys);
strmov(share->unique_file_name.str, name_buff);
@@ -911,6 +907,10 @@ MARIA_HA *maria_open(const char *name, int mode, uint open_flags)
(keys ? MARIA_INDEX_BLOCK_MARGIN *
share->block_size * keys : 0));
my_free(disk_cache);
+ my_free(share_buff.state.rec_per_key_part);
+ disk_cache= 0;
+ share_buff.state.rec_per_key_part= 0;
+
_ma_setup_functions(share);
max_data_file_length= share->base.max_data_file_length;
if ((*share->once_init)(share, info.dfile.file))
@@ -1092,6 +1092,7 @@ err:
/* fall through */
case 3:
my_free(disk_cache);
+ my_free(share_buff.state.rec_per_key_part);
/* fall through */
case 1:
mysql_file_close(kfile,MYF(0));
@@ -1507,6 +1508,16 @@ static uchar *_ma_state_info_read(uchar *ptr, MARIA_STATE_INFO *state)
keys= (uint) state->header.keys;
key_parts= mi_uint2korr(state->header.key_parts);
+ /* Allocate memory for key parts if not already done */
+ if (!state->rec_per_key_part &&
+ !my_multi_malloc(MY_WME,
+ &state->rec_per_key_part,
+ sizeof(*state->rec_per_key_part) * key_parts,
+ &state->nulls_per_key_part,
+ sizeof(*state->nulls_per_key_part) * key_parts,
+ NullS))
+ DBUG_RETURN(0);
+
state->open_count = mi_uint2korr(ptr); ptr+= 2;
state->changed= mi_uint2korr(ptr); ptr+= 2;
state->create_rename_lsn= lsn_korr(ptr); ptr+= LSN_STORE_SIZE;
diff --git a/storage/maria/ma_unique.c b/storage/maria/ma_unique.c
index 72104e25e3f..dfbb471f31e 100644
--- a/storage/maria/ma_unique.c
+++ b/storage/maria/ma_unique.c
@@ -27,8 +27,9 @@
isn't any versioning information.
*/
-my_bool _ma_check_unique(MARIA_HA *info, MARIA_UNIQUEDEF *def, uchar *record,
- ha_checksum unique_hash, my_off_t disk_pos)
+my_bool _ma_check_unique(MARIA_HA *info, MARIA_UNIQUEDEF *def,
+ const uchar *record,
+ ha_checksum unique_hash, my_off_t disk_pos)
{
my_off_t lastpos=info->cur_row.lastpos;
MARIA_KEYDEF *keyinfo= &info->s->keyinfo[def->key];
@@ -38,6 +39,7 @@ my_bool _ma_check_unique(MARIA_HA *info, MARIA_UNIQUEDEF *def, uchar *record,
DBUG_ENTER("_ma_check_unique");
DBUG_PRINT("enter",("unique_hash: %lu", (ulong) unique_hash));
+ /* We need to store the hash value as a key in the record, breaking const */
maria_unique_store(record+keyinfo->seg->start, unique_hash);
/* Can't be spatial so it's ok to call _ma_make_key directly here */
_ma_make_key(info, &key, def->key, key_buff, record, 0, 0);
diff --git a/storage/maria/ma_update.c b/storage/maria/ma_update.c
index 0e006d2473d..6e150f727d6 100644
--- a/storage/maria/ma_update.c
+++ b/storage/maria/ma_update.c
@@ -21,7 +21,8 @@
Update an old row in a MARIA table
*/
-int maria_update(register MARIA_HA *info, const uchar *oldrec, uchar *newrec)
+int maria_update(register MARIA_HA *info, const uchar *oldrec,
+ const uchar *newrec)
{
int flag,key_changed,save_errno;
reg3 my_off_t pos;
diff --git a/storage/maria/maria_def.h b/storage/maria/maria_def.h
index a35403fff39..716c0ef824c 100644
--- a/storage/maria/maria_def.h
+++ b/storage/maria/maria_def.h
@@ -1325,7 +1325,7 @@ ulong _ma_calc_total_blob_length(MARIA_HA *info, const uchar *record);
ha_checksum _ma_checksum(MARIA_HA *info, const uchar *buf);
ha_checksum _ma_static_checksum(MARIA_HA *info, const uchar *buf);
my_bool _ma_check_unique(MARIA_HA *info, MARIA_UNIQUEDEF *def,
- uchar *record, ha_checksum unique_hash,
+ const uchar *record, ha_checksum unique_hash,
MARIA_RECORD_POS pos);
ha_checksum _ma_unique_hash(MARIA_UNIQUEDEF *def, const uchar *buf);
my_bool _ma_cmp_static_unique(MARIA_HA *info, MARIA_UNIQUEDEF *def,
diff --git a/storage/mroonga/ha_mroonga.cpp b/storage/mroonga/ha_mroonga.cpp
index b4bfc152053..db84b6ad1cc 100644
--- a/storage/mroonga/ha_mroonga.cpp
+++ b/storage/mroonga/ha_mroonga.cpp
@@ -481,6 +481,9 @@ static const char *mrn_inspect_extra_function(enum ha_extra_function operation)
case HA_EXTRA_PREPARE_FOR_DROP:
inspected = "HA_EXTRA_PREPARE_FOR_DROP";
break;
+ case HA_EXTRA_PREPARE_FOR_ALTER_TABLE:
+ inspected = "HA_EXTRA_PREPARE_FOR_ALTER_TABLE";
+ break;
case HA_EXTRA_PREPARE_FOR_UPDATE:
inspected = "HA_EXTRA_PREPARE_FOR_UPDATE";
break;
@@ -3446,7 +3449,7 @@ int ha_mroonga::storage_create(const char *name, TABLE *table,
int key_parts = KEY_N_KEY_PARTS(key_info);
if (key_parts == 1) {
Field *pkey_field = key_info->key_part[0].field;
- const char *column_name = pkey_field->field_name;
+ const char *column_name = pkey_field->field_name.str;
is_id = (strcmp(MRN_COLUMN_NAME_ID, column_name) == 0);
grn_builtin_type gtype = mrn_grn_type_from_field(ctx, pkey_field, false);
@@ -3613,7 +3616,7 @@ int ha_mroonga::storage_create_validate_pseudo_column(TABLE *table)
n_columns = table->s->fields;
for (i = 0; i < n_columns; i++) {
Field *field = table->s->field[i];
- const char *column_name = field->field_name;
+ const char *column_name = field->field_name.str;
if (strcmp(MRN_COLUMN_NAME_ID, column_name) == 0) {
switch (field->type()) {
case MYSQL_TYPE_TINY :
@@ -3661,17 +3664,17 @@ bool ha_mroonga::storage_create_foreign_key(TABLE *table,
}
List_iterator<Key_part_spec> key_part_col_iterator(key->columns);
Key_part_spec *key_part_col = key_part_col_iterator++;
- LEX_STRING field_name = key_part_col->field_name;
+ LEX_CSTRING field_name = key_part_col->field_name;
DBUG_PRINT("info", ("mroonga: field_name=%s", field_name.str));
- DBUG_PRINT("info", ("mroonga: field->field_name=%s", field->field_name));
- if (strcmp(field->field_name, field_name.str))
+ DBUG_PRINT("info", ("mroonga: field->field_name=%s", field->field_name.str));
+ if (strcmp(field->field_name.str, field_name.str))
{
continue;
}
Foreign_key *fk = (Foreign_key *) key;
List_iterator<Key_part_spec> key_part_ref_col_iterator(fk->ref_columns);
Key_part_spec *key_part_ref_col = key_part_ref_col_iterator++;
- LEX_STRING ref_field_name = key_part_ref_col->field_name;
+ LEX_CSTRING ref_field_name = key_part_ref_col->field_name;
DBUG_PRINT("info", ("mroonga: ref_field_name=%s", ref_field_name.str));
#ifdef MRN_FOREIGN_KEY_USE_CONST_STRING
LEX_CSTRING ref_db_name = fk->ref_db;
@@ -3776,7 +3779,7 @@ bool ha_mroonga::storage_create_foreign_key(TABLE *table,
DBUG_RETURN(false);
}
Field *ref_field = &ref_key_info->key_part->field[0];
- if (strcmp(ref_field->field_name, ref_field_name.str)) {
+ if (strcmp(ref_field->field_name.str, ref_field_name.str)) {
mrn_open_mutex_lock(table->s);
mrn_free_tmp_table_share(tmp_ref_table_share);
mrn_open_mutex_unlock(table->s);
@@ -3793,8 +3796,8 @@ bool ha_mroonga::storage_create_foreign_key(TABLE *table,
mrn_free_tmp_table_share(tmp_ref_table_share);
mrn_open_mutex_unlock(table->s);
grn_obj_flags col_flags = GRN_OBJ_PERSISTENT;
- column = grn_column_create(ctx, table_obj, field->field_name,
- strlen(field->field_name),
+ column = grn_column_create(ctx, table_obj, field->field_name.str,
+ field->field_name.length,
NULL, col_flags, grn_table_ref);
if (ctx->rc) {
grn_obj_unlink(ctx, grn_table_ref);
@@ -3803,7 +3806,7 @@ bool ha_mroonga::storage_create_foreign_key(TABLE *table,
DBUG_RETURN(false);
}
- mrn::IndexColumnName index_column_name(grn_table_name, field->field_name);
+ mrn::IndexColumnName index_column_name(grn_table_name, field->field_name.str);
grn_obj_flags ref_col_flags = GRN_OBJ_COLUMN_INDEX | GRN_OBJ_PERSISTENT;
column_ref = grn_column_create(ctx, grn_table_ref,
index_column_name.c_str(),
@@ -3857,7 +3860,7 @@ int ha_mroonga::storage_create_validate_index(TABLE *table)
continue;
}
Field *field = key_info->key_part[0].field;
- const char *column_name = field->field_name;
+ const char *column_name = field->field_name.str;
if (strcmp(MRN_COLUMN_NAME_ID, column_name) == 0) {
if (key_info->algorithm == HA_KEY_ALG_HASH) {
continue; // hash index is ok
@@ -3995,13 +3998,13 @@ int ha_mroonga::storage_create_index(TABLE *table, const char *grn_table_name,
bool is_multiple_column_index = KEY_N_KEY_PARTS(key_info) > 1;
if (!is_multiple_column_index) {
Field *field = key_info->key_part[0].field;
- if (strcmp(MRN_COLUMN_NAME_ID, field->field_name) == 0) {
+ if (strcmp(MRN_COLUMN_NAME_ID, field->field_name.str) == 0) {
// skipping _id virtual column
DBUG_RETURN(0);
}
if (is_foreign_key_field(table->s->table_name.str,
- field->field_name)) {
+ field->field_name.str)) {
DBUG_RETURN(0);
}
@@ -4011,7 +4014,7 @@ int ha_mroonga::storage_create_index(TABLE *table, const char *grn_table_name,
snprintf(error_message, MRN_MESSAGE_BUFFER_SIZE,
"mroonga: storage: failed to create index: "
ER_MRN_KEY_BASED_ON_GENERATED_VIRTUAL_COLUMN_STR,
- field->field_name);
+ field->field_name.str);
error = ER_MRN_KEY_BASED_ON_GENERATED_VIRTUAL_COLUMN_NUM;
my_message(error, error_message, MYF(0));
DBUG_RETURN(error);
@@ -4025,7 +4028,7 @@ int ha_mroonga::storage_create_index(TABLE *table, const char *grn_table_name,
snprintf(error_message, MRN_MESSAGE_BUFFER_SIZE,
"mroonga: storage: failed to create index: "
ER_MRN_KEY_BASED_ON_GENERATED_VIRTUAL_COLUMN_STR,
- field->field_name);
+ field->field_name.str);
error = ER_MRN_KEY_BASED_ON_GENERATED_VIRTUAL_COLUMN_NUM;
my_message(error, error_message, MYF(0));
DBUG_RETURN(error);
@@ -4447,8 +4450,8 @@ int ha_mroonga::wrapper_open_indexes(const char *name)
/* just for backward compatibility before 1.0. */
Field *field = key_info->key_part[0].field;
grn_index_columns[i] = grn_obj_column(ctx, grn_index_tables[i],
- field->field_name,
- strlen(field->field_name));
+ field->field_name.str,
+ field->field_name.length);
}
if (ctx->rc) {
@@ -4872,8 +4875,8 @@ int ha_mroonga::storage_open_indexes(const char *name)
/* just for backward compatibility before 1.0. */
Field *field = key_info->key_part[0].field;
grn_index_columns[i] = grn_obj_column(ctx, grn_index_tables[i],
- field->field_name,
- strlen(field->field_name));
+ field->field_name.str,
+ field->field_name.length);
}
}
}
@@ -6129,7 +6132,7 @@ int ha_mroonga::storage_write_row(uchar *buf)
}
grn_obj *column = grn_columns[i];
- if (is_foreign_key_field(table->s->table_name.str, field->field_name)) {
+ if (is_foreign_key_field(table->s->table_name.str, field->field_name.str)) {
grn_obj value;
GRN_RECORD_INIT(&value, 0, grn_obj_get_range(ctx, column));
grn_rc cast_rc = grn_obj_cast(ctx, &colbuf, &value, GRN_FALSE);
@@ -6140,7 +6143,7 @@ int ha_mroonga::storage_write_row(uchar *buf)
error = HA_ERR_NO_REFERENCED_ROW;
GRN_PLUGIN_ERROR(ctx, GRN_INVALID_ARGUMENT,
"foreign record doesn't exist: <%s>:<%.*s>",
- field->field_name,
+ field->field_name.str,
static_cast<int>(GRN_TEXT_LEN(&inspected)),
GRN_TEXT_VALUE(&inspected));
GRN_OBJ_FIN(ctx, &value);
@@ -6302,7 +6305,7 @@ err:
DBUG_RETURN(error);
}
-int ha_mroonga::storage_write_row_unique_index(uchar *buf,
+int ha_mroonga::storage_write_row_unique_index(const uchar *buf,
KEY *key_info,
grn_obj *index_table,
grn_obj *index_column,
@@ -6484,7 +6487,8 @@ int ha_mroonga::wrapper_get_record_id(uchar *data, grn_id *record_id,
DBUG_RETURN(error);
}
-int ha_mroonga::wrapper_update_row(const uchar *old_data, uchar *new_data)
+int ha_mroonga::wrapper_update_row(const uchar *old_data,
+ const uchar *new_data)
{
MRN_DBUG_ENTER_METHOD();
@@ -6511,7 +6515,8 @@ int ha_mroonga::wrapper_update_row(const uchar *old_data, uchar *new_data)
DBUG_RETURN(error);
}
-int ha_mroonga::wrapper_update_row_index(const uchar *old_data, uchar *new_data)
+int ha_mroonga::wrapper_update_row_index(const uchar *old_data,
+ const uchar *new_data)
{
MRN_DBUG_ENTER_METHOD();
@@ -6623,7 +6628,8 @@ err:
DBUG_RETURN(error);
}
-int ha_mroonga::storage_update_row(const uchar *old_data, uchar *new_data)
+int ha_mroonga::storage_update_row(const uchar *old_data,
+ const uchar *new_data)
{
MRN_DBUG_ENTER_METHOD();
int error = 0;
@@ -6673,7 +6679,7 @@ int ha_mroonga::storage_update_row(const uchar *old_data, uchar *new_data)
}
}
- if (!is_foreign_key_field(table->s->table_name.str, field->field_name))
+ if (!is_foreign_key_field(table->s->table_name.str, field->field_name.str))
continue;
{
@@ -6696,7 +6702,7 @@ int ha_mroonga::storage_update_row(const uchar *old_data, uchar *new_data)
error = HA_ERR_NO_REFERENCED_ROW;
GRN_PLUGIN_ERROR(ctx, GRN_INVALID_ARGUMENT,
"foreign record doesn't exist: <%s>:<%.*s>",
- field->field_name,
+ field->field_name.str,
static_cast<int>(GRN_TEXT_LEN(&inspected)),
GRN_TEXT_VALUE(&inspected));
GRN_OBJ_FIN(ctx, &inspected);
@@ -6755,7 +6761,7 @@ int ha_mroonga::storage_update_row(const uchar *old_data, uchar *new_data)
if (pkey_info && !on_duplicate_key_update) {
for (j = 0; j < KEY_N_KEY_PARTS(pkey_info); j++) {
Field *pkey_field = pkey_info->key_part[j].field;
- if (strcmp(pkey_field->field_name, column_name.c_str()) == 0) {
+ if (strcmp(pkey_field->field_name.str, column_name.c_str()) == 0) {
is_pkey = true;
break;
}
@@ -6855,7 +6861,8 @@ err:
DBUG_RETURN(error);
}
-int ha_mroonga::storage_update_row_index(const uchar *old_data, uchar *new_data)
+int ha_mroonga::storage_update_row_index(const uchar *old_data,
+ const uchar *new_data)
{
MRN_DBUG_ENTER_METHOD();
int error = 0;
@@ -6947,7 +6954,7 @@ err:
DBUG_RETURN(error);
}
-int ha_mroonga::storage_update_row_unique_indexes(uchar *new_data)
+int ha_mroonga::storage_update_row_unique_indexes(const uchar *new_data)
{
int error;
uint i;
@@ -7024,7 +7031,7 @@ err:
DBUG_RETURN(error);
}
-int ha_mroonga::update_row(const uchar *old_data, uchar *new_data)
+int ha_mroonga::update_row(const uchar *old_data, const uchar *new_data)
{
MRN_DBUG_ENTER_METHOD();
int error = 0;
@@ -7506,7 +7513,7 @@ ha_rows ha_mroonga::storage_records_in_range(uint key_nr, key_range *range_min,
DBUG_RETURN(row_count);
} else {
Field *field = key_info->key_part[0].field;
- const char *column_name = field->field_name;
+ const char *column_name = field->field_name.str;
mrn_change_encoding(ctx, field->charset());
if (strcmp(MRN_COLUMN_NAME_ID, column_name) == 0) {
@@ -7925,7 +7932,7 @@ int ha_mroonga::storage_index_read_map(uchar *buf, const uchar *key,
DBUG_RETURN(error);
if (find_flag == HA_READ_KEY_EXACT) {
- const char *column_name = field->field_name;
+ const char *column_name = field->field_name.str;
key_min = key_min_entity;
key_max = key_min_entity;
@@ -9571,7 +9578,7 @@ grn_obj *ha_mroonga::find_column_type(Field *field, MRN_SHARE *mrn_share, int i,
char error_message[MRN_BUFFER_SIZE];
snprintf(error_message, MRN_BUFFER_SIZE,
"unknown custom Groonga type name for <%s> column: <%s>",
- field->field_name, grn_type_name);
+ field->field_name.str, grn_type_name);
GRN_LOG(ctx, GRN_LOG_ERROR, "%s", error_message);
my_message(error_code, error_message, MYF(0));
@@ -10166,8 +10173,8 @@ bool ha_mroonga::is_primary_key_field(Field *field) const
DBUG_RETURN(false);
}
- if (strcmp(field->field_name,
- key_info->key_part[0].field->field_name) == 0) {
+ if (strcmp(field->field_name.str,
+ key_info->key_part[0].field->field_name.str) == 0) {
DBUG_RETURN(true);
} else {
DBUG_RETURN(false);
@@ -11411,11 +11418,11 @@ void ha_mroonga::storage_store_fields(uchar *buf, grn_id record_id)
if (bitmap_is_set(table->read_set, field->field_index) ||
bitmap_is_set(table->write_set, field->field_index)) {
- const char *column_name = field->field_name;
+ const char *column_name = field->field_name.str;
if (ignoring_no_key_columns) {
KEY *key_info = &(table->s->key_info[active_index]);
- if (strcmp(key_info->key_part[0].field->field_name, column_name)) {
+ if (strcmp(key_info->key_part[0].field->field_name.str, column_name)) {
continue;
}
}
@@ -11428,7 +11435,7 @@ void ha_mroonga::storage_store_fields(uchar *buf, grn_id record_id)
field->set_notnull();
field->store((int)record_id);
} else if (primary_key_field &&
- strcmp(primary_key_field->field_name, column_name) == 0) {
+ strcmp(primary_key_field->field_name.str, column_name) == 0) {
// for primary key column
storage_store_field_column(field, true, i, record_id);
} else {
@@ -11442,7 +11449,7 @@ void ha_mroonga::storage_store_fields(uchar *buf, grn_id record_id)
}
void ha_mroonga::storage_store_fields_for_prep_update(const uchar *old_data,
- uchar *new_data,
+ const uchar *new_data,
grn_id record_id)
{
MRN_DBUG_ENTER_METHOD();
@@ -11896,7 +11903,8 @@ int ha_mroonga::storage_encode_key_set(Field *field, const uchar *key,
MRN_DBUG_ENTER_METHOD();
int error = 0;
Field_set unpacker((uchar *)key, field->field_length, (uchar *)(key - 1),
- field->null_bit, field->unireg_check, field->field_name,
+ field->null_bit, field->unireg_check,
+ &field->field_name,
field->pack_length(),
static_cast<Field_set*>(field)->typelib,
static_cast<Field_set*>(field)->charset());
@@ -13407,7 +13415,7 @@ int ha_mroonga::storage_rename_foreign_key(MRN_SHARE *tmp_share,
for (i = 0; i < n_columns; ++i) {
Field *field = tmp_table_share->field[i];
- if (!is_foreign_key_field(from_table_name, field->field_name)) {
+ if (!is_foreign_key_field(from_table_name, field->field_name.str)) {
continue;
}
@@ -15288,8 +15296,8 @@ bool ha_mroonga::storage_inplace_alter_table_drop_column(
continue;
}
- const char *column_name = field->field_name;
- int column_name_size = strlen(column_name);
+ const char *column_name = field->field_name.str;
+ int column_name_size = field->field_name.length;
grn_obj *column_obj;
column_obj = grn_obj_column(ctx, table_obj, column_name, column_name_size);
@@ -15329,7 +15337,8 @@ bool ha_mroonga::storage_inplace_alter_table_rename_column(
continue;
}
- const char *new_name = NULL;
+ LEX_CSTRING new_name;
+ new_name.str= 0;
List_iterator_fast<Create_field> create_fields(alter_info->create_list);
while (Create_field *create_field = create_fields++) {
if (create_field->field == field) {
@@ -15338,15 +15347,16 @@ bool ha_mroonga::storage_inplace_alter_table_rename_column(
}
}
- if (!new_name) {
+ if (!new_name.str) {
continue;
}
- const char *old_name = field->field_name;
+ const char *old_name = field->field_name.str;
grn_obj *column_obj;
- column_obj = grn_obj_column(ctx, table_obj, old_name, strlen(old_name));
+ column_obj = grn_obj_column(ctx, table_obj, old_name,
+ field->field_name.length);
if (column_obj) {
- grn_column_rename(ctx, column_obj, new_name, strlen(new_name));
+ grn_column_rename(ctx, column_obj, new_name.str, new_name.length);
if (ctx->rc) {
int error = ER_WRONG_COLUMN_NAME;
my_message(error, ctx->errbuf, MYF(0));
@@ -16525,7 +16535,7 @@ char *ha_mroonga::storage_get_foreign_key_create_info()
Field *field = table_share->field[i];
if (!is_foreign_key_field(table_share->table_name.str,
- field->field_name)) {
+ field->field_name.str)) {
continue;
}
@@ -16598,8 +16608,8 @@ char *ha_mroonga::storage_get_foreign_key_create_info()
uint ref_pkey_nr = tmp_ref_table_share->primary_key;
KEY *ref_key_info = &tmp_ref_table_share->key_info[ref_pkey_nr];
Field *ref_field = &ref_key_info->key_part->field[0];
- append_identifier(ha_thd(), &create_info_str, ref_field->field_name,
- strlen(ref_field->field_name));
+ append_identifier(ha_thd(), &create_info_str, ref_field->field_name.str,
+ ref_field->field_name.length);
mrn_open_mutex_lock(table_share);
mrn_free_tmp_table_share(tmp_ref_table_share);
mrn_open_mutex_unlock(table_share);
@@ -16731,7 +16741,8 @@ int ha_mroonga::storage_get_foreign_key_list(THD *thd,
for (i = 0; i < n_columns; ++i) {
Field *field = table_share->field[i];
- if (!is_foreign_key_field(table_share->table_name.str, field->field_name)) {
+ if (!is_foreign_key_field(table_share->table_name.str,
+ field->field_name.str)) {
continue;
}
@@ -16782,7 +16793,7 @@ int ha_mroonga::storage_get_foreign_key_list(THD *thd,
#endif
f_key_info.referenced_key_name = thd_make_lex_string(thd, NULL, "PRIMARY",
7, TRUE);
- LEX_STRING *field_name = thd_make_lex_string(thd,
+ LEX_CSTRING *field_name = thd_make_lex_string(thd,
NULL,
column_name.c_str(),
column_name.length(),
@@ -16810,9 +16821,9 @@ int ha_mroonga::storage_get_foreign_key_list(THD *thd,
uint ref_pkey_nr = tmp_ref_table_share->primary_key;
KEY *ref_key_info = &tmp_ref_table_share->key_info[ref_pkey_nr];
Field *ref_field = &ref_key_info->key_part->field[0];
- LEX_STRING *ref_col_name = thd_make_lex_string(thd, NULL,
- ref_field->field_name,
- strlen(ref_field->field_name),
+ LEX_CSTRING *ref_col_name = thd_make_lex_string(thd, NULL,
+ ref_field->field_name.str,
+ ref_field->field_name.length,
TRUE);
f_key_info.referenced_fields.push_back(ref_col_name);
mrn_open_mutex_lock(table_share);
@@ -17087,7 +17098,7 @@ void ha_mroonga::rebind_psi()
#endif
my_bool ha_mroonga::wrapper_register_query_cache_table(THD *thd,
- char *table_key,
+ const char *table_key,
uint key_length,
qc_engine_callback
*engine_callback,
@@ -17108,7 +17119,7 @@ my_bool ha_mroonga::wrapper_register_query_cache_table(THD *thd,
}
my_bool ha_mroonga::storage_register_query_cache_table(THD *thd,
- char *table_key,
+ const char *table_key,
uint key_length,
qc_engine_callback
*engine_callback,
@@ -17124,7 +17135,7 @@ my_bool ha_mroonga::storage_register_query_cache_table(THD *thd,
}
my_bool ha_mroonga::register_query_cache_table(THD *thd,
- char *table_key,
+ const char *table_key,
uint key_length,
qc_engine_callback
*engine_callback,
diff --git a/storage/mroonga/ha_mroonga.hpp b/storage/mroonga/ha_mroonga.hpp
index 2533913961e..15497e70c59 100644
--- a/storage/mroonga/ha_mroonga.hpp
+++ b/storage/mroonga/ha_mroonga.hpp
@@ -207,7 +207,7 @@ extern "C" {
# define MRN_HAVE_HTON_ALTER_TABLE_FLAGS
#endif
-#if MYSQL_VERSION_ID >= 50706 && !defined(MRN_MARIADB_P)
+#if MYSQL_VERSION_ID >= 50706
# define MRN_FOREIGN_KEY_USE_CONST_STRING
#endif
@@ -456,7 +456,7 @@ public:
int delete_table(const char *name);
int write_row(uchar *buf);
- int update_row(const uchar *old_data, uchar *new_data);
+ int update_row(const uchar *old_data, const uchar *new_data);
int delete_row(const uchar *buf);
uint max_supported_record_length() const;
@@ -633,7 +633,7 @@ protected:
void rebind_psi();
#endif
my_bool register_query_cache_table(THD *thd,
- char *table_key,
+ const char *table_key,
uint key_length,
qc_engine_callback *engine_callback,
ulonglong *engine_data);
@@ -776,7 +776,7 @@ private:
int nth_column, grn_id record_id);
void storage_store_fields(uchar *buf, grn_id record_id);
void storage_store_fields_for_prep_update(const uchar *old_data,
- uchar *new_data,
+ const uchar *new_data,
grn_id record_id);
void storage_store_fields_by_index(uchar *buf);
@@ -920,18 +920,21 @@ private:
KEY *key_info,
grn_obj *index_column);
int storage_write_row_multiple_column_indexes(uchar *buf, grn_id record_id);
- int storage_write_row_unique_index(uchar *buf,
+ int storage_write_row_unique_index(const uchar *buf,
KEY *key_info,
grn_obj *index_table,
grn_obj *index_column,
grn_id *key_id);
int storage_write_row_unique_indexes(uchar *buf);
- int wrapper_get_record_id(uchar *data, grn_id *record_id, const char *context);
- int wrapper_update_row(const uchar *old_data, uchar *new_data);
- int wrapper_update_row_index(const uchar *old_data, uchar *new_data);
- int storage_update_row(const uchar *old_data, uchar *new_data);
- int storage_update_row_index(const uchar *old_data, uchar *new_data);
- int storage_update_row_unique_indexes(uchar *new_data);
+ int wrapper_get_record_id(uchar *data, grn_id *record_id,
+ const char *context);
+ int wrapper_update_row(const uchar *old_data, const uchar *new_data);
+ int wrapper_update_row_index(const uchar *old_data,
+ const uchar *new_data);
+ int storage_update_row(const uchar *old_data, const uchar *new_data);
+ int storage_update_row_index(const uchar *old_data,
+ const uchar *new_data);
+ int storage_update_row_unique_indexes(const uchar *new_data);
int wrapper_delete_row(const uchar *buf);
int wrapper_delete_row_index(const uchar *buf);
int storage_delete_row(const uchar *buf);
@@ -1293,13 +1296,13 @@ private:
void storage_rebind_psi();
#endif
my_bool wrapper_register_query_cache_table(THD *thd,
- char *table_key,
+ const char *table_key,
uint key_length,
qc_engine_callback
*engine_callback,
ulonglong *engine_data);
my_bool storage_register_query_cache_table(THD *thd,
- char *table_key,
+ const char *table_key,
uint key_length,
qc_engine_callback
*engine_callback,
diff --git a/storage/mroonga/lib/mrn_column_name.cpp b/storage/mroonga/lib/mrn_column_name.cpp
index e469ad2fd19..e2e8f6d8f63 100644
--- a/storage/mroonga/lib/mrn_column_name.cpp
+++ b/storage/mroonga/lib/mrn_column_name.cpp
@@ -32,7 +32,12 @@
namespace mrn {
ColumnName::ColumnName(const char *mysql_name)
: mysql_name_(mysql_name) {
- encode();
+ encode(mysql_name, strlen(mysql_name));
+ }
+
+ ColumnName::ColumnName(const LEX_CSTRING &mysql_name)
+ : mysql_name_(mysql_name.str) {
+ encode(mysql_name.str, mysql_name.length);
}
const char *ColumnName::mysql_name() {
@@ -47,12 +52,13 @@ namespace mrn {
return length_;
}
- void ColumnName::encode() {
+ void ColumnName::encode(const char *mysql_name,
+ size_t mysql_name_length) {
MRN_DBUG_ENTER_METHOD();
uint errors;
length_ = mrn_strconvert(system_charset_info,
- mysql_name_,
- strlen(mysql_name_),
+ mysql_name,
+ mysql_name_length,
&my_charset_filename,
name_,
MRN_MAX_PATH_SIZE,
diff --git a/storage/mroonga/lib/mrn_column_name.hpp b/storage/mroonga/lib/mrn_column_name.hpp
index ed8fb67e506..e68e0182f5e 100644
--- a/storage/mroonga/lib/mrn_column_name.hpp
+++ b/storage/mroonga/lib/mrn_column_name.hpp
@@ -25,6 +25,7 @@ namespace mrn {
class ColumnName {
public:
ColumnName(const char *mysql_name);
+ ColumnName(const LEX_CSTRING &mysql_name);
const char *mysql_name();
const char *c_str();
size_t length();
@@ -33,6 +34,6 @@ namespace mrn {
char name_[MRN_MAX_PATH_SIZE];
size_t length_;
- void encode();
+ void encode(const char *mysql_name, size_t mysql_name_length);
};
}
diff --git a/storage/mroonga/lib/mrn_condition_converter.cpp b/storage/mroonga/lib/mrn_condition_converter.cpp
index 6df601d6250..4fc54259f66 100644
--- a/storage/mroonga/lib/mrn_condition_converter.cpp
+++ b/storage/mroonga/lib/mrn_condition_converter.cpp
@@ -28,8 +28,8 @@
# define MRN_ITEM_FIELD_GET_NAME(item) ((item)->item_name.ptr())
# define MRN_ITEM_FIELD_GET_NAME_LENGTH(item) ((item)->item_name.length())
#else
-# define MRN_ITEM_FIELD_GET_NAME(item) ((item)->name)
-# define MRN_ITEM_FIELD_GET_NAME_LENGTH(item) (strlen((item)->name))
+# define MRN_ITEM_FIELD_GET_NAME(item) ((item)->name.str)
+# define MRN_ITEM_FIELD_GET_NAME_LENGTH(item) ((item)->name.length)
#endif
namespace mrn {
diff --git a/storage/mroonga/lib/mrn_count_skip_checker.cpp b/storage/mroonga/lib/mrn_count_skip_checker.cpp
index 216f3b7b7b5..07852d9dda6 100644
--- a/storage/mroonga/lib/mrn_count_skip_checker.cpp
+++ b/storage/mroonga/lib/mrn_count_skip_checker.cpp
@@ -271,7 +271,7 @@ namespace mrn {
GRN_LOG(ctx_, GRN_LOG_DEBUG,
"[mroonga][count-skip][false] no active index: <%s>:<%s>",
*(field->table_name),
- field->field_name);
+ field->field_name.str);
DBUG_RETURN(false);
}
@@ -288,7 +288,7 @@ namespace mrn {
i,
target_key_part_map_,
*(field->table_name),
- field->field_name);
+ field->field_name.str);
DBUG_RETURN(false);
}
}
@@ -297,7 +297,7 @@ namespace mrn {
GRN_LOG(ctx_, GRN_LOG_DEBUG,
"[mroonga][count-skip][false] field isn't indexed: <%s>:<%s>",
*(field->table_name),
- field->field_name);
+ field->field_name.str);
DBUG_RETURN(false);
}
}
diff --git a/storage/mroonga/mrn_mysql_compat.h b/storage/mroonga/mrn_mysql_compat.h
index d33a8c88d87..aac16d3c7bd 100644
--- a/storage/mroonga/mrn_mysql_compat.h
+++ b/storage/mroonga/mrn_mysql_compat.h
@@ -60,7 +60,10 @@
# define KEY_N_KEY_PARTS(key) (key)->key_parts
#endif
-#if defined(MRN_MARIADB_P) && MYSQL_VERSION_ID >= 100000
+#if defined(MRN_MARIADB_P) && MYSQL_VERSION_ID >= 100213
+# define mrn_init_alloc_root(PTR, SZ1, SZ2, FLAG) \
+ init_alloc_root(PTR, "mroonga", SZ1, SZ2, FLAG)
+#elif defined(MRN_MARIADB_P) && MYSQL_VERSION_ID >= 100000
# define mrn_init_alloc_root(PTR, SZ1, SZ2, FLAG) \
init_alloc_root(PTR, SZ1, SZ2, FLAG)
#elif MYSQL_VERSION_ID >= 50706
@@ -238,7 +241,13 @@
#endif
#if defined(MRN_MARIADB_P) && MYSQL_VERSION_ID >= 100000
-# if MYSQL_VERSION_ID >= 100104
+# if MYSQL_VERSION_ID >= 100213
+# define mrn_init_sql_alloc(thd, mem_root) \
+ init_sql_alloc(mem_root, "Mroonga", \
+ TABLE_ALLOC_BLOCK_SIZE, \
+ 0, \
+ MYF(thd->slave_thread ? 0 : MY_THREAD_SPECIFIC))
+#elif MYSQL_VERSION_ID >= 100104
# define mrn_init_sql_alloc(thd, mem_root) \
init_sql_alloc(mem_root, \
TABLE_ALLOC_BLOCK_SIZE, \
diff --git a/storage/mroonga/mrn_table.cpp b/storage/mroonga/mrn_table.cpp
index 65db014b89b..bb7c8fdb7ba 100644
--- a/storage/mroonga/mrn_table.cpp
+++ b/storage/mroonga/mrn_table.cpp
@@ -480,7 +480,7 @@ int mrn_parse_table_param(MRN_SHARE *share, TABLE *table)
if (share->engine)
{
- LEX_STRING engine_name;
+ LEX_CSTRING engine_name;
if (
(
share->engine_length == MRN_DEFAULT_LEN &&
@@ -1088,9 +1088,9 @@ TABLE_SHARE *mrn_create_tmp_table_share(TABLE_LIST *table_list, const char *path
void mrn_free_tmp_table_share(TABLE_SHARE *tmp_table_share)
{
MRN_DBUG_ENTER_FUNCTION();
- char *normalized_path = tmp_table_share->normalized_path.str;
+ const char *normalized_path = tmp_table_share->normalized_path.str;
free_table_share(tmp_table_share);
- my_free(normalized_path);
+ my_free((char*) normalized_path);
DBUG_VOID_RETURN;
}
diff --git a/storage/myisam/ft_boolean_search.c b/storage/myisam/ft_boolean_search.c
index a44e24c9db1..129384245bf 100644
--- a/storage/myisam/ft_boolean_search.c
+++ b/storage/myisam/ft_boolean_search.c
@@ -579,7 +579,7 @@ FT_INFO * ft_init_boolean_search(MI_INFO *info, uint keynr, uchar *query,
bzero(& ftb->no_dupes, sizeof(TREE));
ftb->last_word= 0;
- init_alloc_root(&ftb->mem_root, 1024, 1024, MYF(0));
+ init_alloc_root(&ftb->mem_root, "fulltext", 1024, 1024, MYF(0));
ftb->queue.max_elements= 0;
if (!(ftbe=(FTB_EXPR *)alloc_root(&ftb->mem_root, sizeof(FTB_EXPR))))
goto err;
diff --git a/storage/myisam/ft_parser.c b/storage/myisam/ft_parser.c
index c8d99b68e8a..78ddb0af4fe 100644
--- a/storage/myisam/ft_parser.c
+++ b/storage/myisam/ft_parser.c
@@ -342,7 +342,8 @@ MYSQL_FTPARSER_PARAM* ftparser_alloc_param(MI_INFO *info)
info->ftparser_param= (MYSQL_FTPARSER_PARAM *)
my_malloc(MAX_PARAM_NR * sizeof(MYSQL_FTPARSER_PARAM) *
info->s->ftkeys, MYF(MY_WME | MY_ZEROFILL));
- init_alloc_root(&info->ft_memroot, FTPARSER_MEMROOT_ALLOC_SIZE, 0, MYF(0));
+ init_alloc_root(&info->ft_memroot, "fulltext_parser",
+ FTPARSER_MEMROOT_ALLOC_SIZE, 0, MYF(0));
}
return info->ftparser_param;
}
diff --git a/storage/myisam/ft_update.c b/storage/myisam/ft_update.c
index 8f437476121..f851c0236ae 100644
--- a/storage/myisam/ft_update.c
+++ b/storage/myisam/ft_update.c
@@ -28,6 +28,8 @@ void _mi_ft_segiterator_init(MI_INFO *info, uint keynr, const uchar *record,
ftsi->num=info->s->keyinfo[keynr].keysegs;
ftsi->seg=info->s->keyinfo[keynr].seg;
ftsi->rec=record;
+ ftsi->pos= 0; /* Avoid warnings from gcc */
+ ftsi->len= 0; /* Avoid warnings from gcc */
DBUG_VOID_RETURN;
}
diff --git a/storage/myisam/ha_myisam.cc b/storage/myisam/ha_myisam.cc
index 74c55b75352..89f2a90deff 100644
--- a/storage/myisam/ha_myisam.cc
+++ b/storage/myisam/ha_myisam.cc
@@ -696,7 +696,8 @@ ha_myisam::ha_myisam(handlerton *hton, TABLE_SHARE *table_arg)
HA_DUPLICATE_POS | HA_CAN_INDEX_BLOBS | HA_AUTO_PART_KEY |
HA_FILE_BASED | HA_CAN_GEOMETRY | HA_NO_TRANSACTIONS |
HA_CAN_INSERT_DELAYED | HA_CAN_BIT_FIELD | HA_CAN_RTREEKEYS |
- HA_HAS_RECORDS | HA_STATS_RECORDS_IS_EXACT | HA_CAN_REPAIR),
+ HA_HAS_RECORDS | HA_STATS_RECORDS_IS_EXACT | HA_CAN_REPAIR |
+ HA_CAN_TABLES_WITHOUT_ROLLBACK),
can_enable_indexes(1)
{}
@@ -1777,7 +1778,7 @@ bool ha_myisam::check_and_repair(THD *thd)
sql_print_warning("Checking table: '%s'",table->s->path.str);
const CSET_STRING query_backup= thd->query_string;
- thd->set_query(table->s->table_name.str,
+ thd->set_query((char*) table->s->table_name.str,
(uint) table->s->table_name.length, system_charset_info);
if ((marked_crashed= mi_is_crashed(file)) || check(thd, &check_opt))
@@ -1815,7 +1816,7 @@ bool ha_myisam::is_crashed() const
(my_disable_locking && file->s->state.open_count));
}
-int ha_myisam::update_row(const uchar *old_data, uchar *new_data)
+int ha_myisam::update_row(const uchar *old_data, const uchar *new_data)
{
return mi_update(file,old_data,new_data);
}
@@ -2602,7 +2603,7 @@ maria_declare_plugin_end;
@retval FALSE An error occurred
*/
-my_bool ha_myisam::register_query_cache_table(THD *thd, char *table_name,
+my_bool ha_myisam::register_query_cache_table(THD *thd, const char *table_name,
uint table_name_len,
qc_engine_callback
*engine_callback,
diff --git a/storage/myisam/ha_myisam.h b/storage/myisam/ha_myisam.h
index 4068720e39e..804963f5efc 100644
--- a/storage/myisam/ha_myisam.h
+++ b/storage/myisam/ha_myisam.h
@@ -71,7 +71,7 @@ class ha_myisam: public handler
int open(const char *name, int mode, uint test_if_locked);
int close(void);
int write_row(uchar * buf);
- int update_row(const uchar * old_data, uchar * new_data);
+ int update_row(const uchar * old_data, const uchar * new_data);
int delete_row(const uchar * buf);
int index_read_map(uchar *buf, const uchar *key, key_part_map keypart_map,
enum ha_rkey_function find_flag);
@@ -144,7 +144,7 @@ class ha_myisam: public handler
Alter_inplace_info *alter_info);
bool check_if_incompatible_data(HA_CREATE_INFO *info, uint table_changes);
#ifdef HAVE_QUERY_CACHE
- my_bool register_query_cache_table(THD *thd, char *table_key,
+ my_bool register_query_cache_table(THD *thd, const char *table_key,
uint key_length,
qc_engine_callback
*engine_callback,
diff --git a/storage/myisam/mi_check.c b/storage/myisam/mi_check.c
index c5995d74ca3..1ca252c2505 100644
--- a/storage/myisam/mi_check.c
+++ b/storage/myisam/mi_check.c
@@ -2292,7 +2292,7 @@ int mi_repair_by_sort(HA_CHECK *param, register MI_INFO *info,
mysql_file_seek(param->read_cache.file, 0L, MY_SEEK_END, MYF(0));
sort_param.wordlist=NULL;
- init_alloc_root(&sort_param.wordroot, FTPARSER_MEMROOT_ALLOC_SIZE, 0,
+ init_alloc_root(&sort_param.wordroot, "sort", FTPARSER_MEMROOT_ALLOC_SIZE, 0,
MYF(param->malloc_flags));
if (share->data_file_type == DYNAMIC_RECORD)
@@ -2870,7 +2870,8 @@ int mi_repair_parallel(HA_CHECK *param, register MI_INFO *info,
uint ft_max_word_len_for_sort=FT_MAX_WORD_LEN_FOR_SORT*
sort_param[i].keyinfo->seg->charset->mbmaxlen;
sort_param[i].key_length+=ft_max_word_len_for_sort-HA_FT_MAXBYTELEN;
- init_alloc_root(&sort_param[i].wordroot, FTPARSER_MEMROOT_ALLOC_SIZE, 0,
+ init_alloc_root(&sort_param[i].wordroot, "sort",
+ FTPARSER_MEMROOT_ALLOC_SIZE, 0,
MYF(param->malloc_flags));
}
}
diff --git a/storage/myisam/mi_create.c b/storage/myisam/mi_create.c
index c0967a60d13..ab45a16a96c 100644
--- a/storage/myisam/mi_create.c
+++ b/storage/myisam/mi_create.c
@@ -47,7 +47,7 @@ int mi_create(const char *name,uint keys,MI_KEYDEF *keydefs,
uint internal_table= flags & HA_CREATE_INTERNAL_TABLE;
ulong reclength, real_reclength,min_pack_length;
char kfilename[FN_REFLEN],klinkname[FN_REFLEN], *klinkname_ptr;
- char dfilename[FN_REFLEN],dlinkname[FN_REFLEN], *dlinkname_ptr;
+ char dfilename[FN_REFLEN],dlinkname[FN_REFLEN], *dlinkname_ptr= 0;
ulong pack_reclength;
ulonglong tot_length,max_rows, tmp;
enum en_fieldtype type;
diff --git a/storage/myisam/mi_unique.c b/storage/myisam/mi_unique.c
index dae453beaec..371c1a7fd6c 100644
--- a/storage/myisam/mi_unique.c
+++ b/storage/myisam/mi_unique.c
@@ -19,7 +19,7 @@
#include "myisamdef.h"
#include <m_ctype.h>
-my_bool mi_check_unique(MI_INFO *info, MI_UNIQUEDEF *def, uchar *record,
+my_bool mi_check_unique(MI_INFO *info, MI_UNIQUEDEF *def, const uchar *record,
ha_checksum unique_hash, my_off_t disk_pos)
{
my_off_t lastpos=info->lastpos;
@@ -27,7 +27,8 @@ my_bool mi_check_unique(MI_INFO *info, MI_UNIQUEDEF *def, uchar *record,
uchar *key_buff=info->lastkey2;
DBUG_ENTER("mi_check_unique");
- mi_unique_store(record+key->seg->start, unique_hash);
+ /* We need to store the hash value as a key in the record, breaking const */
+ mi_unique_store(((uchar*) record)+key->seg->start, unique_hash);
_mi_make_key(info,def->key,key_buff,record,0);
/* The above changed info->lastkey2. Inform mi_rnext_same(). */
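
The mi_check_unique() hunk above keeps the record parameter const in the public signature while still writing the computed unique hash back into the caller's buffer, which is why the cast is spelled out in the patch. Below is a standalone sketch of that pattern; store_hash() stands in for mi_unique_store(), and the offset and hash width are made up for the example.

```cpp
// Illustrative only: store_hash() stands in for mi_unique_store(); the
// offset and 4-byte hash width are assumptions for this sketch.
#include <cstddef>
#include <cstdint>
#include <cstring>
#include <cstdio>

static void store_hash(unsigned char *pos, std::uint32_t hash)
{
  std::memcpy(pos, &hash, sizeof hash);   // write the hash bytes into the record
}

static int check_unique(const unsigned char *record, std::size_t hash_offset,
                        std::uint32_t unique_hash)
{
  // The record is const for callers, but the hash field inside it belongs
  // to this layer, so the one write casts the constness away explicitly,
  // mirroring ((uchar*) record) + key->seg->start in the patch.
  store_hash((unsigned char *) record + hash_offset, unique_hash);
  return 0;                               // 0 = no duplicate found (illustrative)
}

int main()
{
  unsigned char rec[16]= {0};
  check_unique(rec, 4, 0xdeadbeefu);
  std::printf("first hash byte: %02x\n", rec[4]);
  return 0;
}
```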
diff --git a/storage/myisam/mi_update.c b/storage/myisam/mi_update.c
index b75bd4bf2aa..459ce0eade3 100644
--- a/storage/myisam/mi_update.c
+++ b/storage/myisam/mi_update.c
@@ -19,7 +19,8 @@
#include "fulltext.h"
#include "rt_index.h"
-int mi_update(register MI_INFO *info, const uchar *oldrec, uchar *newrec)
+int mi_update(register MI_INFO *info, const uchar *oldrec,
+ const uchar *newrec)
{
int flag,key_changed,save_errno;
reg3 my_off_t pos;
diff --git a/storage/myisam/myisamdef.h b/storage/myisam/myisamdef.h
index c63baa13875..583f115c908 100644
--- a/storage/myisam/myisamdef.h
+++ b/storage/myisam/myisamdef.h
@@ -696,7 +696,7 @@ extern int mi_indexes_are_disabled(MI_INFO *info);
ulong _mi_calc_total_blob_length(MI_INFO *info, const uchar *record);
ha_checksum mi_checksum(MI_INFO *info, const uchar *buf);
ha_checksum mi_static_checksum(MI_INFO *info, const uchar *buf);
-my_bool mi_check_unique(MI_INFO *info, MI_UNIQUEDEF *def, uchar *record,
+my_bool mi_check_unique(MI_INFO *info, MI_UNIQUEDEF *def, const uchar *record,
ha_checksum unique_hash, my_off_t pos);
ha_checksum mi_unique_hash(MI_UNIQUEDEF *def, const uchar *buf);
int _mi_cmp_static_unique(MI_INFO *info, MI_UNIQUEDEF *def,
diff --git a/storage/myisammrg/ha_myisammrg.cc b/storage/myisammrg/ha_myisammrg.cc
index 7ff4b2e483b..da7d7fe0240 100644
--- a/storage/myisammrg/ha_myisammrg.cc
+++ b/storage/myisammrg/ha_myisammrg.cc
@@ -120,7 +120,7 @@ static handler *myisammrg_create_handler(handlerton *hton,
ha_myisammrg::ha_myisammrg(handlerton *hton, TABLE_SHARE *table_arg)
:handler(hton, table_arg), file(0), is_cloned(0)
{
- init_sql_alloc(&children_mem_root,
+ init_sql_alloc(&children_mem_root, "ha_myisammrg",
FN_REFLEN + ALLOC_ROOT_MIN_BLOCK_SIZE, 0, MYF(0));
}
@@ -1104,7 +1104,7 @@ int ha_myisammrg::write_row(uchar * buf)
DBUG_RETURN(myrg_write(file,buf));
}
-int ha_myisammrg::update_row(const uchar * old_data, uchar * new_data)
+int ha_myisammrg::update_row(const uchar * old_data, const uchar * new_data)
{
DBUG_ASSERT(this->file->children_attached);
return myrg_update(file,old_data,new_data);
@@ -1609,7 +1609,7 @@ void ha_myisammrg::append_create_info(String *packet)
for (first= open_table= children_l;;
open_table= open_table->next_global)
{
- LEX_STRING db= { open_table->db, open_table->db_length };
+ LEX_CSTRING db= { open_table->db, open_table->db_length };
if (open_table != first)
packet->append(',');
@@ -1646,7 +1646,7 @@ bool ha_myisammrg::inplace_alter_table(TABLE *altered_table,
Alter_inplace_info *ha_alter_info)
{
char tmp_path[FN_REFLEN];
- char *name= table->s->normalized_path.str;
+ const char *name= table->s->normalized_path.str;
DBUG_ENTER("ha_myisammrg::inplace_alter_table");
fn_format(tmp_path, name, "", MYRG_NAME_TMPEXT, MY_UNPACK_FILENAME | MY_APPEND_EXT);
int res= create_mrg(tmp_path, ha_alter_info->create_info);
diff --git a/storage/myisammrg/ha_myisammrg.h b/storage/myisammrg/ha_myisammrg.h
index b6db549119d..6ace880ab99 100644
--- a/storage/myisammrg/ha_myisammrg.h
+++ b/storage/myisammrg/ha_myisammrg.h
@@ -112,7 +112,7 @@ public:
virtual handler *clone(const char *name, MEM_ROOT *mem_root);
int close(void);
int write_row(uchar * buf);
- int update_row(const uchar * old_data, uchar * new_data);
+ int update_row(const uchar * old_data, const uchar * new_data);
int delete_row(const uchar * buf);
int index_read_map(uchar *buf, const uchar *key, key_part_map keypart_map,
enum ha_rkey_function find_flag);
diff --git a/storage/myisammrg/myrg_update.c b/storage/myisammrg/myrg_update.c
index add6f9f819b..fd28b2699e9 100644
--- a/storage/myisammrg/myrg_update.c
+++ b/storage/myisammrg/myrg_update.c
@@ -18,7 +18,8 @@
#include "myrg_def.h"
-int myrg_update(register MYRG_INFO *info,const uchar *oldrec, uchar *newrec)
+int myrg_update(register MYRG_INFO *info,const uchar *oldrec,
+ const uchar *newrec)
{
if (!info->current_table)
return (my_errno=HA_ERR_NO_ACTIVE_RECORD);
diff --git a/storage/myisammrg/mysql-test/storage_engine/autoincrement.rdiff b/storage/myisammrg/mysql-test/storage_engine/autoincrement.rdiff
index 68264bdeb8d..cc04b800793 100644
--- a/storage/myisammrg/mysql-test/storage_engine/autoincrement.rdiff
+++ b/storage/myisammrg/mysql-test/storage_engine/autoincrement.rdiff
@@ -12,40 +12,40 @@
@@ -52,14 +52,14 @@
SET sql_mode = '<INITIAL_SQL_MODE>';
SHOW TABLE STATUS FROM test LIKE 't1';
- Name Engine Version Row_format Rows Avg_row_length Data_length Max_data_length Index_length Data_free Auto_increment Create_time Update_time Check_time Collation Checksum Create_options Comment
--t1 <STORAGE_ENGINE> # # # # # # # # 6 # # # # # # #
-+t1 <STORAGE_ENGINE> # # # # # # # # 0 # # # # # # #
+ Name Engine Version Row_format Rows Avg_row_length Data_length Max_data_length Index_length Data_free Auto_increment Create_time Update_time Check_time Collation Checksum Create_options Comment Max_index_length Temporary
+-t1 <STORAGE_ENGINE> # # # # # # # # 6 # # # # # # # # N
++t1 <STORAGE_ENGINE> # # # # # # # # 0 # # # # # # # # N
INSERT INTO t1 (a,b) VALUES (6,'g'),(7,'h');
SELECT LAST_INSERT_ID();
LAST_INSERT_ID()
5
SHOW TABLE STATUS FROM test LIKE 't1';
- Name Engine Version Row_format Rows Avg_row_length Data_length Max_data_length Index_length Data_free Auto_increment Create_time Update_time Check_time Collation Checksum Create_options Comment
--t1 # # # # # # # # # 8 # # # # # # #
-+t1 # # # # # # # # # 0 # # # # # # #
+ Name Engine Version Row_format Rows Avg_row_length Data_length Max_data_length Index_length Data_free Auto_increment Create_time Update_time Check_time Collation Checksum Create_options Comment Max_index_length Temporary
+-t1 # # # # # # # # # 8 # # # # # # # # N
++t1 # # # # # # # # # 0 # # # # # # # # N
INSERT INTO t1 (a,b) VALUES (NULL,'i'),(9,'j');
SELECT a,b FROM t1 ORDER BY a;
a b
@@ -78,11 +78,11 @@
8
SHOW TABLE STATUS FROM test LIKE 't1';
- Name Engine Version Row_format Rows Avg_row_length Data_length Max_data_length Index_length Data_free Auto_increment Create_time Update_time Check_time Collation Checksum Create_options Comment
--t1 # # # # # # # # # 10 # # # # # # #
-+t1 # # # # # # # # # 0 # # # # # # #
+ Name Engine Version Row_format Rows Avg_row_length Data_length Max_data_length Index_length Data_free Auto_increment Create_time Update_time Check_time Collation Checksum Create_options Comment Max_index_length Temporary
+-t1 # # # # # # # # # 10 # # # # # # # # N
++t1 # # # # # # # # # 0 # # # # # # # # N
INSERT INTO t1 (a,b) VALUES (20,'k');
SHOW TABLE STATUS FROM test LIKE 't1';
- Name Engine Version Row_format Rows Avg_row_length Data_length Max_data_length Index_length Data_free Auto_increment Create_time Update_time Check_time Collation Checksum Create_options Comment
--t1 # # # # # # # # # 21 # # # # # # #
-+t1 # # # # # # # # # 0 # # # # # # #
+ Name Engine Version Row_format Rows Avg_row_length Data_length Max_data_length Index_length Data_free Auto_increment Create_time Update_time Check_time Collation Checksum Create_options Comment Max_index_length Temporary
+-t1 # # # # # # # # # 21 # # # # # # # # N
++t1 # # # # # # # # # 0 # # # # # # # # N
INSERT INTO t1 (a,b) VALUES (NULL,'l');
SELECT a,b FROM t1 ORDER BY a;
a b
@@ -103,7 +103,7 @@
21
SHOW TABLE STATUS FROM test LIKE 't1';
- Name Engine Version Row_format Rows Avg_row_length Data_length Max_data_length Index_length Data_free Auto_increment Create_time Update_time Check_time Collation Checksum Create_options Comment
--t1 # # # # # # # # # 22 # # # # # # #
-+t1 # # # # # # # # # 0 # # # # # # #
+ Name Engine Version Row_format Rows Avg_row_length Data_length Max_data_length Index_length Data_free Auto_increment Create_time Update_time Check_time Collation Checksum Create_options Comment Max_index_length Temporary
+-t1 # # # # # # # # # 22 # # # # # # # # N
++t1 # # # # # # # # # 0 # # # # # # # # N
INSERT INTO t1 (a,b) VALUES (-5,'m');
SELECT a,b FROM t1 ORDER BY a;
a b
diff --git a/storage/myisammrg/mysql-test/storage_engine/create_table.rdiff b/storage/myisammrg/mysql-test/storage_engine/create_table.rdiff
index 5f5c2528a95..ad6352d3e7b 100644
--- a/storage/myisammrg/mysql-test/storage_engine/create_table.rdiff
+++ b/storage/myisammrg/mysql-test/storage_engine/create_table.rdiff
@@ -29,14 +29,14 @@
-SHOW CREATE TABLE t1;
-Table Create Table
-t1 CREATE TABLE `t1` (
-- `1` bigint(20) NOT NULL DEFAULT 0
+- `1` int(1) NOT NULL DEFAULT 0
-) ENGINE=<STORAGE_ENGINE> DEFAULT CHARSET=latin1
-SELECT * FROM t1;
-1
-1
-2
-DROP TABLE t1;
-+ERROR HY000: 'test.t1' is not BASE TABLE
++ERROR HY000: 'test.t1' is not of type 'BASE TABLE'
+# ERROR: Statement ended with errno 1347, errname ER_WRONG_OBJECT (expected to succeed)
+# ------------ UNEXPECTED RESULT ------------
+# The statement|command finished with ER_WRONG_OBJECT.
diff --git a/storage/myisammrg/mysql-test/storage_engine/truncate_table.rdiff b/storage/myisammrg/mysql-test/storage_engine/truncate_table.rdiff
index bb2e5585910..e429bbdb177 100644
--- a/storage/myisammrg/mysql-test/storage_engine/truncate_table.rdiff
+++ b/storage/myisammrg/mysql-test/storage_engine/truncate_table.rdiff
@@ -3,24 +3,24 @@
@@ -9,19 +9,19 @@
CREATE TABLE t1 (a <INT_COLUMN> KEY AUTO_INCREMENT, c <CHAR_COLUMN>) ENGINE=<STORAGE_ENGINE> <CUSTOM_TABLE_OPTIONS>;
SHOW TABLE STATUS LIKE 't1';
- Name Engine Version Row_format Rows Avg_row_length Data_length Max_data_length Index_length Data_free Auto_increment Create_time Update_time Check_time Collation Checksum Create_options Comment
--t1 # # # # # # # # # 1 # # # # # # #
-+t1 # # # # # # # # # 0 # # # # # # #
+ Name Engine Version Row_format Rows Avg_row_length Data_length Max_data_length Index_length Data_free Auto_increment Create_time Update_time Check_time Collation Checksum Create_options Comment Max_index_length Temporary
+-t1 # # # # # # # # # 1 # # # # # # # # N
++t1 # # # # # # # # # 0 # # # # # # # # N
INSERT INTO t1 (c) VALUES ('a'),('b'),('c');
SHOW TABLE STATUS LIKE 't1';
- Name Engine Version Row_format Rows Avg_row_length Data_length Max_data_length Index_length Data_free Auto_increment Create_time Update_time Check_time Collation Checksum Create_options Comment
--t1 # # # # # # # # # 4 # # # # # # #
-+t1 # # # # # # # # # 0 # # # # # # #
+ Name Engine Version Row_format Rows Avg_row_length Data_length Max_data_length Index_length Data_free Auto_increment Create_time Update_time Check_time Collation Checksum Create_options Comment Max_index_length Temporary
+-t1 # # # # # # # # # 4 # # # # # # # # N
++t1 # # # # # # # # # 0 # # # # # # # # N
TRUNCATE TABLE t1;
SHOW TABLE STATUS LIKE 't1';
- Name Engine Version Row_format Rows Avg_row_length Data_length Max_data_length Index_length Data_free Auto_increment Create_time Update_time Check_time Collation Checksum Create_options Comment
--t1 # # # # # # # # # 1 # # # # # # #
-+t1 # # # # # # # # # 0 # # # # # # #
+ Name Engine Version Row_format Rows Avg_row_length Data_length Max_data_length Index_length Data_free Auto_increment Create_time Update_time Check_time Collation Checksum Create_options Comment Max_index_length Temporary
+-t1 # # # # # # # # # 1 # # # # # # # # N
++t1 # # # # # # # # # 0 # # # # # # # # N
INSERT INTO t1 (c) VALUES ('d');
SHOW TABLE STATUS LIKE 't1';
- Name Engine Version Row_format Rows Avg_row_length Data_length Max_data_length Index_length Data_free Auto_increment Create_time Update_time Check_time Collation Checksum Create_options Comment
--t1 # # # # # # # # # 2 # # # # # # #
-+t1 # # # # # # # # # 0 # # # # # # #
+ Name Engine Version Row_format Rows Avg_row_length Data_length Max_data_length Index_length Data_free Auto_increment Create_time Update_time Check_time Collation Checksum Create_options Comment Max_index_length Temporary
+-t1 # # # # # # # # # 2 # # # # # # # # N
++t1 # # # # # # # # # 0 # # # # # # # # N
SELECT a,c FROM t1;
a c
1 d
diff --git a/storage/oqgraph/ha_oqgraph.cc b/storage/oqgraph/ha_oqgraph.cc
index 66a20e9a6a1..4d13cc111c0 100644
--- a/storage/oqgraph/ha_oqgraph.cc
+++ b/storage/oqgraph/ha_oqgraph.cc
@@ -297,7 +297,7 @@ int ha_oqgraph::oqgraph_check_table_structure (TABLE *table_arg)
Field **field= table_arg->field;
for (i= 0; *field && skel[i].colname; i++, field++) {
- DBUG_PRINT( "oq-debug", ("Column %d: name='%s', expected '%s'; type=%d, expected %d.", i, (*field)->field_name, skel[i].colname, (*field)->type(), skel[i].coltype));
+ DBUG_PRINT( "oq-debug", ("Column %d: name='%s', expected '%s'; type=%d, expected %d.", i, (*field)->field_name.str, skel[i].colname, (*field)->type(), skel[i].coltype));
bool badColumn = false;
bool isLatchColumn = strcmp(skel[i].colname, "latch")==0;
bool isStringLatch = true;
@@ -346,7 +346,7 @@ int ha_oqgraph::oqgraph_check_table_structure (TABLE *table_arg)
push_warning_printf( current_thd, Sql_condition::WARN_LEVEL_WARN, HA_WRONG_CREATE_OPTION, "Column %d must be NULL.", i);
}
/* Check the column name */
- if (!badColumn) if (strcmp(skel[i].colname,(*field)->field_name)) {
+ if (!badColumn) if (strcmp(skel[i].colname,(*field)->field_name.str)) {
badColumn = true;
push_warning_printf( current_thd, Sql_condition::WARN_LEVEL_WARN, HA_WRONG_CREATE_OPTION, "Column %d must be named '%s'.", i, skel[i].colname);
}
@@ -562,7 +562,7 @@ int ha_oqgraph::open(const char *name, int mode, uint test_if_locked)
init_tmp_table_share( thd, share, table->s->db.str, table->s->db.length, options->table_name, "");
// because of that, we need to reinitialize the memroot (to reset MY_THREAD_SPECIFIC flag)
DBUG_ASSERT(share->mem_root.used == NULL); // it's still empty
- init_sql_alloc(&share->mem_root, TABLE_ALLOC_BLOCK_SIZE, 0, MYF(0));
+ init_sql_alloc(&share->mem_root, "share", TABLE_ALLOC_BLOCK_SIZE, 0, MYF(0));
// What I think this code is doing:
// * Our OQGRAPH table is `database_blah/name`
@@ -577,11 +577,10 @@ int ha_oqgraph::open(const char *name, int mode, uint test_if_locked)
size_t tlen= strlen(options->table_name);
size_t plen= (int)(p - name) + tlen + 1;
- share->path.str= (char*)alloc_root(&share->mem_root, plen + 1); // MDEV-5996 space for trailing zero
- // it seems there was a misunderstanding of why there is a separate length field in the String object
- strmov(strnmov(share->path.str, name, (int)(p - name) + 1), options->table_name);
-
- share->path.str[plen] = 0; // MDEV-5996 Make sure the pointer is zero terminated. I really think this needs refactoring, soon...
+ share->path.str= (char*)alloc_root(&share->mem_root, plen + 1);
+ strmov(strnmov((char*) share->path.str, name, (int)(p - name) + 1),
+ options->table_name);
+ DBUG_ASSERT(strlen(share->path.str) == plen);
share->normalized_path.str= share->path.str;
share->path.length= share->normalized_path.length= plen;
@@ -655,7 +654,7 @@ int ha_oqgraph::open(const char *name, int mode, uint test_if_locked)
for (Field **field= edges->field; *field; ++field)
{
- if (strcmp(options->origid, (*field)->field_name))
+ if (strcmp(options->origid, (*field)->field_name.str))
continue;
if ((*field)->cmp_type() != INT_RESULT ||
!((*field)->flags & NOT_NULL_FLAG))
@@ -680,7 +679,7 @@ int ha_oqgraph::open(const char *name, int mode, uint test_if_locked)
for (Field **field= edges->field; *field; ++field)
{
- if (strcmp(options->destid, (*field)->field_name))
+ if (strcmp(options->destid, (*field)->field_name.str))
continue;
if ((*field)->type() != origid->type() ||
!((*field)->flags & NOT_NULL_FLAG))
@@ -703,7 +702,7 @@ int ha_oqgraph::open(const char *name, int mode, uint test_if_locked)
}
// Make sure origid column != destid column
- if (strcmp( origid->field_name, destid->field_name)==0) {
+ if (strcmp( origid->field_name.str, destid->field_name.str)==0) {
fprint_error("Invalid OQGRAPH backing store ('%s.destid' attribute set to same column as origid attribute)", p+1, options->table_name);
closefrm(edges);
free_table_share(share);
@@ -712,7 +711,7 @@ int ha_oqgraph::open(const char *name, int mode, uint test_if_locked)
for (Field **field= edges->field; options->weight && *field; ++field)
{
- if (strcmp(options->weight, (*field)->field_name))
+ if (strcmp(options->weight, (*field)->field_name.str))
continue;
if ((*field)->result_type() != REAL_RESULT ||
!((*field)->flags & NOT_NULL_FLAG))
@@ -803,7 +802,7 @@ int ha_oqgraph::write_row(byte * buf)
return HA_ERR_TABLE_READONLY;
}
-int ha_oqgraph::update_row(const byte * old, byte * buf)
+int ha_oqgraph::update_row(const uchar * old, const uchar * buf)
{
return HA_ERR_TABLE_READONLY;
}
diff --git a/storage/oqgraph/ha_oqgraph.h b/storage/oqgraph/ha_oqgraph.h
index 07f47bd1239..f06db8bbf14 100644
--- a/storage/oqgraph/ha_oqgraph.h
+++ b/storage/oqgraph/ha_oqgraph.h
@@ -84,7 +84,7 @@ public:
int open(const char *name, int mode, uint test_if_locked);
int close(void);
int write_row(byte * buf);
- int update_row(const byte * old_data, byte * new_data);
+ int update_row(const uchar * old_data, const uchar * new_data);
int delete_row(const byte * buf);
int index_read(byte * buf, const byte * key,
uint key_len, enum ha_rkey_function find_flag);
@@ -118,7 +118,7 @@ public:
virtual const char *table_type() const { return hton_name(ht)->str; }
#endif
- my_bool register_query_cache_table(THD *thd, char *table_key,
+ my_bool register_query_cache_table(THD *thd, const char *table_key,
uint key_length,
qc_engine_callback
*engine_callback,
diff --git a/storage/perfschema/ha_perfschema.cc b/storage/perfschema/ha_perfschema.cc
index 4077e28a51c..7ea68670304 100644
--- a/storage/perfschema/ha_perfschema.cc
+++ b/storage/perfschema/ha_perfschema.cc
@@ -286,7 +286,7 @@ void ha_perfschema::use_hidden_primary_key(void)
table->column_bitmaps_set_no_signal(&table->s->all_set, table->write_set);
}
-int ha_perfschema::update_row(const uchar *old_data, uchar *new_data)
+int ha_perfschema::update_row(const uchar *old_data, const uchar *new_data)
{
DBUG_ENTER("ha_perfschema::update_row");
if (!pfs_initialized)
diff --git a/storage/perfschema/ha_perfschema.h b/storage/perfschema/ha_perfschema.h
index ff87b1fd665..988caae2de3 100644
--- a/storage/perfschema/ha_perfschema.h
+++ b/storage/perfschema/ha_perfschema.h
@@ -130,7 +130,7 @@ public:
@param new_data the row new values
@return 0 on success
*/
- int update_row(const uchar *old_data, uchar *new_data);
+ int update_row(const uchar *old_data, const uchar *new_data);
/**
Delete a row.
@@ -188,7 +188,8 @@ public:
{ return HA_CACHE_TBL_NOCACHE; }
virtual my_bool register_query_cache_table
- (THD *, char *, uint , qc_engine_callback *engine_callback, ulonglong *)
+ (THD *, const char *, uint , qc_engine_callback *engine_callback,
+ ulonglong *)
{
*engine_callback= 0;
return FALSE;
diff --git a/storage/perfschema/pfs.cc b/storage/perfschema/pfs.cc
index 624c7f4697b..fd415d8bc21 100644
--- a/storage/perfschema/pfs.cc
+++ b/storage/perfschema/pfs.cc
@@ -1236,7 +1236,7 @@ static enum_operation_type socket_operation_map[]=
@param [out] output_length Length of the resulting output string.
@return 0 for success, non zero for errors
*/
-static int build_prefix(const LEX_STRING *prefix, const char *category,
+static int build_prefix(const LEX_CSTRING *prefix, const char *category,
char *output, int *output_length)
{
int len= strlen(category);
diff --git a/storage/perfschema/pfs_autosize.cc b/storage/perfschema/pfs_autosize.cc
index 6f267cb4599..fd428cd6004 100644
--- a/storage/perfschema/pfs_autosize.cc
+++ b/storage/perfschema/pfs_autosize.cc
@@ -124,7 +124,7 @@ PFS_sizing_data small_data=
/* Account / user / host */
10, 5, 20,
/* History sizes */
- 5, 100, 5, 100, 5, 100,
+ 10, 100, 10, 100, 10, 100,
/* Digests */
1000,
/* Session connect attrs. */
@@ -140,7 +140,7 @@ PFS_sizing_data medium_data=
/* Account / user / host */
100, 100, 100,
/* History sizes */
- 10, 1000, 10, 1000, 10, 1000,
+ 20, 1000, 20, 1000, 20, 1000,
/* Digests */
5000,
/* Session connect attrs. */
@@ -156,7 +156,7 @@ PFS_sizing_data large_data=
/* Account / user / host */
100, 100, 100,
/* History sizes */
- 10, 10000, 10, 10000, 10, 10000,
+ 20, 10000, 20, 10000, 20, 10000,
/* Digests */
10000,
/* Session connect attrs. */
diff --git a/storage/perfschema/pfs_column_values.cc b/storage/perfschema/pfs_column_values.cc
index 65d0ae7171b..9c4dee89af5 100644
--- a/storage/perfschema/pfs_column_values.cc
+++ b/storage/perfschema/pfs_column_values.cc
@@ -22,29 +22,29 @@
#include "my_global.h"
#include "pfs_column_values.h"
-LEX_STRING PERFORMANCE_SCHEMA_str=
-{ C_STRING_WITH_LEN("performance_schema") };
+LEX_CSTRING PERFORMANCE_SCHEMA_str=
+{ STRING_WITH_LEN("performance_schema") };
-LEX_STRING mutex_instrument_prefix=
-{ C_STRING_WITH_LEN("wait/synch/mutex/") };
+LEX_CSTRING mutex_instrument_prefix=
+{ STRING_WITH_LEN("wait/synch/mutex/") };
-LEX_STRING rwlock_instrument_prefix=
-{ C_STRING_WITH_LEN("wait/synch/rwlock/") };
+LEX_CSTRING rwlock_instrument_prefix=
+{ STRING_WITH_LEN("wait/synch/rwlock/") };
-LEX_STRING cond_instrument_prefix=
-{ C_STRING_WITH_LEN("wait/synch/cond/") };
+LEX_CSTRING cond_instrument_prefix=
+{ STRING_WITH_LEN("wait/synch/cond/") };
-LEX_STRING thread_instrument_prefix=
-{ C_STRING_WITH_LEN("thread/") };
+LEX_CSTRING thread_instrument_prefix=
+{ STRING_WITH_LEN("thread/") };
-LEX_STRING file_instrument_prefix=
-{ C_STRING_WITH_LEN("wait/io/file/") };
+LEX_CSTRING file_instrument_prefix=
+{ STRING_WITH_LEN("wait/io/file/") };
-LEX_STRING stage_instrument_prefix=
-{ C_STRING_WITH_LEN("stage/") };
+LEX_CSTRING stage_instrument_prefix=
+{ STRING_WITH_LEN("stage/") };
-LEX_STRING statement_instrument_prefix=
-{ C_STRING_WITH_LEN("statement/") };
+LEX_CSTRING statement_instrument_prefix=
+{ STRING_WITH_LEN("statement/") };
-LEX_STRING socket_instrument_prefix=
-{ C_STRING_WITH_LEN("wait/io/socket/") };
+LEX_CSTRING socket_instrument_prefix=
+{ STRING_WITH_LEN("wait/io/socket/") };
diff --git a/storage/perfschema/pfs_column_values.h b/storage/perfschema/pfs_column_values.h
index 204d5230ddf..952230043af 100644
--- a/storage/perfschema/pfs_column_values.h
+++ b/storage/perfschema/pfs_column_values.h
@@ -25,23 +25,23 @@
*/
/** String, "PERFORMANCE_SCHEMA". */
-extern LEX_STRING PERFORMANCE_SCHEMA_str;
+extern LEX_CSTRING PERFORMANCE_SCHEMA_str;
/** String prefix for all mutex instruments. */
-extern LEX_STRING mutex_instrument_prefix;
+extern LEX_CSTRING mutex_instrument_prefix;
/** String prefix for all rwlock instruments. */
-extern LEX_STRING rwlock_instrument_prefix;
+extern LEX_CSTRING rwlock_instrument_prefix;
/** String prefix for all cond instruments. */
-extern LEX_STRING cond_instrument_prefix;
+extern LEX_CSTRING cond_instrument_prefix;
/** String prefix for all thread instruments. */
-extern LEX_STRING thread_instrument_prefix;
+extern LEX_CSTRING thread_instrument_prefix;
/** String prefix for all file instruments. */
-extern LEX_STRING file_instrument_prefix;
+extern LEX_CSTRING file_instrument_prefix;
/** String prefix for all stage instruments. */
-extern LEX_STRING stage_instrument_prefix;
+extern LEX_CSTRING stage_instrument_prefix;
/** String prefix for all statement instruments. */
-extern LEX_STRING statement_instrument_prefix;
-extern LEX_STRING socket_instrument_prefix;
+extern LEX_CSTRING statement_instrument_prefix;
+extern LEX_CSTRING socket_instrument_prefix;
#endif
diff --git a/storage/perfschema/pfs_engine_table.cc b/storage/perfschema/pfs_engine_table.cc
index 547e5a3dfe3..2dfad937e87 100644
--- a/storage/perfschema/pfs_engine_table.cc
+++ b/storage/perfschema/pfs_engine_table.cc
@@ -284,7 +284,7 @@ int PFS_engine_table::read_row(TABLE *table,
*/
int PFS_engine_table::update_row(TABLE *table,
const unsigned char *old_buf,
- unsigned char *new_buf,
+ const unsigned char *new_buf,
Field **fields)
{
my_bitmap_map *org_bitmap;
@@ -396,7 +396,7 @@ void PFS_engine_table::set_field_enum(Field *f, ulonglong value)
void PFS_engine_table::set_field_timestamp(Field *f, ulonglong value)
{
- DBUG_ASSERT(is_timestamp_type(f->real_type()));
+ DBUG_ASSERT(f->type_handler()->is_timestamp_type());
Field_timestamp *f2= (Field_timestamp*) f;
f2->store_TIME((long)(value / 1000000), (value % 1000000));
}
@@ -428,7 +428,7 @@ PFS_engine_table::get_field_varchar_utf8(Field *f, String *val)
int PFS_engine_table::update_row_values(TABLE *,
const unsigned char *,
- unsigned char *,
+ const unsigned char *,
Field **)
{
return HA_ERR_WRONG_COMMAND;
@@ -1372,7 +1372,7 @@ end:
}
int pfs_discover_table_names(handlerton *hton __attribute__((unused)),
- LEX_STRING *db,
+ LEX_CSTRING *db,
MY_DIR *dir __attribute__((unused)),
handlerton::discovered_list *result)
{
diff --git a/storage/perfschema/pfs_engine_table.h b/storage/perfschema/pfs_engine_table.h
index 2bbf8891420..e12ab36cd61 100644
--- a/storage/perfschema/pfs_engine_table.h
+++ b/storage/perfschema/pfs_engine_table.h
@@ -46,7 +46,7 @@ public:
int read_row(TABLE *table, unsigned char *buf, Field **fields);
int update_row(TABLE *table, const unsigned char *old_buf,
- unsigned char *new_buf, Field **fields);
+ const unsigned char *new_buf, Field **fields);
/**
Delete a row from this table.
@@ -165,7 +165,7 @@ protected:
@param fields Table fields
*/
virtual int update_row_values(TABLE *table, const unsigned char *old_buf,
- unsigned char *new_buf, Field **fields);
+ const unsigned char *new_buf, Field **fields);
/**
Delete a row.
@@ -457,7 +457,7 @@ struct PFS_triple_index
bool pfs_show_status(handlerton *hton, THD *thd,
stat_print_fn *print, enum ha_stat_type stat);
-int pfs_discover_table_names(handlerton *hton, LEX_STRING *db,
+int pfs_discover_table_names(handlerton *hton, LEX_CSTRING *db,
MY_DIR *dir,
handlerton::discovered_list *result);
diff --git a/storage/perfschema/pfs_server.cc b/storage/perfschema/pfs_server.cc
index 7577154515d..ee965c0e7da 100644
--- a/storage/perfschema/pfs_server.cc
+++ b/storage/perfschema/pfs_server.cc
@@ -67,8 +67,10 @@ initialize_performance_schema(PFS_global_param *param)
The performance schema is disabled in the startup command line.
All the instrumentation is turned off.
*/
+ pfs_enabled= 0;
return NULL;
}
+ pfs_enabled= TRUE;
init_timers();
diff --git a/storage/perfschema/pfs_server.h b/storage/perfschema/pfs_server.h
index bc0c69e86b9..dd092713d8e 100644
--- a/storage/perfschema/pfs_server.h
+++ b/storage/perfschema/pfs_server.h
@@ -49,7 +49,7 @@
#define PFS_MAX_SETUP_OBJECT 100
#endif
#ifndef PFS_MAX_STAGE_CLASS
- #define PFS_MAX_STAGE_CLASS 150
+ #define PFS_MAX_STAGE_CLASS 160
#endif
#ifndef PFS_STATEMENTS_STACK_SIZE
#define PFS_STATEMENTS_STACK_SIZE 10
diff --git a/storage/perfschema/table_setup_actors.cc b/storage/perfschema/table_setup_actors.cc
index c82d67fba2d..bf6006057df 100644
--- a/storage/perfschema/table_setup_actors.cc
+++ b/storage/perfschema/table_setup_actors.cc
@@ -217,7 +217,7 @@ int table_setup_actors::read_row_values(TABLE *table,
int table_setup_actors::update_row_values(TABLE *table,
const unsigned char *old_buf,
- unsigned char *new_buf,
+ const unsigned char *new_buf,
Field **fields)
{
Field *f;
diff --git a/storage/perfschema/table_setup_actors.h b/storage/perfschema/table_setup_actors.h
index 2a9395dfac7..66379593cc5 100644
--- a/storage/perfschema/table_setup_actors.h
+++ b/storage/perfschema/table_setup_actors.h
@@ -71,7 +71,7 @@ protected:
virtual int update_row_values(TABLE *table,
const unsigned char *old_buf,
- unsigned char *new_buf,
+ const unsigned char *new_buf,
Field **fields);
virtual int delete_row_values(TABLE *table,
diff --git a/storage/perfschema/table_setup_consumers.cc b/storage/perfschema/table_setup_consumers.cc
index c09853ffeca..f3529eb8846 100644
--- a/storage/perfschema/table_setup_consumers.cc
+++ b/storage/perfschema/table_setup_consumers.cc
@@ -190,7 +190,7 @@ int table_setup_consumers::read_row_values(TABLE *table,
int table_setup_consumers::update_row_values(TABLE *table,
const unsigned char *,
- unsigned char *,
+ const unsigned char *,
Field **fields)
{
Field *f;
diff --git a/storage/perfschema/table_setup_consumers.h b/storage/perfschema/table_setup_consumers.h
index e59033c0ad1..f5a386829a1 100644
--- a/storage/perfschema/table_setup_consumers.h
+++ b/storage/perfschema/table_setup_consumers.h
@@ -60,7 +60,7 @@ protected:
virtual int update_row_values(TABLE *table,
const unsigned char *old_buf,
- unsigned char *new_buf,
+ const unsigned char *new_buf,
Field **fields);
table_setup_consumers();
diff --git a/storage/perfschema/table_setup_instruments.cc b/storage/perfschema/table_setup_instruments.cc
index d911128ce94..f5ac1cafe63 100644
--- a/storage/perfschema/table_setup_instruments.cc
+++ b/storage/perfschema/table_setup_instruments.cc
@@ -217,7 +217,7 @@ int table_setup_instruments::read_row_values(TABLE *table,
int table_setup_instruments::update_row_values(TABLE *table,
const unsigned char *,
- unsigned char *,
+ const unsigned char *,
Field **fields)
{
Field *f;
diff --git a/storage/perfschema/table_setup_instruments.h b/storage/perfschema/table_setup_instruments.h
index 2e70a528bbd..dfe9070721b 100644
--- a/storage/perfschema/table_setup_instruments.h
+++ b/storage/perfschema/table_setup_instruments.h
@@ -92,7 +92,7 @@ protected:
virtual int update_row_values(TABLE *table,
const unsigned char *old_buf,
- unsigned char *new_buf,
+ const unsigned char *new_buf,
Field **fields);
table_setup_instruments();
diff --git a/storage/perfschema/table_setup_objects.cc b/storage/perfschema/table_setup_objects.cc
index 5321271a62d..994622f4a52 100644
--- a/storage/perfschema/table_setup_objects.cc
+++ b/storage/perfschema/table_setup_objects.cc
@@ -265,7 +265,7 @@ int table_setup_objects::read_row_values(TABLE *table,
int table_setup_objects::update_row_values(TABLE *table,
const unsigned char *,
- unsigned char *,
+ const unsigned char *,
Field **fields)
{
int result;
diff --git a/storage/perfschema/table_setup_objects.h b/storage/perfschema/table_setup_objects.h
index 55423ffd90f..7fcefa45407 100644
--- a/storage/perfschema/table_setup_objects.h
+++ b/storage/perfschema/table_setup_objects.h
@@ -74,7 +74,7 @@ protected:
virtual int update_row_values(TABLE *table,
const unsigned char *old_buf,
- unsigned char *new_buf,
+ const unsigned char *new_buf,
Field **fields);
virtual int delete_row_values(TABLE *table,
diff --git a/storage/perfschema/table_setup_timers.cc b/storage/perfschema/table_setup_timers.cc
index 911fa121e06..9c6af49595d 100644
--- a/storage/perfschema/table_setup_timers.cc
+++ b/storage/perfschema/table_setup_timers.cc
@@ -145,7 +145,7 @@ int table_setup_timers::read_row_values(TABLE *table,
int table_setup_timers::update_row_values(TABLE *table,
const unsigned char *,
- unsigned char *,
+ const unsigned char *,
Field **fields)
{
Field *f;
diff --git a/storage/perfschema/table_setup_timers.h b/storage/perfschema/table_setup_timers.h
index 46af68bb9e3..f230a68af98 100644
--- a/storage/perfschema/table_setup_timers.h
+++ b/storage/perfschema/table_setup_timers.h
@@ -58,7 +58,7 @@ protected:
virtual int update_row_values(TABLE *table,
const unsigned char *old_buf,
- unsigned char *new_buf,
+ const unsigned char *new_buf,
Field **fields);
table_setup_timers();
diff --git a/storage/perfschema/table_threads.cc b/storage/perfschema/table_threads.cc
index 211865c0f63..1458c0b11d5 100644
--- a/storage/perfschema/table_threads.cc
+++ b/storage/perfschema/table_threads.cc
@@ -292,7 +292,7 @@ int table_threads::read_row_values(TABLE *table,
int table_threads::update_row_values(TABLE *table,
const unsigned char *old_buf,
- unsigned char *new_buf,
+ const unsigned char *new_buf,
Field **fields)
{
Field *f;
diff --git a/storage/perfschema/table_threads.h b/storage/perfschema/table_threads.h
index 10afbe14e74..fb843f5ca36 100644
--- a/storage/perfschema/table_threads.h
+++ b/storage/perfschema/table_threads.h
@@ -87,7 +87,7 @@ protected:
virtual int update_row_values(TABLE *table,
const unsigned char *old_buf,
- unsigned char *new_buf,
+ const unsigned char *new_buf,
Field **fields);
protected:
diff --git a/storage/rocksdb/ha_rocksdb.cc b/storage/rocksdb/ha_rocksdb.cc
index 4a0a3eddb1a..3dd69659e1a 100644
--- a/storage/rocksdb/ha_rocksdb.cc
+++ b/storage/rocksdb/ha_rocksdb.cc
@@ -6135,7 +6135,7 @@ int ha_rocksdb::create_cfs(
"allow index-only access in secondary key and has "
"reduced disk space efficiency in primary key.",
tbl_def_arg->full_tablename().c_str(),
- table_arg->key_info[i].key_part[part].field->field_name);
+ table_arg->key_info[i].key_part[part].field->field_name.str);
my_error(ER_INTERNAL_ERROR, MYF(ME_JUST_WARNING), buf);
}
@@ -6382,8 +6382,8 @@ int ha_rocksdb::compare_key_parts(const KEY *const old_key,
/* Check to see that key parts themselves match */
for (uint i = 0; i < old_key->user_defined_key_parts; i++) {
- if (strcmp(old_key->key_part[i].field->field_name,
- new_key->key_part[i].field->field_name) != 0) {
+ if (strcmp(old_key->key_part[i].field->field_name.str,
+ new_key->key_part[i].field->field_name.str) != 0) {
DBUG_RETURN(HA_EXIT_FAILURE);
}
@@ -9805,7 +9805,8 @@ void ha_rocksdb::calc_updated_indexes() {
HA_EXIT_SUCCESS OK
other HA_ERR error code (can be SE-specific)
*/
-int ha_rocksdb::update_row(const uchar *const old_data, uchar *const new_data) {
+int ha_rocksdb::update_row(const uchar *const old_data,
+ const uchar *const new_data) {
DBUG_ENTER_FUNC();
DBUG_ASSERT(old_data != nullptr);
diff --git a/storage/rocksdb/ha_rocksdb.h b/storage/rocksdb/ha_rocksdb.h
index 4432a4de8d1..ff715423f71 100644
--- a/storage/rocksdb/ha_rocksdb.h
+++ b/storage/rocksdb/ha_rocksdb.h
@@ -1005,7 +1005,7 @@ public:
int write_row(uchar *const buf) override
MY_ATTRIBUTE((__warn_unused_result__));
- int update_row(const uchar *const old_data, uchar *const new_data) override
+ int update_row(const uchar *const old_data, const uchar *const new_data) override
MY_ATTRIBUTE((__warn_unused_result__));
int delete_row(const uchar *const buf) override
MY_ATTRIBUTE((__warn_unused_result__));
@@ -1288,7 +1288,7 @@ public:
enum thr_lock_type lock_type) override
MY_ATTRIBUTE((__warn_unused_result__));
- my_bool register_query_cache_table(THD *const thd, char *const table_key,
+ my_bool register_query_cache_table(THD *const thd, const char *table_key,
uint key_length,
qc_engine_callback *const engine_callback,
ulonglong *const engine_data) override {
diff --git a/storage/rocksdb/mysql-test/rocksdb/r/2pc_group_commit.result b/storage/rocksdb/mysql-test/rocksdb/r/2pc_group_commit.result
index a9e80f1562e..ac546a284fc 100644
--- a/storage/rocksdb/mysql-test/rocksdb/r/2pc_group_commit.result
+++ b/storage/rocksdb/mysql-test/rocksdb/r/2pc_group_commit.result
@@ -14,16 +14,16 @@ SET GLOBAL rocksdb_flush_log_at_trx_commit=1;
select variable_value into @b1 from information_schema.global_status where variable_name='Binlog_commits';
select variable_value into @b2 from information_schema.global_status where variable_name='Binlog_group_commits';
select variable_value into @b3 from information_schema.global_status where variable_name='Rocksdb_wal_synced';
-select IF(variable_value - @b1 = 1000, 'OK', 'FAIL') as Binlog_commits
+select IF(variable_value - @b1 = 1000, 'OK', variable_value - @b1 = 1000) as Binlog_commits
from information_schema.global_status where variable_name='Binlog_commits';
Binlog_commits
OK
-select IF(variable_value - @b2 = 1000, 'OK', 'FAIL') as Binlog_group_commits
+select IF(variable_value - @b2 = 1000, 'OK', variable_value - @b2 = 1000) as Binlog_group_commits
from information_schema.global_status where variable_name='Binlog_group_commits';
Binlog_group_commits
OK
# Prepare operations sync, commits don't. We expect slightly more than 1K syncs:
-select IF(variable_value - @b3 between 1000 and 1500, 'OK', 'FAIL') as Rocksdb_wal_synced
+select IF(variable_value - @b3 between 1000 and 1500, 'OK', variable_value - @b3 between 1000 and 1500) as Rocksdb_wal_synced
from information_schema.global_status where variable_name='Rocksdb_wal_synced';
Rocksdb_wal_synced
OK
@@ -33,17 +33,17 @@ OK
select variable_value into @b1 from information_schema.global_status where variable_name='Binlog_commits';
select variable_value into @b2 from information_schema.global_status where variable_name='Binlog_group_commits';
select variable_value into @b3 from information_schema.global_status where variable_name='Rocksdb_wal_synced';
-select IF(variable_value - @b1 = 10000, 'OK', 'FAIL') as Binlog_commits
+select IF(variable_value - @b1 = 10000, 'OK', variable_value - @b1 = 10000) as Binlog_commits
from information_schema.global_status where variable_name='Binlog_commits';
Binlog_commits
OK
-select IF(variable_value - @b2 between 100 and 5000, 'OK', 'FAIL') as Binlog_group_commits
+select IF(variable_value - @b2 between 100 and 5000, 'OK', variable_value - @b2 between 100 and 5000) as Binlog_group_commits
from information_schema.global_status where variable_name='Binlog_group_commits';
Binlog_group_commits
OK
-select IF(variable_value - @b3 between 1 and 9000, 'OK', 'FAIL')
+select IF(variable_value - @b3 between 1 and 9000, 'OK', variable_value - @b3 between 1 and 9000)
from information_schema.global_status where variable_name='Rocksdb_wal_synced';
-IF(variable_value - @b3 between 1 and 9000, 'OK', 'FAIL')
+IF(variable_value - @b3 between 1 and 9000, 'OK', variable_value - @b3 between 1 and 9000)
OK
##
# 2PC enabled, MyRocks durability disabled, single thread
@@ -53,17 +53,17 @@ SET GLOBAL rocksdb_flush_log_at_trx_commit=0;
select variable_value into @b1 from information_schema.global_status where variable_name='Binlog_commits';
select variable_value into @b2 from information_schema.global_status where variable_name='Binlog_group_commits';
select variable_value into @b3 from information_schema.global_status where variable_name='Rocksdb_wal_synced';
-select IF(variable_value - @b1 = 1000, 'OK', 'FAIL') as Binlog_commits
+select IF(variable_value - @b1 = 1000, 'OK', variable_value - @b1 = 1000) as Binlog_commits
from information_schema.global_status where variable_name='Binlog_commits';
Binlog_commits
OK
-select IF(variable_value - @b2 = 1000, 'OK', 'FAIL') as Binlog_group_commits
+select IF(variable_value - @b2 = 1000, 'OK', variable_value - @b2 = 1000) as Binlog_group_commits
from information_schema.global_status where variable_name='Binlog_group_commits';
Binlog_group_commits
OK
-select IF(variable_value - @b3 < 10, 'OK', 'FAIL')
+select IF(variable_value - @b3 < 10, 'OK', variable_value - @b3 < 10)
from information_schema.global_status where variable_name='Rocksdb_wal_synced';
-IF(variable_value - @b3 < 10, 'OK', 'FAIL')
+IF(variable_value - @b3 < 10, 'OK', variable_value - @b3 < 10)
OK
##
# 2PC enabled, MyRocks durability disabled, concurrent workload
@@ -71,17 +71,17 @@ OK
select variable_value into @b1 from information_schema.global_status where variable_name='Binlog_commits';
select variable_value into @b2 from information_schema.global_status where variable_name='Binlog_group_commits';
select variable_value into @b3 from information_schema.global_status where variable_name='Rocksdb_wal_synced';
-select IF(variable_value - @b1 = 10000, 'OK', 'FAIL') as Binlog_commits
+select IF(variable_value - @b1 = 10000, 'OK', variable_value - @b1 = 10000) as Binlog_commits
from information_schema.global_status where variable_name='Binlog_commits';
Binlog_commits
OK
-select IF(variable_value - @b2 < 8000, 'OK', 'FAIL') as Binlog_group_commits
+select IF(variable_value - @b2 < 8000, 'OK', variable_value - @b2 < 8000) as Binlog_group_commits
from information_schema.global_status where variable_name='Binlog_group_commits';
Binlog_group_commits
OK
-select IF(variable_value - @b3 < 10, 'OK', 'FAIL')
+select IF(variable_value - @b3 < 10, 'OK', variable_value - @b3 < 10)
from information_schema.global_status where variable_name='Rocksdb_wal_synced';
-IF(variable_value - @b3 < 10, 'OK', 'FAIL')
+IF(variable_value - @b3 < 10, 'OK', variable_value - @b3 < 10)
OK
SET GLOBAL rocksdb_enable_2pc= @save_rocksdb_enable_2pc;
SET GLOBAL rocksdb_flush_log_at_trx_commit= @save_rocksdb_flush_log_at_trx_commit;
diff --git a/storage/rocksdb/mysql-test/rocksdb/r/bulk_load.result b/storage/rocksdb/mysql-test/rocksdb/r/bulk_load.result
index b931a61e233..90f28929db6 100644
--- a/storage/rocksdb/mysql-test/rocksdb/r/bulk_load.result
+++ b/storage/rocksdb/mysql-test/rocksdb/r/bulk_load.result
@@ -38,20 +38,20 @@ LOAD DATA INFILE <input_file> INTO TABLE t2;
LOAD DATA INFILE <input_file> INTO TABLE t3;
set rocksdb_bulk_load=0;
SHOW TABLE STATUS WHERE name LIKE 't%';
-Name Engine Version Row_format Rows Avg_row_length Data_length Max_data_length Index_length Data_free Auto_increment Create_time Update_time Check_time Collation Checksum Create_options Comment
-t1 ROCKSDB 10 Fixed 5000000 # # # # 0 NULL NULL NULL NULL latin1_bin NULL
-t2 ROCKSDB 10 Fixed 5000000 # # # # 0 NULL NULL NULL NULL latin1_bin NULL
-t3 ROCKSDB 10 Fixed 5000000 # # # # 0 NULL NULL NULL NULL latin1_bin NULL partitioned
+Name Engine Version Row_format Rows Avg_row_length Data_length Max_data_length Index_length Data_free Auto_increment Create_time Update_time Check_time Collation Checksum Create_options Comment Max_index_length Temporary
+t1 ROCKSDB 10 Fixed 5000000 # # # # 0 NULL NULL NULL NULL latin1_bin NULL 0 N
+t2 ROCKSDB 10 Fixed 5000000 # # # # 0 NULL NULL NULL NULL latin1_bin NULL 0 N
+t3 ROCKSDB 10 Fixed 5000000 # # # # 0 NULL NULL NULL NULL latin1_bin NULL partitioned 0 N
ANALYZE TABLE t1, t2, t3;
Table Op Msg_type Msg_text
test.t1 analyze status OK
test.t2 analyze status OK
test.t3 analyze status OK
SHOW TABLE STATUS WHERE name LIKE 't%';
-Name Engine Version Row_format Rows Avg_row_length Data_length Max_data_length Index_length Data_free Auto_increment Create_time Update_time Check_time Collation Checksum Create_options Comment
-t1 ROCKSDB 10 Fixed 5000000 # # # # 0 NULL NULL NULL NULL latin1_bin NULL
-t2 ROCKSDB 10 Fixed 5000000 # # # # 0 NULL NULL NULL NULL latin1_bin NULL
-t3 ROCKSDB 10 Fixed 5000000 # # # # 0 NULL NULL NULL NULL latin1_bin NULL partitioned
+Name Engine Version Row_format Rows Avg_row_length Data_length Max_data_length Index_length Data_free Auto_increment Create_time Update_time Check_time Collation Checksum Create_options Comment Max_index_length Temporary
+t1 ROCKSDB 10 Fixed 5000000 # # # # 0 NULL NULL NULL NULL latin1_bin NULL 0 N
+t2 ROCKSDB 10 Fixed 5000000 # # # # 0 NULL NULL NULL NULL latin1_bin NULL 0 N
+t3 ROCKSDB 10 Fixed 5000000 # # # # 0 NULL NULL NULL NULL latin1_bin NULL partitioned 0 N
select count(pk) from t1;
count(pk)
5000000
diff --git a/storage/rocksdb/mysql-test/rocksdb/r/bulk_load_rev_cf.result b/storage/rocksdb/mysql-test/rocksdb/r/bulk_load_rev_cf.result
index 947f67434a5..c24d987a906 100644
--- a/storage/rocksdb/mysql-test/rocksdb/r/bulk_load_rev_cf.result
+++ b/storage/rocksdb/mysql-test/rocksdb/r/bulk_load_rev_cf.result
@@ -38,20 +38,20 @@ LOAD DATA INFILE <input_file> INTO TABLE t2;
LOAD DATA INFILE <input_file> INTO TABLE t3;
set rocksdb_bulk_load=0;
SHOW TABLE STATUS WHERE name LIKE 't%';
-Name Engine Version Row_format Rows Avg_row_length Data_length Max_data_length Index_length Data_free Auto_increment Create_time Update_time Check_time Collation Checksum Create_options Comment
-t1 ROCKSDB 10 Fixed 5000000 # # # # 0 NULL NULL NULL NULL latin1_bin NULL
-t2 ROCKSDB 10 Fixed 5000000 # # # # 0 NULL NULL NULL NULL latin1_bin NULL
-t3 ROCKSDB 10 Fixed 5000000 # # # # 0 NULL NULL NULL NULL latin1_bin NULL partitioned
+Name Engine Version Row_format Rows Avg_row_length Data_length Max_data_length Index_length Data_free Auto_increment Create_time Update_time Check_time Collation Checksum Create_options Comment Max_index_length Temporary
+t1 ROCKSDB 10 Fixed 5000000 # # # # 0 NULL NULL NULL NULL latin1_bin NULL 0 N
+t2 ROCKSDB 10 Fixed 5000000 # # # # 0 NULL NULL NULL NULL latin1_bin NULL 0 N
+t3 ROCKSDB 10 Fixed 5000000 # # # # 0 NULL NULL NULL NULL latin1_bin NULL partitioned 0 N
ANALYZE TABLE t1, t2, t3;
Table Op Msg_type Msg_text
test.t1 analyze status OK
test.t2 analyze status OK
test.t3 analyze status OK
SHOW TABLE STATUS WHERE name LIKE 't%';
-Name Engine Version Row_format Rows Avg_row_length Data_length Max_data_length Index_length Data_free Auto_increment Create_time Update_time Check_time Collation Checksum Create_options Comment
-t1 ROCKSDB 10 Fixed 5000000 # # # # 0 NULL NULL NULL NULL latin1_bin NULL
-t2 ROCKSDB 10 Fixed 5000000 # # # # 0 NULL NULL NULL NULL latin1_bin NULL
-t3 ROCKSDB 10 Fixed 5000000 # # # # 0 NULL NULL NULL NULL latin1_bin NULL partitioned
+Name Engine Version Row_format Rows Avg_row_length Data_length Max_data_length Index_length Data_free Auto_increment Create_time Update_time Check_time Collation Checksum Create_options Comment Max_index_length Temporary
+t1 ROCKSDB 10 Fixed 5000000 # # # # 0 NULL NULL NULL NULL latin1_bin NULL 0 N
+t2 ROCKSDB 10 Fixed 5000000 # # # # 0 NULL NULL NULL NULL latin1_bin NULL 0 N
+t3 ROCKSDB 10 Fixed 5000000 # # # # 0 NULL NULL NULL NULL latin1_bin NULL partitioned 0 N
select count(pk) from t1;
count(pk)
5000000
diff --git a/storage/rocksdb/mysql-test/rocksdb/r/bulk_load_rev_cf_and_data.result b/storage/rocksdb/mysql-test/rocksdb/r/bulk_load_rev_cf_and_data.result
index 6c38e030afb..b851133ab18 100644
--- a/storage/rocksdb/mysql-test/rocksdb/r/bulk_load_rev_cf_and_data.result
+++ b/storage/rocksdb/mysql-test/rocksdb/r/bulk_load_rev_cf_and_data.result
@@ -38,20 +38,20 @@ LOAD DATA INFILE <input_file> INTO TABLE t2;
LOAD DATA INFILE <input_file> INTO TABLE t3;
set rocksdb_bulk_load=0;
SHOW TABLE STATUS WHERE name LIKE 't%';
-Name Engine Version Row_format Rows Avg_row_length Data_length Max_data_length Index_length Data_free Auto_increment Create_time Update_time Check_time Collation Checksum Create_options Comment
-t1 ROCKSDB 10 Fixed 5000000 # # # # 0 NULL NULL NULL NULL latin1_bin NULL
-t2 ROCKSDB 10 Fixed 5000000 # # # # 0 NULL NULL NULL NULL latin1_bin NULL
-t3 ROCKSDB 10 Fixed 5000000 # # # # 0 NULL NULL NULL NULL latin1_bin NULL partitioned
+Name Engine Version Row_format Rows Avg_row_length Data_length Max_data_length Index_length Data_free Auto_increment Create_time Update_time Check_time Collation Checksum Create_options Comment Max_index_length Temporary
+t1 ROCKSDB 10 Fixed 5000000 # # # # 0 NULL NULL NULL NULL latin1_bin NULL 0 N
+t2 ROCKSDB 10 Fixed 5000000 # # # # 0 NULL NULL NULL NULL latin1_bin NULL 0 N
+t3 ROCKSDB 10 Fixed 5000000 # # # # 0 NULL NULL NULL NULL latin1_bin NULL partitioned 0 N
ANALYZE TABLE t1, t2, t3;
Table Op Msg_type Msg_text
test.t1 analyze status OK
test.t2 analyze status OK
test.t3 analyze status OK
SHOW TABLE STATUS WHERE name LIKE 't%';
-Name Engine Version Row_format Rows Avg_row_length Data_length Max_data_length Index_length Data_free Auto_increment Create_time Update_time Check_time Collation Checksum Create_options Comment
-t1 ROCKSDB 10 Fixed 5000000 # # # # 0 NULL NULL NULL NULL latin1_bin NULL
-t2 ROCKSDB 10 Fixed 5000000 # # # # 0 NULL NULL NULL NULL latin1_bin NULL
-t3 ROCKSDB 10 Fixed 5000000 # # # # 0 NULL NULL NULL NULL latin1_bin NULL partitioned
+Name Engine Version Row_format Rows Avg_row_length Data_length Max_data_length Index_length Data_free Auto_increment Create_time Update_time Check_time Collation Checksum Create_options Comment Max_index_length Temporary
+t1 ROCKSDB 10 Fixed 5000000 # # # # 0 NULL NULL NULL NULL latin1_bin NULL 0 N
+t2 ROCKSDB 10 Fixed 5000000 # # # # 0 NULL NULL NULL NULL latin1_bin NULL 0 N
+t3 ROCKSDB 10 Fixed 5000000 # # # # 0 NULL NULL NULL NULL latin1_bin NULL partitioned 0 N
select count(pk) from t1;
count(pk)
5000000
diff --git a/storage/rocksdb/mysql-test/rocksdb/r/bulk_load_rev_data.result b/storage/rocksdb/mysql-test/rocksdb/r/bulk_load_rev_data.result
index e566691af28..efd7c40ed69 100644
--- a/storage/rocksdb/mysql-test/rocksdb/r/bulk_load_rev_data.result
+++ b/storage/rocksdb/mysql-test/rocksdb/r/bulk_load_rev_data.result
@@ -38,20 +38,20 @@ LOAD DATA INFILE <input_file> INTO TABLE t2;
LOAD DATA INFILE <input_file> INTO TABLE t3;
set rocksdb_bulk_load=0;
SHOW TABLE STATUS WHERE name LIKE 't%';
-Name Engine Version Row_format Rows Avg_row_length Data_length Max_data_length Index_length Data_free Auto_increment Create_time Update_time Check_time Collation Checksum Create_options Comment
-t1 ROCKSDB 10 Fixed 5000000 # # # # 0 NULL NULL NULL NULL latin1_bin NULL
-t2 ROCKSDB 10 Fixed 5000000 # # # # 0 NULL NULL NULL NULL latin1_bin NULL
-t3 ROCKSDB 10 Fixed 5000000 # # # # 0 NULL NULL NULL NULL latin1_bin NULL partitioned
+Name Engine Version Row_format Rows Avg_row_length Data_length Max_data_length Index_length Data_free Auto_increment Create_time Update_time Check_time Collation Checksum Create_options Comment Max_index_length Temporary
+t1 ROCKSDB 10 Fixed 5000000 # # # # 0 NULL NULL NULL NULL latin1_bin NULL 0 N
+t2 ROCKSDB 10 Fixed 5000000 # # # # 0 NULL NULL NULL NULL latin1_bin NULL 0 N
+t3 ROCKSDB 10 Fixed 5000000 # # # # 0 NULL NULL NULL NULL latin1_bin NULL partitioned 0 N
ANALYZE TABLE t1, t2, t3;
Table Op Msg_type Msg_text
test.t1 analyze status OK
test.t2 analyze status OK
test.t3 analyze status OK
SHOW TABLE STATUS WHERE name LIKE 't%';
-Name Engine Version Row_format Rows Avg_row_length Data_length Max_data_length Index_length Data_free Auto_increment Create_time Update_time Check_time Collation Checksum Create_options Comment
-t1 ROCKSDB 10 Fixed 5000000 # # # # 0 NULL NULL NULL NULL latin1_bin NULL
-t2 ROCKSDB 10 Fixed 5000000 # # # # 0 NULL NULL NULL NULL latin1_bin NULL
-t3 ROCKSDB 10 Fixed 5000000 # # # # 0 NULL NULL NULL NULL latin1_bin NULL partitioned
+Name Engine Version Row_format Rows Avg_row_length Data_length Max_data_length Index_length Data_free Auto_increment Create_time Update_time Check_time Collation Checksum Create_options Comment Max_index_length Temporary
+t1 ROCKSDB 10 Fixed 5000000 # # # # 0 NULL NULL NULL NULL latin1_bin NULL 0 N
+t2 ROCKSDB 10 Fixed 5000000 # # # # 0 NULL NULL NULL NULL latin1_bin NULL 0 N
+t3 ROCKSDB 10 Fixed 5000000 # # # # 0 NULL NULL NULL NULL latin1_bin NULL partitioned 0 N
select count(pk) from t1;
count(pk)
5000000
diff --git a/storage/rocksdb/mysql-test/rocksdb/r/bulk_load_unsorted.result b/storage/rocksdb/mysql-test/rocksdb/r/bulk_load_unsorted.result
index 2a7c7bd69fd..2bc8193e94f 100644
--- a/storage/rocksdb/mysql-test/rocksdb/r/bulk_load_unsorted.result
+++ b/storage/rocksdb/mysql-test/rocksdb/r/bulk_load_unsorted.result
@@ -67,20 +67,20 @@ LOAD DATA INFILE <input_file> INTO TABLE t2;
LOAD DATA INFILE <input_file> INTO TABLE t3;
set rocksdb_bulk_load=0;
SHOW TABLE STATUS WHERE name LIKE 't%';
-Name Engine Version Row_format Rows Avg_row_length Data_length Max_data_length Index_length Data_free Auto_increment Create_time Update_time Check_time Collation Checksum Create_options Comment
-t1 ROCKSDB 10 Fixed 5000000 # # # # 0 NULL NULL NULL NULL latin1_swedish_ci NULL
-t2 ROCKSDB 10 Fixed 5000000 # # # # 0 NULL NULL NULL NULL latin1_swedish_ci NULL
-t3 ROCKSDB 10 Fixed 5000000 # # # # 0 NULL NULL NULL NULL latin1_swedish_ci NULL partitioned
+Name Engine Version Row_format Rows Avg_row_length Data_length Max_data_length Index_length Data_free Auto_increment Create_time Update_time Check_time Collation Checksum Create_options Comment Max_index_length Temporary
+t1 ROCKSDB 10 Fixed 5000000 # # # # 0 NULL NULL NULL NULL latin1_swedish_ci NULL 0 N
+t2 ROCKSDB 10 Fixed 5000000 # # # # 0 NULL NULL NULL NULL latin1_swedish_ci NULL 0 N
+t3 ROCKSDB 10 Fixed 5000000 # # # # 0 NULL NULL NULL NULL latin1_swedish_ci NULL partitioned 0 N
ANALYZE TABLE t1, t2, t3;
Table Op Msg_type Msg_text
test.t1 analyze status OK
test.t2 analyze status OK
test.t3 analyze status OK
SHOW TABLE STATUS WHERE name LIKE 't%';
-Name Engine Version Row_format Rows Avg_row_length Data_length Max_data_length Index_length Data_free Auto_increment Create_time Update_time Check_time Collation Checksum Create_options Comment
-t1 ROCKSDB 10 Fixed 5000000 # # # # 0 NULL NULL NULL NULL latin1_swedish_ci NULL
-t2 ROCKSDB 10 Fixed 5000000 # # # # 0 NULL NULL NULL NULL latin1_swedish_ci NULL
-t3 ROCKSDB 10 Fixed 5000000 # # # # 0 NULL NULL NULL NULL latin1_swedish_ci NULL partitioned
+Name Engine Version Row_format Rows Avg_row_length Data_length Max_data_length Index_length Data_free Auto_increment Create_time Update_time Check_time Collation Checksum Create_options Comment Max_index_length Temporary
+t1 ROCKSDB 10 Fixed 5000000 # # # # 0 NULL NULL NULL NULL latin1_swedish_ci NULL 0 N
+t2 ROCKSDB 10 Fixed 5000000 # # # # 0 NULL NULL NULL NULL latin1_swedish_ci NULL 0 N
+t3 ROCKSDB 10 Fixed 5000000 # # # # 0 NULL NULL NULL NULL latin1_swedish_ci NULL partitioned 0 N
select count(a) from t1;
count(a)
5000000
diff --git a/storage/rocksdb/mysql-test/rocksdb/r/corrupted_data_reads_debug.result b/storage/rocksdb/mysql-test/rocksdb/r/corrupted_data_reads_debug.result
index 5cf9c620341..47f7bb923ba 100644
--- a/storage/rocksdb/mysql-test/rocksdb/r/corrupted_data_reads_debug.result
+++ b/storage/rocksdb/mysql-test/rocksdb/r/corrupted_data_reads_debug.result
@@ -20,7 +20,7 @@ set @tmp1=@@rocksdb_verify_row_debug_checksums;
set rocksdb_verify_row_debug_checksums=1;
set session debug_dbug= "+d,myrocks_simulate_bad_row_read1";
select * from t1 where pk=1;
-ERROR HY000: Got error 200 'Found data corruption.' from ROCKSDB
+ERROR HY000: Got error 202 'Found data corruption.' from ROCKSDB
set session debug_dbug= "-d,myrocks_simulate_bad_row_read1";
set rocksdb_verify_row_debug_checksums=@tmp1;
select * from t1 where pk=1;
@@ -28,11 +28,11 @@ pk col1
1 1
set session debug_dbug= "+d,myrocks_simulate_bad_row_read2";
select * from t1 where pk=1;
-ERROR HY000: Got error 200 'Found data corruption.' from ROCKSDB
+ERROR HY000: Got error 202 'Found data corruption.' from ROCKSDB
set session debug_dbug= "-d,myrocks_simulate_bad_row_read2";
set session debug_dbug= "+d,myrocks_simulate_bad_row_read3";
select * from t1 where pk=1;
-ERROR HY000: Got error 200 'Found data corruption.' from ROCKSDB
+ERROR HY000: Got error 202 'Found data corruption.' from ROCKSDB
set session debug_dbug= "-d,myrocks_simulate_bad_row_read3";
insert into t1 values(4,'0123456789');
select * from t1;
@@ -56,7 +56,7 @@ pk col1
ABCD 1
set session debug_dbug= "+d,myrocks_simulate_bad_pk_read1";
select * from t2;
-ERROR HY000: Got error 200 'Found data corruption.' from ROCKSDB
+ERROR HY000: Got error 202 'Found data corruption.' from ROCKSDB
set session debug_dbug= "-d,myrocks_simulate_bad_pk_read1";
drop table t2;
create table t2 (
@@ -69,6 +69,6 @@ pk col1
ABCD 1
set session debug_dbug= "+d,myrocks_simulate_bad_pk_read1";
select * from t2;
-ERROR HY000: Got error 200 'Found data corruption.' from ROCKSDB
+ERROR HY000: Got error 202 'Found data corruption.' from ROCKSDB
set session debug_dbug= "-d,myrocks_simulate_bad_pk_read1";
drop table t2;
diff --git a/storage/rocksdb/mysql-test/rocksdb/r/issue255.result b/storage/rocksdb/mysql-test/rocksdb/r/issue255.result
index 62875e378a4..be9e6d1167a 100644
--- a/storage/rocksdb/mysql-test/rocksdb/r/issue255.result
+++ b/storage/rocksdb/mysql-test/rocksdb/r/issue255.result
@@ -1,14 +1,14 @@
CREATE TABLE t1 (pk BIGINT NOT NULL PRIMARY KEY AUTO_INCREMENT);
INSERT INTO t1 VALUES (5);
SHOW TABLE STATUS LIKE 't1';
-Name Engine Version Row_format Rows Avg_row_length Data_length Max_data_length Index_length Data_free Auto_increment Create_time Update_time Check_time Collation Checksum Create_options Comment
-t1 ROCKSDB # Fixed 1 # # # # # 6 NULL NULL NULL latin1_swedish_ci NULL
+Name Engine Version Row_format Rows Avg_row_length Data_length Max_data_length Index_length Data_free Auto_increment Create_time Update_time Check_time Collation Checksum Create_options Comment Max_index_length Temporary
+t1 ROCKSDB # Fixed 1 # # # # # 6 NULL NULL NULL latin1_swedish_ci NULL 0 N
INSERT INTO t1 VALUES ('538647864786478647864');
Warnings:
Warning 1264 Out of range value for column 'pk' at row 1
SHOW TABLE STATUS LIKE 't1';
-Name Engine Version Row_format Rows Avg_row_length Data_length Max_data_length Index_length Data_free Auto_increment Create_time Update_time Check_time Collation Checksum Create_options Comment
-t1 ROCKSDB # Fixed 2 # # # # # 9223372036854775807 NULL NULL NULL latin1_swedish_ci NULL
+Name Engine Version Row_format Rows Avg_row_length Data_length Max_data_length Index_length Data_free Auto_increment Create_time Update_time Check_time Collation Checksum Create_options Comment Max_index_length Temporary
+t1 ROCKSDB # Fixed 2 # # # # # 9223372036854775807 NULL NULL NULL latin1_swedish_ci NULL 0 N
INSERT INTO t1 VALUES ();
ERROR 23000: Duplicate entry '9223372036854775807' for key 'PRIMARY'
SELECT * FROM t1;
@@ -16,6 +16,6 @@ pk
5
9223372036854775807
SHOW TABLE STATUS LIKE 't1';
-Name Engine Version Row_format Rows Avg_row_length Data_length Max_data_length Index_length Data_free Auto_increment Create_time Update_time Check_time Collation Checksum Create_options Comment
-t1 ROCKSDB # Fixed 2 # # # # # 9223372036854775807 NULL NULL NULL latin1_swedish_ci NULL
+Name Engine Version Row_format Rows Avg_row_length Data_length Max_data_length Index_length Data_free Auto_increment Create_time Update_time Check_time Collation Checksum Create_options Comment Max_index_length Temporary
+t1 ROCKSDB # Fixed 2 # # # # # 9223372036854775807 NULL NULL NULL latin1_swedish_ci NULL 0 N
DROP TABLE t1;
diff --git a/storage/rocksdb/mysql-test/rocksdb/r/rocksdb.result b/storage/rocksdb/mysql-test/rocksdb/r/rocksdb.result
index 9b084e63cd5..875950336b6 100644
--- a/storage/rocksdb/mysql-test/rocksdb/r/rocksdb.result
+++ b/storage/rocksdb/mysql-test/rocksdb/r/rocksdb.result
@@ -1407,8 +1407,8 @@ drop table t1;
create table t1 (i int primary key auto_increment) engine=RocksDB;
insert into t1 values (null),(null);
show table status like 't1';
-Name Engine Version Row_format Rows Avg_row_length Data_length Max_data_length Index_length Data_free Auto_increment Create_time Update_time Check_time Collation Checksum Create_options Comment
-t1 ROCKSDB 10 Fixed 1000 0 # 0 0 0 3 NULL NULL NULL latin1_swedish_ci NULL
+Name Engine Version Row_format Rows Avg_row_length Data_length Max_data_length Index_length Data_free Auto_increment Create_time Update_time Check_time Collation Checksum Create_options Comment Max_index_length Temporary
+t1 ROCKSDB 10 Fixed 1000 0 # 0 0 0 3 NULL NULL NULL latin1_swedish_ci NULL 0 N
drop table t1;
#
# Fix Issue #4: Crash when using pseudo-unique keys
@@ -2501,8 +2501,8 @@ DROP TABLE t1;
CREATE TABLE t1(a INT AUTO_INCREMENT KEY);
INSERT INTO t1 VALUES(0),(-1),(0);
SHOW TABLE STATUS LIKE 't1';
-Name Engine Version Row_format Rows Avg_row_length Data_length Max_data_length Index_length Data_free Auto_increment Create_time Update_time Check_time Collation Checksum Create_options Comment
-t1 ROCKSDB 10 Fixed 1000 0 0 0 0 0 3 NULL NULL NULL latin1_swedish_ci NULL
+Name Engine Version Row_format Rows Avg_row_length Data_length Max_data_length Index_length Data_free Auto_increment Create_time Update_time Check_time Collation Checksum Create_options Comment Max_index_length Temporary
+t1 ROCKSDB 10 Fixed 1000 0 0 0 0 0 3 NULL NULL NULL latin1_swedish_ci NULL 0 N
SELECT * FROM t1;
a
-1
@@ -2512,8 +2512,8 @@ DROP TABLE t1;
CREATE TABLE t1(a INT AUTO_INCREMENT KEY);
INSERT INTO t1 VALUES(0),(10),(0);
SHOW TABLE STATUS LIKE 't1';
-Name Engine Version Row_format Rows Avg_row_length Data_length Max_data_length Index_length Data_free Auto_increment Create_time Update_time Check_time Collation Checksum Create_options Comment
-t1 ROCKSDB 10 Fixed 1000 0 0 0 0 0 12 NULL NULL NULL latin1_swedish_ci NULL
+Name Engine Version Row_format Rows Avg_row_length Data_length Max_data_length Index_length Data_free Auto_increment Create_time Update_time Check_time Collation Checksum Create_options Comment Max_index_length Temporary
+t1 ROCKSDB 10 Fixed 1000 0 0 0 0 0 12 NULL NULL NULL latin1_swedish_ci NULL 0 N
SELECT * FROM t1;
a
1
diff --git a/storage/rocksdb/mysql-test/rocksdb/r/rocksdb_icp.result b/storage/rocksdb/mysql-test/rocksdb/r/rocksdb_icp.result
index 9ef1ff28f10..101e159eaf3 100644
--- a/storage/rocksdb/mysql-test/rocksdb/r/rocksdb_icp.result
+++ b/storage/rocksdb/mysql-test/rocksdb/r/rocksdb_icp.result
@@ -56,7 +56,7 @@ EXPLAIN
"used_key_parts": ["kp1"],
"rows": 1000,
"filtered": 100,
- "index_condition": "t3.kp1 between 2 and 4 and t3.kp1 % 3 = 0",
+ "index_condition": "t3.kp1 between 2 and 4 and t3.kp1 MOD 3 = 0",
"attached_condition": "t3.kp2 like '%foo%'"
}
}
diff --git a/storage/rocksdb/mysql-test/rocksdb/r/rocksdb_icp_rev.result b/storage/rocksdb/mysql-test/rocksdb/r/rocksdb_icp_rev.result
index 9c4b2d22ad7..b00e0e14e46 100644
--- a/storage/rocksdb/mysql-test/rocksdb/r/rocksdb_icp_rev.result
+++ b/storage/rocksdb/mysql-test/rocksdb/r/rocksdb_icp_rev.result
@@ -56,7 +56,7 @@ EXPLAIN
"used_key_parts": ["kp1"],
"rows": 1000,
"filtered": 100,
- "index_condition": "t3.kp1 between 2 and 4 and t3.kp1 % 3 = 0",
+ "index_condition": "t3.kp1 between 2 and 4 and t3.kp1 MOD 3 = 0",
"attached_condition": "t3.kp2 like '%foo%'"
}
}
diff --git a/storage/rocksdb/mysql-test/rocksdb/r/show_table_status.result b/storage/rocksdb/mysql-test/rocksdb/r/show_table_status.result
index 407a8b103bd..989ddc0f03e 100644
--- a/storage/rocksdb/mysql-test/rocksdb/r/show_table_status.result
+++ b/storage/rocksdb/mysql-test/rocksdb/r/show_table_status.result
@@ -6,13 +6,13 @@ INSERT INTO t2 (a,b) VALUES (1,'bar');
set global rocksdb_force_flush_memtable_now = true;
CREATE TABLE t3 (a INT, b CHAR(8), pk INT PRIMARY KEY) ENGINE=rocksdb CHARACTER SET utf8;
SHOW TABLE STATUS WHERE name IN ( 't1', 't2', 't3' );
-Name Engine Version Row_format Rows Avg_row_length Data_length Max_data_length Index_length Data_free Auto_increment Create_time Update_time Check_time Collation Checksum Create_options Comment
-t1 ROCKSDB 10 Fixed 2 # # 0 0 0 NULL NULL NULL NULL latin1_swedish_ci NULL
-t2 ROCKSDB 10 Fixed 1 # # 0 0 0 NULL NULL NULL NULL latin1_swedish_ci NULL
-t3 ROCKSDB 10 Fixed 1000 # # 0 0 0 NULL NULL NULL NULL utf8_general_ci NULL
+Name Engine Version Row_format Rows Avg_row_length Data_length Max_data_length Index_length Data_free Auto_increment Create_time Update_time Check_time Collation Checksum Create_options Comment Max_index_length Temporary
+t1 ROCKSDB 10 Fixed 2 # # 0 0 0 NULL NULL NULL NULL latin1_swedish_ci NULL 0 N
+t2 ROCKSDB 10 Fixed 1 # # 0 0 0 NULL NULL NULL NULL latin1_swedish_ci NULL 0 N
+t3 ROCKSDB 10 Fixed 1000 # # 0 0 0 NULL NULL NULL NULL utf8_general_ci NULL 0 N
SHOW TABLE STATUS WHERE name LIKE 't2';
-Name Engine Version Row_format Rows Avg_row_length Data_length Max_data_length Index_length Data_free Auto_increment Create_time Update_time Check_time Collation Checksum Create_options Comment
-t2 ROCKSDB 10 Fixed 10000 # # 0 0 0 NULL NULL NULL NULL latin1_swedish_ci NULL
+Name Engine Version Row_format Rows Avg_row_length Data_length Max_data_length Index_length Data_free Auto_increment Create_time Update_time Check_time Collation Checksum Create_options Comment Max_index_length Temporary
+t2 ROCKSDB 10 Fixed 10000 # # 0 0 0 NULL NULL NULL NULL latin1_swedish_ci NULL 0 N
DROP TABLE t1, t2, t3;
CREATE DATABASE `db_new..............................................end`;
USE `db_new..............................................end`;
diff --git a/storage/rocksdb/mysql-test/rocksdb/r/tbl_opt_data_index_dir.result b/storage/rocksdb/mysql-test/rocksdb/r/tbl_opt_data_index_dir.result
index 7f31b4434f5..d1e445f734c 100644
--- a/storage/rocksdb/mysql-test/rocksdb/r/tbl_opt_data_index_dir.result
+++ b/storage/rocksdb/mysql-test/rocksdb/r/tbl_opt_data_index_dir.result
@@ -3,14 +3,14 @@ CREATE TABLE t1 (a INT PRIMARY KEY, b CHAR(8)) ENGINE=rocksdb DATA DIRECTORY = '
ERROR HY000: Can't create table `test`.`t1` (errno: 140 "Wrong create options")
show warnings;
Level Code Message
-Warning 1296 Got error 196 'Specifying DATA DIRECTORY for an individual table is not supported.' from ROCKSDB
+Warning 1296 Got error 198 'Specifying DATA DIRECTORY for an individual table is not supported.' from ROCKSDB
Error 1005 Can't create table `test`.`t1` (errno: 140 "Wrong create options")
Warning 1030 Got error 140 "Wrong create options" from storage engine ROCKSDB
CREATE TABLE t1 (a INT PRIMARY KEY, b CHAR(8)) ENGINE=rocksdb INDEX DIRECTORY = '/foo/bar/index';
ERROR HY000: Can't create table `test`.`t1` (errno: 140 "Wrong create options")
show warnings;
Level Code Message
-Warning 1296 Got error 197 'Specifying INDEX DIRECTORY for an individual table is not supported.' from ROCKSDB
+Warning 1296 Got error 199 'Specifying INDEX DIRECTORY for an individual table is not supported.' from ROCKSDB
Error 1005 Can't create table `test`.`t1` (errno: 140 "Wrong create options")
Warning 1030 Got error 140 "Wrong create options" from storage engine ROCKSDB
CREATE TABLE t1 (id INT NOT NULL PRIMARY KEY) ENGINE=rocksdb PARTITION BY RANGE (id)
diff --git a/storage/rocksdb/mysql-test/rocksdb/r/truncate_table.result b/storage/rocksdb/mysql-test/rocksdb/r/truncate_table.result
index e6ff6e1ca32..76d00d90420 100644
--- a/storage/rocksdb/mysql-test/rocksdb/r/truncate_table.result
+++ b/storage/rocksdb/mysql-test/rocksdb/r/truncate_table.result
@@ -8,20 +8,20 @@ a b
DROP TABLE t1;
CREATE TABLE t1 (a INT KEY AUTO_INCREMENT, c CHAR(8)) ENGINE=rocksdb;
SHOW TABLE STATUS LIKE 't1';
-Name Engine Version Row_format Rows Avg_row_length Data_length Max_data_length Index_length Data_free Auto_increment Create_time Update_time Check_time Collation Checksum Create_options Comment
-t1 ROCKSDB 10 Fixed # # # 0 0 0 1 NULL NULL NULL latin1_swedish_ci NULL
+Name Engine Version Row_format Rows Avg_row_length Data_length Max_data_length Index_length Data_free Auto_increment Create_time Update_time Check_time Collation Checksum Create_options Comment Max_index_length Temporary
+t1 ROCKSDB 10 Fixed # # # 0 0 0 1 NULL NULL NULL latin1_swedish_ci NULL 0 N
INSERT INTO t1 (c) VALUES ('a'),('b'),('c');
SHOW TABLE STATUS LIKE 't1';
-Name Engine Version Row_format Rows Avg_row_length Data_length Max_data_length Index_length Data_free Auto_increment Create_time Update_time Check_time Collation Checksum Create_options Comment
-t1 ROCKSDB 10 Fixed # # # 0 0 0 4 NULL NULL NULL latin1_swedish_ci NULL
+Name Engine Version Row_format Rows Avg_row_length Data_length Max_data_length Index_length Data_free Auto_increment Create_time Update_time Check_time Collation Checksum Create_options Comment Max_index_length Temporary
+t1 ROCKSDB 10 Fixed # # # 0 0 0 4 NULL NULL NULL latin1_swedish_ci NULL 0 N
TRUNCATE TABLE t1;
SHOW TABLE STATUS LIKE 't1';
-Name Engine Version Row_format Rows Avg_row_length Data_length Max_data_length Index_length Data_free Auto_increment Create_time Update_time Check_time Collation Checksum Create_options Comment
-t1 ROCKSDB 10 Fixed # # # 0 0 0 1 NULL NULL NULL latin1_swedish_ci NULL
+Name Engine Version Row_format Rows Avg_row_length Data_length Max_data_length Index_length Data_free Auto_increment Create_time Update_time Check_time Collation Checksum Create_options Comment Max_index_length Temporary
+t1 ROCKSDB 10 Fixed # # # 0 0 0 1 NULL NULL NULL latin1_swedish_ci NULL 0 N
INSERT INTO t1 (c) VALUES ('d');
SHOW TABLE STATUS LIKE 't1';
-Name Engine Version Row_format Rows Avg_row_length Data_length Max_data_length Index_length Data_free Auto_increment Create_time Update_time Check_time Collation Checksum Create_options Comment
-t1 ROCKSDB 10 Fixed # # # 0 0 0 2 NULL NULL NULL latin1_swedish_ci NULL
+Name Engine Version Row_format Rows Avg_row_length Data_length Max_data_length Index_length Data_free Auto_increment Create_time Update_time Check_time Collation Checksum Create_options Comment Max_index_length Temporary
+t1 ROCKSDB 10 Fixed # # # 0 0 0 2 NULL NULL NULL latin1_swedish_ci NULL 0 N
SELECT a,c FROM t1;
a c
1 d
diff --git a/storage/rocksdb/mysql-test/rocksdb/t/2pc_group_commit.test b/storage/rocksdb/mysql-test/rocksdb/t/2pc_group_commit.test
index 1a77424de39..d91d54a3543 100644
--- a/storage/rocksdb/mysql-test/rocksdb/t/2pc_group_commit.test
+++ b/storage/rocksdb/mysql-test/rocksdb/t/2pc_group_commit.test
@@ -31,12 +31,12 @@ select variable_value into @b1 from information_schema.global_status where varia
select variable_value into @b2 from information_schema.global_status where variable_name='Binlog_group_commits';
select variable_value into @b3 from information_schema.global_status where variable_name='Rocksdb_wal_synced';
--exec $MYSQL_SLAP --silent --concurrency=1 --number-of-queries=1000 --query="INSERT INTO t1 (id, value) VALUES(NULL, 1)"
-select IF(variable_value - @b1 = 1000, 'OK', 'FAIL') as Binlog_commits
+select IF(variable_value - @b1 = 1000, 'OK', variable_value - @b1 = 1000) as Binlog_commits
from information_schema.global_status where variable_name='Binlog_commits';
-select IF(variable_value - @b2 = 1000, 'OK', 'FAIL') as Binlog_group_commits
+select IF(variable_value - @b2 = 1000, 'OK', variable_value - @b2 = 1000) as Binlog_group_commits
from information_schema.global_status where variable_name='Binlog_group_commits';
--echo # Prepare operations sync, commits don't. We expect slightly more than 1K syncs:
-select IF(variable_value - @b3 between 1000 and 1500, 'OK', 'FAIL') as Rocksdb_wal_synced
+select IF(variable_value - @b3 between 1000 and 1500, 'OK', variable_value - @b3 between 1000 and 1500) as Rocksdb_wal_synced
from information_schema.global_status where variable_name='Rocksdb_wal_synced';
--echo ##
@@ -48,11 +48,11 @@ select variable_value into @b3 from information_schema.global_status where varia
--exec $MYSQL_SLAP --silent --concurrency=50 --number-of-queries=10000 --query="INSERT INTO t1 (id, value) VALUES(NULL, 1)"
-select IF(variable_value - @b1 = 10000, 'OK', 'FAIL') as Binlog_commits
+select IF(variable_value - @b1 = 10000, 'OK', variable_value - @b1 = 10000) as Binlog_commits
from information_schema.global_status where variable_name='Binlog_commits';
-select IF(variable_value - @b2 between 100 and 5000, 'OK', 'FAIL') as Binlog_group_commits
+select IF(variable_value - @b2 between 100 and 5000, 'OK', variable_value - @b2 between 100 and 5000) as Binlog_group_commits
from information_schema.global_status where variable_name='Binlog_group_commits';
-select IF(variable_value - @b3 between 1 and 9000, 'OK', 'FAIL')
+select IF(variable_value - @b3 between 1 and 9000, 'OK', variable_value - @b3 between 1 and 9000)
from information_schema.global_status where variable_name='Rocksdb_wal_synced';
--echo ##
@@ -66,11 +66,11 @@ select variable_value into @b2 from information_schema.global_status where varia
select variable_value into @b3 from information_schema.global_status where variable_name='Rocksdb_wal_synced';
--exec $MYSQL_SLAP --silent --concurrency=1 --number-of-queries=1000 --query="INSERT INTO t1 (id, value) VALUES(NULL, 1)"
-select IF(variable_value - @b1 = 1000, 'OK', 'FAIL') as Binlog_commits
+select IF(variable_value - @b1 = 1000, 'OK', variable_value - @b1 = 1000) as Binlog_commits
from information_schema.global_status where variable_name='Binlog_commits';
-select IF(variable_value - @b2 = 1000, 'OK', 'FAIL') as Binlog_group_commits
+select IF(variable_value - @b2 = 1000, 'OK', variable_value - @b2 = 1000) as Binlog_group_commits
from information_schema.global_status where variable_name='Binlog_group_commits';
-select IF(variable_value - @b3 < 10, 'OK', 'FAIL')
+select IF(variable_value - @b3 < 10, 'OK', variable_value - @b3 < 10)
from information_schema.global_status where variable_name='Rocksdb_wal_synced';
--echo ##
@@ -83,11 +83,11 @@ select variable_value into @b3 from information_schema.global_status where varia
--exec $MYSQL_SLAP --silent --concurrency=50 --number-of-queries=10000 --query="INSERT INTO t1 (id, value) VALUES(NULL, 1)"
-select IF(variable_value - @b1 = 10000, 'OK', 'FAIL') as Binlog_commits
+select IF(variable_value - @b1 = 10000, 'OK', variable_value - @b1 = 10000) as Binlog_commits
from information_schema.global_status where variable_name='Binlog_commits';
-select IF(variable_value - @b2 < 8000, 'OK', 'FAIL') as Binlog_group_commits
+select IF(variable_value - @b2 < 8000, 'OK', variable_value - @b2 < 8000) as Binlog_group_commits
from information_schema.global_status where variable_name='Binlog_group_commits';
-select IF(variable_value - @b3 < 10, 'OK', 'FAIL')
+select IF(variable_value - @b3 < 10, 'OK', variable_value - @b3 < 10)
from information_schema.global_status where variable_name='Rocksdb_wal_synced';
##
diff --git a/storage/rocksdb/mysql-test/rocksdb/t/disabled.def b/storage/rocksdb/mysql-test/rocksdb/t/disabled.def
index 212e09ce2ff..47005c1baff 100644
--- a/storage/rocksdb/mysql-test/rocksdb/t/disabled.def
+++ b/storage/rocksdb/mysql-test/rocksdb/t/disabled.def
@@ -67,6 +67,8 @@ rpl_row_triggers : Requires read-free slave.
compact_deletes: MDEV-12663 : rocksdb.compact_deletes times out and causes other tests to fail
blind_delete_without_tx_api: MDEV-12286: rocksdb.blind_delete_without_tx_api test fails
+unique_check: wrong error number
+autoinc_vars_thread: debug sync point wait timed out
information_schema: MDEV-14372: unstable testcase
bloomfilter: MDEV-14562
diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_flush_memtable_on_analyze_basic.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_flush_memtable_on_analyze_basic.result
index 905feec9b1a..010ba954366 100644
--- a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_flush_memtable_on_analyze_basic.result
+++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_flush_memtable_on_analyze_basic.result
@@ -47,12 +47,12 @@ a b
2 2
3 3
SHOW TABLE STATUS LIKE 't1';
-Name Engine Version Row_format Rows Avg_row_length Data_length Max_data_length Index_length Data_free Auto_increment Create_time Update_time Check_time Collation Checksum Create_options Comment
-t1 ROCKSDB 10 Fixed # # # 0 0 0 4 NULL NULL NULL latin1_swedish_ci NULL
+Name Engine Version Row_format Rows Avg_row_length Data_length Max_data_length Index_length Data_free Auto_increment Create_time Update_time Check_time Collation Checksum Create_options Comment Max_index_length Temporary
+t1 ROCKSDB 10 Fixed # # # 0 0 0 4 NULL NULL NULL latin1_swedish_ci NULL 0 N
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
test.t1 analyze status OK
SHOW TABLE STATUS LIKE 't1';
-Name Engine Version Row_format Rows Avg_row_length Data_length Max_data_length Index_length Data_free Auto_increment Create_time Update_time Check_time Collation Checksum Create_options Comment
-t1 ROCKSDB 10 Fixed # # # 0 0 0 4 NULL NULL NULL latin1_swedish_ci NULL
+Name Engine Version Row_format Rows Avg_row_length Data_length Max_data_length Index_length Data_free Auto_increment Create_time Update_time Check_time Collation Checksum Create_options Comment Max_index_length Temporary
+t1 ROCKSDB 10 Fixed # # # 0 0 0 4 NULL NULL NULL latin1_swedish_ci NULL 0 N
DROP TABLE t1;
diff --git a/storage/rocksdb/rdb_datadic.cc b/storage/rocksdb/rdb_datadic.cc
index b2f5af705a3..9d4a0475fb2 100644
--- a/storage/rocksdb/rdb_datadic.cc
+++ b/storage/rocksdb/rdb_datadic.cc
@@ -61,7 +61,7 @@ void get_mem_comparable_space(const CHARSET_INFO *cs,
inline bool field_check_field_name_match(Field *field, const char *name)
{
return (0 == my_strcasecmp(system_charset_info,
- field->field_name,
+ field->field_name.str,
name));
}
@@ -3591,16 +3591,16 @@ bool Rdb_validate_tbls::check_frm_file(const std::string &fullpath,
the connection handle as we don't have one here.
*/
char eng_type_buf[NAME_CHAR_LEN+1];
- LEX_STRING eng_type_str = {eng_type_buf, 0};
- //enum legacy_db_type eng_type;
- frm_type_enum type = dd_frm_type(nullptr, fullfilename.c_ptr(), &eng_type_str);
- if (type == FRMTYPE_ERROR) {
+ LEX_CSTRING eng_type_str = {eng_type_buf, 0};
+ bool is_sequence;
+ enum Table_type type = dd_frm_type(nullptr, fullfilename.c_ptr(), &eng_type_str, &is_sequence);
+ if (type == TABLE_TYPE_UNKNOWN) {
sql_print_warning("RocksDB: Failed to open/read .from file: %s",
fullfilename.ptr());
return false;
}
- if (type == FRMTYPE_TABLE) {
+ if (type == TABLE_TYPE_NORMAL) {
/* For a RocksDB table do we have a reference in the data dictionary? */
if (!strncmp(eng_type_str.str, "ROCKSDB", eng_type_str.length)) {
/*
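
The rdb_datadic.cc hunk above adapts RocksDB's .frm validation to the server's changed dd_frm_type() interface: the engine name comes back in a LEX_CSTRING, the return value is an enum Table_type, and there is a new is_sequence out-parameter. A minimal sketch of that calling pattern in isolation, assuming the MariaDB server headers that declare dd_frm_type(); the helper name frm_belongs_to_rocksdb is illustrative and not part of the patch:

// Sketch only: probe a .frm file and test whether it references ROCKSDB,
// following the calling convention shown in the hunk above.
// Assumes server internals (LEX_CSTRING, NAME_CHAR_LEN, dd_frm_type, Table_type).
#include <cstring>

static bool frm_belongs_to_rocksdb(char *frm_path)   // illustrative helper
{
  char eng_type_buf[NAME_CHAR_LEN + 1];
  LEX_CSTRING eng_type_str = {eng_type_buf, 0};
  bool is_sequence;

  // New-style call: Table_type return value replaces frm_type_enum.
  enum Table_type type =
      dd_frm_type(nullptr, frm_path, &eng_type_str, &is_sequence);

  if (type == TABLE_TYPE_UNKNOWN)        // was FRMTYPE_ERROR before the patch
    return false;                        // unreadable or unparsable .frm

  return type == TABLE_TYPE_NORMAL &&
         strncmp(eng_type_str.str, "ROCKSDB", eng_type_str.length) == 0;
}
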
diff --git a/storage/sequence/sequence.cc b/storage/sequence/sequence.cc
index 599374c6f8c..3e56303973d 100644
--- a/storage/sequence/sequence.cc
+++ b/storage/sequence/sequence.cc
@@ -29,7 +29,7 @@
#include <table.h>
#include <field.h>
-handlerton *sequence_hton;
+static handlerton *sequence_hton;
class Sequence_share : public Handler_share {
public:
@@ -418,7 +418,7 @@ create_group_by_handler(THD *thd, Query *query)
if (field->table != query->from->table)
return 0;
/* Check that we are using a SUM() on the primary key */
- if (strcmp(field->field_name, "seq"))
+ if (strcmp(field->field_name.str, "seq"))
return 0;
}
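
The sequence.cc change above, and most of the Sphinx, Spider and TokuDB hunks below, are the same mechanical update: Field::field_name (and the related Item name members) became a LEX_CSTRING, so call sites read the pointer from .str and can take the length from .length instead of calling strlen(). A self-contained sketch of the pattern; field_sketch and lex_cstring_sketch are stand-ins for the server's types, not part of the patch:

// Stand-in types so the sketch compiles on its own.
#include <cstdio>

struct lex_cstring_sketch { const char *str; size_t length; };
struct field_sketch      { lex_cstring_sketch field_name; };

static void dump_field_name(const field_sketch *field)
{
  // Before the change, call sites used the bare pointer and strlen():
  //   const char *name = field->field_name;
  // Afterwards the name and its length are both available directly:
  fprintf(stderr, "field %.*s (length %zu)\n",
          (int) field->field_name.length,
          field->field_name.str,
          field->field_name.length);
}
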
diff --git a/storage/sphinx/ha_sphinx.cc b/storage/sphinx/ha_sphinx.cc
index 688431a90ef..df0e266db8d 100644
--- a/storage/sphinx/ha_sphinx.cc
+++ b/storage/sphinx/ha_sphinx.cc
@@ -1031,7 +1031,7 @@ static bool ParseUrl ( CSphSEShare * share, TABLE * table, bool bCreate )
for ( int i=0; i<share->m_iTableFields; i++ )
{
- share->m_sTableField[i] = sphDup ( table->field[i]->field_name );
+ share->m_sTableField[i] = sphDup ( table->field[i]->field_name.str );
share->m_eTableFieldType[i] = table->field[i]->type();
}
}
@@ -2338,7 +2338,7 @@ int ha_sphinx::write_row ( byte * )
for ( Field ** ppField = table->field; *ppField; ppField++ )
{
- sQuery.append ( (*ppField)->field_name );
+ sQuery.append ( (*ppField)->field_name.str );
if ( ppField[1] )
sQuery.append ( ", " );
}
@@ -2464,7 +2464,7 @@ int ha_sphinx::delete_row ( const byte * )
}
-int ha_sphinx::update_row ( const byte *, byte * )
+int ha_sphinx::update_row ( const byte *, const byte * )
{
SPH_ENTER_METHOD();
SPH_RET ( HA_ERR_WRONG_COMMAND );
@@ -3434,7 +3434,7 @@ int ha_sphinx::create ( const char * name, TABLE * table_arg, HA_CREATE_INFO * )
if ( eType!=MYSQL_TYPE_TIMESTAMP && !IsIntegerFieldType(eType) && eType!=MYSQL_TYPE_VARCHAR && eType!=MYSQL_TYPE_FLOAT )
{
my_snprintf ( sError, sizeof(sError), "%s: %dth column (attribute %s) MUST be integer, bigint, timestamp, varchar, or float",
- name, i+1, table_arg->field[i]->field_name );
+ name, i+1, table_arg->field[i]->field_name.str );
break;
}
}
@@ -3446,10 +3446,10 @@ int ha_sphinx::create ( const char * name, TABLE * table_arg, HA_CREATE_INFO * )
if (
table_arg->s->keys!=1 ||
table_arg->key_info[0].user_defined_key_parts!=1 ||
- strcasecmp ( table_arg->key_info[0].key_part[0].field->field_name, table_arg->field[2]->field_name ) )
+ strcasecmp ( table_arg->key_info[0].key_part[0].field->field_name.str, table->field[2]->field_name.str ) )
{
my_snprintf ( sError, sizeof(sError), "%s: there must be an index on '%s' column",
- name, table_arg->field[2]->field_name );
+ name, table->field[2]->field_name.str );
break;
}
@@ -3464,7 +3464,7 @@ int ha_sphinx::create ( const char * name, TABLE * table_arg, HA_CREATE_INFO * )
sError[0] = '\0';
// check that 1st column is id, is of int type, and has an index
- if ( strcmp ( table_arg->field[0]->field_name, "id" ) )
+ if ( strcmp ( table_arg->field[0]->field_name.str, "id" ) )
{
my_snprintf ( sError, sizeof(sError), "%s: 1st column must be called 'id'", name );
break;
@@ -3480,7 +3480,7 @@ int ha_sphinx::create ( const char * name, TABLE * table_arg, HA_CREATE_INFO * )
if (
table_arg->s->keys!=1 ||
table_arg->key_info[0].user_defined_key_parts!=1 ||
- strcasecmp ( table_arg->key_info[0].key_part[0].field->field_name, "id" ) )
+ strcasecmp ( table_arg->key_info[0].key_part[0].field->field_name.str, "id" ) )
{
my_snprintf ( sError, sizeof(sError), "%s: 'id' column must be indexed", name );
break;
@@ -3493,7 +3493,7 @@ int ha_sphinx::create ( const char * name, TABLE * table_arg, HA_CREATE_INFO * )
if ( eType!=MYSQL_TYPE_TIMESTAMP && !IsIntegerFieldType(eType) && eType!=MYSQL_TYPE_VARCHAR && eType!=MYSQL_TYPE_FLOAT )
{
my_snprintf ( sError, sizeof(sError), "%s: column %d(%s) is of unsupported type (use int/bigint/timestamp/varchar/float)",
- name, i+1, table_arg->field[i]->field_name );
+ name, i+1, table_arg->field[i]->field_name.str );
break;
}
}
diff --git a/storage/sphinx/ha_sphinx.h b/storage/sphinx/ha_sphinx.h
index c310deb7060..8e6af908aab 100644
--- a/storage/sphinx/ha_sphinx.h
+++ b/storage/sphinx/ha_sphinx.h
@@ -86,7 +86,7 @@ public:
int close ();
int write_row ( byte * buf );
- int update_row ( const byte * old_data, byte * new_data );
+ int update_row ( const byte * old_data, const byte * new_data );
int delete_row ( const byte * buf );
int extra ( enum ha_extra_function op );
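
The ha_sphinx.h declaration above, together with the matching Spider and TokuDB hunks further down, tracks a handler-API constification: update_row() (and the bulk/direct update variants) now receive the new row image as a const buffer. A self-contained sketch of the new shape under stand-in types; engine_sketch and the error value are illustrative, not the real handler class:

typedef unsigned char uchar_sketch;                 // stand-in for uchar
static const int SKETCH_ERR_WRONG_COMMAND = 131;    // illustrative error code

struct engine_sketch                                // stand-in for class handler
{
  // Old signature:  int update_row(const uchar *old_data, uchar *new_data);
  // New signature, with the new row image read-only:
  int update_row(const uchar_sketch *old_data, const uchar_sketch *new_data)
  {
    (void) old_data;
    (void) new_data;  // engines that patched new_data in place must copy it first
    return SKETCH_ERR_WRONG_COMMAND;                // ha_sphinx simply rejects updates
  }
};
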
diff --git a/storage/spider/ha_spider.cc b/storage/spider/ha_spider.cc
index 7363a69f49f..8d7064a16f9 100644
--- a/storage/spider/ha_spider.cc
+++ b/storage/spider/ha_spider.cc
@@ -7795,7 +7795,8 @@ int ha_spider::cmp_ref(
) {
if ((ret = (*field)->cmp_binary_offset((uint)ptr_diff)))
{
- DBUG_PRINT("info",("spider different at %s", (*field)->field_name));
+ DBUG_PRINT("info",("spider different at %s",
+ (*field)->field_name.str));
break;
}
}
@@ -9752,7 +9753,7 @@ void ha_spider::end_bulk_update(
int ha_spider::bulk_update_row(
const uchar *old_data,
- uchar *new_data,
+ const uchar *new_data,
uint *dup_key_found
) {
DBUG_ENTER("ha_spider::bulk_update_row");
@@ -9763,7 +9764,7 @@ int ha_spider::bulk_update_row(
int ha_spider::update_row(
const uchar *old_data,
- uchar *new_data
+ const uchar *new_data
) {
int error_num;
THD *thd = ha_thd();
@@ -9995,7 +9996,7 @@ int ha_spider::pre_direct_update_rows_init(
KEY_MULTI_RANGE *ranges,
uint range_count,
bool sorted,
- uchar *new_data
+ const uchar *new_data
) {
int error_num;
DBUG_ENTER("ha_spider::pre_direct_update_rows_init");
diff --git a/storage/spider/ha_spider.h b/storage/spider/ha_spider.h
index acc75ed1c05..87c6afaa89f 100644
--- a/storage/spider/ha_spider.h
+++ b/storage/spider/ha_spider.h
@@ -569,12 +569,12 @@ public:
void end_bulk_update();
int bulk_update_row(
const uchar *old_data,
- uchar *new_data,
+ const uchar *new_data,
uint *dup_key_found
);
int update_row(
const uchar *old_data,
- uchar *new_data
+ const uchar *new_data
);
#ifdef HANDLER_HAS_DIRECT_UPDATE_ROWS
int direct_update_rows_init(
@@ -582,7 +582,7 @@ public:
KEY_MULTI_RANGE *ranges,
uint range_count,
bool sorted,
- uchar *new_data
+ const uchar *new_data
);
#ifdef HA_CAN_BULK_ACCESS
int pre_direct_update_rows_init(
diff --git a/storage/spider/hs_client/hs_compat.h b/storage/spider/hs_client/hs_compat.h
index a26dd18e481..22497d85200 100644
--- a/storage/spider/hs_client/hs_compat.h
+++ b/storage/spider/hs_client/hs_compat.h
@@ -16,7 +16,12 @@
#ifndef HS_COMPAT_H
#define HS_COMPAT_H
-#if defined(MARIADB_BASE_VERSION) && MYSQL_VERSION_ID >= 100000
+#if defined(MARIADB_BASE_VERSION) && MYSQL_VERSION_ID >= 100213
+#define SPD_INIT_DYNAMIC_ARRAY2(A, B, C, D, E, F) \
+ my_init_dynamic_array2(A, B, C, D, E, F)
+#define SPD_INIT_ALLOC_ROOT(A, B, C, D) \
+ init_alloc_root(A, "spider", B, C, D)
+#elif defined(MARIADB_BASE_VERSION) && MYSQL_VERSION_ID >= 100000
#define SPD_INIT_DYNAMIC_ARRAY2(A, B, C, D, E, F) \
my_init_dynamic_array2(A, B, C, D, E, F)
#define SPD_INIT_ALLOC_ROOT(A, B, C, D) \
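
hs_compat.h grows a third branch because init_alloc_root() takes an extra name argument from MariaDB 10.2.13 on, and the SPD_INIT_ALLOC_ROOT macro hides that difference from Spider's call sites. A hedged illustration of the same version-gating trick in isolation; spd_root_init is an invented name, the new-branch argument mapping is taken from the hunk, and the fallback expansion is assumed:

// Sketch: one call-site spelling, two expansions depending on server version.
#if defined(MARIADB_BASE_VERSION) && MYSQL_VERSION_ID >= 100213
#define spd_root_init(root, block_size, prealloc, flags) \
  init_alloc_root(root, "spider", block_size, prealloc, flags)
#else
#define spd_root_init(root, block_size, prealloc, flags) \
  init_alloc_root(root, block_size, prealloc, flags)
#endif
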
diff --git a/storage/spider/spd_db_conn.cc b/storage/spider/spd_db_conn.cc
index b8a295b10f8..e677ea3e674 100644
--- a/storage/spider/spd_db_conn.cc
+++ b/storage/spider/spd_db_conn.cc
@@ -1665,7 +1665,7 @@ int spider_db_append_key_where_internal(
if (sql_kind == SPIDER_SQL_KIND_HANDLER)
{
- char *key_name = key_info->name;
+ const char *key_name = key_info->name;
key_name_length = strlen(key_name);
if (str->reserve(SPIDER_SQL_READ_LEN +
/* SPIDER_SQL_NAME_QUOTE_LEN */ 2 + key_name_length))
@@ -2844,7 +2844,7 @@ int spider_db_fetch_row(
) {
int error_num;
DBUG_ENTER("spider_db_fetch_row");
- DBUG_PRINT("info", ("spider field_name %s", field->field_name));
+ DBUG_PRINT("info", ("spider field_name %s", field->field_name.str));
DBUG_PRINT("info", ("spider fieldcharset %s", field->charset()->csname));
field->move_field_offset(ptr_diff);
error_num = row->store_to_field(field, share->access_charset);
@@ -2967,7 +2967,8 @@ int spider_db_fetch_table(
my_bitmap_map *tmp_map =
dbug_tmp_use_all_columns(table, table->write_set);
#endif
- DBUG_PRINT("info", ("spider bitmap is set %s", (*field)->field_name));
+ DBUG_PRINT("info", ("spider bitmap is set %s",
+ (*field)->field_name.str));
if ((error_num =
spider_db_fetch_row(share, *field, row, ptr_diff)))
DBUG_RETURN(error_num);
@@ -3138,7 +3139,7 @@ int spider_db_fetch_key(
my_bitmap_map *tmp_map =
dbug_tmp_use_all_columns(table, table->write_set);
#endif
- DBUG_PRINT("info", ("spider bitmap is set %s", field->field_name));
+ DBUG_PRINT("info", ("spider bitmap is set %s", field->field_name.str));
if ((error_num =
spider_db_fetch_row(share, field, row, ptr_diff)))
DBUG_RETURN(error_num);
@@ -3252,7 +3253,8 @@ int spider_db_fetch_minimum_columns(
my_bitmap_map *tmp_map =
dbug_tmp_use_all_columns(table, table->write_set);
#endif
- DBUG_PRINT("info", ("spider bitmap is set %s", (*field)->field_name));
+ DBUG_PRINT("info", ("spider bitmap is set %s",
+ (*field)->field_name.str));
if ((error_num = spider_db_fetch_row(share, *field, row, ptr_diff)))
DBUG_RETURN(error_num);
#ifndef DBUG_OFF
@@ -5116,7 +5118,8 @@ int spider_db_seek_tmp_table(
my_bitmap_map *tmp_map =
dbug_tmp_use_all_columns(table, table->write_set);
#endif
- DBUG_PRINT("info", ("spider bitmap is set %s", (*field)->field_name));
+ DBUG_PRINT("info", ("spider bitmap is set %s",
+ (*field)->field_name.str));
if ((error_num =
spider_db_fetch_row(spider->share, *field, row, ptr_diff)))
DBUG_RETURN(error_num);
@@ -5203,7 +5206,7 @@ int spider_db_seek_tmp_key(
my_bitmap_map *tmp_map =
dbug_tmp_use_all_columns(table, table->write_set);
#endif
- DBUG_PRINT("info", ("spider bitmap is set %s", field->field_name));
+ DBUG_PRINT("info", ("spider bitmap is set %s", field->field_name.str));
if ((error_num =
spider_db_fetch_row(spider->share, field, row, ptr_diff)))
DBUG_RETURN(error_num);
@@ -5293,7 +5296,8 @@ int spider_db_seek_tmp_minimum_columns(
my_bitmap_map *tmp_map =
dbug_tmp_use_all_columns(table, table->write_set);
#endif
- DBUG_PRINT("info", ("spider bitmap is set %s", (*field)->field_name));
+ DBUG_PRINT("info", ("spider bitmap is set %s",
+ (*field)->field_name.str));
if ((error_num =
spider_db_fetch_row(spider->share, *field, row, ptr_diff)))
DBUG_RETURN(error_num);
@@ -5305,7 +5309,7 @@ int spider_db_seek_tmp_minimum_columns(
else if (bitmap_is_set(table->read_set, (*field)->field_index))
{
DBUG_PRINT("info", ("spider bitmap is cleared %s",
- (*field)->field_name));
+ (*field)->field_name.str));
bitmap_clear_bit(table->read_set, (*field)->field_index);
}
}
@@ -8061,10 +8065,7 @@ int spider_db_open_item_ident(
}
if (str)
{
- if (item_ident->field_name)
- field_name_length = strlen(item_ident->field_name);
- else
- field_name_length = 0;
+ field_name_length = item_ident->field_name.length;
if (share->access_charset->cset == system_charset_info->cset)
{
if (str->reserve(alias_length +
@@ -8074,7 +8075,7 @@ int spider_db_open_item_ident(
}
str->q_append(alias, alias_length);
if ((error_num = spider_dbton[dbton_id].db_util->
- append_name(str, item_ident->field_name, field_name_length)))
+ append_name(str, item_ident->field_name.str, field_name_length)))
{
DBUG_RETURN(error_num);
}
@@ -8083,7 +8084,7 @@ int spider_db_open_item_ident(
DBUG_RETURN(HA_ERR_OUT_OF_MEM);
str->q_append(alias, alias_length);
if ((error_num = spider_dbton[dbton_id].db_util->
- append_name_with_charset(str, item_ident->field_name,
+ append_name_with_charset(str, item_ident->field_name.str,
field_name_length, system_charset_info)))
{
DBUG_RETURN(error_num);
@@ -8142,18 +8143,18 @@ int spider_db_open_item_ref(
(*(item_ref->ref))->type() != Item::CACHE_ITEM &&
item_ref->ref_type() != Item_ref::VIEW_REF &&
!item_ref->table_name &&
- item_ref->name &&
+ item_ref->name.str &&
item_ref->alias_name_used
) {
if (str)
{
- uint length = strlen(item_ref->name);
+ uint length = item_ref->name.length;
if (str->reserve(length + /* SPIDER_SQL_NAME_QUOTE_LEN */ 2))
{
DBUG_RETURN(HA_ERR_OUT_OF_MEM);
}
if ((error_num = spider_dbton[dbton_id].db_util->
- append_name(str, item_ref->name, length)))
+ append_name(str, item_ref->name.str, length)))
{
DBUG_RETURN(error_num);
}
@@ -9623,7 +9624,7 @@ int spider_db_udf_copy_key_row(
int error_num;
DBUG_ENTER("spider_db_udf_copy_key_row");
if ((error_num = spider_db_append_name_with_quote_str(str,
- (char *) field->field_name, dbton_id)))
+ (char *) field->field_name.str, dbton_id)))
DBUG_RETURN(error_num);
if (str->reserve(joint_length + *length + SPIDER_SQL_AND_LEN))
DBUG_RETURN(HA_ERR_OUT_OF_MEM);
diff --git a/storage/spider/spd_db_handlersocket.cc b/storage/spider/spd_db_handlersocket.cc
index 32acb8df6fb..51b1d1f1752 100644
--- a/storage/spider/spd_db_handlersocket.cc
+++ b/storage/spider/spd_db_handlersocket.cc
@@ -3904,7 +3904,7 @@ int spider_handlersocket_share::create_column_name_str()
str->init_calc_mem(202);
str->set_charset(spider_share->access_charset);
if ((error_num = spider_db_append_name_with_quote_str(str,
- (char *) (*field)->field_name, dbton_id)))
+ (char *) (*field)->field_name.str, dbton_id)))
goto error;
}
DBUG_RETURN(0);
diff --git a/storage/spider/spd_db_mysql.cc b/storage/spider/spd_db_mysql.cc
index 9974b6b0628..d01ae5548b2 100644
--- a/storage/spider/spd_db_mysql.cc
+++ b/storage/spider/spd_db_mysql.cc
@@ -623,9 +623,10 @@ int spider_db_mysql_result::fetch_table_status(
}
if (mode == 1)
{
- if (num_fields() != 18)
+ /* Ok to test for 18 fields as all new fields are added last */
+ if (num_fields() < 18)
{
- DBUG_PRINT("info",("spider field_count != 18"));
+ DBUG_PRINT("info",("spider field_count < 18"));
DBUG_RETURN(ER_SPIDER_INVALID_REMOTE_TABLE_INFO_NUM);
}
@@ -2707,7 +2708,7 @@ void spider_db_mysql::set_dup_key_idx(
uint roop_count, pk_idx = table->s->primary_key;
int key_name_length;
int max_length = 0;
- char *key_name;
+ const char *key_name;
DBUG_ENTER("spider_db_mysql::set_dup_key_idx");
DBUG_PRINT("info",("spider this=%p", this));
DBUG_PRINT("info",("spider error_str=%s", conn->error_str));
@@ -4560,7 +4561,7 @@ int spider_mysql_share::create_column_name_str()
str->init_calc_mem(89);
str->set_charset(spider_share->access_charset);
if ((error_num = spider_db_append_name_with_quote_str(str,
- (char *) (*field)->field_name, dbton_id)))
+ (char *) (*field)->field_name.str, dbton_id)))
goto error;
}
DBUG_RETURN(0);
@@ -11941,7 +11942,7 @@ int spider_mysql_copy_table::append_table_columns(
DBUG_RETURN(HA_ERR_OUT_OF_MEM);
sql.q_append(SPIDER_SQL_NAME_QUOTE_STR, SPIDER_SQL_NAME_QUOTE_LEN);
if ((error_num = spider_db_append_name_with_quote_str(&sql,
- (char *) (*field)->field_name, spider_dbton_mysql.dbton_id)))
+ (char *) (*field)->field_name.str, spider_dbton_mysql.dbton_id)))
DBUG_RETURN(error_num);
if (sql.reserve(SPIDER_SQL_NAME_QUOTE_LEN + SPIDER_SQL_COMMA_LEN))
DBUG_RETURN(HA_ERR_OUT_OF_MEM);
@@ -12070,7 +12071,7 @@ int spider_mysql_copy_table::append_key_order_str(
DBUG_RETURN(HA_ERR_OUT_OF_MEM);
sql.q_append(SPIDER_SQL_NAME_QUOTE_STR, SPIDER_SQL_NAME_QUOTE_LEN);
if ((error_num = spider_db_append_name_with_quote_str(&sql,
- (char *) field->field_name, spider_dbton_mysql.dbton_id)))
+ (char *) field->field_name.str, spider_dbton_mysql.dbton_id)))
DBUG_RETURN(error_num);
if (key_part->key_part_flag & HA_REVERSE_SORT)
{
@@ -12100,7 +12101,7 @@ int spider_mysql_copy_table::append_key_order_str(
DBUG_RETURN(HA_ERR_OUT_OF_MEM);
sql.q_append(SPIDER_SQL_NAME_QUOTE_STR, SPIDER_SQL_NAME_QUOTE_LEN);
if ((error_num = spider_db_append_name_with_quote_str(&sql,
- (char *) field->field_name, spider_dbton_mysql.dbton_id)))
+ (char *) field->field_name.str, spider_dbton_mysql.dbton_id)))
DBUG_RETURN(error_num);
if (key_part->key_part_flag & HA_REVERSE_SORT)
{
@@ -12232,7 +12233,7 @@ int spider_mysql_copy_table::copy_key_row(
DBUG_RETURN(HA_ERR_OUT_OF_MEM);
sql.q_append(SPIDER_SQL_NAME_QUOTE_STR, SPIDER_SQL_NAME_QUOTE_LEN);
if ((error_num = spider_db_append_name_with_quote_str(&sql,
- (char *) field->field_name, spider_dbton_mysql.dbton_id)))
+ (char *) field->field_name.str, spider_dbton_mysql.dbton_id)))
DBUG_RETURN(error_num);
if (sql.reserve(SPIDER_SQL_NAME_QUOTE_LEN + joint_length + *length +
SPIDER_SQL_AND_LEN))
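
The fetch_table_status() hunk above pairs with the SHOW TABLE STATUS .result updates earlier in this diff: the statement now returns extra trailing columns (Max_index_length, Temporary), so Spider only insists on a minimum column count instead of an exact one. A small stand-alone sketch of that defensive pattern; the constant and function names are illustrative:

// Sketch: tolerate result sets that grow by appending columns at the end.
#include <cstdio>

static const unsigned MIN_STATUS_COLUMNS = 18;      // illustrative constant

static bool status_result_usable(unsigned result_columns)
{
  // An exact check (result_columns != 18) breaks whenever the server appends
  // new columns; a minimum keeps the existing positional parsing valid.
  if (result_columns < MIN_STATUS_COLUMNS)
  {
    fprintf(stderr, "unexpected SHOW TABLE STATUS layout: %u columns\n",
            result_columns);
    return false;
  }
  return true;
}
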
diff --git a/storage/spider/spd_db_oracle.cc b/storage/spider/spd_db_oracle.cc
index 94f354e0a0f..5b322b9c4d7 100644
--- a/storage/spider/spd_db_oracle.cc
+++ b/storage/spider/spd_db_oracle.cc
@@ -4361,7 +4361,7 @@ int spider_oracle_share::create_column_name_str()
str->init_calc_mem(196);
str->set_charset(spider_share->access_charset);
if ((error_num = spider_db_append_name_with_quote_str(str,
- (char *) (*field)->field_name, dbton_id)))
+ (char *) (*field)->field_name.str, dbton_id)))
goto error;
}
DBUG_RETURN(0);
@@ -12090,7 +12090,7 @@ int spider_oracle_copy_table::append_table_columns(
DBUG_RETURN(HA_ERR_OUT_OF_MEM);
sql.q_append(SPIDER_SQL_NAME_QUOTE_STR, SPIDER_SQL_NAME_QUOTE_LEN);
if ((error_num = spider_db_append_name_with_quote_str(&sql,
- (char *) (*field)->field_name, spider_dbton_oracle.dbton_id)))
+ (char *) (*field)->field_name.str, spider_dbton_oracle.dbton_id)))
DBUG_RETURN(error_num);
if (sql.reserve(SPIDER_SQL_NAME_QUOTE_LEN + SPIDER_SQL_COMMA_LEN))
DBUG_RETURN(HA_ERR_OUT_OF_MEM);
@@ -12236,7 +12236,7 @@ int spider_oracle_copy_table::append_key_order_str(
sql_part.q_append(SPIDER_SQL_NAME_QUOTE_STR,
SPIDER_SQL_NAME_QUOTE_LEN);
if ((error_num = spider_db_append_name_with_quote_str(&sql_part,
- (char *) field->field_name, spider_dbton_oracle.dbton_id)))
+ (char *) field->field_name.str, spider_dbton_oracle.dbton_id)))
DBUG_RETURN(error_num);
if (key_part->key_part_flag & HA_REVERSE_SORT)
{
@@ -12270,7 +12270,7 @@ int spider_oracle_copy_table::append_key_order_str(
sql_part.q_append(SPIDER_SQL_NAME_QUOTE_STR,
SPIDER_SQL_NAME_QUOTE_LEN);
if ((error_num = spider_db_append_name_with_quote_str(&sql_part,
- (char *) field->field_name, spider_dbton_oracle.dbton_id)))
+ (char *) field->field_name.str, spider_dbton_oracle.dbton_id)))
DBUG_RETURN(error_num);
if (key_part->key_part_flag & HA_REVERSE_SORT)
{
@@ -12334,7 +12334,7 @@ int spider_oracle_copy_table::append_key_order_str(
sql.q_append(SPIDER_SQL_NAME_QUOTE_STR,
SPIDER_SQL_NAME_QUOTE_LEN);
if ((error_num = spider_db_append_name_with_quote_str(&sql,
- (char *) field->field_name, spider_dbton_oracle.dbton_id)))
+ (char *) field->field_name.str, spider_dbton_oracle.dbton_id)))
DBUG_RETURN(error_num);
if (key_part->key_part_flag & HA_REVERSE_SORT)
{
@@ -12367,7 +12367,7 @@ int spider_oracle_copy_table::append_key_order_str(
sql.q_append(SPIDER_SQL_NAME_QUOTE_STR,
SPIDER_SQL_NAME_QUOTE_LEN);
if ((error_num = spider_db_append_name_with_quote_str(&sql,
- (char *) field->field_name, spider_dbton_oracle.dbton_id)))
+ (char *) field->field_name.str, spider_dbton_oracle.dbton_id)))
DBUG_RETURN(error_num);
if (key_part->key_part_flag & HA_REVERSE_SORT)
{
@@ -12575,7 +12575,7 @@ int spider_oracle_copy_table::copy_key_row(
DBUG_RETURN(HA_ERR_OUT_OF_MEM);
sql.q_append(SPIDER_SQL_NAME_QUOTE_STR, SPIDER_SQL_NAME_QUOTE_LEN);
if ((error_num = spider_db_append_name_with_quote_str(&sql,
- (char *) field->field_name, spider_dbton_oracle.dbton_id)))
+ (char *) field->field_name.str, spider_dbton_oracle.dbton_id)))
DBUG_RETURN(error_num);
if (sql.reserve(SPIDER_SQL_NAME_QUOTE_LEN + joint_length + *length +
SPIDER_SQL_AND_LEN))
diff --git a/storage/spider/spd_sys_table.cc b/storage/spider/spd_sys_table.cc
index 09dbeff328e..f0d24f37d6d 100644
--- a/storage/spider/spd_sys_table.cc
+++ b/storage/spider/spd_sys_table.cc
@@ -2386,10 +2386,11 @@ TABLE *spider_mk_sys_tmp_table(
Item_field *i_field;
List<Item> i_list;
TABLE *tmp_table;
+ LEX_CSTRING name= { field_name, strlen(field_name) };
DBUG_ENTER("spider_mk_sys_tmp_table");
if (!(field = new Field_blob(
- 4294967295U, FALSE, field_name, cs, TRUE)))
+ 4294967295U, FALSE, &name, cs, TRUE)))
goto error_alloc_field;
field->init(table);
@@ -2444,10 +2445,13 @@ TABLE *spider_mk_sys_tmp_table_for_result(
Item_field *i_field1, *i_field2, *i_field3;
List<Item> i_list;
TABLE *tmp_table;
+ LEX_CSTRING name1= { field_name1, strlen(field_name1) };
+ LEX_CSTRING name2= { field_name2, strlen(field_name2) };
+ LEX_CSTRING name3= { field_name3, strlen(field_name3) };
DBUG_ENTER("spider_mk_sys_tmp_table_for_result");
if (!(field1 = new Field_blob(
- 4294967295U, FALSE, field_name1, cs, TRUE)))
+ 4294967295U, FALSE, &name1, cs, TRUE)))
goto error_alloc_field1;
field1->init(table);
@@ -2463,7 +2467,7 @@ TABLE *spider_mk_sys_tmp_table_for_result(
goto error_push_item1;
if (!(field2 = new (thd->mem_root) Field_blob(
- 4294967295U, FALSE, field_name2, cs, TRUE)))
+ 4294967295U, FALSE, &name2, cs, TRUE)))
goto error_alloc_field2;
field2->init(table);
@@ -2479,7 +2483,7 @@ TABLE *spider_mk_sys_tmp_table_for_result(
goto error_push_item2;
if (!(field3 = new (thd->mem_root) Field_blob(
- 4294967295U, FALSE, field_name3, cs, TRUE)))
+ 4294967295U, FALSE, &name3, cs, TRUE)))
goto error_alloc_field3;
field3->init(table);
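
The spd_sys_table.cc hunks above wrap plain char* column names into LEX_CSTRING values before handing them to Field_blob, whose constructor now takes the name by pointer to a LEX_CSTRING. The wrapping itself is the whole change; a self-contained sketch of it, with lex_cstring_sketch standing in for the server's type:

#include <cstring>

struct lex_cstring_sketch { const char *str; size_t length; };

static void wrap_field_name(const char *field_name)
{
  // Build a length-aware view over the NUL-terminated name, as the patch does
  // with: LEX_CSTRING name= { field_name, strlen(field_name) };
  lex_cstring_sketch name = { field_name, strlen(field_name) };

  // In the patch, &name is then passed where a char* used to go, e.g.
  // new Field_blob(4294967295U, FALSE, &name, cs, TRUE).
  (void) name;
}
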
diff --git a/storage/tokudb/ha_tokudb.cc b/storage/tokudb/ha_tokudb.cc
index 91041ec6df4..f4b44c395dd 100644
--- a/storage/tokudb/ha_tokudb.cc
+++ b/storage/tokudb/ha_tokudb.cc
@@ -3596,7 +3596,7 @@ cleanup:
return error;
}
-int ha_tokudb::is_val_unique(bool* is_unique, uchar* record, KEY* key_info, uint dict_index, DB_TXN* txn) {
+int ha_tokudb::is_val_unique(bool* is_unique, const uchar* record, KEY* key_info, uint dict_index, DB_TXN* txn) {
int error = 0;
bool has_null;
DBC* tmp_cursor = NULL;
@@ -4151,7 +4151,7 @@ bool ha_tokudb::key_changed(uint keynr, const uchar * old_row, const uchar * new
// 0 on success
// error otherwise
//
-int ha_tokudb::update_row(const uchar * old_row, uchar * new_row) {
+int ha_tokudb::update_row(const uchar * old_row, const uchar * new_row) {
TOKUDB_HANDLER_DBUG_ENTER("");
DBT prim_key, old_prim_key, prim_row, old_prim_row;
int UNINIT_VAR(error);
@@ -6819,7 +6819,7 @@ void ha_tokudb::update_create_info(HA_CREATE_INFO* create_info) {
// during drop table, we do not attempt to remove already dropped
// indexes because we did not keep status.tokudb in sync with list of indexes.
//
-int ha_tokudb::remove_key_name_from_status(DB* status_block, char* key_name, DB_TXN* txn) {
+int ha_tokudb::remove_key_name_from_status(DB* status_block, const char* key_name, DB_TXN* txn) {
int error;
uchar status_key_info[FN_REFLEN + sizeof(HA_METADATA_KEY)];
HA_METADATA_KEY md_key = hatoku_key_name;
@@ -6845,7 +6845,8 @@ int ha_tokudb::remove_key_name_from_status(DB* status_block, char* key_name, DB_
// writes the key name in status.tokudb, so that we may later delete or rename
// the dictionary associated with key_name
//
-int ha_tokudb::write_key_name_to_status(DB* status_block, char* key_name, DB_TXN* txn) {
+int ha_tokudb::write_key_name_to_status(DB* status_block, const char* key_name,
+ DB_TXN* txn) {
int error;
uchar status_key_info[FN_REFLEN + sizeof(HA_METADATA_KEY)];
HA_METADATA_KEY md_key = hatoku_key_name;
@@ -6884,7 +6885,7 @@ void ha_tokudb::trace_create_table_info(const char *name, TABLE * form) {
TOKUDB_HANDLER_TRACE(
"field:%d:%s:type=%d:flags=%x",
i,
- field->field_name,
+ field->field_name.str,
field->type(),
field->flags);
}
@@ -6904,7 +6905,7 @@ void ha_tokudb::trace_create_table_info(const char *name, TABLE * form) {
i,
p,
key_part->length,
- field->field_name,
+ field->field_name.str,
field->type(),
field->flags);
}
@@ -7254,7 +7255,7 @@ int ha_tokudb::create(
"This is probably due to an alter table engine=TokuDB. To load this "
"table, do a dump and load",
name,
- field->field_name
+ field->field_name.str
);
error = HA_ERR_UNSUPPORTED;
goto cleanup;
diff --git a/storage/tokudb/ha_tokudb.h b/storage/tokudb/ha_tokudb.h
index a2fd747bb92..c80be207005 100644
--- a/storage/tokudb/ha_tokudb.h
+++ b/storage/tokudb/ha_tokudb.h
@@ -681,8 +681,8 @@ private:
int remove_metadata(DB* db, void* key_data, uint key_size, DB_TXN* transaction);
int update_max_auto_inc(DB* db, ulonglong val);
- int remove_key_name_from_status(DB* status_block, char* key_name, DB_TXN* txn);
- int write_key_name_to_status(DB* status_block, char* key_name, DB_TXN* txn);
+ int remove_key_name_from_status(DB* status_block, const char* key_name, DB_TXN* txn);
+ int write_key_name_to_status(DB* status_block, const char* key_name, DB_TXN* txn);
int write_auto_inc_create(DB* db, ulonglong val, DB_TXN* txn);
void init_auto_increment();
bool can_replace_into_be_fast(TABLE_SHARE* table_share, KEY_AND_COL_INFO* kc_info, uint pk);
@@ -708,7 +708,7 @@ private:
int create_main_dictionary(const char* name, TABLE* form, DB_TXN* txn, KEY_AND_COL_INFO* kc_info, toku_compression_method compression_method);
void trace_create_table_info(const char *name, TABLE * form);
int is_index_unique(bool* is_unique, DB_TXN* txn, DB* db, KEY* key_info, int lock_flags);
- int is_val_unique(bool* is_unique, uchar* record, KEY* key_info, uint dict_index, DB_TXN* txn);
+ int is_val_unique(bool* is_unique, const uchar* record, KEY* key_info, uint dict_index, DB_TXN* txn);
int do_uniqueness_checks(uchar* record, DB_TXN* txn, THD* thd);
void set_main_dict_put_flags(THD* thd, bool opt_eligible, uint32_t* put_flags);
int insert_row_to_main_dictionary(uchar* record, DBT* pk_key, DBT* pk_val, DB_TXN* txn);
@@ -792,7 +792,7 @@ public:
int optimize(THD * thd, HA_CHECK_OPT * check_opt);
int analyze(THD * thd, HA_CHECK_OPT * check_opt);
int write_row(uchar * buf);
- int update_row(const uchar * old_data, uchar * new_data);
+ int update_row(const uchar * old_data, const uchar * new_data);
int delete_row(const uchar * buf);
#if MYSQL_VERSION_ID >= 100000
void start_bulk_insert(ha_rows rows, uint flags);
diff --git a/storage/tokudb/ha_tokudb_alter_56.cc b/storage/tokudb/ha_tokudb_alter_56.cc
index ba1afbf091a..b4eccf17b57 100644
--- a/storage/tokudb/ha_tokudb_alter_56.cc
+++ b/storage/tokudb/ha_tokudb_alter_56.cc
@@ -113,7 +113,7 @@ void ha_tokudb::print_alter_info(
TOKUDB_TRACE(
"name: %s, types: %u %u, nullable: %d, null_offset: %d, is_null_field: "
"%d, is_null %d, pack_length %u",
- curr_field->field_name,
+ curr_field->field_name.str,
curr_field->real_type(),
mysql_to_toku_type(curr_field),
curr_field->null_bit,
@@ -132,7 +132,7 @@ void ha_tokudb::print_alter_info(
TOKUDB_TRACE(
"name: %s, types: %u %u, nullable: %d, null_offset: %d, "
"is_null_field: %d, is_null %d, pack_length %u",
- curr_field->field_name,
+ curr_field->field_name.str,
curr_field->real_type(),
mysql_to_toku_type(curr_field),
curr_field->null_bit,
@@ -398,7 +398,7 @@ enum_alter_inplace_result ha_tokudb::check_if_supported_inplace_alter(
TOKUDB_TRACE(
"Added column: index %d, name %s",
curr_added_index,
- curr_added_field->field_name);
+ curr_added_field->field_name.str);
}
}
result = HA_ALTER_INPLACE_EXCLUSIVE_LOCK;
@@ -427,7 +427,7 @@ enum_alter_inplace_result ha_tokudb::check_if_supported_inplace_alter(
TOKUDB_TRACE(
"Dropped column: index %d, name %s",
curr_dropped_index,
- curr_dropped_field->field_name);
+ curr_dropped_field->field_name.str);
}
}
result = HA_ALTER_INPLACE_EXCLUSIVE_LOCK;
@@ -1125,7 +1125,7 @@ int ha_tokudb::alter_table_expand_varchar_offsets(
static bool field_in_key(KEY *key, Field *field) {
for (uint i = 0; i < key->user_defined_key_parts; i++) {
KEY_PART_INFO *key_part = &key->key_part[i];
- if (strcmp(key_part->field->field_name, field->field_name) == 0)
+ if (strcmp(key_part->field->field_name.str, field->field_name.str) == 0)
return true;
}
return false;
diff --git a/storage/tokudb/ha_tokudb_alter_common.cc b/storage/tokudb/ha_tokudb_alter_common.cc
index d41a676de1f..17f9a4daa39 100644
--- a/storage/tokudb/ha_tokudb_alter_common.cc
+++ b/storage/tokudb/ha_tokudb_alter_common.cc
@@ -697,8 +697,8 @@ static int find_changed_columns(
sql_print_error(
"Two fields that were supposedly the same are not: %s in "
"original, %s in new",
- curr_field_in_orig->field_name,
- curr_field_in_new->field_name);
+ curr_field_in_orig->field_name.str,
+ curr_field_in_new->field_name.str);
retval = 1;
goto cleanup;
}
diff --git a/storage/tokudb/ha_tokudb_update.cc b/storage/tokudb/ha_tokudb_update.cc
index 9fe5e729ec4..2e56d4c6698 100644
--- a/storage/tokudb/ha_tokudb_update.cc
+++ b/storage/tokudb/ha_tokudb_update.cc
@@ -91,7 +91,7 @@ static void dump_item(Item* item) {
":field=%s.%s.%s",
field_item->db_name,
field_item->table_name,
- field_item->field_name);
+ field_item->field_name.str);
break;
}
case Item::COND_ITEM: {
@@ -141,7 +141,7 @@ static Field* find_field_by_name(TABLE* table, Item* item) {
Field *found_field = NULL;
for (uint i = 0; i < table->s->fields; i++) {
Field *test_field = table->s->field[i];
- if (strcmp(field_item->field_name, test_field->field_name) == 0) {
+ if (strcmp(field_item->field_name.str, test_field->field_name.str) == 0) {
found_field = test_field;
break;
}
@@ -290,7 +290,7 @@ static bool check_insert_value(Item* item, const char* field_name) {
if (value_item->arg->type() != Item::FIELD_ITEM)
return false;
Item_field* arg = static_cast<Item_field*>(value_item->arg);
- if (strcmp(field_name, arg->field_name) != 0)
+ if (strcmp(field_name, arg->field_name.str) != 0)
return false;
return true;
}
@@ -315,7 +315,7 @@ static bool check_x_op_constant(
if (arguments[0]->type() != Item::FIELD_ITEM)
return false;
Item_field* arg0 = static_cast<Item_field*>(arguments[0]);
- if (strcmp(field_name, arg0->field_name) != 0)
+ if (strcmp(field_name, arg0->field_name.str) != 0)
return false;
if (!check_int_result(arguments[1]))
if (!(allow_insert_value &&
@@ -359,11 +359,11 @@ static bool check_decr_floor_expression(Field* lhs_field, Item* item) {
uint n = item_func->argument_count();
if (n != 3)
return false;
- if (!check_x_equal_0(lhs_field->field_name, arguments[0]))
+ if (!check_x_equal_0(lhs_field->field_name.str, arguments[0]))
return false;
if (arguments[1]->type() != Item::INT_ITEM || arguments[1]->val_int() != 0)
return false;
- if (!check_x_minus_1(lhs_field->field_name, arguments[2]))
+ if (!check_x_minus_1(lhs_field->field_name.str, arguments[2]))
return false;
if (!(lhs_field->flags & UNSIGNED_FLAG))
return false;
@@ -394,14 +394,14 @@ static bool check_update_expression(
return true;
Item* item_constant;
if (check_x_op_constant(
- lhs_field->field_name,
+ lhs_field->field_name.str,
rhs_item,
"+",
&item_constant,
allow_insert_value))
return true;
if (check_x_op_constant(
- lhs_field->field_name,
+ lhs_field->field_name.str,
rhs_item,
"-",
&item_constant,
@@ -455,7 +455,7 @@ static bool full_field_in_key(TABLE* table, Field* field) {
KEY* key = &table->s->key_info[table->s->primary_key];
for (uint i = 0; i < key->user_defined_key_parts; i++) {
KEY_PART_INFO* key_part = &key->key_part[i];
- if (strcmp(field->field_name, key_part->field->field_name) == 0) {
+ if (strcmp(field->field_name.str, key_part->field->field_name.str) == 0) {
return key_part->length == field->field_length;
}
}
diff --git a/storage/tokudb/hatoku_cmp.cc b/storage/tokudb/hatoku_cmp.cc
index a8932f78ef8..05a4b2da8c7 100644
--- a/storage/tokudb/hatoku_cmp.cc
+++ b/storage/tokudb/hatoku_cmp.cc
@@ -3032,7 +3032,7 @@ static uint32_t pack_key_from_desc(
}
static bool fields_have_same_name(Field* a, Field* b) {
- return strcmp(a->field_name, b->field_name) == 0;
+ return strcmp(a->field_name.str, b->field_name.str) == 0;
}
static bool fields_are_same_type(Field* a, Field* b) {
diff --git a/storage/tokudb/hatoku_hton.cc b/storage/tokudb/hatoku_hton.cc
index 75f4a5a70f0..cee8575eebb 100644
--- a/storage/tokudb/hatoku_hton.cc
+++ b/storage/tokudb/hatoku_hton.cc
@@ -112,7 +112,7 @@ static int tokudb_discover3(
THD* thd,
const char* db,
const char* name,
- char* path,
+ const char* path,
uchar** frmblob,
size_t* frmlen);
handlerton* tokudb_hton;
@@ -1236,7 +1236,7 @@ static int tokudb_discover3(
THD* thd,
const char* db,
const char* name,
- char* path,
+ const char* path,
uchar** frmblob,
size_t* frmlen) {
diff --git a/storage/tokudb/mysql-test/tokudb/r/ext_key_1_innodb.result b/storage/tokudb/mysql-test/tokudb/r/ext_key_1_innodb.result
index e61dccc6868..16b3571db9f 100644
--- a/storage/tokudb/mysql-test/tokudb/r/ext_key_1_innodb.result
+++ b/storage/tokudb/mysql-test/tokudb/r/ext_key_1_innodb.result
@@ -1,7 +1,7 @@
drop table if exists t;
select @@optimizer_switch;
@@optimizer_switch
-index_merge=on,index_merge_union=on,index_merge_sort_union=on,index_merge_intersection=on,index_merge_sort_intersection=off,engine_condition_pushdown=off,index_condition_pushdown=on,derived_merge=on,derived_with_keys=on,firstmatch=on,loosescan=on,materialization=on,in_to_exists=on,semijoin=on,partial_match_rowid_merge=on,partial_match_table_scan=on,subquery_cache=on,mrr=off,mrr_cost_based=off,mrr_sort_keys=off,outer_join_with_cache=on,semijoin_with_cache=on,join_cache_incremental=on,join_cache_hashed=on,join_cache_bka=on,optimize_join_buffer_size=off,table_elimination=on,extended_keys=on,exists_to_in=on,orderby_uses_equalities=on,condition_pushdown_for_derived=on
+index_merge=on,index_merge_union=on,index_merge_sort_union=on,index_merge_intersection=on,index_merge_sort_intersection=off,engine_condition_pushdown=off,index_condition_pushdown=on,derived_merge=on,derived_with_keys=on,firstmatch=on,loosescan=on,materialization=on,in_to_exists=on,semijoin=on,partial_match_rowid_merge=on,partial_match_table_scan=on,subquery_cache=on,mrr=off,mrr_cost_based=off,mrr_sort_keys=off,outer_join_with_cache=on,semijoin_with_cache=on,join_cache_incremental=on,join_cache_hashed=on,join_cache_bka=on,optimize_join_buffer_size=off,table_elimination=on,extended_keys=on,exists_to_in=on,orderby_uses_equalities=on,condition_pushdown_for_derived=on,split_grouping_derived=on
create table t (id int not null, x int not null, y int not null, primary key(id), key(x)) engine=innodb;
insert into t values (0,0,0),(1,1,1),(2,2,2),(3,2,3),(4,2,4);
explain select x,id from t force index (x) where x=0 and id=0;
diff --git a/storage/tokudb/mysql-test/tokudb/r/ext_key_1_tokudb.result b/storage/tokudb/mysql-test/tokudb/r/ext_key_1_tokudb.result
index 9a54bedb02f..961504412a9 100644
--- a/storage/tokudb/mysql-test/tokudb/r/ext_key_1_tokudb.result
+++ b/storage/tokudb/mysql-test/tokudb/r/ext_key_1_tokudb.result
@@ -1,7 +1,7 @@
drop table if exists t;
select @@optimizer_switch;
@@optimizer_switch
-index_merge=on,index_merge_union=on,index_merge_sort_union=on,index_merge_intersection=on,index_merge_sort_intersection=off,engine_condition_pushdown=off,index_condition_pushdown=on,derived_merge=on,derived_with_keys=on,firstmatch=on,loosescan=on,materialization=on,in_to_exists=on,semijoin=on,partial_match_rowid_merge=on,partial_match_table_scan=on,subquery_cache=on,mrr=off,mrr_cost_based=off,mrr_sort_keys=off,outer_join_with_cache=on,semijoin_with_cache=on,join_cache_incremental=on,join_cache_hashed=on,join_cache_bka=on,optimize_join_buffer_size=off,table_elimination=on,extended_keys=on,exists_to_in=on,orderby_uses_equalities=on,condition_pushdown_for_derived=on
+index_merge=on,index_merge_union=on,index_merge_sort_union=on,index_merge_intersection=on,index_merge_sort_intersection=off,engine_condition_pushdown=off,index_condition_pushdown=on,derived_merge=on,derived_with_keys=on,firstmatch=on,loosescan=on,materialization=on,in_to_exists=on,semijoin=on,partial_match_rowid_merge=on,partial_match_table_scan=on,subquery_cache=on,mrr=off,mrr_cost_based=off,mrr_sort_keys=off,outer_join_with_cache=on,semijoin_with_cache=on,join_cache_incremental=on,join_cache_hashed=on,join_cache_bka=on,optimize_join_buffer_size=off,table_elimination=on,extended_keys=on,exists_to_in=on,orderby_uses_equalities=on,condition_pushdown_for_derived=on,split_grouping_derived=on
create table t (id int not null, x int not null, y int not null, primary key(id), key(x)) engine=tokudb;
insert into t values (0,0,0),(1,1,1),(2,2,2),(3,2,3),(4,2,4);
explain select x,id from t force index (x) where x=0 and id=0;
diff --git a/storage/tokudb/mysql-test/tokudb/r/ext_key_2_innodb.result b/storage/tokudb/mysql-test/tokudb/r/ext_key_2_innodb.result
index 7c0c0a67623..e5796f7a9b1 100644
--- a/storage/tokudb/mysql-test/tokudb/r/ext_key_2_innodb.result
+++ b/storage/tokudb/mysql-test/tokudb/r/ext_key_2_innodb.result
@@ -1,7 +1,7 @@
drop table if exists t;
select @@optimizer_switch;
@@optimizer_switch
-index_merge=on,index_merge_union=on,index_merge_sort_union=on,index_merge_intersection=on,index_merge_sort_intersection=off,engine_condition_pushdown=off,index_condition_pushdown=on,derived_merge=on,derived_with_keys=on,firstmatch=on,loosescan=on,materialization=on,in_to_exists=on,semijoin=on,partial_match_rowid_merge=on,partial_match_table_scan=on,subquery_cache=on,mrr=off,mrr_cost_based=off,mrr_sort_keys=off,outer_join_with_cache=on,semijoin_with_cache=on,join_cache_incremental=on,join_cache_hashed=on,join_cache_bka=on,optimize_join_buffer_size=off,table_elimination=on,extended_keys=on,exists_to_in=on,orderby_uses_equalities=on,condition_pushdown_for_derived=on
+index_merge=on,index_merge_union=on,index_merge_sort_union=on,index_merge_intersection=on,index_merge_sort_intersection=off,engine_condition_pushdown=off,index_condition_pushdown=on,derived_merge=on,derived_with_keys=on,firstmatch=on,loosescan=on,materialization=on,in_to_exists=on,semijoin=on,partial_match_rowid_merge=on,partial_match_table_scan=on,subquery_cache=on,mrr=off,mrr_cost_based=off,mrr_sort_keys=off,outer_join_with_cache=on,semijoin_with_cache=on,join_cache_incremental=on,join_cache_hashed=on,join_cache_bka=on,optimize_join_buffer_size=off,table_elimination=on,extended_keys=on,exists_to_in=on,orderby_uses_equalities=on,condition_pushdown_for_derived=on,split_grouping_derived=on
create table t (a int not null, b int not null, c int not null, d int not null, primary key(a,b), key(c,a)) engine=innodb;
insert into t values (0,0,0,0),(0,1,0,1);
explain select c,a,b from t where c=0 and a=0 and b=1;
diff --git a/storage/tokudb/mysql-test/tokudb/r/ext_key_2_tokudb.result b/storage/tokudb/mysql-test/tokudb/r/ext_key_2_tokudb.result
index 09143707718..3f1ed9971c3 100644
--- a/storage/tokudb/mysql-test/tokudb/r/ext_key_2_tokudb.result
+++ b/storage/tokudb/mysql-test/tokudb/r/ext_key_2_tokudb.result
@@ -1,7 +1,7 @@
drop table if exists t;
select @@optimizer_switch;
@@optimizer_switch
-index_merge=on,index_merge_union=on,index_merge_sort_union=on,index_merge_intersection=on,index_merge_sort_intersection=off,engine_condition_pushdown=off,index_condition_pushdown=on,derived_merge=on,derived_with_keys=on,firstmatch=on,loosescan=on,materialization=on,in_to_exists=on,semijoin=on,partial_match_rowid_merge=on,partial_match_table_scan=on,subquery_cache=on,mrr=off,mrr_cost_based=off,mrr_sort_keys=off,outer_join_with_cache=on,semijoin_with_cache=on,join_cache_incremental=on,join_cache_hashed=on,join_cache_bka=on,optimize_join_buffer_size=off,table_elimination=on,extended_keys=on,exists_to_in=on,orderby_uses_equalities=on,condition_pushdown_for_derived=on
+index_merge=on,index_merge_union=on,index_merge_sort_union=on,index_merge_intersection=on,index_merge_sort_intersection=off,engine_condition_pushdown=off,index_condition_pushdown=on,derived_merge=on,derived_with_keys=on,firstmatch=on,loosescan=on,materialization=on,in_to_exists=on,semijoin=on,partial_match_rowid_merge=on,partial_match_table_scan=on,subquery_cache=on,mrr=off,mrr_cost_based=off,mrr_sort_keys=off,outer_join_with_cache=on,semijoin_with_cache=on,join_cache_incremental=on,join_cache_hashed=on,join_cache_bka=on,optimize_join_buffer_size=off,table_elimination=on,extended_keys=on,exists_to_in=on,orderby_uses_equalities=on,condition_pushdown_for_derived=on,split_grouping_derived=on
create table t (a int not null, b int not null, c int not null, d int not null, primary key(a,b), key(c,a)) engine=tokudb;
insert into t values (0,0,0,0),(0,1,0,1);
explain select c,a,b from t where c=0 and a=0 and b=1;
diff --git a/storage/tokudb/mysql-test/tokudb/r/type_float.result b/storage/tokudb/mysql-test/tokudb/r/type_float.result
index 6387cea5384..f8ce24f08c4 100644
--- a/storage/tokudb/mysql-test/tokudb/r/type_float.result
+++ b/storage/tokudb/mysql-test/tokudb/r/type_float.result
@@ -233,12 +233,12 @@ insert into t2 values ("1.23456780");
create table t3 select * from t2 union select * from t1;
select * from t3;
d
-1.2345678
-100000000
+1.234567800
+100000000.000000000
show create table t3;
Table Create Table
t3 CREATE TABLE `t3` (
- `d` double DEFAULT NULL
+ `d` double(18,9) DEFAULT NULL
) ENGINE=ENGINE DEFAULT CHARSET=latin1
drop table t1, t2, t3;
create table t1 select 105213674794682365.00 + 0.0 x;
diff --git a/storage/tokudb/mysql-test/tokudb/r/type_ranges.result b/storage/tokudb/mysql-test/tokudb/r/type_ranges.result
index 1c9cd769a14..38252e870df 100644
--- a/storage/tokudb/mysql-test/tokudb/r/type_ranges.result
+++ b/storage/tokudb/mysql-test/tokudb/r/type_ranges.result
@@ -273,7 +273,7 @@ drop table t2;
create table t2 (primary key (auto)) select auto+1 as auto,1 as t1, 'a' as t2, repeat('a',256) as t3, binary repeat('b',256) as t4, repeat('a',4096) as t5, binary repeat('b',4096) as t6, '' as t7, binary '' as t8 from t1;
show full columns from t2;
Field Type Collation Null Key Default Extra Privileges Comment
-auto int(11) unsigned NULL NO PRI NULL #
+auto bigint(11) unsigned NULL NO PRI NULL #
t1 int(1) NULL NO NULL #
t2 varchar(1) latin1_swedish_ci NO NULL #
t3 varchar(256) latin1_swedish_ci NO NULL #
diff --git a/storage/tokudb/mysql-test/tokudb_bugs/r/checkpoint_lock.result b/storage/tokudb/mysql-test/tokudb_bugs/r/checkpoint_lock.result
index dff746fa280..1a09412ea08 100644
--- a/storage/tokudb/mysql-test/tokudb_bugs/r/checkpoint_lock.result
+++ b/storage/tokudb/mysql-test/tokudb_bugs/r/checkpoint_lock.result
@@ -17,7 +17,7 @@ flush logs;;
connection conn1;
select DB, command, state, info from information_schema.processlist where id != connection_id();
DB command state info
-test Query init flush logs
+test Query Init flush logs
set tokudb_checkpoint_lock=0;
connection default;
disconnect conn1;
diff --git a/storage/tokudb/mysql-test/tokudb_parts/r/part_supported_sql_func_tokudb.result b/storage/tokudb/mysql-test/tokudb_parts/r/part_supported_sql_func_tokudb.result
index 6bb98671feb..3ffeaa8abe1 100644
--- a/storage/tokudb/mysql-test/tokudb_parts/r/part_supported_sql_func_tokudb.result
+++ b/storage/tokudb/mysql-test/tokudb_parts/r/part_supported_sql_func_tokudb.result
@@ -2318,7 +2318,7 @@ t55 CREATE TABLE `t55` (
`col1` int(11) DEFAULT NULL
) ENGINE=TokuDB DEFAULT CHARSET=latin1
PARTITION BY LIST (`colint`)
-SUBPARTITION BY HASH (`col1` % 10)
+SUBPARTITION BY HASH (`col1` MOD 10)
SUBPARTITIONS 5
(PARTITION `p0` VALUES IN (1,2,3,4,5,6,7,8,9,10) ENGINE = TokuDB,
PARTITION `p1` VALUES IN (11,12,13,14,15,16,17,18,19,20) ENGINE = TokuDB,
diff --git a/storage/tokudb/mysql-test/tokudb_parts/r/partition_alter1_1_2_tokudb.result b/storage/tokudb/mysql-test/tokudb_parts/r/partition_alter1_1_2_tokudb.result
index 3d944dccdb1..410e6fd0ba2 100644
--- a/storage/tokudb/mysql-test/tokudb_parts/r/partition_alter1_1_2_tokudb.result
+++ b/storage/tokudb/mysql-test/tokudb_parts/r/partition_alter1_1_2_tokudb.result
@@ -1067,7 +1067,7 @@ t1 CREATE TABLE `t1` (
`f_charbig` varchar(1000) DEFAULT NULL,
PRIMARY KEY (`f_int1`,`f_int2`)
) ENGINE=TokuDB DEFAULT CHARSET=latin1
- PARTITION BY LIST ((`f_int1` + `f_int2`) % 4)
+ PARTITION BY LIST ((`f_int1` + `f_int2`) MOD 4)
(PARTITION `part_3` VALUES IN (-3) ENGINE = TokuDB,
PARTITION `part_2` VALUES IN (-2) ENGINE = TokuDB,
PARTITION `part_1` VALUES IN (-1) ENGINE = TokuDB,
@@ -3084,7 +3084,7 @@ t1 CREATE TABLE `t1` (
`f_charbig` varchar(1000) DEFAULT NULL,
PRIMARY KEY (`f_int1`,`f_int2`)
) ENGINE=TokuDB DEFAULT CHARSET=latin1
- PARTITION BY LIST (abs(`f_int1` % 3))
+ PARTITION BY LIST (abs(`f_int1` MOD 3))
SUBPARTITION BY HASH (`f_int2` + 1)
(PARTITION `part1` VALUES IN (0)
(SUBPARTITION `sp11` ENGINE = TokuDB,
@@ -3592,7 +3592,7 @@ t1 CREATE TABLE `t1` (
`f_charbig` varchar(1000) DEFAULT NULL,
PRIMARY KEY (`f_int1`,`f_int2`)
) ENGINE=TokuDB DEFAULT CHARSET=latin1
- PARTITION BY LIST (abs(`f_int1` % 2))
+ PARTITION BY LIST (abs(`f_int1` MOD 2))
SUBPARTITION BY KEY (`f_int2`)
SUBPARTITIONS 3
(PARTITION `part1` VALUES IN (0) ENGINE = TokuDB,
@@ -5081,7 +5081,7 @@ t1 CREATE TABLE `t1` (
`f_charbig` varchar(1000) DEFAULT NULL,
PRIMARY KEY (`f_int2`,`f_int1`)
) ENGINE=TokuDB DEFAULT CHARSET=latin1
- PARTITION BY LIST ((`f_int1` + `f_int2`) % 4)
+ PARTITION BY LIST ((`f_int1` + `f_int2`) MOD 4)
(PARTITION `part_3` VALUES IN (-3) ENGINE = TokuDB,
PARTITION `part_2` VALUES IN (-2) ENGINE = TokuDB,
PARTITION `part_1` VALUES IN (-1) ENGINE = TokuDB,
@@ -7098,7 +7098,7 @@ t1 CREATE TABLE `t1` (
`f_charbig` varchar(1000) DEFAULT NULL,
PRIMARY KEY (`f_int2`,`f_int1`)
) ENGINE=TokuDB DEFAULT CHARSET=latin1
- PARTITION BY LIST (abs(`f_int1` % 3))
+ PARTITION BY LIST (abs(`f_int1` MOD 3))
SUBPARTITION BY HASH (`f_int2` + 1)
(PARTITION `part1` VALUES IN (0)
(SUBPARTITION `sp11` ENGINE = TokuDB,
@@ -7606,7 +7606,7 @@ t1 CREATE TABLE `t1` (
`f_charbig` varchar(1000) DEFAULT NULL,
PRIMARY KEY (`f_int2`,`f_int1`)
) ENGINE=TokuDB DEFAULT CHARSET=latin1
- PARTITION BY LIST (abs(`f_int1` % 2))
+ PARTITION BY LIST (abs(`f_int1` MOD 2))
SUBPARTITION BY KEY (`f_int2`)
SUBPARTITIONS 3
(PARTITION `part1` VALUES IN (0) ENGINE = TokuDB,
@@ -9128,7 +9128,7 @@ t1 CREATE TABLE `t1` (
`f_charbig` varchar(1000) DEFAULT NULL,
UNIQUE KEY `uidx1` (`f_int1`,`f_int2`)
) ENGINE=TokuDB DEFAULT CHARSET=latin1
- PARTITION BY LIST ((`f_int1` + `f_int2`) % 4)
+ PARTITION BY LIST ((`f_int1` + `f_int2`) MOD 4)
(PARTITION `part_3` VALUES IN (-3) ENGINE = TokuDB,
PARTITION `part_2` VALUES IN (-2) ENGINE = TokuDB,
PARTITION `part_1` VALUES IN (-1) ENGINE = TokuDB,
@@ -11209,7 +11209,7 @@ t1 CREATE TABLE `t1` (
`f_charbig` varchar(1000) DEFAULT NULL,
UNIQUE KEY `uidx1` (`f_int1`,`f_int2`)
) ENGINE=TokuDB DEFAULT CHARSET=latin1
- PARTITION BY LIST (abs(`f_int1` % 3))
+ PARTITION BY LIST (abs(`f_int1` MOD 3))
SUBPARTITION BY HASH (`f_int2` + 1)
(PARTITION `part1` VALUES IN (0)
(SUBPARTITION `sp11` ENGINE = TokuDB,
@@ -11733,7 +11733,7 @@ t1 CREATE TABLE `t1` (
`f_charbig` varchar(1000) DEFAULT NULL,
UNIQUE KEY `uidx1` (`f_int1`,`f_int2`)
) ENGINE=TokuDB DEFAULT CHARSET=latin1
- PARTITION BY LIST (abs(`f_int1` % 2))
+ PARTITION BY LIST (abs(`f_int1` MOD 2))
SUBPARTITION BY KEY (`f_int2`)
SUBPARTITIONS 3
(PARTITION `part1` VALUES IN (0) ENGINE = TokuDB,
@@ -13270,7 +13270,7 @@ t1 CREATE TABLE `t1` (
`f_charbig` varchar(1000) DEFAULT NULL,
UNIQUE KEY `uidx1` (`f_int2`,`f_int1`)
) ENGINE=TokuDB DEFAULT CHARSET=latin1
- PARTITION BY LIST ((`f_int1` + `f_int2`) % 4)
+ PARTITION BY LIST ((`f_int1` + `f_int2`) MOD 4)
(PARTITION `part_3` VALUES IN (-3) ENGINE = TokuDB,
PARTITION `part_2` VALUES IN (-2) ENGINE = TokuDB,
PARTITION `part_1` VALUES IN (-1) ENGINE = TokuDB,
@@ -15351,7 +15351,7 @@ t1 CREATE TABLE `t1` (
`f_charbig` varchar(1000) DEFAULT NULL,
UNIQUE KEY `uidx1` (`f_int2`,`f_int1`)
) ENGINE=TokuDB DEFAULT CHARSET=latin1
- PARTITION BY LIST (abs(`f_int1` % 3))
+ PARTITION BY LIST (abs(`f_int1` MOD 3))
SUBPARTITION BY HASH (`f_int2` + 1)
(PARTITION `part1` VALUES IN (0)
(SUBPARTITION `sp11` ENGINE = TokuDB,
@@ -15875,7 +15875,7 @@ t1 CREATE TABLE `t1` (
`f_charbig` varchar(1000) DEFAULT NULL,
UNIQUE KEY `uidx1` (`f_int2`,`f_int1`)
) ENGINE=TokuDB DEFAULT CHARSET=latin1
- PARTITION BY LIST (abs(`f_int1` % 2))
+ PARTITION BY LIST (abs(`f_int1` MOD 2))
SUBPARTITION BY KEY (`f_int2`)
SUBPARTITIONS 3
(PARTITION `part1` VALUES IN (0) ENGINE = TokuDB,
@@ -17384,7 +17384,7 @@ t1 CREATE TABLE `t1` (
PRIMARY KEY (`f_int2`,`f_int1`),
UNIQUE KEY `uidx1` (`f_int1`,`f_int2`)
) ENGINE=TokuDB DEFAULT CHARSET=latin1
- PARTITION BY LIST ((`f_int1` + `f_int2`) % 4)
+ PARTITION BY LIST ((`f_int1` + `f_int2`) MOD 4)
(PARTITION `part_3` VALUES IN (-3) ENGINE = TokuDB,
PARTITION `part_2` VALUES IN (-2) ENGINE = TokuDB,
PARTITION `part_1` VALUES IN (-1) ENGINE = TokuDB,
@@ -19405,7 +19405,7 @@ t1 CREATE TABLE `t1` (
PRIMARY KEY (`f_int2`,`f_int1`),
UNIQUE KEY `uidx1` (`f_int1`,`f_int2`)
) ENGINE=TokuDB DEFAULT CHARSET=latin1
- PARTITION BY LIST (abs(`f_int1` % 3))
+ PARTITION BY LIST (abs(`f_int1` MOD 3))
SUBPARTITION BY HASH (`f_int2` + 1)
(PARTITION `part1` VALUES IN (0)
(SUBPARTITION `sp11` ENGINE = TokuDB,
@@ -19914,7 +19914,7 @@ t1 CREATE TABLE `t1` (
PRIMARY KEY (`f_int2`,`f_int1`),
UNIQUE KEY `uidx1` (`f_int1`,`f_int2`)
) ENGINE=TokuDB DEFAULT CHARSET=latin1
- PARTITION BY LIST (abs(`f_int1` % 2))
+ PARTITION BY LIST (abs(`f_int1` MOD 2))
SUBPARTITION BY KEY (`f_int2`)
SUBPARTITIONS 3
(PARTITION `part1` VALUES IN (0) ENGINE = TokuDB,
@@ -21406,7 +21406,7 @@ t1 CREATE TABLE `t1` (
PRIMARY KEY (`f_int1`,`f_int2`),
UNIQUE KEY `uidx1` (`f_int2`,`f_int1`)
) ENGINE=TokuDB DEFAULT CHARSET=latin1
- PARTITION BY LIST ((`f_int1` + `f_int2`) % 4)
+ PARTITION BY LIST ((`f_int1` + `f_int2`) MOD 4)
(PARTITION `part_3` VALUES IN (-3) ENGINE = TokuDB,
PARTITION `part_2` VALUES IN (-2) ENGINE = TokuDB,
PARTITION `part_1` VALUES IN (-1) ENGINE = TokuDB,
@@ -23427,7 +23427,7 @@ t1 CREATE TABLE `t1` (
PRIMARY KEY (`f_int1`,`f_int2`),
UNIQUE KEY `uidx1` (`f_int2`,`f_int1`)
) ENGINE=TokuDB DEFAULT CHARSET=latin1
- PARTITION BY LIST (abs(`f_int1` % 3))
+ PARTITION BY LIST (abs(`f_int1` MOD 3))
SUBPARTITION BY HASH (`f_int2` + 1)
(PARTITION `part1` VALUES IN (0)
(SUBPARTITION `sp11` ENGINE = TokuDB,
@@ -23936,7 +23936,7 @@ t1 CREATE TABLE `t1` (
PRIMARY KEY (`f_int1`,`f_int2`),
UNIQUE KEY `uidx1` (`f_int2`,`f_int1`)
) ENGINE=TokuDB DEFAULT CHARSET=latin1
- PARTITION BY LIST (abs(`f_int1` % 2))
+ PARTITION BY LIST (abs(`f_int1` MOD 2))
SUBPARTITION BY KEY (`f_int2`)
SUBPARTITIONS 3
(PARTITION `part1` VALUES IN (0) ENGINE = TokuDB,
@@ -25428,7 +25428,7 @@ t1 CREATE TABLE `t1` (
PRIMARY KEY (`f_int1`,`f_int2`),
UNIQUE KEY `uidx1` (`f_int2`,`f_int1`)
) ENGINE=TokuDB DEFAULT CHARSET=latin1
- PARTITION BY LIST ((`f_int1` + `f_int2`) % 4)
+ PARTITION BY LIST ((`f_int1` + `f_int2`) MOD 4)
(PARTITION `part_3` VALUES IN (-3) ENGINE = TokuDB,
PARTITION `part_2` VALUES IN (-2) ENGINE = TokuDB,
PARTITION `part_1` VALUES IN (-1) ENGINE = TokuDB,
@@ -27449,7 +27449,7 @@ t1 CREATE TABLE `t1` (
PRIMARY KEY (`f_int1`,`f_int2`),
UNIQUE KEY `uidx1` (`f_int2`,`f_int1`)
) ENGINE=TokuDB DEFAULT CHARSET=latin1
- PARTITION BY LIST (abs(`f_int1` % 3))
+ PARTITION BY LIST (abs(`f_int1` MOD 3))
SUBPARTITION BY HASH (`f_int2` + 1)
(PARTITION `part1` VALUES IN (0)
(SUBPARTITION `sp11` ENGINE = TokuDB,
@@ -27958,7 +27958,7 @@ t1 CREATE TABLE `t1` (
PRIMARY KEY (`f_int1`,`f_int2`),
UNIQUE KEY `uidx1` (`f_int2`,`f_int1`)
) ENGINE=TokuDB DEFAULT CHARSET=latin1
- PARTITION BY LIST (abs(`f_int1` % 2))
+ PARTITION BY LIST (abs(`f_int1` MOD 2))
SUBPARTITION BY KEY (`f_int2`)
SUBPARTITIONS 3
(PARTITION `part1` VALUES IN (0) ENGINE = TokuDB,
diff --git a/storage/tokudb/mysql-test/tokudb_parts/r/partition_alter1_1_tokudb.result b/storage/tokudb/mysql-test/tokudb_parts/r/partition_alter1_1_tokudb.result
index 7ad3d72441c..093dbbe11c5 100644
--- a/storage/tokudb/mysql-test/tokudb_parts/r/partition_alter1_1_tokudb.result
+++ b/storage/tokudb/mysql-test/tokudb_parts/r/partition_alter1_1_tokudb.result
@@ -1385,7 +1385,7 @@ t1 CREATE TABLE `t1` (
`f_charbig` varchar(1000) DEFAULT NULL,
PRIMARY KEY (`f_int1`,`f_int2`)
) ENGINE=TokuDB DEFAULT CHARSET=latin1
- PARTITION BY LIST (`f_int1` % 4)
+ PARTITION BY LIST (`f_int1` MOD 4)
(PARTITION `part_3` VALUES IN (-3) ENGINE = TokuDB,
PARTITION `part_2` VALUES IN (-2) ENGINE = TokuDB,
PARTITION `part_1` VALUES IN (-1) ENGINE = TokuDB,
@@ -3404,7 +3404,7 @@ t1 CREATE TABLE `t1` (
`f_charbig` varchar(1000) DEFAULT NULL,
PRIMARY KEY (`f_int1`,`f_int2`)
) ENGINE=TokuDB DEFAULT CHARSET=latin1
- PARTITION BY LIST (abs(`f_int1` % 3))
+ PARTITION BY LIST (abs(`f_int1` MOD 3))
SUBPARTITION BY HASH (`f_int1` + 1)
(PARTITION `part1` VALUES IN (0)
(SUBPARTITION `sp11` ENGINE = TokuDB,
@@ -3912,7 +3912,7 @@ t1 CREATE TABLE `t1` (
`f_charbig` varchar(1000) DEFAULT NULL,
PRIMARY KEY (`f_int1`,`f_int2`)
) ENGINE=TokuDB DEFAULT CHARSET=latin1
- PARTITION BY LIST (abs(`f_int1` % 2))
+ PARTITION BY LIST (abs(`f_int1` MOD 2))
SUBPARTITION BY KEY (`f_int1`)
SUBPARTITIONS 3
(PARTITION `part1` VALUES IN (0) ENGINE = TokuDB,
@@ -5401,7 +5401,7 @@ t1 CREATE TABLE `t1` (
`f_charbig` varchar(1000) DEFAULT NULL,
PRIMARY KEY (`f_int2`,`f_int1`)
) ENGINE=TokuDB DEFAULT CHARSET=latin1
- PARTITION BY LIST (`f_int1` % 4)
+ PARTITION BY LIST (`f_int1` MOD 4)
(PARTITION `part_3` VALUES IN (-3) ENGINE = TokuDB,
PARTITION `part_2` VALUES IN (-2) ENGINE = TokuDB,
PARTITION `part_1` VALUES IN (-1) ENGINE = TokuDB,
@@ -7420,7 +7420,7 @@ t1 CREATE TABLE `t1` (
`f_charbig` varchar(1000) DEFAULT NULL,
PRIMARY KEY (`f_int2`,`f_int1`)
) ENGINE=TokuDB DEFAULT CHARSET=latin1
- PARTITION BY LIST (abs(`f_int1` % 3))
+ PARTITION BY LIST (abs(`f_int1` MOD 3))
SUBPARTITION BY HASH (`f_int1` + 1)
(PARTITION `part1` VALUES IN (0)
(SUBPARTITION `sp11` ENGINE = TokuDB,
@@ -7928,7 +7928,7 @@ t1 CREATE TABLE `t1` (
`f_charbig` varchar(1000) DEFAULT NULL,
PRIMARY KEY (`f_int2`,`f_int1`)
) ENGINE=TokuDB DEFAULT CHARSET=latin1
- PARTITION BY LIST (abs(`f_int1` % 2))
+ PARTITION BY LIST (abs(`f_int1` MOD 2))
SUBPARTITION BY KEY (`f_int1`)
SUBPARTITIONS 3
(PARTITION `part1` VALUES IN (0) ENGINE = TokuDB,
@@ -9450,7 +9450,7 @@ t1 CREATE TABLE `t1` (
`f_charbig` varchar(1000) DEFAULT NULL,
UNIQUE KEY `uidx1` (`f_int1`,`f_int2`)
) ENGINE=TokuDB DEFAULT CHARSET=latin1
- PARTITION BY LIST (`f_int1` % 4)
+ PARTITION BY LIST (`f_int1` MOD 4)
(PARTITION `part_3` VALUES IN (-3) ENGINE = TokuDB,
PARTITION `part_2` VALUES IN (-2) ENGINE = TokuDB,
PARTITION `part_1` VALUES IN (-1) ENGINE = TokuDB,
@@ -11533,7 +11533,7 @@ t1 CREATE TABLE `t1` (
`f_charbig` varchar(1000) DEFAULT NULL,
UNIQUE KEY `uidx1` (`f_int1`,`f_int2`)
) ENGINE=TokuDB DEFAULT CHARSET=latin1
- PARTITION BY LIST (abs(`f_int1` % 3))
+ PARTITION BY LIST (abs(`f_int1` MOD 3))
SUBPARTITION BY HASH (`f_int1` + 1)
(PARTITION `part1` VALUES IN (0)
(SUBPARTITION `sp11` ENGINE = TokuDB,
@@ -12057,7 +12057,7 @@ t1 CREATE TABLE `t1` (
`f_charbig` varchar(1000) DEFAULT NULL,
UNIQUE KEY `uidx1` (`f_int1`,`f_int2`)
) ENGINE=TokuDB DEFAULT CHARSET=latin1
- PARTITION BY LIST (abs(`f_int1` % 2))
+ PARTITION BY LIST (abs(`f_int1` MOD 2))
SUBPARTITION BY KEY (`f_int1`)
SUBPARTITIONS 3
(PARTITION `part1` VALUES IN (0) ENGINE = TokuDB,
@@ -13594,7 +13594,7 @@ t1 CREATE TABLE `t1` (
`f_charbig` varchar(1000) DEFAULT NULL,
UNIQUE KEY `uidx1` (`f_int2`,`f_int1`)
) ENGINE=TokuDB DEFAULT CHARSET=latin1
- PARTITION BY LIST (`f_int1` % 4)
+ PARTITION BY LIST (`f_int1` MOD 4)
(PARTITION `part_3` VALUES IN (-3) ENGINE = TokuDB,
PARTITION `part_2` VALUES IN (-2) ENGINE = TokuDB,
PARTITION `part_1` VALUES IN (-1) ENGINE = TokuDB,
@@ -15677,7 +15677,7 @@ t1 CREATE TABLE `t1` (
`f_charbig` varchar(1000) DEFAULT NULL,
UNIQUE KEY `uidx1` (`f_int2`,`f_int1`)
) ENGINE=TokuDB DEFAULT CHARSET=latin1
- PARTITION BY LIST (abs(`f_int1` % 3))
+ PARTITION BY LIST (abs(`f_int1` MOD 3))
SUBPARTITION BY HASH (`f_int1` + 1)
(PARTITION `part1` VALUES IN (0)
(SUBPARTITION `sp11` ENGINE = TokuDB,
@@ -16201,7 +16201,7 @@ t1 CREATE TABLE `t1` (
`f_charbig` varchar(1000) DEFAULT NULL,
UNIQUE KEY `uidx1` (`f_int2`,`f_int1`)
) ENGINE=TokuDB DEFAULT CHARSET=latin1
- PARTITION BY LIST (abs(`f_int1` % 2))
+ PARTITION BY LIST (abs(`f_int1` MOD 2))
SUBPARTITION BY KEY (`f_int1`)
SUBPARTITIONS 3
(PARTITION `part1` VALUES IN (0) ENGINE = TokuDB,
diff --git a/storage/tokudb/mysql-test/tokudb_parts/r/partition_alter1_2_tokudb.result b/storage/tokudb/mysql-test/tokudb_parts/r/partition_alter1_2_tokudb.result
index 9f53437fdf7..a05ce5cb71b 100644
--- a/storage/tokudb/mysql-test/tokudb_parts/r/partition_alter1_2_tokudb.result
+++ b/storage/tokudb/mysql-test/tokudb_parts/r/partition_alter1_2_tokudb.result
@@ -961,7 +961,7 @@ t1 CREATE TABLE `t1` (
`f_char2` char(20) DEFAULT NULL,
`f_charbig` varchar(1000) DEFAULT NULL
) ENGINE=TokuDB DEFAULT CHARSET=latin1
- PARTITION BY LIST (`f_int1` % 4)
+ PARTITION BY LIST (`f_int1` MOD 4)
(PARTITION `part_3` VALUES IN (-3) ENGINE = TokuDB,
PARTITION `part_2` VALUES IN (-2) ENGINE = TokuDB,
PARTITION `part_1` VALUES IN (-1) ENGINE = TokuDB,
@@ -2772,7 +2772,7 @@ t1 CREATE TABLE `t1` (
`f_char2` char(20) DEFAULT NULL,
`f_charbig` varchar(1000) DEFAULT NULL
) ENGINE=TokuDB DEFAULT CHARSET=latin1
- PARTITION BY LIST (abs(`f_int1` % 3))
+ PARTITION BY LIST (abs(`f_int1` MOD 3))
SUBPARTITION BY HASH (`f_int1` + 1)
(PARTITION `part1` VALUES IN (0)
(SUBPARTITION `sp11` ENGINE = TokuDB,
@@ -3228,7 +3228,7 @@ t1 CREATE TABLE `t1` (
`f_char2` char(20) DEFAULT NULL,
`f_charbig` varchar(1000) DEFAULT NULL
) ENGINE=TokuDB DEFAULT CHARSET=latin1
- PARTITION BY LIST (abs(`f_int1` % 2))
+ PARTITION BY LIST (abs(`f_int1` MOD 2))
SUBPARTITION BY KEY (`f_int1`)
SUBPARTITIONS 3
(PARTITION `part1` VALUES IN (0) ENGINE = TokuDB,
@@ -4561,7 +4561,7 @@ t1 CREATE TABLE `t1` (
`f_char2` char(20) DEFAULT NULL,
`f_charbig` varchar(1000) DEFAULT NULL
) ENGINE=TokuDB DEFAULT CHARSET=latin1
- PARTITION BY LIST (`f_int1` % 4)
+ PARTITION BY LIST (`f_int1` MOD 4)
(PARTITION `part_3` VALUES IN (-3) ENGINE = TokuDB,
PARTITION `part_2` VALUES IN (-2) ENGINE = TokuDB,
PARTITION `part_1` VALUES IN (-1) ENGINE = TokuDB,
@@ -6372,7 +6372,7 @@ t1 CREATE TABLE `t1` (
`f_char2` char(20) DEFAULT NULL,
`f_charbig` varchar(1000) DEFAULT NULL
) ENGINE=TokuDB DEFAULT CHARSET=latin1
- PARTITION BY LIST (abs(`f_int1` % 3))
+ PARTITION BY LIST (abs(`f_int1` MOD 3))
SUBPARTITION BY HASH (`f_int1` + 1)
(PARTITION `part1` VALUES IN (0)
(SUBPARTITION `sp11` ENGINE = TokuDB,
@@ -6828,7 +6828,7 @@ t1 CREATE TABLE `t1` (
`f_char2` char(20) DEFAULT NULL,
`f_charbig` varchar(1000) DEFAULT NULL
) ENGINE=TokuDB DEFAULT CHARSET=latin1
- PARTITION BY LIST (abs(`f_int1` % 2))
+ PARTITION BY LIST (abs(`f_int1` MOD 2))
SUBPARTITION BY KEY (`f_int1`)
SUBPARTITIONS 3
(PARTITION `part1` VALUES IN (0) ENGINE = TokuDB,
@@ -8193,7 +8193,7 @@ t1 CREATE TABLE `t1` (
`f_char2` char(20) DEFAULT NULL,
`f_charbig` varchar(1000) DEFAULT NULL
) ENGINE=TokuDB DEFAULT CHARSET=latin1
- PARTITION BY LIST (`f_int1` % 4)
+ PARTITION BY LIST (`f_int1` MOD 4)
(PARTITION `part_3` VALUES IN (-3) ENGINE = TokuDB,
PARTITION `part_2` VALUES IN (-2) ENGINE = TokuDB,
PARTITION `part_1` VALUES IN (-1) ENGINE = TokuDB,
@@ -10068,7 +10068,7 @@ t1 CREATE TABLE `t1` (
`f_char2` char(20) DEFAULT NULL,
`f_charbig` varchar(1000) DEFAULT NULL
) ENGINE=TokuDB DEFAULT CHARSET=latin1
- PARTITION BY LIST (abs(`f_int1` % 3))
+ PARTITION BY LIST (abs(`f_int1` MOD 3))
SUBPARTITION BY HASH (`f_int1` + 1)
(PARTITION `part1` VALUES IN (0)
(SUBPARTITION `sp11` ENGINE = TokuDB,
@@ -10540,7 +10540,7 @@ t1 CREATE TABLE `t1` (
`f_char2` char(20) DEFAULT NULL,
`f_charbig` varchar(1000) DEFAULT NULL
) ENGINE=TokuDB DEFAULT CHARSET=latin1
- PARTITION BY LIST (abs(`f_int1` % 2))
+ PARTITION BY LIST (abs(`f_int1` MOD 2))
SUBPARTITION BY KEY (`f_int1`)
SUBPARTITIONS 3
(PARTITION `part1` VALUES IN (0) ENGINE = TokuDB,
@@ -11893,7 +11893,7 @@ t1 CREATE TABLE `t1` (
`f_char2` char(20) DEFAULT NULL,
`f_charbig` varchar(1000) DEFAULT NULL
) ENGINE=TokuDB DEFAULT CHARSET=latin1
- PARTITION BY LIST ((`f_int1` + `f_int2`) % 4)
+ PARTITION BY LIST ((`f_int1` + `f_int2`) MOD 4)
(PARTITION `part_3` VALUES IN (-3) ENGINE = TokuDB,
PARTITION `part_2` VALUES IN (-2) ENGINE = TokuDB,
PARTITION `part_1` VALUES IN (-1) ENGINE = TokuDB,
@@ -13702,7 +13702,7 @@ t1 CREATE TABLE `t1` (
`f_char2` char(20) DEFAULT NULL,
`f_charbig` varchar(1000) DEFAULT NULL
) ENGINE=TokuDB DEFAULT CHARSET=latin1
- PARTITION BY LIST (abs(`f_int1` % 3))
+ PARTITION BY LIST (abs(`f_int1` MOD 3))
SUBPARTITION BY HASH (`f_int2` + 1)
(PARTITION `part1` VALUES IN (0)
(SUBPARTITION `sp11` ENGINE = TokuDB,
@@ -14158,7 +14158,7 @@ t1 CREATE TABLE `t1` (
`f_char2` char(20) DEFAULT NULL,
`f_charbig` varchar(1000) DEFAULT NULL
) ENGINE=TokuDB DEFAULT CHARSET=latin1
- PARTITION BY LIST (abs(`f_int1` % 2))
+ PARTITION BY LIST (abs(`f_int1` MOD 2))
SUBPARTITION BY KEY (`f_int2`)
SUBPARTITIONS 3
(PARTITION `part1` VALUES IN (0) ENGINE = TokuDB,
@@ -15491,7 +15491,7 @@ t1 CREATE TABLE `t1` (
`f_char2` char(20) DEFAULT NULL,
`f_charbig` varchar(1000) DEFAULT NULL
) ENGINE=TokuDB DEFAULT CHARSET=latin1
- PARTITION BY LIST ((`f_int1` + `f_int2`) % 4)
+ PARTITION BY LIST ((`f_int1` + `f_int2`) MOD 4)
(PARTITION `part_3` VALUES IN (-3) ENGINE = TokuDB,
PARTITION `part_2` VALUES IN (-2) ENGINE = TokuDB,
PARTITION `part_1` VALUES IN (-1) ENGINE = TokuDB,
@@ -17300,7 +17300,7 @@ t1 CREATE TABLE `t1` (
`f_char2` char(20) DEFAULT NULL,
`f_charbig` varchar(1000) DEFAULT NULL
) ENGINE=TokuDB DEFAULT CHARSET=latin1
- PARTITION BY LIST (abs(`f_int1` % 3))
+ PARTITION BY LIST (abs(`f_int1` MOD 3))
SUBPARTITION BY HASH (`f_int2` + 1)
(PARTITION `part1` VALUES IN (0)
(SUBPARTITION `sp11` ENGINE = TokuDB,
@@ -17756,7 +17756,7 @@ t1 CREATE TABLE `t1` (
`f_char2` char(20) DEFAULT NULL,
`f_charbig` varchar(1000) DEFAULT NULL
) ENGINE=TokuDB DEFAULT CHARSET=latin1
- PARTITION BY LIST (abs(`f_int1` % 2))
+ PARTITION BY LIST (abs(`f_int1` MOD 2))
SUBPARTITION BY KEY (`f_int2`)
SUBPARTITIONS 3
(PARTITION `part1` VALUES IN (0) ENGINE = TokuDB,
@@ -19122,7 +19122,7 @@ t1 CREATE TABLE `t1` (
`f_char2` char(20) DEFAULT NULL,
`f_charbig` varchar(1000) DEFAULT NULL
) ENGINE=TokuDB DEFAULT CHARSET=latin1
- PARTITION BY LIST ((`f_int1` + `f_int2`) % 4)
+ PARTITION BY LIST ((`f_int1` + `f_int2`) MOD 4)
(PARTITION `part_3` VALUES IN (-3) ENGINE = TokuDB,
PARTITION `part_2` VALUES IN (-2) ENGINE = TokuDB,
PARTITION `part_1` VALUES IN (-1) ENGINE = TokuDB,
@@ -20995,7 +20995,7 @@ t1 CREATE TABLE `t1` (
`f_char2` char(20) DEFAULT NULL,
`f_charbig` varchar(1000) DEFAULT NULL
) ENGINE=TokuDB DEFAULT CHARSET=latin1
- PARTITION BY LIST (abs(`f_int1` % 3))
+ PARTITION BY LIST (abs(`f_int1` MOD 3))
SUBPARTITION BY HASH (`f_int2` + 1)
(PARTITION `part1` VALUES IN (0)
(SUBPARTITION `sp11` ENGINE = TokuDB,
@@ -21467,7 +21467,7 @@ t1 CREATE TABLE `t1` (
`f_char2` char(20) DEFAULT NULL,
`f_charbig` varchar(1000) DEFAULT NULL
) ENGINE=TokuDB DEFAULT CHARSET=latin1
- PARTITION BY LIST (abs(`f_int1` % 2))
+ PARTITION BY LIST (abs(`f_int1` MOD 2))
SUBPARTITION BY KEY (`f_int2`)
SUBPARTITIONS 3
(PARTITION `part1` VALUES IN (0) ENGINE = TokuDB,
@@ -22848,7 +22848,7 @@ t1 CREATE TABLE `t1` (
`f_char2` char(20) DEFAULT NULL,
`f_charbig` varchar(1000) DEFAULT NULL
) ENGINE=TokuDB DEFAULT CHARSET=latin1
- PARTITION BY LIST ((`f_int1` + `f_int2`) % 4)
+ PARTITION BY LIST ((`f_int1` + `f_int2`) MOD 4)
(PARTITION `part_3` VALUES IN (-3) ENGINE = TokuDB,
PARTITION `part_2` VALUES IN (-2) ENGINE = TokuDB,
PARTITION `part_1` VALUES IN (-1) ENGINE = TokuDB,
@@ -24721,7 +24721,7 @@ t1 CREATE TABLE `t1` (
`f_char2` char(20) DEFAULT NULL,
`f_charbig` varchar(1000) DEFAULT NULL
) ENGINE=TokuDB DEFAULT CHARSET=latin1
- PARTITION BY LIST (abs(`f_int1` % 3))
+ PARTITION BY LIST (abs(`f_int1` MOD 3))
SUBPARTITION BY HASH (`f_int2` + 1)
(PARTITION `part1` VALUES IN (0)
(SUBPARTITION `sp11` ENGINE = TokuDB,
@@ -25193,7 +25193,7 @@ t1 CREATE TABLE `t1` (
`f_char2` char(20) DEFAULT NULL,
`f_charbig` varchar(1000) DEFAULT NULL
) ENGINE=TokuDB DEFAULT CHARSET=latin1
- PARTITION BY LIST (abs(`f_int1` % 2))
+ PARTITION BY LIST (abs(`f_int1` MOD 2))
SUBPARTITION BY KEY (`f_int2`)
SUBPARTITIONS 3
(PARTITION `part1` VALUES IN (0) ENGINE = TokuDB,
@@ -26543,7 +26543,7 @@ t1 CREATE TABLE `t1` (
`f_char2` char(20) DEFAULT NULL,
`f_charbig` varchar(1000) DEFAULT NULL
) ENGINE=TokuDB DEFAULT CHARSET=latin1
- PARTITION BY LIST ((`f_int1` + `f_int2`) % 4)
+ PARTITION BY LIST ((`f_int1` + `f_int2`) MOD 4)
(PARTITION `part_3` VALUES IN (-3) ENGINE = TokuDB,
PARTITION `part_2` VALUES IN (-2) ENGINE = TokuDB,
PARTITION `part_1` VALUES IN (-1) ENGINE = TokuDB,
@@ -28352,7 +28352,7 @@ t1 CREATE TABLE `t1` (
`f_char2` char(20) DEFAULT NULL,
`f_charbig` varchar(1000) DEFAULT NULL
) ENGINE=TokuDB DEFAULT CHARSET=latin1
- PARTITION BY LIST (abs(`f_int1` % 3))
+ PARTITION BY LIST (abs(`f_int1` MOD 3))
SUBPARTITION BY HASH (`f_int2` + 1)
(PARTITION `part1` VALUES IN (0)
(SUBPARTITION `sp11` ENGINE = TokuDB,
@@ -28808,7 +28808,7 @@ t1 CREATE TABLE `t1` (
`f_char2` char(20) DEFAULT NULL,
`f_charbig` varchar(1000) DEFAULT NULL
) ENGINE=TokuDB DEFAULT CHARSET=latin1
- PARTITION BY LIST (abs(`f_int1` % 2))
+ PARTITION BY LIST (abs(`f_int1` MOD 2))
SUBPARTITION BY KEY (`f_int2`)
SUBPARTITIONS 3
(PARTITION `part1` VALUES IN (0) ENGINE = TokuDB,
@@ -30141,7 +30141,7 @@ t1 CREATE TABLE `t1` (
`f_char2` char(20) DEFAULT NULL,
`f_charbig` varchar(1000) DEFAULT NULL
) ENGINE=TokuDB DEFAULT CHARSET=latin1
- PARTITION BY LIST ((`f_int1` + `f_int2`) % 4)
+ PARTITION BY LIST ((`f_int1` + `f_int2`) MOD 4)
(PARTITION `part_3` VALUES IN (-3) ENGINE = TokuDB,
PARTITION `part_2` VALUES IN (-2) ENGINE = TokuDB,
PARTITION `part_1` VALUES IN (-1) ENGINE = TokuDB,
@@ -31950,7 +31950,7 @@ t1 CREATE TABLE `t1` (
`f_char2` char(20) DEFAULT NULL,
`f_charbig` varchar(1000) DEFAULT NULL
) ENGINE=TokuDB DEFAULT CHARSET=latin1
- PARTITION BY LIST (abs(`f_int1` % 3))
+ PARTITION BY LIST (abs(`f_int1` MOD 3))
SUBPARTITION BY HASH (`f_int2` + 1)
(PARTITION `part1` VALUES IN (0)
(SUBPARTITION `sp11` ENGINE = TokuDB,
@@ -32406,7 +32406,7 @@ t1 CREATE TABLE `t1` (
`f_char2` char(20) DEFAULT NULL,
`f_charbig` varchar(1000) DEFAULT NULL
) ENGINE=TokuDB DEFAULT CHARSET=latin1
- PARTITION BY LIST (abs(`f_int1` % 2))
+ PARTITION BY LIST (abs(`f_int1` MOD 2))
SUBPARTITION BY KEY (`f_int2`)
SUBPARTITIONS 3
(PARTITION `part1` VALUES IN (0) ENGINE = TokuDB,
@@ -33771,7 +33771,7 @@ t1 CREATE TABLE `t1` (
`f_char2` char(20) DEFAULT NULL,
`f_charbig` varchar(1000) DEFAULT NULL
) ENGINE=TokuDB DEFAULT CHARSET=latin1
- PARTITION BY LIST ((`f_int1` + `f_int2`) % 4)
+ PARTITION BY LIST ((`f_int1` + `f_int2`) MOD 4)
(PARTITION `part_3` VALUES IN (-3) ENGINE = TokuDB,
PARTITION `part_2` VALUES IN (-2) ENGINE = TokuDB,
PARTITION `part_1` VALUES IN (-1) ENGINE = TokuDB,
@@ -35644,7 +35644,7 @@ t1 CREATE TABLE `t1` (
`f_char2` char(20) DEFAULT NULL,
`f_charbig` varchar(1000) DEFAULT NULL
) ENGINE=TokuDB DEFAULT CHARSET=latin1
- PARTITION BY LIST (abs(`f_int1` % 3))
+ PARTITION BY LIST (abs(`f_int1` MOD 3))
SUBPARTITION BY HASH (`f_int2` + 1)
(PARTITION `part1` VALUES IN (0)
(SUBPARTITION `sp11` ENGINE = TokuDB,
@@ -36116,7 +36116,7 @@ t1 CREATE TABLE `t1` (
`f_char2` char(20) DEFAULT NULL,
`f_charbig` varchar(1000) DEFAULT NULL
) ENGINE=TokuDB DEFAULT CHARSET=latin1
- PARTITION BY LIST (abs(`f_int1` % 2))
+ PARTITION BY LIST (abs(`f_int1` MOD 2))
SUBPARTITION BY KEY (`f_int2`)
SUBPARTITIONS 3
(PARTITION `part1` VALUES IN (0) ENGINE = TokuDB,
diff --git a/storage/tokudb/mysql-test/tokudb_parts/r/partition_alter2_1_1_tokudb.result b/storage/tokudb/mysql-test/tokudb_parts/r/partition_alter2_1_1_tokudb.result
index d994f5f2654..a398ac33f42 100644
--- a/storage/tokudb/mysql-test/tokudb_parts/r/partition_alter2_1_1_tokudb.result
+++ b/storage/tokudb/mysql-test/tokudb_parts/r/partition_alter2_1_1_tokudb.result
@@ -994,7 +994,7 @@ t1 CREATE TABLE `t1` (
`f_char2` char(20) DEFAULT NULL,
`f_charbig` varchar(1000) DEFAULT NULL
) ENGINE=TokuDB DEFAULT CHARSET=latin1
- PARTITION BY LIST (`f_int1` % 4)
+ PARTITION BY LIST (`f_int1` MOD 4)
(PARTITION `part_3` VALUES IN (-3) ENGINE = TokuDB,
PARTITION `part_2` VALUES IN (-2) ENGINE = TokuDB,
PARTITION `part_1` VALUES IN (-1) ENGINE = TokuDB,
@@ -2869,7 +2869,7 @@ t1 CREATE TABLE `t1` (
`f_char2` char(20) DEFAULT NULL,
`f_charbig` varchar(1000) DEFAULT NULL
) ENGINE=TokuDB DEFAULT CHARSET=latin1
- PARTITION BY LIST (abs(`f_int1` % 3))
+ PARTITION BY LIST (abs(`f_int1` MOD 3))
SUBPARTITION BY HASH (`f_int1` + 1)
(PARTITION `part1` VALUES IN (0)
(SUBPARTITION `sp11` ENGINE = TokuDB,
@@ -3341,7 +3341,7 @@ t1 CREATE TABLE `t1` (
`f_char2` char(20) DEFAULT NULL,
`f_charbig` varchar(1000) DEFAULT NULL
) ENGINE=TokuDB DEFAULT CHARSET=latin1
- PARTITION BY LIST (abs(`f_int1` % 2))
+ PARTITION BY LIST (abs(`f_int1` MOD 2))
SUBPARTITION BY KEY (`f_int1`)
SUBPARTITIONS 3
(PARTITION `part1` VALUES IN (0) ENGINE = TokuDB,
@@ -4800,7 +4800,7 @@ t1 CREATE TABLE `t1` (
`f_charbig` varchar(1000) DEFAULT NULL,
PRIMARY KEY (`f_int1`,`f_int2`)
) ENGINE=TokuDB DEFAULT CHARSET=latin1
- PARTITION BY LIST (`f_int1` % 4)
+ PARTITION BY LIST (`f_int1` MOD 4)
(PARTITION `part_3` VALUES IN (-3) ENGINE = TokuDB,
PARTITION `part_2` VALUES IN (-2) ENGINE = TokuDB,
PARTITION `part_1` VALUES IN (-1) ENGINE = TokuDB,
@@ -6827,7 +6827,7 @@ t1 CREATE TABLE `t1` (
`f_charbig` varchar(1000) DEFAULT NULL,
PRIMARY KEY (`f_int1`,`f_int2`)
) ENGINE=TokuDB DEFAULT CHARSET=latin1
- PARTITION BY LIST (abs(`f_int1` % 3))
+ PARTITION BY LIST (abs(`f_int1` MOD 3))
SUBPARTITION BY HASH (`f_int1` + 1)
(PARTITION `part1` VALUES IN (0)
(SUBPARTITION `sp11` ENGINE = TokuDB,
@@ -7337,7 +7337,7 @@ t1 CREATE TABLE `t1` (
`f_charbig` varchar(1000) DEFAULT NULL,
PRIMARY KEY (`f_int1`,`f_int2`)
) ENGINE=TokuDB DEFAULT CHARSET=latin1
- PARTITION BY LIST (abs(`f_int1` % 2))
+ PARTITION BY LIST (abs(`f_int1` MOD 2))
SUBPARTITION BY KEY (`f_int1`)
SUBPARTITIONS 3
(PARTITION `part1` VALUES IN (0) ENGINE = TokuDB,
@@ -8832,7 +8832,7 @@ t1 CREATE TABLE `t1` (
`f_charbig` varchar(1000) DEFAULT NULL,
PRIMARY KEY (`f_int2`,`f_int1`)
) ENGINE=TokuDB DEFAULT CHARSET=latin1
- PARTITION BY LIST (`f_int1` % 4)
+ PARTITION BY LIST (`f_int1` MOD 4)
(PARTITION `part_3` VALUES IN (-3) ENGINE = TokuDB,
PARTITION `part_2` VALUES IN (-2) ENGINE = TokuDB,
PARTITION `part_1` VALUES IN (-1) ENGINE = TokuDB,
@@ -10859,7 +10859,7 @@ t1 CREATE TABLE `t1` (
`f_charbig` varchar(1000) DEFAULT NULL,
PRIMARY KEY (`f_int2`,`f_int1`)
) ENGINE=TokuDB DEFAULT CHARSET=latin1
- PARTITION BY LIST (abs(`f_int1` % 3))
+ PARTITION BY LIST (abs(`f_int1` MOD 3))
SUBPARTITION BY HASH (`f_int1` + 1)
(PARTITION `part1` VALUES IN (0)
(SUBPARTITION `sp11` ENGINE = TokuDB,
@@ -11369,7 +11369,7 @@ t1 CREATE TABLE `t1` (
`f_charbig` varchar(1000) DEFAULT NULL,
PRIMARY KEY (`f_int2`,`f_int1`)
) ENGINE=TokuDB DEFAULT CHARSET=latin1
- PARTITION BY LIST (abs(`f_int1` % 2))
+ PARTITION BY LIST (abs(`f_int1` MOD 2))
SUBPARTITION BY KEY (`f_int1`)
SUBPARTITIONS 3
(PARTITION `part1` VALUES IN (0) ENGINE = TokuDB,
@@ -12893,7 +12893,7 @@ t1 CREATE TABLE `t1` (
`f_charbig` varchar(1000) DEFAULT NULL,
UNIQUE KEY `uidx1` (`f_int1`,`f_int2`)
) ENGINE=TokuDB DEFAULT CHARSET=latin1
- PARTITION BY LIST (`f_int1` % 4)
+ PARTITION BY LIST (`f_int1` MOD 4)
(PARTITION `part_3` VALUES IN (-3) ENGINE = TokuDB,
PARTITION `part_2` VALUES IN (-2) ENGINE = TokuDB,
PARTITION `part_1` VALUES IN (-1) ENGINE = TokuDB,
@@ -14976,7 +14976,7 @@ t1 CREATE TABLE `t1` (
`f_charbig` varchar(1000) DEFAULT NULL,
UNIQUE KEY `uidx1` (`f_int1`,`f_int2`)
) ENGINE=TokuDB DEFAULT CHARSET=latin1
- PARTITION BY LIST (abs(`f_int1` % 3))
+ PARTITION BY LIST (abs(`f_int1` MOD 3))
SUBPARTITION BY HASH (`f_int1` + 1)
(PARTITION `part1` VALUES IN (0)
(SUBPARTITION `sp11` ENGINE = TokuDB,
@@ -15500,7 +15500,7 @@ t1 CREATE TABLE `t1` (
`f_charbig` varchar(1000) DEFAULT NULL,
UNIQUE KEY `uidx1` (`f_int1`,`f_int2`)
) ENGINE=TokuDB DEFAULT CHARSET=latin1
- PARTITION BY LIST (abs(`f_int1` % 2))
+ PARTITION BY LIST (abs(`f_int1` MOD 2))
SUBPARTITION BY KEY (`f_int1`)
SUBPARTITIONS 3
(PARTITION `part1` VALUES IN (0) ENGINE = TokuDB,
@@ -17037,7 +17037,7 @@ t1 CREATE TABLE `t1` (
`f_charbig` varchar(1000) DEFAULT NULL,
UNIQUE KEY `uidx1` (`f_int2`,`f_int1`)
) ENGINE=TokuDB DEFAULT CHARSET=latin1
- PARTITION BY LIST (`f_int1` % 4)
+ PARTITION BY LIST (`f_int1` MOD 4)
(PARTITION `part_3` VALUES IN (-3) ENGINE = TokuDB,
PARTITION `part_2` VALUES IN (-2) ENGINE = TokuDB,
PARTITION `part_1` VALUES IN (-1) ENGINE = TokuDB,
@@ -19120,7 +19120,7 @@ t1 CREATE TABLE `t1` (
`f_charbig` varchar(1000) DEFAULT NULL,
UNIQUE KEY `uidx1` (`f_int2`,`f_int1`)
) ENGINE=TokuDB DEFAULT CHARSET=latin1
- PARTITION BY LIST (abs(`f_int1` % 3))
+ PARTITION BY LIST (abs(`f_int1` MOD 3))
SUBPARTITION BY HASH (`f_int1` + 1)
(PARTITION `part1` VALUES IN (0)
(SUBPARTITION `sp11` ENGINE = TokuDB,
@@ -19644,7 +19644,7 @@ t1 CREATE TABLE `t1` (
`f_charbig` varchar(1000) DEFAULT NULL,
UNIQUE KEY `uidx1` (`f_int2`,`f_int1`)
) ENGINE=TokuDB DEFAULT CHARSET=latin1
- PARTITION BY LIST (abs(`f_int1` % 2))
+ PARTITION BY LIST (abs(`f_int1` MOD 2))
SUBPARTITION BY KEY (`f_int1`)
SUBPARTITIONS 3
(PARTITION `part1` VALUES IN (0) ENGINE = TokuDB,
diff --git a/storage/tokudb/mysql-test/tokudb_parts/r/partition_alter2_1_2_tokudb.result b/storage/tokudb/mysql-test/tokudb_parts/r/partition_alter2_1_2_tokudb.result
index f8c7eadb44c..60cc765a570 100644
--- a/storage/tokudb/mysql-test/tokudb_parts/r/partition_alter2_1_2_tokudb.result
+++ b/storage/tokudb/mysql-test/tokudb_parts/r/partition_alter2_1_2_tokudb.result
@@ -990,7 +990,7 @@ t1 CREATE TABLE `t1` (
`f_char2` char(20) DEFAULT NULL,
`f_charbig` varchar(1000) DEFAULT NULL
) ENGINE=TokuDB DEFAULT CHARSET=latin1
- PARTITION BY LIST ((`f_int1` + `f_int2`) % 4)
+ PARTITION BY LIST ((`f_int1` + `f_int2`) MOD 4)
(PARTITION `part_3` VALUES IN (-3) ENGINE = TokuDB,
PARTITION `part_2` VALUES IN (-2) ENGINE = TokuDB,
PARTITION `part_1` VALUES IN (-1) ENGINE = TokuDB,
@@ -2863,7 +2863,7 @@ t1 CREATE TABLE `t1` (
`f_char2` char(20) DEFAULT NULL,
`f_charbig` varchar(1000) DEFAULT NULL
) ENGINE=TokuDB DEFAULT CHARSET=latin1
- PARTITION BY LIST (abs(`f_int1` % 3))
+ PARTITION BY LIST (abs(`f_int1` MOD 3))
SUBPARTITION BY HASH (`f_int2` + 1)
(PARTITION `part1` VALUES IN (0)
(SUBPARTITION `sp11` ENGINE = TokuDB,
@@ -3335,7 +3335,7 @@ t1 CREATE TABLE `t1` (
`f_char2` char(20) DEFAULT NULL,
`f_charbig` varchar(1000) DEFAULT NULL
) ENGINE=TokuDB DEFAULT CHARSET=latin1
- PARTITION BY LIST (abs(`f_int1` % 2))
+ PARTITION BY LIST (abs(`f_int1` MOD 2))
SUBPARTITION BY KEY (`f_int2`)
SUBPARTITIONS 3
(PARTITION `part1` VALUES IN (0) ENGINE = TokuDB,
@@ -4796,7 +4796,7 @@ t1 CREATE TABLE `t1` (
`f_charbig` varchar(1000) DEFAULT NULL,
PRIMARY KEY (`f_int1`,`f_int2`)
) ENGINE=TokuDB DEFAULT CHARSET=latin1
- PARTITION BY LIST ((`f_int1` + `f_int2`) % 4)
+ PARTITION BY LIST ((`f_int1` + `f_int2`) MOD 4)
(PARTITION `part_3` VALUES IN (-3) ENGINE = TokuDB,
PARTITION `part_2` VALUES IN (-2) ENGINE = TokuDB,
PARTITION `part_1` VALUES IN (-1) ENGINE = TokuDB,
@@ -6825,7 +6825,7 @@ t1 CREATE TABLE `t1` (
`f_charbig` varchar(1000) DEFAULT NULL,
PRIMARY KEY (`f_int1`,`f_int2`)
) ENGINE=TokuDB DEFAULT CHARSET=latin1
- PARTITION BY LIST (abs(`f_int1` % 3))
+ PARTITION BY LIST (abs(`f_int1` MOD 3))
SUBPARTITION BY HASH (`f_int2` + 1)
(PARTITION `part1` VALUES IN (0)
(SUBPARTITION `sp11` ENGINE = TokuDB,
@@ -7336,7 +7336,7 @@ t1 CREATE TABLE `t1` (
`f_charbig` varchar(1000) DEFAULT NULL,
PRIMARY KEY (`f_int1`,`f_int2`)
) ENGINE=TokuDB DEFAULT CHARSET=latin1
- PARTITION BY LIST (abs(`f_int1` % 2))
+ PARTITION BY LIST (abs(`f_int1` MOD 2))
SUBPARTITION BY KEY (`f_int2`)
SUBPARTITIONS 3
(PARTITION `part1` VALUES IN (0) ENGINE = TokuDB,
@@ -8834,7 +8834,7 @@ t1 CREATE TABLE `t1` (
`f_charbig` varchar(1000) DEFAULT NULL,
PRIMARY KEY (`f_int2`,`f_int1`)
) ENGINE=TokuDB DEFAULT CHARSET=latin1
- PARTITION BY LIST ((`f_int1` + `f_int2`) % 4)
+ PARTITION BY LIST ((`f_int1` + `f_int2`) MOD 4)
(PARTITION `part_3` VALUES IN (-3) ENGINE = TokuDB,
PARTITION `part_2` VALUES IN (-2) ENGINE = TokuDB,
PARTITION `part_1` VALUES IN (-1) ENGINE = TokuDB,
@@ -10863,7 +10863,7 @@ t1 CREATE TABLE `t1` (
`f_charbig` varchar(1000) DEFAULT NULL,
PRIMARY KEY (`f_int2`,`f_int1`)
) ENGINE=TokuDB DEFAULT CHARSET=latin1
- PARTITION BY LIST (abs(`f_int1` % 3))
+ PARTITION BY LIST (abs(`f_int1` MOD 3))
SUBPARTITION BY HASH (`f_int2` + 1)
(PARTITION `part1` VALUES IN (0)
(SUBPARTITION `sp11` ENGINE = TokuDB,
@@ -11374,7 +11374,7 @@ t1 CREATE TABLE `t1` (
`f_charbig` varchar(1000) DEFAULT NULL,
PRIMARY KEY (`f_int2`,`f_int1`)
) ENGINE=TokuDB DEFAULT CHARSET=latin1
- PARTITION BY LIST (abs(`f_int1` % 2))
+ PARTITION BY LIST (abs(`f_int1` MOD 2))
SUBPARTITION BY KEY (`f_int2`)
SUBPARTITIONS 3
(PARTITION `part1` VALUES IN (0) ENGINE = TokuDB,
@@ -12899,7 +12899,7 @@ t1 CREATE TABLE `t1` (
`f_charbig` varchar(1000) DEFAULT NULL,
UNIQUE KEY `uidx` (`f_int1`,`f_int2`)
) ENGINE=TokuDB DEFAULT CHARSET=latin1
- PARTITION BY LIST ((`f_int1` + `f_int2`) % 4)
+ PARTITION BY LIST ((`f_int1` + `f_int2`) MOD 4)
(PARTITION `part_3` VALUES IN (-3) ENGINE = TokuDB,
PARTITION `part_2` VALUES IN (-2) ENGINE = TokuDB,
PARTITION `part_1` VALUES IN (-1) ENGINE = TokuDB,
@@ -14980,7 +14980,7 @@ t1 CREATE TABLE `t1` (
`f_charbig` varchar(1000) DEFAULT NULL,
UNIQUE KEY `uidx` (`f_int1`,`f_int2`)
) ENGINE=TokuDB DEFAULT CHARSET=latin1
- PARTITION BY LIST (abs(`f_int1` % 3))
+ PARTITION BY LIST (abs(`f_int1` MOD 3))
SUBPARTITION BY HASH (`f_int2` + 1)
(PARTITION `part1` VALUES IN (0)
(SUBPARTITION `sp11` ENGINE = TokuDB,
@@ -15504,7 +15504,7 @@ t1 CREATE TABLE `t1` (
`f_charbig` varchar(1000) DEFAULT NULL,
UNIQUE KEY `uidx` (`f_int1`,`f_int2`)
) ENGINE=TokuDB DEFAULT CHARSET=latin1
- PARTITION BY LIST (abs(`f_int1` % 2))
+ PARTITION BY LIST (abs(`f_int1` MOD 2))
SUBPARTITION BY KEY (`f_int2`)
SUBPARTITIONS 3
(PARTITION `part1` VALUES IN (0) ENGINE = TokuDB,
@@ -17041,7 +17041,7 @@ t1 CREATE TABLE `t1` (
`f_charbig` varchar(1000) DEFAULT NULL,
UNIQUE KEY `uidx` (`f_int2`,`f_int1`)
) ENGINE=TokuDB DEFAULT CHARSET=latin1
- PARTITION BY LIST ((`f_int1` + `f_int2`) % 4)
+ PARTITION BY LIST ((`f_int1` + `f_int2`) MOD 4)
(PARTITION `part_3` VALUES IN (-3) ENGINE = TokuDB,
PARTITION `part_2` VALUES IN (-2) ENGINE = TokuDB,
PARTITION `part_1` VALUES IN (-1) ENGINE = TokuDB,
@@ -19122,7 +19122,7 @@ t1 CREATE TABLE `t1` (
`f_charbig` varchar(1000) DEFAULT NULL,
UNIQUE KEY `uidx` (`f_int2`,`f_int1`)
) ENGINE=TokuDB DEFAULT CHARSET=latin1
- PARTITION BY LIST (abs(`f_int1` % 3))
+ PARTITION BY LIST (abs(`f_int1` MOD 3))
SUBPARTITION BY HASH (`f_int2` + 1)
(PARTITION `part1` VALUES IN (0)
(SUBPARTITION `sp11` ENGINE = TokuDB,
@@ -19646,7 +19646,7 @@ t1 CREATE TABLE `t1` (
`f_charbig` varchar(1000) DEFAULT NULL,
UNIQUE KEY `uidx` (`f_int2`,`f_int1`)
) ENGINE=TokuDB DEFAULT CHARSET=latin1
- PARTITION BY LIST (abs(`f_int1` % 2))
+ PARTITION BY LIST (abs(`f_int1` MOD 2))
SUBPARTITION BY KEY (`f_int2`)
SUBPARTITIONS 3
(PARTITION `part1` VALUES IN (0) ENGINE = TokuDB,
diff --git a/storage/tokudb/mysql-test/tokudb_parts/r/partition_alter2_2_1_tokudb.result b/storage/tokudb/mysql-test/tokudb_parts/r/partition_alter2_2_1_tokudb.result
index 1a4d1210a23..0b5d8289ecc 100644
--- a/storage/tokudb/mysql-test/tokudb_parts/r/partition_alter2_2_1_tokudb.result
+++ b/storage/tokudb/mysql-test/tokudb_parts/r/partition_alter2_2_1_tokudb.result
@@ -998,7 +998,7 @@ t1 CREATE TABLE `t1` (
`f_char2` char(20) DEFAULT NULL,
`f_charbig` varchar(1000) DEFAULT NULL
) ENGINE=TokuDB DEFAULT CHARSET=latin1
- PARTITION BY LIST (`f_int1` % 4)
+ PARTITION BY LIST (`f_int1` MOD 4)
(PARTITION `part_3` VALUES IN (-3) ENGINE = TokuDB,
PARTITION `part_2` VALUES IN (-2) ENGINE = TokuDB,
PARTITION `part_1` VALUES IN (-1) ENGINE = TokuDB,
@@ -2877,7 +2877,7 @@ t1 CREATE TABLE `t1` (
`f_char2` char(20) DEFAULT NULL,
`f_charbig` varchar(1000) DEFAULT NULL
) ENGINE=TokuDB DEFAULT CHARSET=latin1
- PARTITION BY LIST (abs(`f_int1` % 3))
+ PARTITION BY LIST (abs(`f_int1` MOD 3))
SUBPARTITION BY HASH (`f_int1` + 1)
(PARTITION `part1` VALUES IN (0)
(SUBPARTITION `sp11` ENGINE = TokuDB,
@@ -3351,7 +3351,7 @@ t1 CREATE TABLE `t1` (
`f_char2` char(20) DEFAULT NULL,
`f_charbig` varchar(1000) DEFAULT NULL
) ENGINE=TokuDB DEFAULT CHARSET=latin1
- PARTITION BY LIST (abs(`f_int1` % 2))
+ PARTITION BY LIST (abs(`f_int1` MOD 2))
SUBPARTITION BY KEY (`f_int1`)
SUBPARTITIONS 3
(PARTITION `part1` VALUES IN (0) ENGINE = TokuDB,
@@ -4815,7 +4815,7 @@ t1 CREATE TABLE `t1` (
`f_charbig` varchar(1000) DEFAULT NULL,
PRIMARY KEY (`f_int1`,`f_int2`)
) ENGINE=TokuDB DEFAULT CHARSET=latin1
- PARTITION BY LIST (`f_int1` % 4)
+ PARTITION BY LIST (`f_int1` MOD 4)
(PARTITION `part_3` VALUES IN (-3) ENGINE = TokuDB,
PARTITION `part_2` VALUES IN (-2) ENGINE = TokuDB,
PARTITION `part_1` VALUES IN (-1) ENGINE = TokuDB,
@@ -6846,7 +6846,7 @@ t1 CREATE TABLE `t1` (
`f_charbig` varchar(1000) DEFAULT NULL,
PRIMARY KEY (`f_int1`,`f_int2`)
) ENGINE=TokuDB DEFAULT CHARSET=latin1
- PARTITION BY LIST (abs(`f_int1` % 3))
+ PARTITION BY LIST (abs(`f_int1` MOD 3))
SUBPARTITION BY HASH (`f_int1` + 1)
(PARTITION `part1` VALUES IN (0)
(SUBPARTITION `sp11` ENGINE = TokuDB,
@@ -7358,7 +7358,7 @@ t1 CREATE TABLE `t1` (
`f_charbig` varchar(1000) DEFAULT NULL,
PRIMARY KEY (`f_int1`,`f_int2`)
) ENGINE=TokuDB DEFAULT CHARSET=latin1
- PARTITION BY LIST (abs(`f_int1` % 2))
+ PARTITION BY LIST (abs(`f_int1` MOD 2))
SUBPARTITION BY KEY (`f_int1`)
SUBPARTITIONS 3
(PARTITION `part1` VALUES IN (0) ENGINE = TokuDB,
@@ -8859,7 +8859,7 @@ t1 CREATE TABLE `t1` (
`f_charbig` varchar(1000) DEFAULT NULL,
PRIMARY KEY (`f_int2`,`f_int1`)
) ENGINE=TokuDB DEFAULT CHARSET=latin1
- PARTITION BY LIST (`f_int1` % 4)
+ PARTITION BY LIST (`f_int1` MOD 4)
(PARTITION `part_3` VALUES IN (-3) ENGINE = TokuDB,
PARTITION `part_2` VALUES IN (-2) ENGINE = TokuDB,
PARTITION `part_1` VALUES IN (-1) ENGINE = TokuDB,
@@ -10890,7 +10890,7 @@ t1 CREATE TABLE `t1` (
`f_charbig` varchar(1000) DEFAULT NULL,
PRIMARY KEY (`f_int2`,`f_int1`)
) ENGINE=TokuDB DEFAULT CHARSET=latin1
- PARTITION BY LIST (abs(`f_int1` % 3))
+ PARTITION BY LIST (abs(`f_int1` MOD 3))
SUBPARTITION BY HASH (`f_int1` + 1)
(PARTITION `part1` VALUES IN (0)
(SUBPARTITION `sp11` ENGINE = TokuDB,
@@ -11402,7 +11402,7 @@ t1 CREATE TABLE `t1` (
`f_charbig` varchar(1000) DEFAULT NULL,
PRIMARY KEY (`f_int2`,`f_int1`)
) ENGINE=TokuDB DEFAULT CHARSET=latin1
- PARTITION BY LIST (abs(`f_int1` % 2))
+ PARTITION BY LIST (abs(`f_int1` MOD 2))
SUBPARTITION BY KEY (`f_int1`)
SUBPARTITIONS 3
(PARTITION `part1` VALUES IN (0) ENGINE = TokuDB,
@@ -12932,7 +12932,7 @@ t1 CREATE TABLE `t1` (
`f_charbig` varchar(1000) DEFAULT NULL,
UNIQUE KEY `uidx1` (`f_int1`,`f_int2`)
) ENGINE=TokuDB DEFAULT CHARSET=latin1
- PARTITION BY LIST (`f_int1` % 4)
+ PARTITION BY LIST (`f_int1` MOD 4)
(PARTITION `part_3` VALUES IN (-3) ENGINE = TokuDB,
PARTITION `part_2` VALUES IN (-2) ENGINE = TokuDB,
PARTITION `part_1` VALUES IN (-1) ENGINE = TokuDB,
@@ -15019,7 +15019,7 @@ t1 CREATE TABLE `t1` (
`f_charbig` varchar(1000) DEFAULT NULL,
UNIQUE KEY `uidx1` (`f_int1`,`f_int2`)
) ENGINE=TokuDB DEFAULT CHARSET=latin1
- PARTITION BY LIST (abs(`f_int1` % 3))
+ PARTITION BY LIST (abs(`f_int1` MOD 3))
SUBPARTITION BY HASH (`f_int1` + 1)
(PARTITION `part1` VALUES IN (0)
(SUBPARTITION `sp11` ENGINE = TokuDB,
@@ -15545,7 +15545,7 @@ t1 CREATE TABLE `t1` (
`f_charbig` varchar(1000) DEFAULT NULL,
UNIQUE KEY `uidx1` (`f_int1`,`f_int2`)
) ENGINE=TokuDB DEFAULT CHARSET=latin1
- PARTITION BY LIST (abs(`f_int1` % 2))
+ PARTITION BY LIST (abs(`f_int1` MOD 2))
SUBPARTITION BY KEY (`f_int1`)
SUBPARTITIONS 3
(PARTITION `part1` VALUES IN (0) ENGINE = TokuDB,
@@ -17088,7 +17088,7 @@ t1 CREATE TABLE `t1` (
`f_charbig` varchar(1000) DEFAULT NULL,
UNIQUE KEY `uidx1` (`f_int2`,`f_int1`)
) ENGINE=TokuDB DEFAULT CHARSET=latin1
- PARTITION BY LIST (`f_int1` % 4)
+ PARTITION BY LIST (`f_int1` MOD 4)
(PARTITION `part_3` VALUES IN (-3) ENGINE = TokuDB,
PARTITION `part_2` VALUES IN (-2) ENGINE = TokuDB,
PARTITION `part_1` VALUES IN (-1) ENGINE = TokuDB,
@@ -19175,7 +19175,7 @@ t1 CREATE TABLE `t1` (
`f_charbig` varchar(1000) DEFAULT NULL,
UNIQUE KEY `uidx1` (`f_int2`,`f_int1`)
) ENGINE=TokuDB DEFAULT CHARSET=latin1
- PARTITION BY LIST (abs(`f_int1` % 3))
+ PARTITION BY LIST (abs(`f_int1` MOD 3))
SUBPARTITION BY HASH (`f_int1` + 1)
(PARTITION `part1` VALUES IN (0)
(SUBPARTITION `sp11` ENGINE = TokuDB,
@@ -19701,7 +19701,7 @@ t1 CREATE TABLE `t1` (
`f_charbig` varchar(1000) DEFAULT NULL,
UNIQUE KEY `uidx1` (`f_int2`,`f_int1`)
) ENGINE=TokuDB DEFAULT CHARSET=latin1
- PARTITION BY LIST (abs(`f_int1` % 2))
+ PARTITION BY LIST (abs(`f_int1` MOD 2))
SUBPARTITION BY KEY (`f_int1`)
SUBPARTITIONS 3
(PARTITION `part1` VALUES IN (0) ENGINE = TokuDB,
diff --git a/storage/tokudb/mysql-test/tokudb_parts/r/partition_alter2_2_2_tokudb.result b/storage/tokudb/mysql-test/tokudb_parts/r/partition_alter2_2_2_tokudb.result
index 36be0936c09..67bae3acecb 100644
--- a/storage/tokudb/mysql-test/tokudb_parts/r/partition_alter2_2_2_tokudb.result
+++ b/storage/tokudb/mysql-test/tokudb_parts/r/partition_alter2_2_2_tokudb.result
@@ -995,7 +995,7 @@ t1 CREATE TABLE `t1` (
`f_char2` char(20) DEFAULT NULL,
`f_charbig` varchar(1000) DEFAULT NULL
) ENGINE=TokuDB DEFAULT CHARSET=latin1
- PARTITION BY LIST ((`f_int1` + `f_int2`) % 4)
+ PARTITION BY LIST ((`f_int1` + `f_int2`) MOD 4)
(PARTITION `part_3` VALUES IN (-3) ENGINE = TokuDB,
PARTITION `part_2` VALUES IN (-2) ENGINE = TokuDB,
PARTITION `part_1` VALUES IN (-1) ENGINE = TokuDB,
@@ -2886,7 +2886,7 @@ t1 CREATE TABLE `t1` (
`f_char2` char(20) DEFAULT NULL,
`f_charbig` varchar(1000) DEFAULT NULL
) ENGINE=TokuDB DEFAULT CHARSET=latin1
- PARTITION BY LIST (abs(`f_int1` % 3))
+ PARTITION BY LIST (abs(`f_int1` MOD 3))
SUBPARTITION BY HASH (`f_int2` + 1)
(PARTITION `part1` VALUES IN (0)
(SUBPARTITION `sp11` ENGINE = TokuDB,
@@ -3361,7 +3361,7 @@ t1 CREATE TABLE `t1` (
`f_char2` char(20) DEFAULT NULL,
`f_charbig` varchar(1000) DEFAULT NULL
) ENGINE=TokuDB DEFAULT CHARSET=latin1
- PARTITION BY LIST (abs(`f_int1` % 2))
+ PARTITION BY LIST (abs(`f_int1` MOD 2))
SUBPARTITION BY KEY (`f_int2`)
SUBPARTITIONS 3
(PARTITION `part1` VALUES IN (0) ENGINE = TokuDB,
@@ -4831,7 +4831,7 @@ t1 CREATE TABLE `t1` (
`f_charbig` varchar(1000) DEFAULT NULL,
PRIMARY KEY (`f_int1`,`f_int2`)
) ENGINE=TokuDB DEFAULT CHARSET=latin1
- PARTITION BY LIST ((`f_int1` + `f_int2`) % 4)
+ PARTITION BY LIST ((`f_int1` + `f_int2`) MOD 4)
(PARTITION `part_3` VALUES IN (-3) ENGINE = TokuDB,
PARTITION `part_2` VALUES IN (-2) ENGINE = TokuDB,
PARTITION `part_1` VALUES IN (-1) ENGINE = TokuDB,
@@ -6878,7 +6878,7 @@ t1 CREATE TABLE `t1` (
`f_charbig` varchar(1000) DEFAULT NULL,
PRIMARY KEY (`f_int1`,`f_int2`)
) ENGINE=TokuDB DEFAULT CHARSET=latin1
- PARTITION BY LIST (abs(`f_int1` % 3))
+ PARTITION BY LIST (abs(`f_int1` MOD 3))
SUBPARTITION BY HASH (`f_int2` + 1)
(PARTITION `part1` VALUES IN (0)
(SUBPARTITION `sp11` ENGINE = TokuDB,
@@ -7392,7 +7392,7 @@ t1 CREATE TABLE `t1` (
`f_charbig` varchar(1000) DEFAULT NULL,
PRIMARY KEY (`f_int1`,`f_int2`)
) ENGINE=TokuDB DEFAULT CHARSET=latin1
- PARTITION BY LIST (abs(`f_int1` % 2))
+ PARTITION BY LIST (abs(`f_int1` MOD 2))
SUBPARTITION BY KEY (`f_int2`)
SUBPARTITIONS 3
(PARTITION `part1` VALUES IN (0) ENGINE = TokuDB,
@@ -8899,7 +8899,7 @@ t1 CREATE TABLE `t1` (
`f_charbig` varchar(1000) DEFAULT NULL,
PRIMARY KEY (`f_int2`,`f_int1`)
) ENGINE=TokuDB DEFAULT CHARSET=latin1
- PARTITION BY LIST ((`f_int1` + `f_int2`) % 4)
+ PARTITION BY LIST ((`f_int1` + `f_int2`) MOD 4)
(PARTITION `part_3` VALUES IN (-3) ENGINE = TokuDB,
PARTITION `part_2` VALUES IN (-2) ENGINE = TokuDB,
PARTITION `part_1` VALUES IN (-1) ENGINE = TokuDB,
@@ -10946,7 +10946,7 @@ t1 CREATE TABLE `t1` (
`f_charbig` varchar(1000) DEFAULT NULL,
PRIMARY KEY (`f_int2`,`f_int1`)
) ENGINE=TokuDB DEFAULT CHARSET=latin1
- PARTITION BY LIST (abs(`f_int1` % 3))
+ PARTITION BY LIST (abs(`f_int1` MOD 3))
SUBPARTITION BY HASH (`f_int2` + 1)
(PARTITION `part1` VALUES IN (0)
(SUBPARTITION `sp11` ENGINE = TokuDB,
@@ -11460,7 +11460,7 @@ t1 CREATE TABLE `t1` (
`f_charbig` varchar(1000) DEFAULT NULL,
PRIMARY KEY (`f_int2`,`f_int1`)
) ENGINE=TokuDB DEFAULT CHARSET=latin1
- PARTITION BY LIST (abs(`f_int1` % 2))
+ PARTITION BY LIST (abs(`f_int1` MOD 2))
SUBPARTITION BY KEY (`f_int2`)
SUBPARTITIONS 3
(PARTITION `part1` VALUES IN (0) ENGINE = TokuDB,
@@ -12994,7 +12994,7 @@ t1 CREATE TABLE `t1` (
`f_charbig` varchar(1000) DEFAULT NULL,
UNIQUE KEY `uidx` (`f_int1`,`f_int2`)
) ENGINE=TokuDB DEFAULT CHARSET=latin1
- PARTITION BY LIST ((`f_int1` + `f_int2`) % 4)
+ PARTITION BY LIST ((`f_int1` + `f_int2`) MOD 4)
(PARTITION `part_3` VALUES IN (-3) ENGINE = TokuDB,
PARTITION `part_2` VALUES IN (-2) ENGINE = TokuDB,
PARTITION `part_1` VALUES IN (-1) ENGINE = TokuDB,
@@ -15093,7 +15093,7 @@ t1 CREATE TABLE `t1` (
`f_charbig` varchar(1000) DEFAULT NULL,
UNIQUE KEY `uidx` (`f_int1`,`f_int2`)
) ENGINE=TokuDB DEFAULT CHARSET=latin1
- PARTITION BY LIST (abs(`f_int1` % 3))
+ PARTITION BY LIST (abs(`f_int1` MOD 3))
SUBPARTITION BY HASH (`f_int2` + 1)
(PARTITION `part1` VALUES IN (0)
(SUBPARTITION `sp11` ENGINE = TokuDB,
@@ -15620,7 +15620,7 @@ t1 CREATE TABLE `t1` (
`f_charbig` varchar(1000) DEFAULT NULL,
UNIQUE KEY `uidx` (`f_int1`,`f_int2`)
) ENGINE=TokuDB DEFAULT CHARSET=latin1
- PARTITION BY LIST (abs(`f_int1` % 2))
+ PARTITION BY LIST (abs(`f_int1` MOD 2))
SUBPARTITION BY KEY (`f_int2`)
SUBPARTITIONS 3
(PARTITION `part1` VALUES IN (0) ENGINE = TokuDB,
@@ -17166,7 +17166,7 @@ t1 CREATE TABLE `t1` (
`f_charbig` varchar(1000) DEFAULT NULL,
UNIQUE KEY `uidx` (`f_int2`,`f_int1`)
) ENGINE=TokuDB DEFAULT CHARSET=latin1
- PARTITION BY LIST ((`f_int1` + `f_int2`) % 4)
+ PARTITION BY LIST ((`f_int1` + `f_int2`) MOD 4)
(PARTITION `part_3` VALUES IN (-3) ENGINE = TokuDB,
PARTITION `part_2` VALUES IN (-2) ENGINE = TokuDB,
PARTITION `part_1` VALUES IN (-1) ENGINE = TokuDB,
@@ -19265,7 +19265,7 @@ t1 CREATE TABLE `t1` (
`f_charbig` varchar(1000) DEFAULT NULL,
UNIQUE KEY `uidx` (`f_int2`,`f_int1`)
) ENGINE=TokuDB DEFAULT CHARSET=latin1
- PARTITION BY LIST (abs(`f_int1` % 3))
+ PARTITION BY LIST (abs(`f_int1` MOD 3))
SUBPARTITION BY HASH (`f_int2` + 1)
(PARTITION `part1` VALUES IN (0)
(SUBPARTITION `sp11` ENGINE = TokuDB,
@@ -19792,7 +19792,7 @@ t1 CREATE TABLE `t1` (
`f_charbig` varchar(1000) DEFAULT NULL,
UNIQUE KEY `uidx` (`f_int2`,`f_int1`)
) ENGINE=TokuDB DEFAULT CHARSET=latin1
- PARTITION BY LIST (abs(`f_int1` % 2))
+ PARTITION BY LIST (abs(`f_int1` MOD 2))
SUBPARTITION BY KEY (`f_int2`)
SUBPARTITIONS 3
(PARTITION `part1` VALUES IN (0) ENGINE = TokuDB,
diff --git a/storage/tokudb/mysql-test/tokudb_parts/r/partition_alter4_tokudb.result b/storage/tokudb/mysql-test/tokudb_parts/r/partition_alter4_tokudb.result
index 3409f1d380e..808f646dd48 100644
--- a/storage/tokudb/mysql-test/tokudb_parts/r/partition_alter4_tokudb.result
+++ b/storage/tokudb/mysql-test/tokudb_parts/r/partition_alter4_tokudb.result
@@ -1004,7 +1004,7 @@ t1 CREATE TABLE `t1` (
`f_char2` char(20) DEFAULT NULL,
`f_charbig` varchar(1000) DEFAULT NULL
) ENGINE=TokuDB DEFAULT CHARSET=latin1
- PARTITION BY LIST (`f_int1` % 4)
+ PARTITION BY LIST (`f_int1` MOD 4)
(PARTITION `part_3` VALUES IN (-3) ENGINE = TokuDB,
PARTITION `part_2` VALUES IN (-2) ENGINE = TokuDB,
PARTITION `part_1` VALUES IN (-1) ENGINE = TokuDB,
@@ -2887,7 +2887,7 @@ t1 CREATE TABLE `t1` (
`f_char2` char(20) DEFAULT NULL,
`f_charbig` varchar(1000) DEFAULT NULL
) ENGINE=TokuDB DEFAULT CHARSET=latin1
- PARTITION BY LIST (abs(`f_int1` % 3))
+ PARTITION BY LIST (abs(`f_int1` MOD 3))
SUBPARTITION BY HASH (`f_int1` + 1)
(PARTITION `part_1` VALUES IN (0)
(SUBPARTITION `sp11` ENGINE = TokuDB,
@@ -3361,7 +3361,7 @@ t1 CREATE TABLE `t1` (
`f_char2` char(20) DEFAULT NULL,
`f_charbig` varchar(1000) DEFAULT NULL
) ENGINE=TokuDB DEFAULT CHARSET=latin1
- PARTITION BY LIST (abs(`f_int1` % 2))
+ PARTITION BY LIST (abs(`f_int1` MOD 2))
SUBPARTITION BY KEY (`f_int1`)
SUBPARTITIONS 3
(PARTITION `part_1` VALUES IN (0) ENGINE = TokuDB,
@@ -4754,7 +4754,7 @@ t1 CREATE TABLE `t1` (
`f_char2` char(20) DEFAULT NULL,
`f_charbig` varchar(1000) DEFAULT NULL
) ENGINE=TokuDB DEFAULT CHARSET=latin1
- PARTITION BY LIST (`f_int1` % 4)
+ PARTITION BY LIST (`f_int1` MOD 4)
(PARTITION `part_3` VALUES IN (-3) ENGINE = TokuDB,
PARTITION `part_2` VALUES IN (-2) ENGINE = TokuDB,
PARTITION `part_1` VALUES IN (-1) ENGINE = TokuDB,
@@ -6637,7 +6637,7 @@ t1 CREATE TABLE `t1` (
`f_char2` char(20) DEFAULT NULL,
`f_charbig` varchar(1000) DEFAULT NULL
) ENGINE=TokuDB DEFAULT CHARSET=latin1
- PARTITION BY LIST (abs(`f_int1` % 3))
+ PARTITION BY LIST (abs(`f_int1` MOD 3))
SUBPARTITION BY HASH (`f_int1` + 1)
(PARTITION `part_1` VALUES IN (0)
(SUBPARTITION `sp11` ENGINE = TokuDB,
@@ -7111,7 +7111,7 @@ t1 CREATE TABLE `t1` (
`f_char2` char(20) DEFAULT NULL,
`f_charbig` varchar(1000) DEFAULT NULL
) ENGINE=TokuDB DEFAULT CHARSET=latin1
- PARTITION BY LIST (abs(`f_int1` % 2))
+ PARTITION BY LIST (abs(`f_int1` MOD 2))
SUBPARTITION BY KEY (`f_int1`)
SUBPARTITIONS 3
(PARTITION `part_1` VALUES IN (0) ENGINE = TokuDB,
@@ -8504,7 +8504,7 @@ t1 CREATE TABLE `t1` (
`f_char2` char(20) DEFAULT NULL,
`f_charbig` varchar(1000) DEFAULT NULL
) ENGINE=TokuDB DEFAULT CHARSET=latin1
- PARTITION BY LIST (`f_int1` % 4)
+ PARTITION BY LIST (`f_int1` MOD 4)
(PARTITION `part_3` VALUES IN (-3) ENGINE = TokuDB,
PARTITION `part_2` VALUES IN (-2) ENGINE = TokuDB,
PARTITION `part_1` VALUES IN (-1) ENGINE = TokuDB,
@@ -10387,7 +10387,7 @@ t1 CREATE TABLE `t1` (
`f_char2` char(20) DEFAULT NULL,
`f_charbig` varchar(1000) DEFAULT NULL
) ENGINE=TokuDB DEFAULT CHARSET=latin1
- PARTITION BY LIST (abs(`f_int1` % 3))
+ PARTITION BY LIST (abs(`f_int1` MOD 3))
SUBPARTITION BY HASH (`f_int1` + 1)
(PARTITION `part_1` VALUES IN (0)
(SUBPARTITION `sp11` ENGINE = TokuDB,
@@ -10861,7 +10861,7 @@ t1 CREATE TABLE `t1` (
`f_char2` char(20) DEFAULT NULL,
`f_charbig` varchar(1000) DEFAULT NULL
) ENGINE=TokuDB DEFAULT CHARSET=latin1
- PARTITION BY LIST (abs(`f_int1` % 2))
+ PARTITION BY LIST (abs(`f_int1` MOD 2))
SUBPARTITION BY KEY (`f_int1`)
SUBPARTITIONS 3
(PARTITION `part_1` VALUES IN (0) ENGINE = TokuDB,
@@ -12254,7 +12254,7 @@ t1 CREATE TABLE `t1` (
`f_char2` char(20) DEFAULT NULL,
`f_charbig` varchar(1000) DEFAULT NULL
) ENGINE=TokuDB DEFAULT CHARSET=latin1
- PARTITION BY LIST (`f_int1` % 4)
+ PARTITION BY LIST (`f_int1` MOD 4)
(PARTITION `part_3` VALUES IN (-3) ENGINE = TokuDB,
PARTITION `part_2` VALUES IN (-2) ENGINE = TokuDB,
PARTITION `part_1` VALUES IN (-1) ENGINE = TokuDB,
@@ -14137,7 +14137,7 @@ t1 CREATE TABLE `t1` (
`f_char2` char(20) DEFAULT NULL,
`f_charbig` varchar(1000) DEFAULT NULL
) ENGINE=TokuDB DEFAULT CHARSET=latin1
- PARTITION BY LIST (abs(`f_int1` % 3))
+ PARTITION BY LIST (abs(`f_int1` MOD 3))
SUBPARTITION BY HASH (`f_int1` + 1)
(PARTITION `part_1` VALUES IN (0)
(SUBPARTITION `sp11` ENGINE = TokuDB,
@@ -14611,7 +14611,7 @@ t1 CREATE TABLE `t1` (
`f_char2` char(20) DEFAULT NULL,
`f_charbig` varchar(1000) DEFAULT NULL
) ENGINE=TokuDB DEFAULT CHARSET=latin1
- PARTITION BY LIST (abs(`f_int1` % 2))
+ PARTITION BY LIST (abs(`f_int1` MOD 2))
SUBPARTITION BY KEY (`f_int1`)
SUBPARTITIONS 3
(PARTITION `part_1` VALUES IN (0) ENGINE = TokuDB,
@@ -16004,7 +16004,7 @@ t1 CREATE TABLE `t1` (
`f_char2` char(20) DEFAULT NULL,
`f_charbig` varchar(1000) DEFAULT NULL
) ENGINE=TokuDB DEFAULT CHARSET=latin1
- PARTITION BY LIST (`f_int1` % 4)
+ PARTITION BY LIST (`f_int1` MOD 4)
(PARTITION `part_3` VALUES IN (-3) ENGINE = TokuDB,
PARTITION `part_2` VALUES IN (-2) ENGINE = TokuDB,
PARTITION `part_1` VALUES IN (-1) ENGINE = TokuDB,
@@ -17887,7 +17887,7 @@ t1 CREATE TABLE `t1` (
`f_char2` char(20) DEFAULT NULL,
`f_charbig` varchar(1000) DEFAULT NULL
) ENGINE=TokuDB DEFAULT CHARSET=latin1
- PARTITION BY LIST (abs(`f_int1` % 3))
+ PARTITION BY LIST (abs(`f_int1` MOD 3))
SUBPARTITION BY HASH (`f_int1` + 1)
(PARTITION `part_1` VALUES IN (0)
(SUBPARTITION `sp11` ENGINE = TokuDB,
@@ -18361,7 +18361,7 @@ t1 CREATE TABLE `t1` (
`f_char2` char(20) DEFAULT NULL,
`f_charbig` varchar(1000) DEFAULT NULL
) ENGINE=TokuDB DEFAULT CHARSET=latin1
- PARTITION BY LIST (abs(`f_int1` % 2))
+ PARTITION BY LIST (abs(`f_int1` MOD 2))
SUBPARTITION BY KEY (`f_int1`)
SUBPARTITIONS 3
(PARTITION `part_1` VALUES IN (0) ENGINE = TokuDB,
@@ -19757,7 +19757,7 @@ t1 CREATE TABLE `t1` (
`f_char2` char(20) DEFAULT NULL,
`f_charbig` varchar(1000) DEFAULT NULL
) ENGINE=TokuDB DEFAULT CHARSET=latin1
- PARTITION BY LIST (`f_int1` % 4)
+ PARTITION BY LIST (`f_int1` MOD 4)
(PARTITION `part_3` VALUES IN (-3) ENGINE = TokuDB,
PARTITION `part_2` VALUES IN (-2) ENGINE = TokuDB,
PARTITION `part_1` VALUES IN (-1) ENGINE = TokuDB,
@@ -21640,7 +21640,7 @@ t1 CREATE TABLE `t1` (
`f_char2` char(20) DEFAULT NULL,
`f_charbig` varchar(1000) DEFAULT NULL
) ENGINE=TokuDB DEFAULT CHARSET=latin1
- PARTITION BY LIST (abs(`f_int1` % 3))
+ PARTITION BY LIST (abs(`f_int1` MOD 3))
SUBPARTITION BY HASH (`f_int1` + 1)
(PARTITION `part_1` VALUES IN (0)
(SUBPARTITION `sp11` ENGINE = TokuDB,
@@ -22114,7 +22114,7 @@ t1 CREATE TABLE `t1` (
`f_char2` char(20) DEFAULT NULL,
`f_charbig` varchar(1000) DEFAULT NULL
) ENGINE=TokuDB DEFAULT CHARSET=latin1
- PARTITION BY LIST (abs(`f_int1` % 2))
+ PARTITION BY LIST (abs(`f_int1` MOD 2))
SUBPARTITION BY KEY (`f_int1`)
SUBPARTITIONS 3
(PARTITION `part_1` VALUES IN (0) ENGINE = TokuDB,
@@ -23507,7 +23507,7 @@ t1 CREATE TABLE `t1` (
`f_char2` char(20) DEFAULT NULL,
`f_charbig` varchar(1000) DEFAULT NULL
) ENGINE=TokuDB DEFAULT CHARSET=latin1
- PARTITION BY LIST (`f_int1` % 4)
+ PARTITION BY LIST (`f_int1` MOD 4)
(PARTITION `part_3` VALUES IN (-3) ENGINE = TokuDB,
PARTITION `part_2` VALUES IN (-2) ENGINE = TokuDB,
PARTITION `part_1` VALUES IN (-1) ENGINE = TokuDB,
@@ -25390,7 +25390,7 @@ t1 CREATE TABLE `t1` (
`f_char2` char(20) DEFAULT NULL,
`f_charbig` varchar(1000) DEFAULT NULL
) ENGINE=TokuDB DEFAULT CHARSET=latin1
- PARTITION BY LIST (abs(`f_int1` % 3))
+ PARTITION BY LIST (abs(`f_int1` MOD 3))
SUBPARTITION BY HASH (`f_int1` + 1)
(PARTITION `part_1` VALUES IN (0)
(SUBPARTITION `sp11` ENGINE = TokuDB,
@@ -25864,7 +25864,7 @@ t1 CREATE TABLE `t1` (
`f_char2` char(20) DEFAULT NULL,
`f_charbig` varchar(1000) DEFAULT NULL
) ENGINE=TokuDB DEFAULT CHARSET=latin1
- PARTITION BY LIST (abs(`f_int1` % 2))
+ PARTITION BY LIST (abs(`f_int1` MOD 2))
SUBPARTITION BY KEY (`f_int1`)
SUBPARTITIONS 3
(PARTITION `part_1` VALUES IN (0) ENGINE = TokuDB,
@@ -27257,7 +27257,7 @@ t1 CREATE TABLE `t1` (
`f_char2` char(20) DEFAULT NULL,
`f_charbig` varchar(1000) DEFAULT NULL
) ENGINE=TokuDB DEFAULT CHARSET=latin1
- PARTITION BY LIST (`f_int1` % 4)
+ PARTITION BY LIST (`f_int1` MOD 4)
(PARTITION `part_3` VALUES IN (-3) ENGINE = TokuDB,
PARTITION `part_2` VALUES IN (-2) ENGINE = TokuDB,
PARTITION `part_1` VALUES IN (-1) ENGINE = TokuDB,
@@ -29140,7 +29140,7 @@ t1 CREATE TABLE `t1` (
`f_char2` char(20) DEFAULT NULL,
`f_charbig` varchar(1000) DEFAULT NULL
) ENGINE=TokuDB DEFAULT CHARSET=latin1
- PARTITION BY LIST (abs(`f_int1` % 3))
+ PARTITION BY LIST (abs(`f_int1` MOD 3))
SUBPARTITION BY HASH (`f_int1` + 1)
(PARTITION `part_1` VALUES IN (0)
(SUBPARTITION `sp11` ENGINE = TokuDB,
@@ -29614,7 +29614,7 @@ t1 CREATE TABLE `t1` (
`f_char2` char(20) DEFAULT NULL,
`f_charbig` varchar(1000) DEFAULT NULL
) ENGINE=TokuDB DEFAULT CHARSET=latin1
- PARTITION BY LIST (abs(`f_int1` % 2))
+ PARTITION BY LIST (abs(`f_int1` MOD 2))
SUBPARTITION BY KEY (`f_int1`)
SUBPARTITIONS 3
(PARTITION `part_1` VALUES IN (0) ENGINE = TokuDB,
@@ -31007,7 +31007,7 @@ t1 CREATE TABLE `t1` (
`f_char2` char(20) DEFAULT NULL,
`f_charbig` varchar(1000) DEFAULT NULL
) ENGINE=TokuDB DEFAULT CHARSET=latin1
- PARTITION BY LIST (`f_int1` % 4)
+ PARTITION BY LIST (`f_int1` MOD 4)
(PARTITION `part_3` VALUES IN (-3) ENGINE = TokuDB,
PARTITION `part_2` VALUES IN (-2) ENGINE = TokuDB,
PARTITION `part_1` VALUES IN (-1) ENGINE = TokuDB,
@@ -32890,7 +32890,7 @@ t1 CREATE TABLE `t1` (
`f_char2` char(20) DEFAULT NULL,
`f_charbig` varchar(1000) DEFAULT NULL
) ENGINE=TokuDB DEFAULT CHARSET=latin1
- PARTITION BY LIST (abs(`f_int1` % 3))
+ PARTITION BY LIST (abs(`f_int1` MOD 3))
SUBPARTITION BY HASH (`f_int1` + 1)
(PARTITION `part_1` VALUES IN (0)
(SUBPARTITION `sp11` ENGINE = TokuDB,
@@ -33364,7 +33364,7 @@ t1 CREATE TABLE `t1` (
`f_char2` char(20) DEFAULT NULL,
`f_charbig` varchar(1000) DEFAULT NULL
) ENGINE=TokuDB DEFAULT CHARSET=latin1
- PARTITION BY LIST (abs(`f_int1` % 2))
+ PARTITION BY LIST (abs(`f_int1` MOD 2))
SUBPARTITION BY KEY (`f_int1`)
SUBPARTITIONS 3
(PARTITION `part_1` VALUES IN (0) ENGINE = TokuDB,
@@ -34757,7 +34757,7 @@ t1 CREATE TABLE `t1` (
`f_char2` char(20) DEFAULT NULL,
`f_charbig` varchar(1000) DEFAULT NULL
) ENGINE=TokuDB DEFAULT CHARSET=latin1
- PARTITION BY LIST (`f_int1` % 4)
+ PARTITION BY LIST (`f_int1` MOD 4)
(PARTITION `part_3` VALUES IN (-3) ENGINE = TokuDB,
PARTITION `part_2` VALUES IN (-2) ENGINE = TokuDB,
PARTITION `part_1` VALUES IN (-1) ENGINE = TokuDB,
@@ -36640,7 +36640,7 @@ t1 CREATE TABLE `t1` (
`f_char2` char(20) DEFAULT NULL,
`f_charbig` varchar(1000) DEFAULT NULL
) ENGINE=TokuDB DEFAULT CHARSET=latin1
- PARTITION BY LIST (abs(`f_int1` % 3))
+ PARTITION BY LIST (abs(`f_int1` MOD 3))
SUBPARTITION BY HASH (`f_int1` + 1)
(PARTITION `part_1` VALUES IN (0)
(SUBPARTITION `sp11` ENGINE = TokuDB,
@@ -37114,7 +37114,7 @@ t1 CREATE TABLE `t1` (
`f_char2` char(20) DEFAULT NULL,
`f_charbig` varchar(1000) DEFAULT NULL
) ENGINE=TokuDB DEFAULT CHARSET=latin1
- PARTITION BY LIST (abs(`f_int1` % 2))
+ PARTITION BY LIST (abs(`f_int1` MOD 2))
SUBPARTITION BY KEY (`f_int1`)
SUBPARTITIONS 3
(PARTITION `part_1` VALUES IN (0) ENGINE = TokuDB,
@@ -38513,7 +38513,7 @@ t1 CREATE TABLE `t1` (
`f_char2` char(20) DEFAULT NULL,
`f_charbig` varchar(1000) DEFAULT NULL
) ENGINE=TokuDB DEFAULT CHARSET=latin1
- PARTITION BY LIST (`f_int1` % 4)
+ PARTITION BY LIST (`f_int1` MOD 4)
(PARTITION `part_3` VALUES IN (-3) ENGINE = TokuDB,
PARTITION `part_2` VALUES IN (-2) ENGINE = TokuDB,
PARTITION `part_1` VALUES IN (-1) ENGINE = TokuDB,
@@ -40400,7 +40400,7 @@ t1 CREATE TABLE `t1` (
`f_char2` char(20) DEFAULT NULL,
`f_charbig` varchar(1000) DEFAULT NULL
) ENGINE=TokuDB DEFAULT CHARSET=latin1
- PARTITION BY LIST (abs(`f_int1` % 3))
+ PARTITION BY LIST (abs(`f_int1` MOD 3))
SUBPARTITION BY HASH (`f_int1` + 1)
(PARTITION `part_1` VALUES IN (0)
(SUBPARTITION `sp11` ENGINE = TokuDB,
@@ -40875,7 +40875,7 @@ t1 CREATE TABLE `t1` (
`f_char2` char(20) DEFAULT NULL,
`f_charbig` varchar(1000) DEFAULT NULL
) ENGINE=TokuDB DEFAULT CHARSET=latin1
- PARTITION BY LIST (abs(`f_int1` % 2))
+ PARTITION BY LIST (abs(`f_int1` MOD 2))
SUBPARTITION BY KEY (`f_int1`)
SUBPARTITIONS 3
(PARTITION `part_1` VALUES IN (0) ENGINE = TokuDB,
@@ -42271,7 +42271,7 @@ t1 CREATE TABLE `t1` (
`f_char2` char(20) DEFAULT NULL,
`f_charbig` varchar(1000) DEFAULT NULL
) ENGINE=TokuDB DEFAULT CHARSET=latin1
- PARTITION BY LIST (`f_int1` % 4)
+ PARTITION BY LIST (`f_int1` MOD 4)
(PARTITION `part_3` VALUES IN (-3) ENGINE = TokuDB,
PARTITION `part_2` VALUES IN (-2) ENGINE = TokuDB,
PARTITION `part_1` VALUES IN (-1) ENGINE = TokuDB,
@@ -44158,7 +44158,7 @@ t1 CREATE TABLE `t1` (
`f_char2` char(20) DEFAULT NULL,
`f_charbig` varchar(1000) DEFAULT NULL
) ENGINE=TokuDB DEFAULT CHARSET=latin1
- PARTITION BY LIST (abs(`f_int1` % 3))
+ PARTITION BY LIST (abs(`f_int1` MOD 3))
SUBPARTITION BY HASH (`f_int1` + 1)
(PARTITION `part_1` VALUES IN (0)
(SUBPARTITION `sp11` ENGINE = TokuDB,
@@ -44633,7 +44633,7 @@ t1 CREATE TABLE `t1` (
`f_char2` char(20) DEFAULT NULL,
`f_charbig` varchar(1000) DEFAULT NULL
) ENGINE=TokuDB DEFAULT CHARSET=latin1
- PARTITION BY LIST (abs(`f_int1` % 2))
+ PARTITION BY LIST (abs(`f_int1` MOD 2))
SUBPARTITION BY KEY (`f_int1`)
SUBPARTITIONS 3
(PARTITION `part_1` VALUES IN (0) ENGINE = TokuDB,
@@ -46026,7 +46026,7 @@ t1 CREATE TABLE `t1` (
`f_char2` char(20) DEFAULT NULL,
`f_charbig` varchar(1000) DEFAULT NULL
) ENGINE=TokuDB DEFAULT CHARSET=latin1
- PARTITION BY LIST (`f_int1` % 4)
+ PARTITION BY LIST (`f_int1` MOD 4)
(PARTITION `part_3` VALUES IN (-3) ENGINE = TokuDB,
PARTITION `part_2` VALUES IN (-2) ENGINE = TokuDB,
PARTITION `part_1` VALUES IN (-1) ENGINE = TokuDB,
@@ -47909,7 +47909,7 @@ t1 CREATE TABLE `t1` (
`f_char2` char(20) DEFAULT NULL,
`f_charbig` varchar(1000) DEFAULT NULL
) ENGINE=TokuDB DEFAULT CHARSET=latin1
- PARTITION BY LIST (abs(`f_int1` % 3))
+ PARTITION BY LIST (abs(`f_int1` MOD 3))
SUBPARTITION BY HASH (`f_int1` + 1)
(PARTITION `part_1` VALUES IN (0)
(SUBPARTITION `sp11` ENGINE = TokuDB,
@@ -48383,7 +48383,7 @@ t1 CREATE TABLE `t1` (
`f_char2` char(20) DEFAULT NULL,
`f_charbig` varchar(1000) DEFAULT NULL
) ENGINE=TokuDB DEFAULT CHARSET=latin1
- PARTITION BY LIST (abs(`f_int1` % 2))
+ PARTITION BY LIST (abs(`f_int1` MOD 2))
SUBPARTITION BY KEY (`f_int1`)
SUBPARTITIONS 3
(PARTITION `part_1` VALUES IN (0) ENGINE = TokuDB,
@@ -49776,7 +49776,7 @@ t1 CREATE TABLE `t1` (
`f_char2` char(20) DEFAULT NULL,
`f_charbig` varchar(1000) DEFAULT NULL
) ENGINE=TokuDB DEFAULT CHARSET=latin1
- PARTITION BY LIST (`f_int1` % 4)
+ PARTITION BY LIST (`f_int1` MOD 4)
(PARTITION `part_3` VALUES IN (-3) ENGINE = TokuDB,
PARTITION `part_2` VALUES IN (-2) ENGINE = TokuDB,
PARTITION `part_1` VALUES IN (-1) ENGINE = TokuDB,
@@ -51659,7 +51659,7 @@ t1 CREATE TABLE `t1` (
`f_char2` char(20) DEFAULT NULL,
`f_charbig` varchar(1000) DEFAULT NULL
) ENGINE=TokuDB DEFAULT CHARSET=latin1
- PARTITION BY LIST (abs(`f_int1` % 3))
+ PARTITION BY LIST (abs(`f_int1` MOD 3))
SUBPARTITION BY HASH (`f_int1` + 1)
(PARTITION `part_1` VALUES IN (0)
(SUBPARTITION `sp11` ENGINE = TokuDB,
@@ -52133,7 +52133,7 @@ t1 CREATE TABLE `t1` (
`f_char2` char(20) DEFAULT NULL,
`f_charbig` varchar(1000) DEFAULT NULL
) ENGINE=TokuDB DEFAULT CHARSET=latin1
- PARTITION BY LIST (abs(`f_int1` % 2))
+ PARTITION BY LIST (abs(`f_int1` MOD 2))
SUBPARTITION BY KEY (`f_int1`)
SUBPARTITIONS 3
(PARTITION `part_1` VALUES IN (0) ENGINE = TokuDB,
@@ -53529,7 +53529,7 @@ t1 CREATE TABLE `t1` (
`f_char2` char(20) DEFAULT NULL,
`f_charbig` varchar(1000) DEFAULT NULL
) ENGINE=TokuDB DEFAULT CHARSET=latin1
- PARTITION BY LIST (`f_int1` % 4)
+ PARTITION BY LIST (`f_int1` MOD 4)
(PARTITION `part_3` VALUES IN (-3) ENGINE = TokuDB,
PARTITION `part_2` VALUES IN (-2) ENGINE = TokuDB,
PARTITION `part_1` VALUES IN (-1) ENGINE = TokuDB,
@@ -55416,7 +55416,7 @@ t1 CREATE TABLE `t1` (
`f_char2` char(20) DEFAULT NULL,
`f_charbig` varchar(1000) DEFAULT NULL
) ENGINE=TokuDB DEFAULT CHARSET=latin1
- PARTITION BY LIST (abs(`f_int1` % 3))
+ PARTITION BY LIST (abs(`f_int1` MOD 3))
SUBPARTITION BY HASH (`f_int1` + 1)
(PARTITION `part_1` VALUES IN (0)
(SUBPARTITION `sp11` ENGINE = TokuDB,
@@ -55891,7 +55891,7 @@ t1 CREATE TABLE `t1` (
`f_char2` char(20) DEFAULT NULL,
`f_charbig` varchar(1000) DEFAULT NULL
) ENGINE=TokuDB DEFAULT CHARSET=latin1
- PARTITION BY LIST (abs(`f_int1` % 2))
+ PARTITION BY LIST (abs(`f_int1` MOD 2))
SUBPARTITION BY KEY (`f_int1`)
SUBPARTITIONS 3
(PARTITION `part_1` VALUES IN (0) ENGINE = TokuDB,
@@ -57281,7 +57281,7 @@ t1 CREATE TABLE `t1` (
`f_char2` char(20) DEFAULT NULL,
`f_charbig` varchar(1000) DEFAULT NULL
) ENGINE=TokuDB DEFAULT CHARSET=latin1
- PARTITION BY LIST (`f_int1` % 4)
+ PARTITION BY LIST (`f_int1` MOD 4)
(PARTITION `part_3` VALUES IN (-3) ENGINE = TokuDB,
PARTITION `part_2` VALUES IN (-2) ENGINE = TokuDB,
PARTITION `part_1` VALUES IN (-1) ENGINE = TokuDB,
@@ -59156,7 +59156,7 @@ t1 CREATE TABLE `t1` (
`f_char2` char(20) DEFAULT NULL,
`f_charbig` varchar(1000) DEFAULT NULL
) ENGINE=TokuDB DEFAULT CHARSET=latin1
- PARTITION BY LIST (abs(`f_int1` % 3))
+ PARTITION BY LIST (abs(`f_int1` MOD 3))
SUBPARTITION BY HASH (`f_int1` + 1)
(PARTITION `part_1` VALUES IN (0)
(SUBPARTITION `sp11` ENGINE = TokuDB,
@@ -59628,7 +59628,7 @@ t1 CREATE TABLE `t1` (
`f_char2` char(20) DEFAULT NULL,
`f_charbig` varchar(1000) DEFAULT NULL
) ENGINE=TokuDB DEFAULT CHARSET=latin1
- PARTITION BY LIST (abs(`f_int1` % 2))
+ PARTITION BY LIST (abs(`f_int1` MOD 2))
SUBPARTITION BY KEY (`f_int1`)
SUBPARTITIONS 3
(PARTITION `part_1` VALUES IN (0) ENGINE = TokuDB,
@@ -61015,7 +61015,7 @@ t1 CREATE TABLE `t1` (
`f_char2` char(20) DEFAULT NULL,
`f_charbig` varchar(1000) DEFAULT NULL
) ENGINE=TokuDB DEFAULT CHARSET=latin1
- PARTITION BY LIST (`f_int1` % 4)
+ PARTITION BY LIST (`f_int1` MOD 4)
(PARTITION `part_3` VALUES IN (-3) ENGINE = TokuDB,
PARTITION `part_2` VALUES IN (-2) ENGINE = TokuDB,
PARTITION `part_1` VALUES IN (-1) ENGINE = TokuDB,
@@ -62890,7 +62890,7 @@ t1 CREATE TABLE `t1` (
`f_char2` char(20) DEFAULT NULL,
`f_charbig` varchar(1000) DEFAULT NULL
) ENGINE=TokuDB DEFAULT CHARSET=latin1
- PARTITION BY LIST (abs(`f_int1` % 3))
+ PARTITION BY LIST (abs(`f_int1` MOD 3))
SUBPARTITION BY HASH (`f_int1` + 1)
(PARTITION `part_1` VALUES IN (0)
(SUBPARTITION `sp11` ENGINE = TokuDB,
@@ -63362,7 +63362,7 @@ t1 CREATE TABLE `t1` (
`f_char2` char(20) DEFAULT NULL,
`f_charbig` varchar(1000) DEFAULT NULL
) ENGINE=TokuDB DEFAULT CHARSET=latin1
- PARTITION BY LIST (abs(`f_int1` % 2))
+ PARTITION BY LIST (abs(`f_int1` MOD 2))
SUBPARTITION BY KEY (`f_int1`)
SUBPARTITIONS 3
(PARTITION `part_1` VALUES IN (0) ENGINE = TokuDB,
@@ -65069,7 +65069,7 @@ t1 CREATE TABLE `t1` (
`f_char2` char(20) DEFAULT NULL,
`f_charbig` varchar(1000) DEFAULT NULL
) ENGINE=TokuDB DEFAULT CHARSET=latin1
- PARTITION BY LIST (`f_int1` % 4)
+ PARTITION BY LIST (`f_int1` MOD 4)
(PARTITION `part_3` VALUES IN (-3) ENGINE = TokuDB,
PARTITION `part_2` VALUES IN (-2) ENGINE = TokuDB,
PARTITION `part_1` VALUES IN (-1) ENGINE = TokuDB,
@@ -66944,7 +66944,7 @@ t1 CREATE TABLE `t1` (
`f_char2` char(20) DEFAULT NULL,
`f_charbig` varchar(1000) DEFAULT NULL
) ENGINE=TokuDB DEFAULT CHARSET=latin1
- PARTITION BY LIST (abs(`f_int1` % 3))
+ PARTITION BY LIST (abs(`f_int1` MOD 3))
SUBPARTITION BY HASH (`f_int1` + 1)
(PARTITION `part_1` VALUES IN (0)
(SUBPARTITION `sp11` ENGINE = TokuDB,
@@ -67416,7 +67416,7 @@ t1 CREATE TABLE `t1` (
`f_char2` char(20) DEFAULT NULL,
`f_charbig` varchar(1000) DEFAULT NULL
) ENGINE=TokuDB DEFAULT CHARSET=latin1
- PARTITION BY LIST (abs(`f_int1` % 2))
+ PARTITION BY LIST (abs(`f_int1` MOD 2))
SUBPARTITION BY KEY (`f_int1`)
SUBPARTITIONS 3
(PARTITION `part_1` VALUES IN (0) ENGINE = TokuDB,
@@ -68812,7 +68812,7 @@ t1 CREATE TABLE `t1` (
`f_char2` char(20) DEFAULT NULL,
`f_charbig` varchar(1000) DEFAULT NULL
) ENGINE=TokuDB DEFAULT CHARSET=latin1
- PARTITION BY LIST (`f_int1` % 4)
+ PARTITION BY LIST (`f_int1` MOD 4)
(PARTITION `part_3` VALUES IN (-3) ENGINE = TokuDB,
PARTITION `part_2` VALUES IN (-2) ENGINE = TokuDB,
PARTITION `part_1` VALUES IN (-1) ENGINE = TokuDB,
@@ -70695,7 +70695,7 @@ t1 CREATE TABLE `t1` (
`f_char2` char(20) DEFAULT NULL,
`f_charbig` varchar(1000) DEFAULT NULL
) ENGINE=TokuDB DEFAULT CHARSET=latin1
- PARTITION BY LIST (abs(`f_int1` % 3))
+ PARTITION BY LIST (abs(`f_int1` MOD 3))
SUBPARTITION BY HASH (`f_int1` + 1)
(PARTITION `part_1` VALUES IN (0)
(SUBPARTITION `sp11` ENGINE = TokuDB,
@@ -71169,7 +71169,7 @@ t1 CREATE TABLE `t1` (
`f_char2` char(20) DEFAULT NULL,
`f_charbig` varchar(1000) DEFAULT NULL
) ENGINE=TokuDB DEFAULT CHARSET=latin1
- PARTITION BY LIST (abs(`f_int1` % 2))
+ PARTITION BY LIST (abs(`f_int1` MOD 2))
SUBPARTITION BY KEY (`f_int1`)
SUBPARTITIONS 3
(PARTITION `part_1` VALUES IN (0) ENGINE = TokuDB,
@@ -72562,7 +72562,7 @@ t1 CREATE TABLE `t1` (
`f_char2` char(20) DEFAULT NULL,
`f_charbig` varchar(1000) DEFAULT NULL
) ENGINE=TokuDB DEFAULT CHARSET=latin1
- PARTITION BY LIST (`f_int1` % 4)
+ PARTITION BY LIST (`f_int1` MOD 4)
(PARTITION `part_3` VALUES IN (-3) ENGINE = TokuDB,
PARTITION `part_2` VALUES IN (-2) ENGINE = TokuDB,
PARTITION `part_1` VALUES IN (-1) ENGINE = TokuDB,
@@ -74445,7 +74445,7 @@ t1 CREATE TABLE `t1` (
`f_char2` char(20) DEFAULT NULL,
`f_charbig` varchar(1000) DEFAULT NULL
) ENGINE=TokuDB DEFAULT CHARSET=latin1
- PARTITION BY LIST (abs(`f_int1` % 3))
+ PARTITION BY LIST (abs(`f_int1` MOD 3))
SUBPARTITION BY HASH (`f_int1` + 1)
(PARTITION `part_1` VALUES IN (0)
(SUBPARTITION `sp11` ENGINE = TokuDB,
@@ -74919,7 +74919,7 @@ t1 CREATE TABLE `t1` (
`f_char2` char(20) DEFAULT NULL,
`f_charbig` varchar(1000) DEFAULT NULL
) ENGINE=TokuDB DEFAULT CHARSET=latin1
- PARTITION BY LIST (abs(`f_int1` % 2))
+ PARTITION BY LIST (abs(`f_int1` MOD 2))
SUBPARTITION BY KEY (`f_int1`)
SUBPARTITIONS 3
(PARTITION `part_1` VALUES IN (0) ENGINE = TokuDB,
@@ -76312,7 +76312,7 @@ t1 CREATE TABLE `t1` (
`f_char2` char(20) DEFAULT NULL,
`f_charbig` varchar(1000) DEFAULT NULL
) ENGINE=TokuDB DEFAULT CHARSET=latin1
- PARTITION BY LIST (`f_int1` % 4)
+ PARTITION BY LIST (`f_int1` MOD 4)
(PARTITION `part_3` VALUES IN (-3) ENGINE = TokuDB,
PARTITION `part_2` VALUES IN (-2) ENGINE = TokuDB,
PARTITION `part_1` VALUES IN (-1) ENGINE = TokuDB,
@@ -78195,7 +78195,7 @@ t1 CREATE TABLE `t1` (
`f_char2` char(20) DEFAULT NULL,
`f_charbig` varchar(1000) DEFAULT NULL
) ENGINE=TokuDB DEFAULT CHARSET=latin1
- PARTITION BY LIST (abs(`f_int1` % 3))
+ PARTITION BY LIST (abs(`f_int1` MOD 3))
SUBPARTITION BY HASH (`f_int1` + 1)
(PARTITION `part_1` VALUES IN (0)
(SUBPARTITION `sp11` ENGINE = TokuDB,
@@ -78669,7 +78669,7 @@ t1 CREATE TABLE `t1` (
`f_char2` char(20) DEFAULT NULL,
`f_charbig` varchar(1000) DEFAULT NULL
) ENGINE=TokuDB DEFAULT CHARSET=latin1
- PARTITION BY LIST (abs(`f_int1` % 2))
+ PARTITION BY LIST (abs(`f_int1` MOD 2))
SUBPARTITION BY KEY (`f_int1`)
SUBPARTITIONS 3
(PARTITION `part_1` VALUES IN (0) ENGINE = TokuDB,
@@ -80062,7 +80062,7 @@ t1 CREATE TABLE `t1` (
`f_char2` char(20) DEFAULT NULL,
`f_charbig` varchar(1000) DEFAULT NULL
) ENGINE=TokuDB DEFAULT CHARSET=latin1
- PARTITION BY LIST (`f_int1` % 4)
+ PARTITION BY LIST (`f_int1` MOD 4)
(PARTITION `part_3` VALUES IN (-3) ENGINE = TokuDB,
PARTITION `part_2` VALUES IN (-2) ENGINE = TokuDB,
PARTITION `part_1` VALUES IN (-1) ENGINE = TokuDB,
@@ -81945,7 +81945,7 @@ t1 CREATE TABLE `t1` (
`f_char2` char(20) DEFAULT NULL,
`f_charbig` varchar(1000) DEFAULT NULL
) ENGINE=TokuDB DEFAULT CHARSET=latin1
- PARTITION BY LIST (abs(`f_int1` % 3))
+ PARTITION BY LIST (abs(`f_int1` MOD 3))
SUBPARTITION BY HASH (`f_int1` + 1)
(PARTITION `part_1` VALUES IN (0)
(SUBPARTITION `sp11` ENGINE = TokuDB,
@@ -82419,7 +82419,7 @@ t1 CREATE TABLE `t1` (
`f_char2` char(20) DEFAULT NULL,
`f_charbig` varchar(1000) DEFAULT NULL
) ENGINE=TokuDB DEFAULT CHARSET=latin1
- PARTITION BY LIST (abs(`f_int1` % 2))
+ PARTITION BY LIST (abs(`f_int1` MOD 2))
SUBPARTITION BY KEY (`f_int1`)
SUBPARTITIONS 3
(PARTITION `part_1` VALUES IN (0) ENGINE = TokuDB,
@@ -83812,7 +83812,7 @@ t1 CREATE TABLE `t1` (
`f_char2` char(20) DEFAULT NULL,
`f_charbig` varchar(1000) DEFAULT NULL
) ENGINE=TokuDB DEFAULT CHARSET=latin1
- PARTITION BY LIST (`f_int1` % 4)
+ PARTITION BY LIST (`f_int1` MOD 4)
(PARTITION `part_3` VALUES IN (-3) ENGINE = TokuDB,
PARTITION `part_2` VALUES IN (-2) ENGINE = TokuDB,
PARTITION `part_1` VALUES IN (-1) ENGINE = TokuDB,
@@ -85695,7 +85695,7 @@ t1 CREATE TABLE `t1` (
`f_char2` char(20) DEFAULT NULL,
`f_charbig` varchar(1000) DEFAULT NULL
) ENGINE=TokuDB DEFAULT CHARSET=latin1
- PARTITION BY LIST (abs(`f_int1` % 3))
+ PARTITION BY LIST (abs(`f_int1` MOD 3))
SUBPARTITION BY HASH (`f_int1` + 1)
(PARTITION `part_1` VALUES IN (0)
(SUBPARTITION `sp11` ENGINE = TokuDB,
@@ -86169,7 +86169,7 @@ t1 CREATE TABLE `t1` (
`f_char2` char(20) DEFAULT NULL,
`f_charbig` varchar(1000) DEFAULT NULL
) ENGINE=TokuDB DEFAULT CHARSET=latin1
- PARTITION BY LIST (abs(`f_int1` % 2))
+ PARTITION BY LIST (abs(`f_int1` MOD 2))
SUBPARTITION BY KEY (`f_int1`)
SUBPARTITIONS 3
(PARTITION `part_1` VALUES IN (0) ENGINE = TokuDB,
diff --git a/storage/tokudb/mysql-test/tokudb_parts/r/partition_basic_tokudb.result b/storage/tokudb/mysql-test/tokudb_parts/r/partition_basic_tokudb.result
index fca6cbe169e..ae20097fdda 100644
--- a/storage/tokudb/mysql-test/tokudb_parts/r/partition_basic_tokudb.result
+++ b/storage/tokudb/mysql-test/tokudb_parts/r/partition_basic_tokudb.result
@@ -991,7 +991,7 @@ t1 CREATE TABLE `t1` (
`f_char2` char(20) DEFAULT NULL,
`f_charbig` varchar(1000) DEFAULT NULL
) ENGINE=TokuDB DEFAULT CHARSET=latin1
- PARTITION BY LIST (`f_int1` % 4)
+ PARTITION BY LIST (`f_int1` MOD 4)
(PARTITION `part_3` VALUES IN (-3) ENGINE = TokuDB,
PARTITION `part_2` VALUES IN (-2) ENGINE = TokuDB,
PARTITION `part_1` VALUES IN (-1) ENGINE = TokuDB,
@@ -2866,7 +2866,7 @@ t1 CREATE TABLE `t1` (
`f_char2` char(20) DEFAULT NULL,
`f_charbig` varchar(1000) DEFAULT NULL
) ENGINE=TokuDB DEFAULT CHARSET=latin1
- PARTITION BY LIST (abs(`f_int1` % 3))
+ PARTITION BY LIST (abs(`f_int1` MOD 3))
SUBPARTITION BY HASH (`f_int1` + 1)
(PARTITION `part1` VALUES IN (0)
(SUBPARTITION `sp11` ENGINE = TokuDB,
@@ -3337,7 +3337,7 @@ t1 CREATE TABLE `t1` (
`f_char2` char(20) DEFAULT NULL,
`f_charbig` varchar(1000) DEFAULT NULL
) ENGINE=TokuDB DEFAULT CHARSET=latin1
- PARTITION BY LIST (abs(`f_int1` % 2))
+ PARTITION BY LIST (abs(`f_int1` MOD 2))
SUBPARTITION BY KEY (`f_int1`)
SUBPARTITIONS 3
(PARTITION `part1` VALUES IN (0) ENGINE = TokuDB,
@@ -4716,7 +4716,7 @@ t1 CREATE TABLE `t1` (
`f_char2` char(20) DEFAULT NULL,
`f_charbig` varchar(1000) DEFAULT NULL
) ENGINE=TokuDB DEFAULT CHARSET=latin1
- PARTITION BY LIST ((`f_int1` + `f_int2`) % 4)
+ PARTITION BY LIST ((`f_int1` + `f_int2`) MOD 4)
(PARTITION `part_3` VALUES IN (-3) ENGINE = TokuDB,
PARTITION `part_2` VALUES IN (-2) ENGINE = TokuDB,
PARTITION `part_1` VALUES IN (-1) ENGINE = TokuDB,
@@ -6585,7 +6585,7 @@ t1 CREATE TABLE `t1` (
`f_char2` char(20) DEFAULT NULL,
`f_charbig` varchar(1000) DEFAULT NULL
) ENGINE=TokuDB DEFAULT CHARSET=latin1
- PARTITION BY LIST (abs(`f_int1` % 3))
+ PARTITION BY LIST (abs(`f_int1` MOD 3))
SUBPARTITION BY HASH (`f_int2` + 1)
(PARTITION `part1` VALUES IN (0)
(SUBPARTITION `sp11` ENGINE = TokuDB,
@@ -7056,7 +7056,7 @@ t1 CREATE TABLE `t1` (
`f_char2` char(20) DEFAULT NULL,
`f_charbig` varchar(1000) DEFAULT NULL
) ENGINE=TokuDB DEFAULT CHARSET=latin1
- PARTITION BY LIST (abs(`f_int1` % 2))
+ PARTITION BY LIST (abs(`f_int1` MOD 2))
SUBPARTITION BY KEY (`f_int2`)
SUBPARTITIONS 3
(PARTITION `part1` VALUES IN (0) ENGINE = TokuDB,
@@ -8515,7 +8515,7 @@ t1 CREATE TABLE `t1` (
PRIMARY KEY (`f_int2`,`f_int1`),
UNIQUE KEY `uidx1` (`f_int1`,`f_int2`)
) ENGINE=TokuDB DEFAULT CHARSET=latin1
- PARTITION BY LIST (`f_int1` % 4)
+ PARTITION BY LIST (`f_int1` MOD 4)
(PARTITION `part_3` VALUES IN (-3) ENGINE = TokuDB,
PARTITION `part_2` VALUES IN (-2) ENGINE = TokuDB,
PARTITION `part_1` VALUES IN (-1) ENGINE = TokuDB,
@@ -10538,7 +10538,7 @@ t1 CREATE TABLE `t1` (
PRIMARY KEY (`f_int2`,`f_int1`),
UNIQUE KEY `uidx1` (`f_int1`,`f_int2`)
) ENGINE=TokuDB DEFAULT CHARSET=latin1
- PARTITION BY LIST (abs(`f_int1` % 3))
+ PARTITION BY LIST (abs(`f_int1` MOD 3))
SUBPARTITION BY HASH (`f_int1` + 1)
(PARTITION `part1` VALUES IN (0)
(SUBPARTITION `sp11` ENGINE = TokuDB,
@@ -11046,7 +11046,7 @@ t1 CREATE TABLE `t1` (
PRIMARY KEY (`f_int2`,`f_int1`),
UNIQUE KEY `uidx1` (`f_int1`,`f_int2`)
) ENGINE=TokuDB DEFAULT CHARSET=latin1
- PARTITION BY LIST (abs(`f_int1` % 2))
+ PARTITION BY LIST (abs(`f_int1` MOD 2))
SUBPARTITION BY KEY (`f_int1`)
SUBPARTITIONS 3
(PARTITION `part1` VALUES IN (0) ENGINE = TokuDB,
@@ -12535,7 +12535,7 @@ t1 CREATE TABLE `t1` (
PRIMARY KEY (`f_int1`,`f_int2`),
UNIQUE KEY `uidx1` (`f_int2`,`f_int1`)
) ENGINE=TokuDB DEFAULT CHARSET=latin1
- PARTITION BY LIST (`f_int1` % 4)
+ PARTITION BY LIST (`f_int1` MOD 4)
(PARTITION `part_3` VALUES IN (-3) ENGINE = TokuDB,
PARTITION `part_2` VALUES IN (-2) ENGINE = TokuDB,
PARTITION `part_1` VALUES IN (-1) ENGINE = TokuDB,
@@ -14558,7 +14558,7 @@ t1 CREATE TABLE `t1` (
PRIMARY KEY (`f_int1`,`f_int2`),
UNIQUE KEY `uidx1` (`f_int2`,`f_int1`)
) ENGINE=TokuDB DEFAULT CHARSET=latin1
- PARTITION BY LIST (abs(`f_int1` % 3))
+ PARTITION BY LIST (abs(`f_int1` MOD 3))
SUBPARTITION BY HASH (`f_int1` + 1)
(PARTITION `part1` VALUES IN (0)
(SUBPARTITION `sp11` ENGINE = TokuDB,
@@ -15066,7 +15066,7 @@ t1 CREATE TABLE `t1` (
PRIMARY KEY (`f_int1`,`f_int2`),
UNIQUE KEY `uidx1` (`f_int2`,`f_int1`)
) ENGINE=TokuDB DEFAULT CHARSET=latin1
- PARTITION BY LIST (abs(`f_int1` % 2))
+ PARTITION BY LIST (abs(`f_int1` MOD 2))
SUBPARTITION BY KEY (`f_int1`)
SUBPARTITIONS 3
(PARTITION `part1` VALUES IN (0) ENGINE = TokuDB,
@@ -16587,7 +16587,7 @@ t1 CREATE TABLE `t1` (
UNIQUE KEY `uidx1` (`f_int1`,`f_int2`),
UNIQUE KEY `uidx2` (`f_int2`,`f_int1`)
) ENGINE=TokuDB DEFAULT CHARSET=latin1
- PARTITION BY LIST (`f_int1` % 4)
+ PARTITION BY LIST (`f_int1` MOD 4)
(PARTITION `part_3` VALUES IN (-3) ENGINE = TokuDB,
PARTITION `part_2` VALUES IN (-2) ENGINE = TokuDB,
PARTITION `part_1` VALUES IN (-1) ENGINE = TokuDB,
@@ -18674,7 +18674,7 @@ t1 CREATE TABLE `t1` (
UNIQUE KEY `uidx1` (`f_int1`,`f_int2`),
UNIQUE KEY `uidx2` (`f_int2`,`f_int1`)
) ENGINE=TokuDB DEFAULT CHARSET=latin1
- PARTITION BY LIST (abs(`f_int1` % 3))
+ PARTITION BY LIST (abs(`f_int1` MOD 3))
SUBPARTITION BY HASH (`f_int1` + 1)
(PARTITION `part1` VALUES IN (0)
(SUBPARTITION `sp11` ENGINE = TokuDB,
@@ -19198,7 +19198,7 @@ t1 CREATE TABLE `t1` (
UNIQUE KEY `uidx1` (`f_int1`,`f_int2`),
UNIQUE KEY `uidx2` (`f_int2`,`f_int1`)
) ENGINE=TokuDB DEFAULT CHARSET=latin1
- PARTITION BY LIST (abs(`f_int1` % 2))
+ PARTITION BY LIST (abs(`f_int1` MOD 2))
SUBPARTITION BY KEY (`f_int1`)
SUBPARTITIONS 3
(PARTITION `part1` VALUES IN (0) ENGINE = TokuDB,
@@ -20708,7 +20708,7 @@ t1 CREATE TABLE `t1` (
PRIMARY KEY (`f_int2`,`f_int1`),
UNIQUE KEY `uidx1` (`f_int1`,`f_int2`)
) ENGINE=TokuDB DEFAULT CHARSET=latin1
- PARTITION BY LIST ((`f_int1` + `f_int2`) % 4)
+ PARTITION BY LIST ((`f_int1` + `f_int2`) MOD 4)
(PARTITION `part_3` VALUES IN (-3) ENGINE = TokuDB,
PARTITION `part_2` VALUES IN (-2) ENGINE = TokuDB,
PARTITION `part_1` VALUES IN (-1) ENGINE = TokuDB,
@@ -22725,7 +22725,7 @@ t1 CREATE TABLE `t1` (
PRIMARY KEY (`f_int2`,`f_int1`),
UNIQUE KEY `uidx1` (`f_int1`,`f_int2`)
) ENGINE=TokuDB DEFAULT CHARSET=latin1
- PARTITION BY LIST (abs(`f_int1` % 3))
+ PARTITION BY LIST (abs(`f_int1` MOD 3))
SUBPARTITION BY HASH (`f_int2` + 1)
(PARTITION `part1` VALUES IN (0)
(SUBPARTITION `sp11` ENGINE = TokuDB,
@@ -23233,7 +23233,7 @@ t1 CREATE TABLE `t1` (
PRIMARY KEY (`f_int2`,`f_int1`),
UNIQUE KEY `uidx1` (`f_int1`,`f_int2`)
) ENGINE=TokuDB DEFAULT CHARSET=latin1
- PARTITION BY LIST (abs(`f_int1` % 2))
+ PARTITION BY LIST (abs(`f_int1` MOD 2))
SUBPARTITION BY KEY (`f_int2`)
SUBPARTITIONS 3
(PARTITION `part1` VALUES IN (0) ENGINE = TokuDB,
@@ -24722,7 +24722,7 @@ t1 CREATE TABLE `t1` (
PRIMARY KEY (`f_int1`,`f_int2`),
UNIQUE KEY `uidx1` (`f_int2`,`f_int1`)
) ENGINE=TokuDB DEFAULT CHARSET=latin1
- PARTITION BY LIST ((`f_int1` + `f_int2`) % 4)
+ PARTITION BY LIST ((`f_int1` + `f_int2`) MOD 4)
(PARTITION `part_3` VALUES IN (-3) ENGINE = TokuDB,
PARTITION `part_2` VALUES IN (-2) ENGINE = TokuDB,
PARTITION `part_1` VALUES IN (-1) ENGINE = TokuDB,
@@ -26739,7 +26739,7 @@ t1 CREATE TABLE `t1` (
PRIMARY KEY (`f_int1`,`f_int2`),
UNIQUE KEY `uidx1` (`f_int2`,`f_int1`)
) ENGINE=TokuDB DEFAULT CHARSET=latin1
- PARTITION BY LIST (abs(`f_int1` % 3))
+ PARTITION BY LIST (abs(`f_int1` MOD 3))
SUBPARTITION BY HASH (`f_int2` + 1)
(PARTITION `part1` VALUES IN (0)
(SUBPARTITION `sp11` ENGINE = TokuDB,
@@ -27247,7 +27247,7 @@ t1 CREATE TABLE `t1` (
PRIMARY KEY (`f_int1`,`f_int2`),
UNIQUE KEY `uidx1` (`f_int2`,`f_int1`)
) ENGINE=TokuDB DEFAULT CHARSET=latin1
- PARTITION BY LIST (abs(`f_int1` % 2))
+ PARTITION BY LIST (abs(`f_int1` MOD 2))
SUBPARTITION BY KEY (`f_int2`)
SUBPARTITIONS 3
(PARTITION `part1` VALUES IN (0) ENGINE = TokuDB,
@@ -28768,7 +28768,7 @@ t1 CREATE TABLE `t1` (
UNIQUE KEY `uidx1` (`f_int1`,`f_int2`),
UNIQUE KEY `uidx2` (`f_int2`,`f_int1`)
) ENGINE=TokuDB DEFAULT CHARSET=latin1
- PARTITION BY LIST ((`f_int1` + `f_int2`) % 4)
+ PARTITION BY LIST ((`f_int1` + `f_int2`) MOD 4)
(PARTITION `part_3` VALUES IN (-3) ENGINE = TokuDB,
PARTITION `part_2` VALUES IN (-2) ENGINE = TokuDB,
PARTITION `part_1` VALUES IN (-1) ENGINE = TokuDB,
@@ -30849,7 +30849,7 @@ t1 CREATE TABLE `t1` (
UNIQUE KEY `uidx1` (`f_int1`,`f_int2`),
UNIQUE KEY `uidx2` (`f_int2`,`f_int1`)
) ENGINE=TokuDB DEFAULT CHARSET=latin1
- PARTITION BY LIST (abs(`f_int1` % 3))
+ PARTITION BY LIST (abs(`f_int1` MOD 3))
SUBPARTITION BY HASH (`f_int2` + 1)
(PARTITION `part1` VALUES IN (0)
(SUBPARTITION `sp11` ENGINE = TokuDB,
@@ -31373,7 +31373,7 @@ t1 CREATE TABLE `t1` (
UNIQUE KEY `uidx1` (`f_int1`,`f_int2`),
UNIQUE KEY `uidx2` (`f_int2`,`f_int1`)
) ENGINE=TokuDB DEFAULT CHARSET=latin1
- PARTITION BY LIST (abs(`f_int1` % 2))
+ PARTITION BY LIST (abs(`f_int1` MOD 2))
SUBPARTITION BY KEY (`f_int2`)
SUBPARTITIONS 3
(PARTITION `part1` VALUES IN (0) ENGINE = TokuDB,
diff --git a/storage/tokudb/mysql-test/tokudb_parts/r/partition_syntax_tokudb.result b/storage/tokudb/mysql-test/tokudb_parts/r/partition_syntax_tokudb.result
index c84b4601332..1a4bf02cc7c 100644
--- a/storage/tokudb/mysql-test/tokudb_parts/r/partition_syntax_tokudb.result
+++ b/storage/tokudb/mysql-test/tokudb_parts/r/partition_syntax_tokudb.result
@@ -658,7 +658,7 @@ t1 CREATE TABLE `t1` (
`f_char2` char(20) DEFAULT NULL,
`f_charbig` varchar(1000) DEFAULT NULL
) ENGINE=TokuDB DEFAULT CHARSET=latin1
- PARTITION BY LIST (`f_int1` % 2)
+ PARTITION BY LIST (`f_int1` MOD 2)
(PARTITION `part1` VALUES IN (NULL) ENGINE = TokuDB,
PARTITION `part3` VALUES IN (1) ENGINE = TokuDB)
@@ -685,7 +685,7 @@ t1 CREATE TABLE `t1` (
`f_char2` char(20) DEFAULT NULL,
`f_charbig` varchar(1000) DEFAULT NULL
) ENGINE=TokuDB DEFAULT CHARSET=latin1
- PARTITION BY LIST (`f_int1` % 2)
+ PARTITION BY LIST (`f_int1` MOD 2)
(PARTITION `part1` VALUES IN (NULL) ENGINE = TokuDB,
PARTITION `part2` VALUES IN (0) ENGINE = TokuDB,
PARTITION `part3` VALUES IN (1) ENGINE = TokuDB)
diff --git a/storage/tokudb/tokudb_dir_cmd.cc b/storage/tokudb/tokudb_dir_cmd.cc
index f9995302d49..5431cbab7aa 100644
--- a/storage/tokudb/tokudb_dir_cmd.cc
+++ b/storage/tokudb/tokudb_dir_cmd.cc
@@ -42,14 +42,14 @@ static int MDL_and_TDC(THD *thd,
const char *table,
const dir_cmd_callbacks &cb) {
int error;
- LEX_STRING db_arg;
- LEX_STRING table_arg;
+ LEX_CSTRING db_arg;
+ LEX_CSTRING table_arg;
db_arg.str = const_cast<char *>(db);
db_arg.length = strlen(db);;
table_arg.str = const_cast<char *>(table);
table_arg.length = strlen(table);
- Table_ident table_ident(thd, db_arg, table_arg, true);;
+ Table_ident table_ident(thd, &db_arg, &table_arg, true);
thd->lex->select_lex.add_table_to_list(
thd, &table_ident, NULL, 1, TL_UNLOCK, MDL_EXCLUSIVE, 0, 0, 0);
/* The lock will be released at the end of mysq_execute_command() */
diff --git a/storage/xtradb/handler/ha_innodb.cc b/storage/xtradb/handler/ha_innodb.cc
index 960135b42a3..396876999ce 100644
--- a/storage/xtradb/handler/ha_innodb.cc
+++ b/storage/xtradb/handler/ha_innodb.cc
@@ -3348,7 +3348,7 @@ innobase_query_caching_of_table_permitted(
THD* thd, /*!< in: thd of the user who is trying to
store a result to the query cache or
retrieve it */
- char* full_name, /*!< in: normalized path to the table */
+ const char* full_name, /*!< in: normalized path to the table */
uint full_name_len, /*!< in: length of the normalized path
to the table */
ulonglong *unused) /*!< unused for this engine */
@@ -6180,7 +6180,7 @@ ha_innobase::innobase_initialize_autoinc()
ut_a(prebuilt->trx == thd_to_trx(user_thd));
- col_name = field->field_name;
+ col_name = field->field_name.str;
index = innobase_get_index(table->s->next_number_index);
/* Execute SELECT MAX(col_name) FROM TABLE; */
@@ -8098,7 +8098,7 @@ build_template_field(
"MySQL table %s field %lu name %s",
table->s->table_name.str,
j,
- table->field[j]->field_name);
+ table->field[j]->field_name.str);
}
ib_logf(IB_LOG_LEVEL_ERROR,
@@ -9096,7 +9096,7 @@ calc_row_difference(
if (field_mysql_type == MYSQL_TYPE_LONGLONG
&& prebuilt->table->fts
&& innobase_strcasecmp(
- field->field_name, FTS_DOC_ID_COL_NAME) == 0) {
+ field->field_name.str, FTS_DOC_ID_COL_NAME) == 0) {
doc_id = (doc_id_t) mach_read_from_n_little_endian(
n_ptr, 8);
if (doc_id == 0) {
@@ -9360,7 +9360,7 @@ int
ha_innobase::update_row(
/*====================*/
const uchar* old_row, /*!< in: old row in MySQL format */
- uchar* new_row) /*!< in: new row in MySQL format */
+ const uchar* new_row) /*!< in: new row in MySQL format */
{
upd_t* uvect;
dberr_t error;
@@ -11263,7 +11263,7 @@ create_table_check_doc_id_col(
col_len = field->pack_length();
- if (innobase_strcasecmp(field->field_name,
+ if (innobase_strcasecmp(field->field_name.str,
FTS_DOC_ID_COL_NAME) == 0) {
/* Note the name is case sensitive due to
@@ -11271,7 +11271,7 @@ create_table_check_doc_id_col(
if (col_type == DATA_INT
&& !field->real_maybe_null()
&& col_len == sizeof(doc_id_t)
- && (strcmp(field->field_name,
+ && (strcmp(field->field_name.str,
FTS_DOC_ID_COL_NAME) == 0)) {
*doc_id_col = i;
} else {
@@ -11283,7 +11283,7 @@ create_table_check_doc_id_col(
"of BIGINT NOT NULL type, and named "
"in all capitalized characters");
my_error(ER_WRONG_COLUMN_NAME, MYF(0),
- field->field_name);
+ field->field_name.str);
*doc_id_col = ULINT_UNDEFINED;
}
@@ -11429,7 +11429,7 @@ create_table_def(
"column type and try to re-create "
"the table with an appropriate "
"column type.",
- table->name, field->field_name);
+ table->name, field->field_name.str);
goto err_col;
}
@@ -11479,9 +11479,9 @@ create_table_def(
/* First check whether the column to be added has a
system reserved name. */
- if (dict_col_name_is_reserved(field->field_name)){
+ if (dict_col_name_is_reserved(field->field_name.str)){
my_error(ER_WRONG_COLUMN_NAME, MYF(0),
- field->field_name);
+ field->field_name.str);
err_col:
dict_mem_table_free(table);
mem_heap_free(heap);
@@ -11492,7 +11492,7 @@ err_col:
}
dict_mem_table_add_col(table, heap,
- field->field_name,
+ field->field_name.str,
col_type,
dtype_form_prtype(
(ulint) field->type()
@@ -11570,7 +11570,7 @@ create_index(
for (ulint i = 0; i < key->user_defined_key_parts; i++) {
KEY_PART_INFO* key_part = key->key_part + i;
dict_mem_index_add_field(
- index, key_part->field->field_name, 0);
+ index, key_part->field->field_name.str, 0);
}
DBUG_RETURN(convert_error_code_to_mysql(
@@ -11621,8 +11621,8 @@ create_index(
field = form->field[j];
if (0 == innobase_strcasecmp(
- field->field_name,
- key_part->field->field_name)) {
+ field->field_name.str,
+ key_part->field->field_name.str)) {
/* Found the corresponding column */
goto found;
@@ -11655,7 +11655,7 @@ found:
"inappropriate data type. Table "
"name %s, column name %s.",
table_name,
- key_part->field->field_name);
+ key_part->field->field_name.str);
prefix_len = 0;
}
@@ -11666,7 +11666,7 @@ found:
field_lengths[i] = key_part->length;
dict_mem_index_add_field(
- index, key_part->field->field_name, prefix_len);
+ index, key_part->field->field_name.str, prefix_len);
}
ut_ad(key->flags & HA_FULLTEXT || !(index->type & DICT_FTS));
@@ -12142,7 +12142,7 @@ innobase_table_flags(
/* Do a pre-check on FTS DOC ID index */
if (!(key->flags & HA_NOSAME)
|| strcmp(key->name, FTS_DOC_ID_INDEX_NAME)
- || strcmp(key->key_part[0].field->field_name,
+ || strcmp(key->key_part[0].field->field_name.str,
FTS_DOC_ID_COL_NAME)) {
fts_doc_id_index_bad = key->name;
}
@@ -16896,7 +16896,7 @@ my_bool
ha_innobase::register_query_cache_table(
/*====================================*/
THD* thd, /*!< in: user thread handle */
- char* table_key, /*!< in: normalized path to the
+ const char* table_key, /*!< in: normalized path to the
table */
uint key_length, /*!< in: length of the normalized
path to the table */
diff --git a/storage/xtradb/handler/ha_innodb.h b/storage/xtradb/handler/ha_innodb.h
index c5b0e723702..c2905d80642 100644
--- a/storage/xtradb/handler/ha_innodb.h
+++ b/storage/xtradb/handler/ha_innodb.h
@@ -148,7 +148,7 @@ class ha_innobase: public handler
my_bool is_fake_change_enabled(THD *thd);
int write_row(uchar * buf);
- int update_row(const uchar * old_data, uchar * new_data);
+ int update_row(const uchar * old_data, const uchar * new_data);
int delete_row(const uchar * buf);
bool was_semi_consistent_read();
void try_semi_consistent_read(bool yes);
@@ -239,7 +239,7 @@ class ha_innobase: public handler
/*
ask handler about permission to cache table during query registration
*/
- my_bool register_query_cache_table(THD *thd, char *table_key,
+ my_bool register_query_cache_table(THD *thd, const char *table_key,
uint key_length,
qc_engine_callback *call_back,
ulonglong *engine_data);
diff --git a/storage/xtradb/handler/handler0alter.cc b/storage/xtradb/handler/handler0alter.cc
index d5faaabc9d2..70603499dad 100644
--- a/storage/xtradb/handler/handler0alter.cc
+++ b/storage/xtradb/handler/handler0alter.cc
@@ -424,7 +424,7 @@ ha_innobase::check_if_supported_inplace_alter(
is TIMESTAMP and it is defined as NOT NULL and
it has either constant default or function default
we must use "Copy" method. */
- if (is_timestamp_type(def->sql_type)) {
+ if (def->is_timestamp_type()) {
if ((def->flags & NOT_NULL_FLAG) != 0 && // NOT NULL
(def->default_value != NULL || // constant default ?
def->unireg_check != Field::NONE)) { // function default
@@ -505,7 +505,7 @@ ha_innobase::check_if_supported_inplace_alter(
&& innobase_fulltext_exist(altered_table)
&& !my_strcasecmp(
system_charset_info,
- key_part->field->field_name,
+ key_part->field->field_name.str,
FTS_DOC_ID_COL_NAME)) {
ha_alter_info->unsupported_reason = innobase_get_err_msg(
ER_ALTER_OPERATION_NOT_SUPPORTED_REASON_HIDDEN_FTS);
@@ -562,7 +562,7 @@ ha_innobase::check_if_supported_inplace_alter(
if (!my_strcasecmp(
system_charset_info,
- (*fp)->field_name,
+ (*fp)->field_name.str,
FTS_DOC_ID_COL_NAME)) {
ha_alter_info->unsupported_reason = innobase_get_err_msg(
ER_ALTER_OPERATION_NOT_SUPPORTED_REASON_CHANGE_FTS);
@@ -831,7 +831,7 @@ no_match:
}
if (innobase_strcasecmp(col_names[j],
- key_part.field->field_name)) {
+ key_part.field->field_name.str)) {
/* Name mismatch */
goto no_match;
}
@@ -1486,7 +1486,7 @@ name_ok:
}
my_error(ER_WRONG_KEY_COLUMN, MYF(0), "InnoDB",
- field->field_name);
+ field->field_name.str);
return(ER_WRONG_KEY_COLUMN);
}
@@ -1502,7 +1502,7 @@ name_ok:
}
my_error(ER_WRONG_KEY_COLUMN, MYF(0), "InnoDB",
- field->field_name);
+ field->field_name.str);
return(ER_WRONG_KEY_COLUMN);
}
}
@@ -1673,19 +1673,19 @@ innobase_fts_check_doc_id_col(
stored_in_db()))
sql_idx++;
if (my_strcasecmp(system_charset_info,
- field->field_name, FTS_DOC_ID_COL_NAME)) {
+ field->field_name.str, FTS_DOC_ID_COL_NAME)) {
continue;
}
- if (strcmp(field->field_name, FTS_DOC_ID_COL_NAME)) {
+ if (strcmp(field->field_name.str, FTS_DOC_ID_COL_NAME)) {
my_error(ER_WRONG_COLUMN_NAME, MYF(0),
- field->field_name);
+ field->field_name.str);
} else if (field->type() != MYSQL_TYPE_LONGLONG
|| field->pack_length() != 8
|| field->real_maybe_null()
|| !(field->flags & UNSIGNED_FLAG)) {
my_error(ER_INNODB_FT_WRONG_DOCID_COLUMN, MYF(0),
- field->field_name);
+ field->field_name.str);
} else {
*fts_doc_col_no = i;
}
@@ -1756,7 +1756,7 @@ innobase_fts_check_doc_id_index(
if ((key.flags & HA_NOSAME)
&& key.user_defined_key_parts == 1
&& !strcmp(key.name, FTS_DOC_ID_INDEX_NAME)
- && !strcmp(key.key_part[0].field->field_name,
+ && !strcmp(key.key_part[0].field->field_name.str,
FTS_DOC_ID_COL_NAME)) {
if (fts_doc_col_no) {
*fts_doc_col_no = ULINT_UNDEFINED;
@@ -1835,7 +1835,7 @@ innobase_fts_check_doc_id_index_in_def(
if (!(key->flags & HA_NOSAME)
|| key->user_defined_key_parts != 1
|| strcmp(key->name, FTS_DOC_ID_INDEX_NAME)
- || strcmp(key->key_part[0].field->field_name,
+ || strcmp(key->key_part[0].field->field_name.str,
FTS_DOC_ID_COL_NAME)) {
return(FTS_INCORRECT_DOC_ID_INDEX);
}
@@ -2441,7 +2441,7 @@ innobase_check_foreigns(
if (!new_field || (new_field->flags & NOT_NULL_FLAG)) {
if (innobase_check_foreigns_low(
user_table, drop_fk, n_drop_fk,
- (*fp)->field_name, !new_field)) {
+ (*fp)->field_name.str, !new_field)) {
return(true);
}
}
@@ -2670,7 +2670,7 @@ innobase_get_col_names(
for (uint old_i = 0; table->field[old_i]; old_i++) {
if (new_field->field == table->field[old_i]) {
- cols[old_i] = new_field->field_name;
+ cols[old_i] = new_field->field_name.str;
break;
}
}
@@ -2972,7 +2972,7 @@ prepare_inplace_alter_table_dict(
dict_mem_table_free(
ctx->new_table);
my_error(ER_WRONG_KEY_COLUMN, MYF(0),
- field->field_name);
+ field->field_name.str);
goto new_clustered_failed;
}
} else {
@@ -2999,16 +2999,16 @@ prepare_inplace_alter_table_dict(
}
}
- if (dict_col_name_is_reserved(field->field_name)) {
+ if (dict_col_name_is_reserved(field->field_name.str)) {
dict_mem_table_free(ctx->new_table);
my_error(ER_WRONG_COLUMN_NAME, MYF(0),
- field->field_name);
+ field->field_name.str);
goto new_clustered_failed;
}
dict_mem_table_add_col(
ctx->new_table, ctx->heap,
- field->field_name,
+ field->field_name.str,
col_type,
dtype_form_prtype(field_type, charset_no),
col_len);
@@ -3647,7 +3647,7 @@ err_exit_no_heap:
cf_it.rewind();
while (Create_field* cf = cf_it++) {
if (cf->field == *fp) {
- name = cf->field_name;
+ name = cf->field_name.str;
goto check_if_ok_to_rename;
}
}
@@ -3657,7 +3657,7 @@ check_if_ok_to_rename:
/* Prohibit renaming a column from FTS_DOC_ID
if full-text indexes exist. */
if (!my_strcasecmp(system_charset_info,
- (*fp)->field_name,
+ (*fp)->field_name.str,
FTS_DOC_ID_COL_NAME)
&& innobase_fulltext_exist(altered_table)) {
my_error(ER_INNODB_FT_WRONG_DOCID_COLUMN,
@@ -4834,8 +4834,8 @@ innobase_rename_columns_try(
if (cf->field == *fp) {
if (innobase_rename_column_try(
ctx->old_table, trx, table_name, i,
- cf->field->field_name,
- cf->field_name,
+ cf->field->field_name.str,
+ cf->field_name.str,
ctx->need_rebuild())) {
return(true);
}
@@ -4882,8 +4882,8 @@ innobase_rename_columns_cache(
while (Create_field* cf = cf_it++) {
if (cf->field == *fp) {
dict_mem_table_col_rename(user_table, i,
- cf->field->field_name,
- cf->field_name);
+ cf->field->field_name.str,
+ cf->field_name.str);
goto processed_field;
}
}
@@ -4945,7 +4945,7 @@ commit_get_autoinc(
dict_table_autoinc_lock(ctx->old_table);
err = row_search_max_autoinc(
- index, autoinc_field->field_name, &max_value_table);
+ index, autoinc_field->field_name.str, &max_value_table);
if (err != DB_SUCCESS) {
ut_ad(0);