-rw-r--r-- client/mysql.cc | 56
-rw-r--r-- client/mysqldump.c | 60
-rw-r--r-- client/mysqltest.cc | 4
-rw-r--r-- cmake/package_name.cmake | 4
-rw-r--r-- extra/innochecksum.cc | 55
-rw-r--r-- extra/yassl/README | 18
-rw-r--r-- extra/yassl/certs/dsa-cert.pem | 38
-rw-r--r-- extra/yassl/include/openssl/ssl.h | 2
-rw-r--r-- extra/yassl/src/ssl.cpp | 60
-rw-r--r-- extra/yassl/taocrypt/include/aes.hpp | 58
-rw-r--r-- extra/yassl/taocrypt/include/integer.hpp | 3
-rw-r--r-- extra/yassl/taocrypt/src/aes.cpp | 172
-rw-r--r-- extra/yassl/taocrypt/src/asn.cpp | 24
-rw-r--r-- extra/yassl/taocrypt/src/dsa.cpp | 16
-rw-r--r-- extra/yassl/taocrypt/src/integer.cpp | 5
-rw-r--r-- extra/yassl/taocrypt/test/test.cpp | 3
-rw-r--r-- extra/yassl/testsuite/test.hpp | 2
-rw-r--r-- include/byte_order_generic_x86.h | 10
-rw-r--r-- include/byte_order_generic_x86_64.h | 8
-rw-r--r-- libmysql/libmysql.c | 5
-rw-r--r-- mysql-test/extra/binlog_tests/database.test | 2
-rw-r--r-- mysql-test/r/alter_table.result | 61
-rw-r--r-- mysql-test/r/create_or_replace.result | 11
-rw-r--r-- mysql-test/r/drop.result | 6
-rw-r--r-- mysql-test/r/information_schema.result | 8
-rw-r--r-- mysql-test/r/mysql.result | 8
-rw-r--r-- mysql-test/r/mysql_not_windows.result | 6
-rw-r--r-- mysql-test/r/mysqldump-nl.result | 126
-rw-r--r-- mysql-test/r/mysqldump.result | 3
-rw-r--r-- mysql-test/r/mysqltest.result | 6
-rw-r--r-- mysql-test/r/selectivity.result | 72
-rw-r--r-- mysql-test/r/selectivity_innodb.result | 115
-rw-r--r-- mysql-test/suite/galera/disabled.def | 2
-rw-r--r-- mysql-test/suite/galera/include/auto_increment_offset_restore.inc | 6
-rw-r--r-- mysql-test/suite/galera/include/auto_increment_offset_save.inc | 8
-rw-r--r-- mysql-test/suite/galera/r/galera_sst_xtrabackup-v2.result | 3
-rw-r--r-- mysql-test/suite/galera/t/galera#414.test | 1
-rw-r--r-- mysql-test/suite/galera/t/galera_sst_xtrabackup-v2.test | 10
-rw-r--r-- mysql-test/suite/galera/t/galera_wan_restart_ist.test | 17
-rw-r--r-- mysql-test/suite/galera_3nodes/disabled.def | 2
-rw-r--r-- mysql-test/suite/rpl/r/rpl_stop_slave_error.result | 6
-rw-r--r-- mysql-test/suite/rpl/t/rpl_drop_db.test | 4
-rw-r--r-- mysql-test/suite/rpl/t/rpl_stop_slave_error-slave.opt | 1
-rw-r--r-- mysql-test/suite/rpl/t/rpl_stop_slave_error.test | 17
-rw-r--r-- mysql-test/suite/sys_vars/r/sysvars_innodb,32bit,xtradb.rdiff | 4
-rw-r--r-- mysql-test/suite/sys_vars/r/sysvars_innodb,xtradb.rdiff | 4
-rw-r--r-- mysql-test/t/alter_table.test | 34
-rw-r--r-- mysql-test/t/create_or_replace.test | 12
-rw-r--r-- mysql-test/t/drop.test | 9
-rw-r--r-- mysql-test/t/information_schema.test | 8
-rw-r--r-- mysql-test/t/mysql.test | 8
-rw-r--r-- mysql-test/t/mysql_not_windows.test | 9
-rw-r--r-- mysql-test/t/mysqldump-nl.test | 38
-rw-r--r-- mysql-test/t/mysqltest.test | 9
-rw-r--r-- mysql-test/t/selectivity.test | 52
-rw-r--r-- mysql-test/t/selectivity_innodb.test | 25
-rw-r--r-- mysql-test/valgrind.supp | 119
-rw-r--r-- sql/item_subselect.cc | 4
-rw-r--r-- sql/opt_range.cc | 9
-rw-r--r-- sql/slave.cc | 9
-rw-r--r-- sql/sql_db.cc | 26
-rw-r--r-- sql/sql_parse.cc | 12
-rw-r--r-- sql/sql_statistics.cc | 15
-rw-r--r-- sql/sql_statistics.h | 5
-rw-r--r-- sql/sql_table.cc | 4
-rw-r--r-- sql/threadpool_common.cc | 98
-rw-r--r-- storage/connect/JdbcInterface.java | 16
-rw-r--r-- storage/connect/filamdbf.cpp | 86
-rw-r--r-- storage/connect/filamdbf.h | 2
-rw-r--r-- storage/connect/ha_connect.cc | 84
-rw-r--r-- storage/connect/jdbconn.cpp | 249
-rw-r--r-- storage/connect/jdbconn.h | 1
-rw-r--r-- storage/connect/reldef.cpp | 8
-rw-r--r-- storage/connect/tabjdbc.cpp | 19
-rw-r--r-- storage/oqgraph/graphcore.cc | 2
-rw-r--r-- storage/oqgraph/oqgraph_shim.h | 48
-rw-r--r-- storage/tokudb/CMakeLists.txt | 2
-rw-r--r-- storage/tokudb/PerconaFT/buildheader/make_tdb.cc | 3
-rw-r--r-- storage/tokudb/PerconaFT/cmake_modules/TokuFeatureDetection.cmake | 4
-rw-r--r-- storage/tokudb/PerconaFT/cmake_modules/TokuSetupCompiler.cmake | 2
-rw-r--r-- storage/tokudb/PerconaFT/ft/cachetable/cachetable-internal.h | 2
-rw-r--r-- storage/tokudb/PerconaFT/ft/cachetable/cachetable.cc | 16
-rw-r--r-- storage/tokudb/PerconaFT/ft/cachetable/cachetable.h | 6
-rw-r--r-- storage/tokudb/PerconaFT/ft/ft-ops.cc | 151
-rw-r--r-- storage/tokudb/PerconaFT/ft/ft-ops.h | 2
-rw-r--r-- storage/tokudb/PerconaFT/ft/ft.cc | 14
-rw-r--r-- storage/tokudb/PerconaFT/ft/ft.h | 6
-rw-r--r-- storage/tokudb/PerconaFT/ft/logger/logformat.cc | 9
-rw-r--r-- storage/tokudb/PerconaFT/ft/logger/recover.cc | 78
-rw-r--r-- storage/tokudb/PerconaFT/ft/serialize/rbtree_mhs.h | 16
-rw-r--r-- storage/tokudb/PerconaFT/ft/tests/test-rbtree-insert-remove-without-mhs.cc | 7
-rw-r--r-- storage/tokudb/PerconaFT/ft/txn/roll.cc | 118
-rw-r--r-- storage/tokudb/PerconaFT/portability/file.cc | 6
-rw-r--r-- storage/tokudb/PerconaFT/portability/memory.cc | 9
-rw-r--r-- storage/tokudb/PerconaFT/portability/memory.h | 4
-rw-r--r-- storage/tokudb/PerconaFT/portability/portability.cc | 9
-rw-r--r-- storage/tokudb/PerconaFT/portability/tests/test-xid.cc | 9
-rw-r--r-- storage/tokudb/PerconaFT/portability/toku_config.h.in | 1
-rw-r--r-- storage/tokudb/PerconaFT/portability/toku_portability.h | 2
-rw-r--r-- storage/tokudb/PerconaFT/src/tests/CMakeLists.txt | 42
-rw-r--r-- storage/tokudb/PerconaFT/src/tests/recovery_fileops_unit.cc | 155
-rw-r--r-- storage/tokudb/PerconaFT/src/ydb-internal.h | 3
-rw-r--r-- storage/tokudb/PerconaFT/src/ydb.cc | 50
-rw-r--r-- storage/tokudb/PerconaFT/src/ydb_db.cc | 99
-rw-r--r-- storage/tokudb/PerconaFT/src/ydb_db.h | 16
-rw-r--r-- storage/tokudb/hatoku_hton.cc | 1
-rw-r--r-- storage/tokudb/mysql-test/tokudb/disabled.def | 1
-rw-r--r-- storage/tokudb/mysql-test/tokudb/include/table_files_replace_pattern.inc | 1
-rw-r--r-- storage/tokudb/mysql-test/tokudb/r/dir-per-db-with-custom-data-dir.result | 10
-rw-r--r-- storage/tokudb/mysql-test/tokudb/r/dir_per_db.result | 180
-rw-r--r-- storage/tokudb/mysql-test/tokudb/r/i_s_tokudb_lock_waits_released.result | 12
-rw-r--r-- storage/tokudb/mysql-test/tokudb/r/row_format.result | 51
-rw-r--r-- storage/tokudb/mysql-test/tokudb/t/dir-per-db-with-custom-data-dir-master.opt | 1
-rw-r--r-- storage/tokudb/mysql-test/tokudb/t/dir-per-db-with-custom-data-dir.test | 16
-rw-r--r-- storage/tokudb/mysql-test/tokudb/t/dir_per_db.test | 76
-rw-r--r-- storage/tokudb/mysql-test/tokudb/t/dir_per_db_show_table_files.inc | 9
-rw-r--r-- storage/tokudb/mysql-test/tokudb/t/i_s_tokudb_lock_waits_released.test | 29
-rw-r--r-- storage/tokudb/mysql-test/tokudb/t/row_format.test | 41
-rw-r--r-- storage/tokudb/mysql-test/tokudb_bugs/r/db938.result | 2
-rw-r--r-- storage/tokudb/mysql-test/tokudb_bugs/t/db938.test | 4
-rw-r--r-- storage/tokudb/mysql-test/tokudb_parts/include/table_files_replace_pattern.inc | 1
-rw-r--r-- storage/tokudb/mysql-test/tokudb_parts/t/partition_debug_sync_tokudb.test | 4
-rw-r--r-- storage/tokudb/tokudb_sysvars.cc | 14
-rw-r--r-- storage/tokudb/tokudb_sysvars.h | 1
-rw-r--r-- storage/xtradb/buf/buf0buf.cc | 4
-rw-r--r-- storage/xtradb/buf/buf0dblwr.cc | 2
-rw-r--r-- storage/xtradb/buf/buf0flu.cc | 5
-rw-r--r-- storage/xtradb/dict/dict0boot.cc | 4
-rw-r--r-- storage/xtradb/dict/dict0crea.cc | 583
-rw-r--r-- storage/xtradb/dict/dict0dict.cc | 158
-rw-r--r-- storage/xtradb/dict/dict0load.cc | 159
-rw-r--r-- storage/xtradb/fil/fil0fil.cc | 2
-rw-r--r-- storage/xtradb/fts/fts0fts.cc | 31
-rw-r--r-- storage/xtradb/handler/ha_innodb.cc | 453
-rw-r--r-- storage/xtradb/handler/ha_innodb.h | 37
-rw-r--r-- storage/xtradb/handler/handler0alter.cc | 75
-rw-r--r-- storage/xtradb/handler/i_s.cc | 2
-rw-r--r-- storage/xtradb/handler/xtradb_i_s.cc | 354
-rw-r--r-- storage/xtradb/handler/xtradb_i_s.h | 2
-rw-r--r-- storage/xtradb/include/data0type.h | 14
-rw-r--r-- storage/xtradb/include/data0type.ic | 16
-rw-r--r-- storage/xtradb/include/dict0boot.h | 32
-rw-r--r-- storage/xtradb/include/dict0crea.h | 91
-rw-r--r-- storage/xtradb/include/dict0dict.h | 46
-rw-r--r-- storage/xtradb/include/dict0load.h | 29
-rw-r--r-- storage/xtradb/include/fts0fts.h | 10
-rw-r--r-- storage/xtradb/include/os0thread.h | 15
-rw-r--r-- storage/xtradb/include/rem0types.h | 3
-rw-r--r-- storage/xtradb/include/row0mysql.h | 85
-rw-r--r-- storage/xtradb/include/srv0srv.h | 5
-rw-r--r-- storage/xtradb/include/univ.i | 2
-rw-r--r-- storage/xtradb/log/log0log.cc | 20
-rw-r--r-- storage/xtradb/log/log0online.cc | 33
-rw-r--r-- storage/xtradb/mach/mach0data.cc | 13
-rw-r--r-- storage/xtradb/os/os0thread.cc | 33
-rw-r--r-- storage/xtradb/rem/rem0rec.cc | 23
-rw-r--r-- storage/xtradb/row/row0ftsort.cc | 2
-rw-r--r-- storage/xtradb/row/row0log.cc | 14
-rw-r--r-- storage/xtradb/row/row0merge.cc | 18
-rw-r--r-- storage/xtradb/row/row0mysql.cc | 634
-rw-r--r-- storage/xtradb/row/row0sel.cc | 45
-rw-r--r-- storage/xtradb/srv/srv0start.cc | 6
162 files changed, 5627 insertions(+), 860 deletions(-)
diff --git a/client/mysql.cc b/client/mysql.cc
index be103224f87..136713e3a1b 100644
--- a/client/mysql.cc
+++ b/client/mysql.cc
@@ -245,7 +245,8 @@ static void end_pager();
static void init_tee(const char *);
static void end_tee();
static const char* construct_prompt();
-static char *get_arg(char *line, my_bool get_next_arg);
+enum get_arg_mode { CHECK, GET, GET_NEXT};
+static char *get_arg(char *line, get_arg_mode mode);
static void init_username();
static void add_int_to_prompt(int toadd);
static int get_result_width(MYSQL_RES *res);
@@ -2255,7 +2256,7 @@ static COMMANDS *find_command(char *name)
if (!my_strnncoll(&my_charset_latin1, (uchar*) name, len,
(uchar*) commands[i].name, len) &&
(commands[i].name[len] == '\0') &&
- (!end || commands[i].takes_params))
+ (!end || (commands[i].takes_params && get_arg(name, CHECK))))
{
index= i;
break;
@@ -3173,7 +3174,7 @@ com_charset(String *buffer __attribute__((unused)), char *line)
char buff[256], *param;
CHARSET_INFO * new_cs;
strmake_buf(buff, line);
- param= get_arg(buff, 0);
+ param= get_arg(buff, GET);
if (!param || !*param)
{
return put_info("Usage: \\C charset_name | charset charset_name",
@@ -4259,12 +4260,12 @@ com_connect(String *buffer, char *line)
#ifdef EXTRA_DEBUG
tmp[1]= 0;
#endif
- tmp= get_arg(buff, 0);
+ tmp= get_arg(buff, GET);
if (tmp && *tmp)
{
my_free(current_db);
current_db= my_strdup(tmp, MYF(MY_WME));
- tmp= get_arg(buff, 1);
+ tmp= get_arg(buff, GET_NEXT);
if (tmp)
{
my_free(current_host);
@@ -4367,7 +4368,7 @@ com_delimiter(String *buffer __attribute__((unused)), char *line)
char buff[256], *tmp;
strmake_buf(buff, line);
- tmp= get_arg(buff, 0);
+ tmp= get_arg(buff, GET);
if (!tmp || !*tmp)
{
@@ -4398,7 +4399,7 @@ com_use(String *buffer __attribute__((unused)), char *line)
bzero(buff, sizeof(buff));
strmake_buf(buff, line);
- tmp= get_arg(buff, 0);
+ tmp= get_arg(buff, GET);
if (!tmp || !*tmp)
{
put_info("USE must be followed by a database name", INFO_ERROR);
@@ -4483,23 +4484,22 @@ com_nowarnings(String *buffer __attribute__((unused)),
}
/*
- Gets argument from a command on the command line. If get_next_arg is
- not defined, skips the command and returns the first argument. The
- line is modified by adding zero to the end of the argument. If
- get_next_arg is defined, then the function searches for end of string
- first, after found, returns the next argument and adds zero to the
- end. If you ever wish to use this feature, remember to initialize all
- items in the array to zero first.
+ Gets argument from a command on the command line. If mode is not GET_NEXT,
+ skips the command and returns the first argument. The line is modified by
+ adding zero to the end of the argument. If mode is GET_NEXT, then the
+ function searches for end of string first, after found, returns the next
+ argument and adds zero to the end. If you ever wish to use this feature,
+ remember to initialize all items in the array to zero first.
*/
-char *get_arg(char *line, my_bool get_next_arg)
+static char *get_arg(char *line, get_arg_mode mode)
{
char *ptr, *start;
- my_bool quoted= 0, valid_arg= 0;
+ bool short_cmd= false;
char qtype= 0;
ptr= line;
- if (get_next_arg)
+ if (mode == GET_NEXT)
{
for (; *ptr; ptr++) ;
if (*(ptr + 1))
@@ -4510,7 +4510,7 @@ char *get_arg(char *line, my_bool get_next_arg)
/* skip leading white spaces */
while (my_isspace(charset_info, *ptr))
ptr++;
- if (*ptr == '\\') // short command was used
+ if ((short_cmd= *ptr == '\\')) // short command was used
ptr+= 2;
else
while (*ptr &&!my_isspace(charset_info, *ptr)) // skip command
@@ -4523,24 +4523,28 @@ char *get_arg(char *line, my_bool get_next_arg)
if (*ptr == '\'' || *ptr == '\"' || *ptr == '`')
{
qtype= *ptr;
- quoted= 1;
ptr++;
}
for (start=ptr ; *ptr; ptr++)
{
- if (*ptr == '\\' && ptr[1]) // escaped character
+ if ((*ptr == '\\' && ptr[1]) || // escaped character
+ (!short_cmd && qtype && *ptr == qtype && ptr[1] == qtype)) // quote
{
- // Remove the backslash
- strmov_overlapp(ptr, ptr+1);
+ // Remove (or skip) the backslash (or a second quote)
+ if (mode != CHECK)
+ strmov_overlapp(ptr, ptr+1);
+ else
+ ptr++;
}
- else if ((!quoted && *ptr == ' ') || (quoted && *ptr == qtype))
+ else if (*ptr == (qtype ? qtype : ' '))
{
- *ptr= 0;
+ qtype= 0;
+ if (mode != CHECK)
+ *ptr= 0;
break;
}
}
- valid_arg= ptr != start;
- return valid_arg ? start : NullS;
+ return ptr != start && !qtype ? start : NullS;
}
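
[Annotation, not part of the patch] The get_arg() rewrite replaces the boolean get_next_arg flag with a three-valued mode: CHECK parses the line without modifying it (used by find_command() above, so a command word with no parsable argument is no longer mis-detected as a client command), while GET and GET_NEXT keep the old semantics. It also learns to collapse doubled quotes inside quoted arguments. A minimal standalone sketch of that unescaping logic, with a hypothetical helper name:

    // Collapse doubled quotes inside a quoted argument and strip the outer
    // quotes, mirroring the strmov_overlapp() path in the patched get_arg().
    #include <cstdio>

    static char *unquote_arg(char *s)
    {
        char q = (*s == '\'' || *s == '"' || *s == '`') ? *s++ : 0;
        char *out = s;
        for (char *p = s; *p; p++)
        {
            if (q && p[0] == q && p[1] == q)   // doubled quote -> one quote
                p++;
            else if (q && *p == q)             // closing quote: done
                break;
            *out++ = *p;
        }
        *out = 0;
        return s;
    }

    int main()
    {
        char buf[] = "`aa``bb`";
        printf("%s\n", unquote_arg(buf));      // prints: aa`bb
        return 0;
    }
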
diff --git a/client/mysqldump.c b/client/mysqldump.c
index f2576861fc0..ced65f78986 100644
--- a/client/mysqldump.c
+++ b/client/mysqldump.c
@@ -577,9 +577,7 @@ static int dump_all_tablespaces();
static int dump_tablespaces_for_tables(char *db, char **table_names, int tables);
static int dump_tablespaces_for_databases(char** databases);
static int dump_tablespaces(char* ts_where);
-static void print_comment(FILE *sql_file, my_bool is_error, const char *format,
- ...);
-
+static void print_comment(FILE *, my_bool, const char *, ...);
/*
Print the supplied message if in verbose mode
@@ -657,6 +655,30 @@ static void short_usage(FILE *f)
}
+/** returns a string fixed to be safely printed inside a -- comment
+
+ that is, any newline in it gets prefixed with "-- "
+*/
+static const char *fix_for_comment(const char *ident)
+{
+ static char buf[1024];
+ char c, *s= buf;
+
+ while ((c= *s++= *ident++))
+ {
+ if (s >= buf + sizeof(buf) - 10)
+ {
+ strmov(s, "...");
+ break;
+ }
+ if (c == '\n')
+ s= strmov(s, "-- ");
+ }
+
+ return buf;
+}
+
+
static void write_header(FILE *sql_file, char *db_name)
{
if (opt_xml)
@@ -679,8 +701,8 @@ static void write_header(FILE *sql_file, char *db_name)
DUMP_VERSION, MYSQL_SERVER_VERSION, SYSTEM_TYPE,
MACHINE_TYPE);
print_comment(sql_file, 0, "-- Host: %s Database: %s\n",
- current_host ? current_host : "localhost",
- db_name ? db_name : "");
+ fix_for_comment(current_host ? current_host : "localhost"),
+ fix_for_comment(db_name ? db_name : ""));
print_comment(sql_file, 0,
"-- ------------------------------------------------------\n"
);
@@ -2250,7 +2272,8 @@ static uint dump_events_for_db(char *db)
/* nice comments */
print_comment(sql_file, 0,
- "\n--\n-- Dumping events for database '%s'\n--\n", db);
+ "\n--\n-- Dumping events for database '%s'\n--\n",
+ fix_for_comment(db));
/*
not using "mysql_query_with_error_report" because we may have not
@@ -2462,7 +2485,8 @@ static uint dump_routines_for_db(char *db)
/* nice comments */
print_comment(sql_file, 0,
- "\n--\n-- Dumping routines for database '%s'\n--\n", db);
+ "\n--\n-- Dumping routines for database '%s'\n--\n",
+ fix_for_comment(db));
/*
not using "mysql_query_with_error_report" because we may have not
@@ -2758,11 +2782,11 @@ static uint get_table_structure(char *table, char *db, char *table_type,
if (strcmp (table_type, "VIEW") == 0) /* view */
print_comment(sql_file, 0,
"\n--\n-- Temporary table structure for view %s\n--\n\n",
- result_table);
+ fix_for_comment(result_table));
else
print_comment(sql_file, 0,
"\n--\n-- Table structure for table %s\n--\n\n",
- result_table);
+ fix_for_comment(result_table));
if (opt_drop)
{
@@ -3006,7 +3030,7 @@ static uint get_table_structure(char *table, char *db, char *table_type,
print_comment(sql_file, 0,
"\n--\n-- Table structure for table %s\n--\n\n",
- result_table);
+ fix_for_comment(result_table));
if (opt_drop)
fprintf(sql_file, "DROP TABLE IF EXISTS %s;\n", result_table);
if (!opt_xml)
@@ -3715,21 +3739,21 @@ static void dump_table(char *table, char *db)
{
print_comment(md_result_file, 0,
"\n--\n-- Dumping data for table %s\n--\n",
- result_table);
+ fix_for_comment(result_table));
dynstr_append_checked(&query_string, "SELECT /*!40001 SQL_NO_CACHE */ * FROM ");
dynstr_append_checked(&query_string, result_table);
if (where)
{
- print_comment(md_result_file, 0, "-- WHERE: %s\n", where);
+ print_comment(md_result_file, 0, "-- WHERE: %s\n", fix_for_comment(where));
dynstr_append_checked(&query_string, " WHERE ");
dynstr_append_checked(&query_string, where);
}
if (order_by)
{
- print_comment(md_result_file, 0, "-- ORDER BY: %s\n", order_by);
+ print_comment(md_result_file, 0, "-- ORDER BY: %s\n", fix_for_comment(order_by));
dynstr_append_checked(&query_string, " ORDER BY ");
dynstr_append_checked(&query_string, order_by);
@@ -4239,7 +4263,7 @@ static int dump_tablespaces(char* ts_where)
if (first)
{
print_comment(md_result_file, 0, "\n--\n-- Logfile group: %s\n--\n",
- row[0]);
+ fix_for_comment(row[0]));
fprintf(md_result_file, "\nCREATE");
}
@@ -4308,7 +4332,8 @@ static int dump_tablespaces(char* ts_where)
first= 1;
if (first)
{
- print_comment(md_result_file, 0, "\n--\n-- Tablespace: %s\n--\n", row[0]);
+ print_comment(md_result_file, 0, "\n--\n-- Tablespace: %s\n--\n",
+ fix_for_comment(row[0]));
fprintf(md_result_file, "\nCREATE");
}
else
@@ -4512,7 +4537,8 @@ static int init_dumping(char *database, int init_func(char*))
char *qdatabase= quote_name(database,quoted_database_buf,opt_quoted);
print_comment(md_result_file, 0,
- "\n--\n-- Current Database: %s\n--\n", qdatabase);
+ "\n--\n-- Current Database: %s\n--\n",
+ fix_for_comment(qdatabase));
/* Call the view or table specific function */
init_func(qdatabase);
@@ -5772,7 +5798,7 @@ static my_bool get_view_structure(char *table, char* db)
print_comment(sql_file, 0,
"\n--\n-- Final view structure for view %s\n--\n\n",
- result_table);
+ fix_for_comment(result_table));
/* Table might not exist if this view was dumped with --tab. */
fprintf(sql_file, "/*!50001 DROP TABLE IF EXISTS %s*/;\n", opt_quoted_table);
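
[Annotation, not part of the patch] fix_for_comment() is applied to every user-controlled string that mysqldump prints inside a "--" header, so an identifier containing a newline can no longer break out of the comment and inject statements into the dump (the new mysqldump-nl test below exercises exactly this). A quick illustration of the transformation; strmov() is replaced with standard strcpy() here so the sketch is self-contained:

    #include <cstdio>
    #include <cstring>

    static const char *fix_for_comment(const char *ident)  // as in the patch
    {
        static char buf[1024];
        char c, *s = buf;
        while ((c = *s++ = *ident++))
        {
            if (s >= buf + sizeof(buf) - 10)   // truncate overlong input
            {
                strcpy(s, "...");
                break;
            }
            if (c == '\n')                     // keep the next line commented
                s = strcpy(s, "-- ") + 3;      // strmov() returns the end
        }
        return buf;
    }

    int main()
    {
        printf("-- Current Database: %s\n", fix_for_comment("db\nname"));
        // prints: -- Current Database: db
        //         -- name
        return 0;
    }
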
diff --git a/client/mysqltest.cc b/client/mysqltest.cc
index c388fd79dd7..0a562904b16 100644
--- a/client/mysqltest.cc
+++ b/client/mysqltest.cc
@@ -3371,10 +3371,6 @@ void do_exec(struct st_command *command)
#endif
#endif
- /* exec command is interpreted externally and will not take newlines */
- while(replace(&ds_cmd, "\n", 1, " ", 1) == 0)
- ;
-
DBUG_PRINT("info", ("Executing '%s' as '%s'",
command->first_argument, ds_cmd.str));
diff --git a/cmake/package_name.cmake b/cmake/package_name.cmake
index c1c335f91f9..4930a6bf40a 100644
--- a/cmake/package_name.cmake
+++ b/cmake/package_name.cmake
@@ -30,6 +30,10 @@ IF(NOT VERSION)
SET(64BIT 1)
ENDIF()
+ IF(NOT 64BIT AND CMAKE_SYSTEM_PROCESSOR MATCHES "^mips64")
+ SET(DEFAULT_MACHINE "mips")
+ ENDIF()
+
IF(CMAKE_SYSTEM_NAME MATCHES "Windows")
SET(NEED_DASH_BETWEEN_PLATFORM_AND_MACHINE 0)
SET(DEFAULT_PLATFORM "win")
diff --git a/extra/innochecksum.cc b/extra/innochecksum.cc
index e128271f4b8..03cc6a20a8d 100644
--- a/extra/innochecksum.cc
+++ b/extra/innochecksum.cc
@@ -679,10 +679,9 @@ int main(int argc, char **argv)
time_t lastt; /* last time */
ulint oldcsum, oldcsumfield, csum, csumfield, crc32, logseq, logseqfield;
/* ulints for checksum storage */
- struct stat st; /* for stat, if you couldn't guess */
unsigned long long int size; /* size of file (has to be 64 bits) */
ulint pages; /* number of pages in file */
- off_t offset= 0;
+ long long offset= 0;
int fd;
printf("InnoDB offline file checksum utility.\n");
@@ -705,6 +704,47 @@ int main(int argc, char **argv)
goto error_out;
}
+#ifdef _WIN32
+ /* Switch off OS file buffering for the file. */
+
+ HANDLE h = CreateFile(filename, GENERIC_READ,
+ FILE_SHARE_READ|FILE_SHARE_WRITE, 0,
+ OPEN_EXISTING, FILE_FLAG_NO_BUFFERING, 0);
+
+  if (h == INVALID_HANDLE_VALUE)
+  {
+    fprintf(stderr, "Error; can't open file\n");
+ goto error;
+ }
+
+ if (!GetFileSizeEx(h, (LARGE_INTEGER *)&size))
+ {
+ fprintf(stderr, "Error; GetFileSize() failed\n");
+ goto error;
+ }
+
+ fd = _open_osfhandle ((intptr_t) h, _O_RDONLY);
+ if (fd < 0)
+ {
+ fprintf(stderr, "Error; _open_osfhandle() failed\n");
+ goto error;
+ }
+
+ f = _fdopen(fd, "rb");
+ if (!f)
+ {
+ fprintf(stderr, "Error; fdopen() failed\n");
+ goto error;
+ }
+
+ /*
+    Disable stdio buffering (FILE_FLAG_NO_BUFFERING requires properly aligned
+    IO buffers, which stdio does not guarantee).
+ */
+ setvbuf(f, NULL, _IONBF, 0);
+
+#else
+ struct stat st;
/* stat the file to get size and page count */
if (stat(filename, &st))
{
@@ -715,6 +755,8 @@ int main(int argc, char **argv)
/* Open the file for reading */
f= fopen(filename, "rb");
+#endif
+
if (f == NULL)
{
fprintf(stderr, "Error; %s cannot be opened", filename);
@@ -772,7 +814,7 @@ int main(int argc, char **argv)
}
else if (verbose)
{
- printf("file %s = %llu bytes (%lu pages)...\n", filename, size, pages);
+ printf("file %s = %llu bytes (%lu pages)...\n", filename, size, (ulong)pages);
if (do_one_page)
printf("InnoChecksum; checking page %lu\n", do_page);
else
@@ -797,9 +839,12 @@ int main(int argc, char **argv)
goto error;
}
- offset= (off_t)start_page * (off_t)physical_page_size;
-
+ offset= (longlong)start_page * (longlong)physical_page_size;
+#ifdef _WIN32
+ if (_lseeki64(fd, offset, SEEK_SET) != offset)
+#else
if (lseek(fd, offset, SEEK_SET) != offset)
+#endif
{
perror("Error; Unable to seek to necessary offset");
goto error;
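
[Annotation, not part of the patch] On Windows the patch opens the tablespace file with FILE_FLAG_NO_BUFFERING and wraps the raw HANDLE into a stdio FILE* via _open_osfhandle()/_fdopen(); setvbuf(..., _IONBF, ...) then keeps stdio from issuing the unaligned reads that unbuffered handles reject. Note that CreateFile() signals failure with INVALID_HANDLE_VALUE rather than NULL, which is what the check above compares against. The generic pattern, as a sketch with a hypothetical helper name:

    #ifdef _WIN32
    #include <windows.h>
    #include <io.h>
    #include <fcntl.h>
    #include <cstdio>

    static FILE *fopen_unbuffered(const char *path)
    {
        HANDLE h = CreateFile(path, GENERIC_READ,
                              FILE_SHARE_READ | FILE_SHARE_WRITE, 0,
                              OPEN_EXISTING, FILE_FLAG_NO_BUFFERING, 0);
        if (h == INVALID_HANDLE_VALUE)
            return 0;
        int fd = _open_osfhandle((intptr_t) h, _O_RDONLY);  // fd now owns h
        if (fd < 0) { CloseHandle(h); return 0; }
        FILE *f = _fdopen(fd, "rb");                        // FILE* owns fd
        if (!f) { _close(fd); return 0; }
        setvbuf(f, 0, _IONBF, 0);  // stdio buffering would split aligned reads
        return f;
    }
    #endif
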
diff --git a/extra/yassl/README b/extra/yassl/README
index b5eb88824fb..a3d4f60f561 100644
--- a/extra/yassl/README
+++ b/extra/yassl/README
@@ -12,6 +12,24 @@ before calling SSL_new();
*** end Note ***
+yaSSL Release notes, version 2.4.2 (9/22/2016)
+  This release of yaSSL fixes a medium-severity security vulnerability. It
+  includes a fix for potential AES cache-timing side-channel leaks that a
+  local user monitoring the same CPU core's cache could exploit. VM users,
+  hyper-threading users, and users whose potential attackers have access to
+  the CPU cache should update if they use AES.
+
+  DSA padding fixes for unusual sizes are included as well. Users with DSA
+  certificates should update.
+
+yaSSL Release notes, version 2.4.0 (5/20/2016)
+ This release of yaSSL fixes the OpenSSL compatibility function
+ SSL_CTX_load_verify_locations() when using the path directory to allow
+ unlimited path sizes. Minor Windows build fixes are included.
+ No high level security fixes in this version but we always recommend
+ updating.
+
+
yaSSL Release notes, version 2.3.9b (2/03/2016)
This release of yaSSL fixes the OpenSSL compatibility function
X509_NAME_get_index_by_NID() to use the actual index of the common name
diff --git a/extra/yassl/certs/dsa-cert.pem b/extra/yassl/certs/dsa-cert.pem
index 10d533edc88..10794cbee73 100644
--- a/extra/yassl/certs/dsa-cert.pem
+++ b/extra/yassl/certs/dsa-cert.pem
@@ -1,22 +1,22 @@
-----BEGIN CERTIFICATE-----
-MIIDqzCCA2ugAwIBAgIJAMGqrgDU6DyhMAkGByqGSM44BAMwgY4xCzAJBgNVBAYT
+MIIDrzCCA2+gAwIBAgIJAK1zRM7YFcNjMAkGByqGSM44BAMwgZAxCzAJBgNVBAYT
AlVTMQ8wDQYDVQQIDAZPcmVnb24xETAPBgNVBAcMCFBvcnRsYW5kMRAwDgYDVQQK
-DAd3b2xmU1NMMRAwDgYDVQQLDAd0ZXN0aW5nMRYwFAYDVQQDDA13d3cueWFzc2wu
-Y29tMR8wHQYJKoZIhvcNAQkBFhBpbmZvQHdvbGZzc2wuY29tMB4XDTEzMDQyMjIw
-MDk0NFoXDTE2MDExNzIwMDk0NFowgY4xCzAJBgNVBAYTAlVTMQ8wDQYDVQQIDAZP
-cmVnb24xETAPBgNVBAcMCFBvcnRsYW5kMRAwDgYDVQQKDAd3b2xmU1NMMRAwDgYD
-VQQLDAd0ZXN0aW5nMRYwFAYDVQQDDA13d3cueWFzc2wuY29tMR8wHQYJKoZIhvcN
-AQkBFhBpbmZvQHdvbGZzc2wuY29tMIIBuDCCASwGByqGSM44BAEwggEfAoGBAL1R
-7koy4IrH6sbh6nDEUUPPKgfhxxLCWCVexF2+qzANEr+hC9M002haJXFOfeS9DyoO
-WFbL0qMZOuqv+22CaHnoUWl7q3PjJOAI3JH0P54ZyUPuU1909RzgTdIDp5+ikbr7
-KYjnltL73FQVMbjTZQKthIpPn3MjYcF+4jp2W2zFAhUAkcntYND6MGf+eYzIJDN2
-L7SonHUCgYEAklpxErfqznIZjVvqqHFaq+mgAL5J8QrKVmdhYZh/Y8z4jCjoCA8o
-TDoFKxf7s2ZzgaPKvglaEKiYqLqic9qY78DYJswzQMLFvjsF4sFZ+pYCBdWPQI4N
-PgxCiznK6Ce+JH9ikSBvMvG+tevjr2UpawDIHX3+AWYaZBZwKADAaboDgYUAAoGB
-AJ3LY89yHyvQ/TsQ6zlYbovjbk/ogndsMqPdNUvL4RuPTgJP/caaDDa0XJ7ak6A7
-TJ+QheLNwOXoZPYJC4EGFSDAXpYniGhbWIrVTCGe6lmZDfnx40WXS0kk3m/DHaC0
-3ElLAiybxVGxyqoUfbT3Zv1JwftWMuiqHH5uADhdXuXVo1AwTjAdBgNVHQ4EFgQU
-IJjk416o4v8qpH9LBtXlR9v8gccwHwYDVR0jBBgwFoAUIJjk416o4v8qpH9LBtXl
-R9v8gccwDAYDVR0TBAUwAwEB/zAJBgcqhkjOOAQDAy8AMCwCFCjGKIdOSV12LcTu
-k08owGM6YkO1AhQe+K173VuaO/OsDNsxZlKpyH8+1g==
+DAd3b2xmU1NMMRAwDgYDVQQLDAd0ZXN0aW5nMRgwFgYDVQQDDA93d3cud29sZnNz
+bC5jb20xHzAdBgkqhkiG9w0BCQEWEGluZm9Ad29sZnNzbC5jb20wHhcNMTYwOTIy
+MjEyMzA0WhcNMjIwMzE1MjEyMzA0WjCBkDELMAkGA1UEBhMCVVMxDzANBgNVBAgM
+Bk9yZWdvbjERMA8GA1UEBwwIUG9ydGxhbmQxEDAOBgNVBAoMB3dvbGZTU0wxEDAO
+BgNVBAsMB3Rlc3RpbmcxGDAWBgNVBAMMD3d3dy53b2xmc3NsLmNvbTEfMB0GCSqG
+SIb3DQEJARYQaW5mb0B3b2xmc3NsLmNvbTCCAbgwggEsBgcqhkjOOAQBMIIBHwKB
+gQC9Ue5KMuCKx+rG4epwxFFDzyoH4ccSwlglXsRdvqswDRK/oQvTNNNoWiVxTn3k
+vQ8qDlhWy9KjGTrqr/ttgmh56FFpe6tz4yTgCNyR9D+eGclD7lNfdPUc4E3SA6ef
+opG6+ymI55bS+9xUFTG402UCrYSKT59zI2HBfuI6dltsxQIVAJHJ7WDQ+jBn/nmM
+yCQzdi+0qJx1AoGBAJJacRK36s5yGY1b6qhxWqvpoAC+SfEKylZnYWGYf2PM+Iwo
+6AgPKEw6BSsX+7Nmc4Gjyr4JWhComKi6onPamO/A2CbMM0DCxb47BeLBWfqWAgXV
+j0CODT4MQos5yugnviR/YpEgbzLxvrXr469lKWsAyB19/gFmGmQWcCgAwGm6A4GF
+AAKBgQCdy2PPch8r0P07EOs5WG6L425P6IJ3bDKj3TVLy+Ebj04CT/3Gmgw2tFye
+2pOgO0yfkIXizcDl6GT2CQuBBhUgwF6WJ4hoW1iK1UwhnupZmQ358eNFl0tJJN5v
+wx2gtNxJSwIsm8VRscqqFH2092b9ScH7VjLoqhx+bgA4XV7l1aNQME4wHQYDVR0O
+BBYEFCCY5ONeqOL/KqR/SwbV5Ufb/IHHMB8GA1UdIwQYMBaAFCCY5ONeqOL/KqR/
+SwbV5Ufb/IHHMAwGA1UdEwQFMAMBAf8wCQYHKoZIzjgEAwMvADAsAhQRYSCVN/Ge
+agV3mffU3qNZ92fI0QIUPH7Jp+iASI7U1ocaYDc10qXGaGY=
-----END CERTIFICATE-----
diff --git a/extra/yassl/include/openssl/ssl.h b/extra/yassl/include/openssl/ssl.h
index c95eb1ed887..9ec99b46c1f 100644
--- a/extra/yassl/include/openssl/ssl.h
+++ b/extra/yassl/include/openssl/ssl.h
@@ -34,7 +34,7 @@
#include "rsa.h"
-#define YASSL_VERSION "2.3.9b"
+#define YASSL_VERSION "2.4.2"
#if defined(__cplusplus)
diff --git a/extra/yassl/src/ssl.cpp b/extra/yassl/src/ssl.cpp
index 1972b29c92d..7f4b0fd095f 100644
--- a/extra/yassl/src/ssl.cpp
+++ b/extra/yassl/src/ssl.cpp
@@ -162,7 +162,7 @@ int read_file(SSL_CTX* ctx, const char* file, int format, CertType type)
TaoCrypt::DSA_PrivateKey dsaKey;
dsaKey.Initialize(dsaSource);
- if (rsaSource.GetError().What()) {
+ if (dsaSource.GetError().What()) {
// neither worked
ret = SSL_FAILURE;
}
@@ -785,40 +785,67 @@ int SSL_CTX_load_verify_locations(SSL_CTX* ctx, const char* file,
WIN32_FIND_DATA FindFileData;
HANDLE hFind;
- char name[MAX_PATH + 1]; // directory specification
- strncpy(name, path, MAX_PATH - 3);
- strncat(name, "\\*", 3);
+ const int DELIMITER_SZ = 2;
+ const int DELIMITER_STAR_SZ = 3;
+ int pathSz = (int)strlen(path);
+ int nameSz = pathSz + DELIMITER_STAR_SZ + 1; // plus 1 for terminator
+ char* name = NEW_YS char[nameSz]; // directory specification
+ memset(name, 0, nameSz);
+ strncpy(name, path, nameSz - DELIMITER_STAR_SZ - 1);
+ strncat(name, "\\*", DELIMITER_STAR_SZ);
hFind = FindFirstFile(name, &FindFileData);
- if (hFind == INVALID_HANDLE_VALUE) return SSL_BAD_PATH;
+ if (hFind == INVALID_HANDLE_VALUE) {
+ ysArrayDelete(name);
+ return SSL_BAD_PATH;
+ }
do {
- if (FindFileData.dwFileAttributes != FILE_ATTRIBUTE_DIRECTORY) {
- strncpy(name, path, MAX_PATH - 2 - HALF_PATH);
- strncat(name, "\\", 2);
- strncat(name, FindFileData.cFileName, HALF_PATH);
+ if (!(FindFileData.dwFileAttributes & FILE_ATTRIBUTE_DIRECTORY)) {
+ int curSz = (int)strlen(FindFileData.cFileName);
+ if (pathSz + curSz + DELIMITER_SZ + 1 > nameSz) {
+ ysArrayDelete(name);
+ // plus 1 for terminator
+ nameSz = pathSz + curSz + DELIMITER_SZ + 1;
+ name = NEW_YS char[nameSz];
+ }
+ memset(name, 0, nameSz);
+ strncpy(name, path, nameSz - curSz - DELIMITER_SZ - 1);
+ strncat(name, "\\", DELIMITER_SZ);
+ strncat(name, FindFileData.cFileName,
+ nameSz - pathSz - DELIMITER_SZ - 1);
ret = read_file(ctx, name, SSL_FILETYPE_PEM, CA);
}
} while (ret == SSL_SUCCESS && FindNextFile(hFind, &FindFileData));
+ ysArrayDelete(name);
FindClose(hFind);
#else // _WIN32
-
- const int MAX_PATH = 260;
-
DIR* dir = opendir(path);
if (!dir) return SSL_BAD_PATH;
struct dirent* entry;
struct stat buf;
- char name[MAX_PATH + 1];
+ const int DELIMITER_SZ = 1;
+ int pathSz = (int)strlen(path);
+    int nameSz = pathSz + DELIMITER_SZ + 1; // plus 1 for null terminator
+ char* name = NEW_YS char[nameSz]; // directory specification
while (ret == SSL_SUCCESS && (entry = readdir(dir))) {
- strncpy(name, path, MAX_PATH - 1 - HALF_PATH);
- strncat(name, "/", 1);
- strncat(name, entry->d_name, HALF_PATH);
+ int curSz = (int)strlen(entry->d_name);
+ if (pathSz + curSz + DELIMITER_SZ + 1 > nameSz) {
+ ysArrayDelete(name);
+ nameSz = pathSz + DELIMITER_SZ + curSz + 1;
+ name = NEW_YS char[nameSz];
+ }
+ memset(name, 0, nameSz);
+ strncpy(name, path, nameSz - curSz - 1);
+ strncat(name, "/", DELIMITER_SZ);
+ strncat(name, entry->d_name, nameSz - pathSz - DELIMITER_SZ - 1);
+
if (stat(name, &buf) < 0) {
+ ysArrayDelete(name);
closedir(dir);
return SSL_BAD_STAT;
}
@@ -827,6 +854,7 @@ int SSL_CTX_load_verify_locations(SSL_CTX* ctx, const char* file,
ret = read_file(ctx, name, SSL_FILETYPE_PEM, CA);
}
+ ysArrayDelete(name);
closedir(dir);
#endif
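
[Annotation, not part of the patch] Both branches of SSL_CTX_load_verify_locations() replace fixed MAX_PATH stack buffers with exact-size heap allocations, so CA directory paths longer than 260 bytes are no longer silently truncated; the strncat() bookkeeping just enforces pathSz + delimiter + entry + terminator. The arithmetic, sketched with std::string for brevity (the patch manages raw NEW_YS arrays, presumably because yaSSL handles its own allocation):

    #include <string>

    // nameSz = pathSz + DELIMITER_SZ(1) + curSz + 1 bytes, in patch terms
    static std::string join_path(const std::string &dir,
                                 const std::string &entry)
    {
        return dir + "/" + entry;
    }
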
diff --git a/extra/yassl/taocrypt/include/aes.hpp b/extra/yassl/taocrypt/include/aes.hpp
index 01763033156..bccf6e73fc7 100644
--- a/extra/yassl/taocrypt/include/aes.hpp
+++ b/extra/yassl/taocrypt/include/aes.hpp
@@ -60,6 +60,7 @@ private:
static const word32 Te[5][256];
static const word32 Td[5][256];
+ static const byte CTd4[256];
static const word32* Te0;
static const word32* Te1;
@@ -80,11 +81,68 @@ private:
void ProcessAndXorBlock(const byte*, const byte*, byte*) const;
+ word32 PreFetchTe() const;
+ word32 PreFetchTd() const;
+ word32 PreFetchCTd4() const;
+
AES(const AES&); // hide copy
AES& operator=(const AES&); // and assign
};
+#if defined(__x86_64__) || defined(_M_X64) || \
+ (defined(__ILP32__) && (__ILP32__ >= 1))
+ #define TC_CACHE_LINE_SZ 64
+#else
+ /* default cache line size */
+ #define TC_CACHE_LINE_SZ 32
+#endif
+
+inline word32 AES::PreFetchTe() const
+{
+ word32 x = 0;
+
+ /* 4 tables of 256 entries */
+ for (int i = 0; i < 4; i++) {
+ /* each entry is 4 bytes */
+ for (int j = 0; j < 256; j += TC_CACHE_LINE_SZ/4) {
+ x &= Te[i][j];
+ }
+ }
+
+ return x;
+}
+
+
+inline word32 AES::PreFetchTd() const
+{
+ word32 x = 0;
+
+ /* 4 tables of 256 entries */
+ for (int i = 0; i < 4; i++) {
+ /* each entry is 4 bytes */
+ for (int j = 0; j < 256; j += TC_CACHE_LINE_SZ/4) {
+ x &= Td[i][j];
+ }
+ }
+
+ return x;
+}
+
+
+inline word32 AES::PreFetchCTd4() const
+{
+ word32 x = 0;
+ int i;
+
+ for (i = 0; i < 256; i += TC_CACHE_LINE_SZ) {
+ x &= CTd4[i];
+ }
+
+ return x;
+}
+
+
typedef BlockCipher<ENCRYPTION, AES, ECB> AES_ECB_Encryption;
typedef BlockCipher<DECRYPTION, AES, ECB> AES_ECB_Decryption;
diff --git a/extra/yassl/taocrypt/include/integer.hpp b/extra/yassl/taocrypt/include/integer.hpp
index 75a3ee3d3df..05fe189fd58 100644
--- a/extra/yassl/taocrypt/include/integer.hpp
+++ b/extra/yassl/taocrypt/include/integer.hpp
@@ -119,6 +119,9 @@ namespace TaoCrypt {
+#ifdef _WIN32
+ #undef max // avoid name clash
+#endif
// general MAX
template<typename T> inline
const T& max(const T& a, const T& b)
diff --git a/extra/yassl/taocrypt/src/aes.cpp b/extra/yassl/taocrypt/src/aes.cpp
index e47765b87d0..2321c72554c 100644
--- a/extra/yassl/taocrypt/src/aes.cpp
+++ b/extra/yassl/taocrypt/src/aes.cpp
@@ -109,10 +109,10 @@ void AES::SetKey(const byte* userKey, word32 keylen, CipherDir /*dummy*/)
{
temp = rk[3];
rk[4] = rk[0] ^
- (Te4[GETBYTE(temp, 2)] & 0xff000000) ^
- (Te4[GETBYTE(temp, 1)] & 0x00ff0000) ^
- (Te4[GETBYTE(temp, 0)] & 0x0000ff00) ^
- (Te4[GETBYTE(temp, 3)] & 0x000000ff) ^
+ (Te2[GETBYTE(temp, 2)] & 0xff000000) ^
+ (Te3[GETBYTE(temp, 1)] & 0x00ff0000) ^
+ (Te0[GETBYTE(temp, 0)] & 0x0000ff00) ^
+ (Te1[GETBYTE(temp, 3)] & 0x000000ff) ^
rcon_[i];
rk[5] = rk[1] ^ rk[4];
rk[6] = rk[2] ^ rk[5];
@@ -128,10 +128,10 @@ void AES::SetKey(const byte* userKey, word32 keylen, CipherDir /*dummy*/)
{
temp = rk[ 5];
rk[ 6] = rk[ 0] ^
- (Te4[GETBYTE(temp, 2)] & 0xff000000) ^
- (Te4[GETBYTE(temp, 1)] & 0x00ff0000) ^
- (Te4[GETBYTE(temp, 0)] & 0x0000ff00) ^
- (Te4[GETBYTE(temp, 3)] & 0x000000ff) ^
+ (Te2[GETBYTE(temp, 2)] & 0xff000000) ^
+ (Te3[GETBYTE(temp, 1)] & 0x00ff0000) ^
+ (Te0[GETBYTE(temp, 0)] & 0x0000ff00) ^
+ (Te1[GETBYTE(temp, 3)] & 0x000000ff) ^
rcon_[i];
rk[ 7] = rk[ 1] ^ rk[ 6];
rk[ 8] = rk[ 2] ^ rk[ 7];
@@ -149,10 +149,10 @@ void AES::SetKey(const byte* userKey, word32 keylen, CipherDir /*dummy*/)
{
temp = rk[ 7];
rk[ 8] = rk[ 0] ^
- (Te4[GETBYTE(temp, 2)] & 0xff000000) ^
- (Te4[GETBYTE(temp, 1)] & 0x00ff0000) ^
- (Te4[GETBYTE(temp, 0)] & 0x0000ff00) ^
- (Te4[GETBYTE(temp, 3)] & 0x000000ff) ^
+ (Te2[GETBYTE(temp, 2)] & 0xff000000) ^
+ (Te3[GETBYTE(temp, 1)] & 0x00ff0000) ^
+ (Te0[GETBYTE(temp, 0)] & 0x0000ff00) ^
+ (Te1[GETBYTE(temp, 3)] & 0x000000ff) ^
rcon_[i];
rk[ 9] = rk[ 1] ^ rk[ 8];
rk[10] = rk[ 2] ^ rk[ 9];
@@ -161,10 +161,10 @@ void AES::SetKey(const byte* userKey, word32 keylen, CipherDir /*dummy*/)
break;
temp = rk[11];
rk[12] = rk[ 4] ^
- (Te4[GETBYTE(temp, 3)] & 0xff000000) ^
- (Te4[GETBYTE(temp, 2)] & 0x00ff0000) ^
- (Te4[GETBYTE(temp, 1)] & 0x0000ff00) ^
- (Te4[GETBYTE(temp, 0)] & 0x000000ff);
+ (Te2[GETBYTE(temp, 3)] & 0xff000000) ^
+ (Te3[GETBYTE(temp, 2)] & 0x00ff0000) ^
+ (Te0[GETBYTE(temp, 1)] & 0x0000ff00) ^
+ (Te1[GETBYTE(temp, 0)] & 0x000000ff);
rk[13] = rk[ 5] ^ rk[12];
rk[14] = rk[ 6] ^ rk[13];
rk[15] = rk[ 7] ^ rk[14];
@@ -191,25 +191,25 @@ void AES::SetKey(const byte* userKey, word32 keylen, CipherDir /*dummy*/)
for (i = 1; i < rounds_; i++) {
rk += 4;
rk[0] =
- Td0[Te4[GETBYTE(rk[0], 3)] & 0xff] ^
- Td1[Te4[GETBYTE(rk[0], 2)] & 0xff] ^
- Td2[Te4[GETBYTE(rk[0], 1)] & 0xff] ^
- Td3[Te4[GETBYTE(rk[0], 0)] & 0xff];
+ Td0[Te1[GETBYTE(rk[0], 3)] & 0xff] ^
+ Td1[Te1[GETBYTE(rk[0], 2)] & 0xff] ^
+ Td2[Te1[GETBYTE(rk[0], 1)] & 0xff] ^
+ Td3[Te1[GETBYTE(rk[0], 0)] & 0xff];
rk[1] =
- Td0[Te4[GETBYTE(rk[1], 3)] & 0xff] ^
- Td1[Te4[GETBYTE(rk[1], 2)] & 0xff] ^
- Td2[Te4[GETBYTE(rk[1], 1)] & 0xff] ^
- Td3[Te4[GETBYTE(rk[1], 0)] & 0xff];
+ Td0[Te1[GETBYTE(rk[1], 3)] & 0xff] ^
+ Td1[Te1[GETBYTE(rk[1], 2)] & 0xff] ^
+ Td2[Te1[GETBYTE(rk[1], 1)] & 0xff] ^
+ Td3[Te1[GETBYTE(rk[1], 0)] & 0xff];
rk[2] =
- Td0[Te4[GETBYTE(rk[2], 3)] & 0xff] ^
- Td1[Te4[GETBYTE(rk[2], 2)] & 0xff] ^
- Td2[Te4[GETBYTE(rk[2], 1)] & 0xff] ^
- Td3[Te4[GETBYTE(rk[2], 0)] & 0xff];
+ Td0[Te1[GETBYTE(rk[2], 3)] & 0xff] ^
+ Td1[Te1[GETBYTE(rk[2], 2)] & 0xff] ^
+ Td2[Te1[GETBYTE(rk[2], 1)] & 0xff] ^
+ Td3[Te1[GETBYTE(rk[2], 0)] & 0xff];
rk[3] =
- Td0[Te4[GETBYTE(rk[3], 3)] & 0xff] ^
- Td1[Te4[GETBYTE(rk[3], 2)] & 0xff] ^
- Td2[Te4[GETBYTE(rk[3], 1)] & 0xff] ^
- Td3[Te4[GETBYTE(rk[3], 0)] & 0xff];
+ Td0[Te1[GETBYTE(rk[3], 3)] & 0xff] ^
+ Td1[Te1[GETBYTE(rk[3], 2)] & 0xff] ^
+ Td2[Te1[GETBYTE(rk[3], 1)] & 0xff] ^
+ Td3[Te1[GETBYTE(rk[3], 0)] & 0xff];
}
}
}
@@ -244,6 +244,7 @@ void AES::encrypt(const byte* inBlock, const byte* xorBlock,
s2 ^= rk[2];
s3 ^= rk[3];
+ s0 |= PreFetchTe();
/*
* Nr - 1 full rounds:
*/
@@ -312,28 +313,28 @@ void AES::encrypt(const byte* inBlock, const byte* xorBlock,
*/
s0 =
- (Te4[GETBYTE(t0, 3)] & 0xff000000) ^
- (Te4[GETBYTE(t1, 2)] & 0x00ff0000) ^
- (Te4[GETBYTE(t2, 1)] & 0x0000ff00) ^
- (Te4[GETBYTE(t3, 0)] & 0x000000ff) ^
+ (Te2[GETBYTE(t0, 3)] & 0xff000000) ^
+ (Te3[GETBYTE(t1, 2)] & 0x00ff0000) ^
+ (Te0[GETBYTE(t2, 1)] & 0x0000ff00) ^
+ (Te1[GETBYTE(t3, 0)] & 0x000000ff) ^
rk[0];
s1 =
- (Te4[GETBYTE(t1, 3)] & 0xff000000) ^
- (Te4[GETBYTE(t2, 2)] & 0x00ff0000) ^
- (Te4[GETBYTE(t3, 1)] & 0x0000ff00) ^
- (Te4[GETBYTE(t0, 0)] & 0x000000ff) ^
+ (Te2[GETBYTE(t1, 3)] & 0xff000000) ^
+ (Te3[GETBYTE(t2, 2)] & 0x00ff0000) ^
+ (Te0[GETBYTE(t3, 1)] & 0x0000ff00) ^
+ (Te1[GETBYTE(t0, 0)] & 0x000000ff) ^
rk[1];
s2 =
- (Te4[GETBYTE(t2, 3)] & 0xff000000) ^
- (Te4[GETBYTE(t3, 2)] & 0x00ff0000) ^
- (Te4[GETBYTE(t0, 1)] & 0x0000ff00) ^
- (Te4[GETBYTE(t1, 0)] & 0x000000ff) ^
+ (Te2[GETBYTE(t2, 3)] & 0xff000000) ^
+ (Te3[GETBYTE(t3, 2)] & 0x00ff0000) ^
+ (Te0[GETBYTE(t0, 1)] & 0x0000ff00) ^
+ (Te1[GETBYTE(t1, 0)] & 0x000000ff) ^
rk[2];
s3 =
- (Te4[GETBYTE(t3, 3)] & 0xff000000) ^
- (Te4[GETBYTE(t0, 2)] & 0x00ff0000) ^
- (Te4[GETBYTE(t1, 1)] & 0x0000ff00) ^
- (Te4[GETBYTE(t2, 0)] & 0x000000ff) ^
+ (Te2[GETBYTE(t3, 3)] & 0xff000000) ^
+ (Te3[GETBYTE(t0, 2)] & 0x00ff0000) ^
+ (Te0[GETBYTE(t1, 1)] & 0x0000ff00) ^
+ (Te1[GETBYTE(t2, 0)] & 0x000000ff) ^
rk[3];
@@ -358,6 +359,8 @@ void AES::decrypt(const byte* inBlock, const byte* xorBlock,
s2 ^= rk[2];
s3 ^= rk[3];
+ s0 |= PreFetchTd();
+
/*
* Nr - 1 full rounds:
*/
@@ -423,29 +426,32 @@ void AES::decrypt(const byte* inBlock, const byte* xorBlock,
* apply last round and
* map cipher state to byte array block:
*/
+
+ t0 |= PreFetchCTd4();
+
s0 =
- (Td4[GETBYTE(t0, 3)] & 0xff000000) ^
- (Td4[GETBYTE(t3, 2)] & 0x00ff0000) ^
- (Td4[GETBYTE(t2, 1)] & 0x0000ff00) ^
- (Td4[GETBYTE(t1, 0)] & 0x000000ff) ^
+ ((word32)CTd4[GETBYTE(t0, 3)] << 24) ^
+ ((word32)CTd4[GETBYTE(t3, 2)] << 16) ^
+ ((word32)CTd4[GETBYTE(t2, 1)] << 8) ^
+ ((word32)CTd4[GETBYTE(t1, 0)]) ^
rk[0];
s1 =
- (Td4[GETBYTE(t1, 3)] & 0xff000000) ^
- (Td4[GETBYTE(t0, 2)] & 0x00ff0000) ^
- (Td4[GETBYTE(t3, 1)] & 0x0000ff00) ^
- (Td4[GETBYTE(t2, 0)] & 0x000000ff) ^
+ ((word32)CTd4[GETBYTE(t1, 3)] << 24) ^
+ ((word32)CTd4[GETBYTE(t0, 2)] << 16) ^
+ ((word32)CTd4[GETBYTE(t3, 1)] << 8) ^
+ ((word32)CTd4[GETBYTE(t2, 0)]) ^
rk[1];
s2 =
- (Td4[GETBYTE(t2, 3)] & 0xff000000) ^
- (Td4[GETBYTE(t1, 2)] & 0x00ff0000) ^
- (Td4[GETBYTE(t0, 1)] & 0x0000ff00) ^
- (Td4[GETBYTE(t3, 0)] & 0x000000ff) ^
+ ((word32)CTd4[GETBYTE(t2, 3)] << 24 ) ^
+ ((word32)CTd4[GETBYTE(t1, 2)] << 16 ) ^
+ ((word32)CTd4[GETBYTE(t0, 1)] << 8 ) ^
+ ((word32)CTd4[GETBYTE(t3, 0)]) ^
rk[2];
s3 =
- (Td4[GETBYTE(t3, 3)] & 0xff000000) ^
- (Td4[GETBYTE(t2, 2)] & 0x00ff0000) ^
- (Td4[GETBYTE(t1, 1)] & 0x0000ff00) ^
- (Td4[GETBYTE(t0, 0)] & 0x000000ff) ^
+ ((word32)CTd4[GETBYTE(t3, 3)] << 24) ^
+ ((word32)CTd4[GETBYTE(t2, 2)] << 16) ^
+ ((word32)CTd4[GETBYTE(t1, 1)] << 8) ^
+ ((word32)CTd4[GETBYTE(t0, 0)]) ^
rk[3];
gpBlock::Put(xorBlock, outBlock)(s0)(s1)(s2)(s3);
@@ -1826,18 +1832,52 @@ const word32 AES::Td[5][256] = {
}
};
+const byte AES::CTd4[256] =
+{
+ 0x52U, 0x09U, 0x6aU, 0xd5U, 0x30U, 0x36U, 0xa5U, 0x38U,
+ 0xbfU, 0x40U, 0xa3U, 0x9eU, 0x81U, 0xf3U, 0xd7U, 0xfbU,
+ 0x7cU, 0xe3U, 0x39U, 0x82U, 0x9bU, 0x2fU, 0xffU, 0x87U,
+ 0x34U, 0x8eU, 0x43U, 0x44U, 0xc4U, 0xdeU, 0xe9U, 0xcbU,
+ 0x54U, 0x7bU, 0x94U, 0x32U, 0xa6U, 0xc2U, 0x23U, 0x3dU,
+ 0xeeU, 0x4cU, 0x95U, 0x0bU, 0x42U, 0xfaU, 0xc3U, 0x4eU,
+ 0x08U, 0x2eU, 0xa1U, 0x66U, 0x28U, 0xd9U, 0x24U, 0xb2U,
+ 0x76U, 0x5bU, 0xa2U, 0x49U, 0x6dU, 0x8bU, 0xd1U, 0x25U,
+ 0x72U, 0xf8U, 0xf6U, 0x64U, 0x86U, 0x68U, 0x98U, 0x16U,
+ 0xd4U, 0xa4U, 0x5cU, 0xccU, 0x5dU, 0x65U, 0xb6U, 0x92U,
+ 0x6cU, 0x70U, 0x48U, 0x50U, 0xfdU, 0xedU, 0xb9U, 0xdaU,
+ 0x5eU, 0x15U, 0x46U, 0x57U, 0xa7U, 0x8dU, 0x9dU, 0x84U,
+ 0x90U, 0xd8U, 0xabU, 0x00U, 0x8cU, 0xbcU, 0xd3U, 0x0aU,
+ 0xf7U, 0xe4U, 0x58U, 0x05U, 0xb8U, 0xb3U, 0x45U, 0x06U,
+ 0xd0U, 0x2cU, 0x1eU, 0x8fU, 0xcaU, 0x3fU, 0x0fU, 0x02U,
+ 0xc1U, 0xafU, 0xbdU, 0x03U, 0x01U, 0x13U, 0x8aU, 0x6bU,
+ 0x3aU, 0x91U, 0x11U, 0x41U, 0x4fU, 0x67U, 0xdcU, 0xeaU,
+ 0x97U, 0xf2U, 0xcfU, 0xceU, 0xf0U, 0xb4U, 0xe6U, 0x73U,
+ 0x96U, 0xacU, 0x74U, 0x22U, 0xe7U, 0xadU, 0x35U, 0x85U,
+ 0xe2U, 0xf9U, 0x37U, 0xe8U, 0x1cU, 0x75U, 0xdfU, 0x6eU,
+ 0x47U, 0xf1U, 0x1aU, 0x71U, 0x1dU, 0x29U, 0xc5U, 0x89U,
+ 0x6fU, 0xb7U, 0x62U, 0x0eU, 0xaaU, 0x18U, 0xbeU, 0x1bU,
+ 0xfcU, 0x56U, 0x3eU, 0x4bU, 0xc6U, 0xd2U, 0x79U, 0x20U,
+ 0x9aU, 0xdbU, 0xc0U, 0xfeU, 0x78U, 0xcdU, 0x5aU, 0xf4U,
+ 0x1fU, 0xddU, 0xa8U, 0x33U, 0x88U, 0x07U, 0xc7U, 0x31U,
+ 0xb1U, 0x12U, 0x10U, 0x59U, 0x27U, 0x80U, 0xecU, 0x5fU,
+ 0x60U, 0x51U, 0x7fU, 0xa9U, 0x19U, 0xb5U, 0x4aU, 0x0dU,
+ 0x2dU, 0xe5U, 0x7aU, 0x9fU, 0x93U, 0xc9U, 0x9cU, 0xefU,
+ 0xa0U, 0xe0U, 0x3bU, 0x4dU, 0xaeU, 0x2aU, 0xf5U, 0xb0U,
+ 0xc8U, 0xebU, 0xbbU, 0x3cU, 0x83U, 0x53U, 0x99U, 0x61U,
+ 0x17U, 0x2bU, 0x04U, 0x7eU, 0xbaU, 0x77U, 0xd6U, 0x26U,
+ 0xe1U, 0x69U, 0x14U, 0x63U, 0x55U, 0x21U, 0x0cU, 0x7dU,
+};
+
const word32* AES::Te0 = AES::Te[0];
const word32* AES::Te1 = AES::Te[1];
const word32* AES::Te2 = AES::Te[2];
const word32* AES::Te3 = AES::Te[3];
-const word32* AES::Te4 = AES::Te[4];
const word32* AES::Td0 = AES::Td[0];
const word32* AES::Td1 = AES::Td[1];
const word32* AES::Td2 = AES::Td[2];
const word32* AES::Td3 = AES::Td[3];
-const word32* AES::Td4 = AES::Td[4];
diff --git a/extra/yassl/taocrypt/src/asn.cpp b/extra/yassl/taocrypt/src/asn.cpp
index 0474e7c21d5..80bcd612d27 100644
--- a/extra/yassl/taocrypt/src/asn.cpp
+++ b/extra/yassl/taocrypt/src/asn.cpp
@@ -1219,17 +1219,17 @@ word32 DecodeDSA_Signature(byte* decoded, const byte* encoded, word32 sz)
}
word32 rLen = GetLength(source);
if (rLen != 20) {
- if (rLen == 21) { // zero at front, eat
+    while (rLen > 20 && source.remaining() > 0) { // zeros at front, eat
source.next();
--rLen;
}
- else if (rLen == 19) { // add zero to front so 20 bytes
+    if (rLen < 20) { // add zeros to front so 20 bytes
+ word32 tmpLen = rLen;
+ while (tmpLen < 20) {
decoded[0] = 0;
decoded++;
+ tmpLen++;
}
- else {
- source.SetError(DSA_SZ_E);
- return 0;
}
}
memcpy(decoded, source.get_buffer() + source.get_index(), rLen);
@@ -1242,17 +1242,17 @@ word32 DecodeDSA_Signature(byte* decoded, const byte* encoded, word32 sz)
}
word32 sLen = GetLength(source);
if (sLen != 20) {
- if (sLen == 21) {
- source.next(); // zero at front, eat
+ while (sLen > 20 && source.remaining() > 0) {
+        source.next(); // zeros at front, eat
--sLen;
}
- else if (sLen == 19) {
- decoded[rLen] = 0; // add zero to front so 20 bytes
+    if (sLen < 20) { // add zeros to front so 20 bytes
+ word32 tmpLen = sLen;
+ while (tmpLen < 20) {
+ decoded[rLen] = 0;
decoded++;
+ tmpLen++;
}
- else {
- source.SetError(DSA_SZ_E);
- return 0;
}
}
memcpy(decoded + rLen, source.get_buffer() + source.get_index(), sLen);
diff --git a/extra/yassl/taocrypt/src/dsa.cpp b/extra/yassl/taocrypt/src/dsa.cpp
index 72221441b2b..fda01881df5 100644
--- a/extra/yassl/taocrypt/src/dsa.cpp
+++ b/extra/yassl/taocrypt/src/dsa.cpp
@@ -172,6 +172,7 @@ word32 DSA_Signer::Sign(const byte* sha_digest, byte* sig,
const Integer& q = key_.GetSubGroupOrder();
const Integer& g = key_.GetSubGroupGenerator();
const Integer& x = key_.GetPrivatePart();
+ byte* tmpPtr = sig; // initial signature output
Integer k(rng, 1, q - 1);
@@ -187,22 +188,23 @@ word32 DSA_Signer::Sign(const byte* sha_digest, byte* sig,
return (word32) -1;
int rSz = r_.ByteCount();
+ int tmpSz = rSz;
- if (rSz == 19) {
- sig[0] = 0;
- sig++;
+ while (tmpSz++ < SHA::DIGEST_SIZE) {
+ *sig++ = 0;
}
r_.Encode(sig, rSz);
+ sig = tmpPtr + SHA::DIGEST_SIZE; // advance sig output to s
int sSz = s_.ByteCount();
+ tmpSz = sSz;
- if (sSz == 19) {
- sig[rSz] = 0;
- sig++;
+ while (tmpSz++ < SHA::DIGEST_SIZE) {
+ *sig++ = 0;
}
- s_.Encode(sig + rSz, sSz);
+ s_.Encode(sig, sSz);
return 40;
}
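
[Annotation, not part of the patch] The asn.cpp and dsa.cpp hunks fix the same assumption: a DSA signature here is a fixed 40-byte r||s, each value left-padded with zeros to SHA::DIGEST_SIZE (20) bytes, but the old code only handled encodings of exactly 19, 20, or 21 bytes. DER integers may be shorter (small values) or carry extra leading zero bytes, which is the "unusual sizes" case from the release notes. The normalization both hunks aim for, as a hypothetical helper:

    #include <cstring>

    static const int DIGEST_SIZE = 20;   // SHA-1 digest size

    // Left-pad a big-endian integer of byteCount (<= 20) bytes into a fixed
    // 20-byte field: r goes to sig[0..19], s to sig[20..39].
    static void encode_fixed(unsigned char *out,
                             const unsigned char *val, int byteCount)
    {
        int pad = DIGEST_SIZE - byteCount;
        memset(out, 0, pad);             // leading zeros
        memcpy(out + pad, val, byteCount);
    }
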
diff --git a/extra/yassl/taocrypt/src/integer.cpp b/extra/yassl/taocrypt/src/integer.cpp
index fb8d9276bd9..dd8425396ed 100644
--- a/extra/yassl/taocrypt/src/integer.cpp
+++ b/extra/yassl/taocrypt/src/integer.cpp
@@ -193,8 +193,9 @@ DWord() {}
"a" (a), "rm" (b) : "cc");
#elif defined(__mips64)
- __asm__("dmultu %2,%3" : "=d" (r.halfs_.high), "=l" (r.halfs_.low)
- : "r" (a), "r" (b));
+ unsigned __int128 t = (unsigned __int128) a * b;
+ r.halfs_.high = t >> 64;
+ r.halfs_.low = (word) t;
#elif defined(_M_IX86)
// for testing
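
[Annotation, not part of the patch] The mips64 change drops the dmultu inline assembly (an assumption: newer GCC releases reject its hi/lo output constraints) in favor of a compiler-generated 128-bit multiply. The identity being relied on, sketched:

    #include <cstdint>

    // 64x64 -> 128-bit multiply; unsigned __int128 is a GCC/Clang extension.
    static void mul_64x64(uint64_t a, uint64_t b, uint64_t &hi, uint64_t &lo)
    {
        unsigned __int128 t = (unsigned __int128) a * b;
        hi = (uint64_t)(t >> 64);
        lo = (uint64_t) t;
    }
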
diff --git a/extra/yassl/taocrypt/test/test.cpp b/extra/yassl/taocrypt/test/test.cpp
index c23d981924d..b07a9eb9f29 100644
--- a/extra/yassl/taocrypt/test/test.cpp
+++ b/extra/yassl/taocrypt/test/test.cpp
@@ -1281,6 +1281,9 @@ int dsa_test()
if (!verifier.Verify(digest, decoded))
return -90;
+ if (!verifier.Verify(digest, signature))
+ return -91;
+
return 0;
}
diff --git a/extra/yassl/testsuite/test.hpp b/extra/yassl/testsuite/test.hpp
index 5374edd0e2a..a65a212cf99 100644
--- a/extra/yassl/testsuite/test.hpp
+++ b/extra/yassl/testsuite/test.hpp
@@ -22,7 +22,6 @@
#define yaSSL_TEST_HPP
#include "runtime.hpp"
-#include "openssl/ssl.h" /* openssl compatibility test */
#include "error.hpp"
#include <stdio.h>
#include <stdlib.h>
@@ -56,6 +55,7 @@
#endif
#define SOCKET_T int
#endif /* _WIN32 */
+#include "openssl/ssl.h" /* openssl compatibility test */
#ifdef _MSC_VER
diff --git a/include/byte_order_generic_x86.h b/include/byte_order_generic_x86.h
index 0a71a17829b..a97dd0f43a3 100644
--- a/include/byte_order_generic_x86.h
+++ b/include/byte_order_generic_x86.h
@@ -27,19 +27,9 @@
((uint32) (uchar) (A)[0])))
#define sint4korr(A) (*((const long *) (A)))
#define uint2korr(A) (*((const uint16 *) (A)))
-
-/*
- Attention: Please, note, uint3korr reads 4 bytes (not 3)!
- It means, that you have to provide enough allocated space.
-*/
-#if defined(HAVE_valgrind) && !defined(_WIN32)
#define uint3korr(A) (uint32) (((uint32) ((uchar) (A)[0])) +\
(((uint32) ((uchar) (A)[1])) << 8) +\
(((uint32) ((uchar) (A)[2])) << 16))
-#else
-#define uint3korr(A) (long) (*((const unsigned int *) (A)) & 0xFFFFFF)
-#endif
-
#define uint4korr(A) (*((const uint32 *) (A)))
#define uint5korr(A) ((ulonglong)(((uint32) ((uchar) (A)[0])) +\
(((uint32) ((uchar) (A)[1])) << 8) +\
diff --git a/include/byte_order_generic_x86_64.h b/include/byte_order_generic_x86_64.h
index b6b0c5d8ea5..8c7493965a9 100644
--- a/include/byte_order_generic_x86_64.h
+++ b/include/byte_order_generic_x86_64.h
@@ -27,17 +27,9 @@
((uint32) (uchar) (A)[0])))
#define sint4korr(A) (int32) (*((int32 *) (A)))
#define uint2korr(A) (uint16) (*((uint16 *) (A)))
-/*
- Attention: Please, note, uint3korr reads 4 bytes (not 3)!
- It means, that you have to provide enough allocated space.
-*/
-#if defined(HAVE_valgrind) && !defined(_WIN32)
#define uint3korr(A) (uint32) (((uint32) ((uchar) (A)[0])) +\
(((uint32) ((uchar) (A)[1])) << 8) +\
(((uint32) ((uchar) (A)[2])) << 16))
-#else
-#define uint3korr(A) (uint32) (*((unsigned int *) (A)) & 0xFFFFFF)
-#endif
#define uint4korr(A) (uint32) (*((uint32 *) (A)))
#define uint5korr(A) ((ulonglong)(((uint32) ((uchar) (A)[0])) +\
(((uint32) ((uchar) (A)[1])) << 8) +\
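
[Annotation, not part of the patch] In both byte-order headers the change makes the byte-by-byte uint3korr the only definition. The removed fast path read a full 4 bytes and masked off the top one, so a 3-byte value at the very end of an allocation triggered an out-of-bounds read under valgrind, hence the old HAVE_valgrind guard. The difference, sketched:

    #include <cstdint>
    #include <cstring>

    static uint32_t uint3korr_safe(const unsigned char *p) // reads 3 bytes
    {
        return (uint32_t) p[0] | ((uint32_t) p[1] << 8)
                               | ((uint32_t) p[2] << 16);
    }

    static uint32_t uint3korr_fast(const unsigned char *p) // reads 4 bytes!
    {
        uint32_t v;
        memcpy(&v, p, 4);        // over-reads a 3-byte buffer
        return v & 0xFFFFFF;     // little-endian only
    }
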
diff --git a/libmysql/libmysql.c b/libmysql/libmysql.c
index 190652c1d61..4d7d8a380a7 100644
--- a/libmysql/libmysql.c
+++ b/libmysql/libmysql.c
@@ -452,8 +452,9 @@ void read_user_name(char *name)
void read_user_name(char *name)
{
- char *str=getenv("USER"); /* ODBC will send user variable */
- strmake(name,str ? str : "ODBC", USERNAME_LENGTH);
+ DWORD len= USERNAME_LENGTH;
+ if (!GetUserName(name, &len))
+ strmov(name,"UNKNOWN_USER");
}
#endif
diff --git a/mysql-test/extra/binlog_tests/database.test b/mysql-test/extra/binlog_tests/database.test
index 2e093aacb0d..097a501cc34 100644
--- a/mysql-test/extra/binlog_tests/database.test
+++ b/mysql-test/extra/binlog_tests/database.test
@@ -52,7 +52,7 @@ eval SELECT 'hello' INTO OUTFILE 'fake_file.$prefix';
# Use '/' instead of '\' in the error message. On windows platform, dir is
# formed with '\'.
---replace_regex /\\testing_1\\*/\/testing_1\// /66/39/ /17/39/ /247/39/ /File exists/Directory not empty/
+--replace_regex /\\testing_1\\*/\/testing_1\// /66/39/ /93/39/ /17/39/ /247/39/ /File exists/Directory not empty/
--error 1010
DROP DATABASE testing_1;
let $wait_binlog_event= DROP TABLE IF EXIST;
diff --git a/mysql-test/r/alter_table.result b/mysql-test/r/alter_table.result
index 9c0a25f6d61..8b77e7836cd 100644
--- a/mysql-test/r/alter_table.result
+++ b/mysql-test/r/alter_table.result
@@ -2031,6 +2031,64 @@ Warnings:
Note 1061 Multiple primary key defined
DROP TABLE t1;
#
+# MDEV-11126 Crash while altering persistent virtual column
+#
+CREATE TABLE `tab1` (
+`id` bigint(20) NOT NULL AUTO_INCREMENT,
+`field2` set('option1','option2','option3','option4') NOT NULL,
+`field3` set('option1','option2','option3','option4','option5') NOT NULL,
+`field4` set('option1','option2','option3','option4') NOT NULL,
+`field5` varchar(32) NOT NULL,
+`field6` varchar(32) NOT NULL,
+`field7` varchar(32) NOT NULL,
+`field8` varchar(32) NOT NULL,
+`field9` int(11) NOT NULL DEFAULT '1',
+`field10` varchar(16) NOT NULL,
+`field11` enum('option1','option2','option3') NOT NULL DEFAULT 'option1',
+`v_col` varchar(128) AS (IF(field11='option1',CONCAT_WS(":","field1",field2,field3,field4,field5,field6,field7,field8,field9,field10), CONCAT_WS(":","field1",field11,field2,field3,field4,field5,field6,field7,field8,field9,field10))) PERSISTENT,
+PRIMARY KEY (`id`)
+) DEFAULT CHARSET=latin1;
+ALTER TABLE `tab1` CHANGE COLUMN v_col `v_col` varchar(128);
+SHOW CREATE TABLE `tab1`;
+Table Create Table
+tab1 CREATE TABLE `tab1` (
+ `id` bigint(20) NOT NULL AUTO_INCREMENT,
+ `field2` set('option1','option2','option3','option4') NOT NULL,
+ `field3` set('option1','option2','option3','option4','option5') NOT NULL,
+ `field4` set('option1','option2','option3','option4') NOT NULL,
+ `field5` varchar(32) NOT NULL,
+ `field6` varchar(32) NOT NULL,
+ `field7` varchar(32) NOT NULL,
+ `field8` varchar(32) NOT NULL,
+ `field9` int(11) NOT NULL DEFAULT '1',
+ `field10` varchar(16) NOT NULL,
+ `field11` enum('option1','option2','option3') NOT NULL DEFAULT 'option1',
+ `v_col` varchar(128) DEFAULT NULL,
+ PRIMARY KEY (`id`)
+) ENGINE=MyISAM DEFAULT CHARSET=latin1
+ALTER TABLE `tab1` CHANGE COLUMN v_col `v_col` varchar(128) AS (IF(field11='option1',CONCAT_WS(":","field1",field2,field3,field4,field5,field6,field7,field8,field9,field10), CONCAT_WS(":","field1",field11,field2,field3,field4,field5,field6,field7,field8,field9,field10))) PERSISTENT;
+SHOW CREATE TABLE `tab1`;
+Table Create Table
+tab1 CREATE TABLE `tab1` (
+ `id` bigint(20) NOT NULL AUTO_INCREMENT,
+ `field2` set('option1','option2','option3','option4') NOT NULL,
+ `field3` set('option1','option2','option3','option4','option5') NOT NULL,
+ `field4` set('option1','option2','option3','option4') NOT NULL,
+ `field5` varchar(32) NOT NULL,
+ `field6` varchar(32) NOT NULL,
+ `field7` varchar(32) NOT NULL,
+ `field8` varchar(32) NOT NULL,
+ `field9` int(11) NOT NULL DEFAULT '1',
+ `field10` varchar(16) NOT NULL,
+ `field11` enum('option1','option2','option3') NOT NULL DEFAULT 'option1',
+ `v_col` varchar(128) AS (IF(field11='option1',CONCAT_WS(":","field1",field2,field3,field4,field5,field6,field7,field8,field9,field10), CONCAT_WS(":","field1",field11,field2,field3,field4,field5,field6,field7,field8,field9,field10))) PERSISTENT,
+ PRIMARY KEY (`id`)
+) ENGINE=MyISAM DEFAULT CHARSET=latin1
+DROP TABLE `tab1`;
+#
+# Start of 10.1 tests
+#
+#
# MDEV-7374 : Losing connection to MySQL while running ALTER TABLE
#
CREATE TABLE t1(i INT) ENGINE=INNODB;
@@ -2039,9 +2097,6 @@ INSERT INTO t1 SELECT a.* FROM t1 a, t1 b, t1 c, t1 d, t1 e;
ALTER TABLE t1 MODIFY i FLOAT;
DROP TABLE t1;
#
-# Start of 10.1 tests
-#
-#
# MDEV-7816 ALTER with DROP INDEX and ADD INDEX .. COMMENT='comment2' ignores the new comment
#
CREATE TABLE t1(a INT);
diff --git a/mysql-test/r/create_or_replace.result b/mysql-test/r/create_or_replace.result
index 7df3704cfa1..70b4b98b89c 100644
--- a/mysql-test/r/create_or_replace.result
+++ b/mysql-test/r/create_or_replace.result
@@ -440,3 +440,14 @@ KILL QUERY con_id;
ERROR 70100: Query execution was interrupted
drop table t1;
DROP TABLE t2;
+#
+# MDEV-10824 - Crash in CREATE OR REPLACE TABLE t1 AS SELECT spfunc()
+#
+CREATE TABLE t1(a INT);
+CREATE FUNCTION f1() RETURNS VARCHAR(16383) RETURN 'test';
+CREATE OR REPLACE TABLE t1 AS SELECT f1();
+LOCK TABLE t1 WRITE;
+CREATE OR REPLACE TABLE t1 AS SELECT f1();
+UNLOCK TABLES;
+DROP FUNCTION f1;
+DROP TABLE t1;
diff --git a/mysql-test/r/drop.result b/mysql-test/r/drop.result
index c23ffbe327b..c25ae9e3055 100644
--- a/mysql-test/r/drop.result
+++ b/mysql-test/r/drop.result
@@ -209,3 +209,9 @@ INSERT INTO table1 VALUES (1);
ERROR 42S02: Unknown table 't.notable'
DROP TABLE table1,table2;
# End BUG#34750
+#
+# MDEV-11105 Table named 'db' has weird side effect.
+#
+CREATE DATABASE mysqltest;
+CREATE TABLE mysqltest.db(id INT);
+DROP DATABASE mysqltest;
diff --git a/mysql-test/r/information_schema.result b/mysql-test/r/information_schema.result
index fa5280df2b4..e78e2b7ee3e 100644
--- a/mysql-test/r/information_schema.result
+++ b/mysql-test/r/information_schema.result
@@ -1012,19 +1012,19 @@ show grants;
Grants for user3@localhost
GRANT USAGE ON *.* TO 'user3'@'localhost'
GRANT SELECT ON `mysqltest`.* TO 'user3'@'localhost'
-select * from information_schema.column_privileges where grantee like '%user%'
+select * from information_schema.column_privileges where grantee like '\'user%'
order by grantee;
GRANTEE TABLE_CATALOG TABLE_SCHEMA TABLE_NAME COLUMN_NAME PRIVILEGE_TYPE IS_GRANTABLE
'user1'@'localhost' def mysqltest t1 f1 SELECT NO
-select * from information_schema.table_privileges where grantee like '%user%'
+select * from information_schema.table_privileges where grantee like '\'user%'
order by grantee;
GRANTEE TABLE_CATALOG TABLE_SCHEMA TABLE_NAME PRIVILEGE_TYPE IS_GRANTABLE
'user2'@'localhost' def mysqltest t2 SELECT NO
-select * from information_schema.schema_privileges where grantee like '%user%'
+select * from information_schema.schema_privileges where grantee like '\'user%'
order by grantee;
GRANTEE TABLE_CATALOG TABLE_SCHEMA PRIVILEGE_TYPE IS_GRANTABLE
'user3'@'localhost' def mysqltest SELECT NO
-select * from information_schema.user_privileges where grantee like '%user%'
+select * from information_schema.user_privileges where grantee like '\'user%'
order by grantee;
GRANTEE TABLE_CATALOG PRIVILEGE_TYPE IS_GRANTABLE
'user1'@'localhost' def USAGE NO
diff --git a/mysql-test/r/mysql.result b/mysql-test/r/mysql.result
index ca19d2eb98b..4097a22ea43 100644
--- a/mysql-test/r/mysql.result
+++ b/mysql-test/r/mysql.result
@@ -514,6 +514,14 @@ DROP DATABASE connected_db;
create database `aa``bb````cc`;
DATABASE()
aa`bb``cc
+DATABASE()
+test
+DATABASE()
+aa`bb``cc
+DATABASE()
+test
+DATABASE()
+aa`bb``cc
drop database `aa``bb````cc`;
a
>>\ndelimiter\n<<
diff --git a/mysql-test/r/mysql_not_windows.result b/mysql-test/r/mysql_not_windows.result
index d5670a1a9ca..1df62d9a12d 100644
--- a/mysql-test/r/mysql_not_windows.result
+++ b/mysql-test/r/mysql_not_windows.result
@@ -3,3 +3,9 @@ a
1
End of tests
+1
+1
+2
+2
+X
+3
diff --git a/mysql-test/r/mysqldump-nl.result b/mysql-test/r/mysqldump-nl.result
new file mode 100644
index 00000000000..829bf980103
--- /dev/null
+++ b/mysql-test/r/mysqldump-nl.result
@@ -0,0 +1,126 @@
+create database `mysqltest1
+1tsetlqsym`;
+use `mysqltest1
+1tsetlqsym`;
+create table `t1
+1t` (`foobar
+raboof` int);
+create view `v1
+1v` as select * from `t1
+1t`;
+create procedure sp() select * from `v1
+1v`;
+flush tables;
+use test;
+
+--
+-- Current Database: `mysqltest1
+-- 1tsetlqsym`
+--
+
+/*!40000 DROP DATABASE IF EXISTS `mysqltest1
+1tsetlqsym`*/;
+
+CREATE DATABASE /*!32312 IF NOT EXISTS*/ `mysqltest1
+1tsetlqsym` /*!40100 DEFAULT CHARACTER SET latin1 */;
+
+USE `mysqltest1
+1tsetlqsym`;
+
+--
+-- Table structure for table `t1
+-- 1t`
+--
+
+/*!40101 SET @saved_cs_client = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE `t1
+1t` (
+ `foobar
+raboof` int(11) DEFAULT NULL
+) ENGINE=MyISAM DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Dumping data for table `t1
+-- 1t`
+--
+
+--
+-- Temporary table structure for view `v1
+-- 1v`
+--
+
+SET @saved_cs_client = @@character_set_client;
+SET character_set_client = utf8;
+/*!50001 CREATE TABLE `v1
+1v` (
+ `foobar
+raboof` tinyint NOT NULL
+) ENGINE=MyISAM */;
+SET character_set_client = @saved_cs_client;
+
+--
+-- Dumping routines for database 'mysqltest1
+-- 1tsetlqsym'
+--
+/*!50003 SET @saved_cs_client = @@character_set_client */ ;
+/*!50003 SET @saved_cs_results = @@character_set_results */ ;
+/*!50003 SET @saved_col_connection = @@collation_connection */ ;
+/*!50003 SET character_set_client = latin1 */ ;
+/*!50003 SET character_set_results = latin1 */ ;
+/*!50003 SET collation_connection = latin1_swedish_ci */ ;
+/*!50003 SET @saved_sql_mode = @@sql_mode */ ;
+/*!50003 SET sql_mode = 'NO_AUTO_CREATE_USER,NO_ENGINE_SUBSTITUTION' */ ;
+DELIMITER ;;
+CREATE DEFINER=`root`@`localhost` PROCEDURE `sp`()
+select * from `v1
+1v` ;;
+DELIMITER ;
+/*!50003 SET sql_mode = @saved_sql_mode */ ;
+/*!50003 SET character_set_client = @saved_cs_client */ ;
+/*!50003 SET character_set_results = @saved_cs_results */ ;
+/*!50003 SET collation_connection = @saved_col_connection */ ;
+
+--
+-- Current Database: `mysqltest1
+-- 1tsetlqsym`
+--
+
+USE `mysqltest1
+1tsetlqsym`;
+
+--
+-- Final view structure for view `v1
+-- 1v`
+--
+
+/*!50001 DROP TABLE IF EXISTS `v1
+1v`*/;
+/*!50001 SET @saved_cs_client = @@character_set_client */;
+/*!50001 SET @saved_cs_results = @@character_set_results */;
+/*!50001 SET @saved_col_connection = @@collation_connection */;
+/*!50001 SET character_set_client = latin1 */;
+/*!50001 SET character_set_results = latin1 */;
+/*!50001 SET collation_connection = latin1_swedish_ci */;
+/*!50001 CREATE ALGORITHM=UNDEFINED */
+/*!50013 DEFINER=`root`@`localhost` SQL SECURITY DEFINER */
+/*!50001 VIEW `v1
+1v` AS select `t1
+1t`.`foobar
+raboof` AS `foobar
+raboof` from `t1
+1t` */;
+/*!50001 SET character_set_client = @saved_cs_client */;
+/*!50001 SET character_set_results = @saved_cs_results */;
+/*!50001 SET collation_connection = @saved_col_connection */;
+show tables from `mysqltest1
+1tsetlqsym`;
+Tables_in_mysqltest1
+1tsetlqsym
+t1
+1t
+v1
+1v
+drop database `mysqltest1
+1tsetlqsym`;
diff --git a/mysql-test/r/mysqldump.result b/mysql-test/r/mysqldump.result
index 559f8885b0d..ce786c2ede8 100644
--- a/mysql-test/r/mysqldump.result
+++ b/mysql-test/r/mysqldump.result
@@ -5236,9 +5236,6 @@ SET @@global.log_output="TABLE";
SET @@global.general_log='OFF';
SET @@global.slow_query_log='OFF';
DROP DATABASE mysql;
-Warnings:
-Error 1146 Table 'mysql.proc' doesn't exist
-Error 1146 Table 'mysql.event' doesn't exist
SHOW CREATE TABLE mysql.general_log;
Table Create Table
general_log CREATE TABLE `general_log` (
diff --git a/mysql-test/r/mysqltest.result b/mysql-test/r/mysqltest.result
index e258b1d156f..fa054d457f9 100644
--- a/mysql-test/r/mysqltest.result
+++ b/mysql-test/r/mysqltest.result
@@ -269,12 +269,6 @@ source database
echo message echo message
mysqltest: At line 1: Missing argument in exec
-1
-1
-2
-2
-X
-3
MySQL
"MySQL"
MySQL: The
diff --git a/mysql-test/r/selectivity.result b/mysql-test/r/selectivity.result
index 4238359c201..49298304ebd 100644
--- a/mysql-test/r/selectivity.result
+++ b/mysql-test/r/selectivity.result
@@ -1440,3 +1440,75 @@ a b i
set optimizer_use_condition_selectivity=@save_optimizer_use_condition_selectivity;
DROP TABLE t1,t2;
set use_stat_tables=@save_use_stat_tables;
+#
+# Bug mdev-11096: range condition over column without statistical data
+#
+set use_stat_tables='preferably';
+set optimizer_use_condition_selectivity=3;
+create table t1(col1 char(32));
+insert into t1 values ('a'),('b'),('c'),('d'), ('e'),('f'),('g'),('h');
+analyze table t1 persistent for columns () indexes ();
+Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
+test.t1 analyze status OK
+explain extended
+select * from t1 where col1 > 'b' and col1 < 'e';
+id select_type table type possible_keys key key_len ref rows filtered Extra
+1 SIMPLE t1 ALL NULL NULL NULL NULL 8 100.00 Using where
+Warnings:
+Note 1003 select `test`.`t1`.`col1` AS `col1` from `test`.`t1` where ((`test`.`t1`.`col1` > 'b') and (`test`.`t1`.`col1` < 'e'))
+select * from t1 where col1 > 'b' and col1 < 'e';
+col1
+c
+d
+drop table t1;
+set optimizer_use_condition_selectivity=@save_optimizer_use_condition_selectivity;
+set use_stat_tables=@save_use_stat_tables;
+#
+# Bug mdev-9628: unindexed blob column without min-max statistics
+# with optimizer_use_condition_selectivity=3
+#
+set use_stat_tables='preferably';
+set optimizer_use_condition_selectivity=3;
+create table t1(col1 char(32));
+insert into t1 values ('a'),('b'),('c'),('d'), ('e'),('f'),('g'),('h');
+analyze table t1;
+Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
+test.t1 analyze status OK
+create table t2(col1 text);
+insert into t2 values ('a'),('b'),('c'),('d'), ('e'),('f'),('g'),('h');
+analyze table t2;
+Table Op Msg_type Msg_text
+test.t2 analyze status Engine-independent statistics collected
+test.t2 analyze Warning Engine-independent statistics are not collected for column 'col1'
+test.t2 analyze status OK
+select * from t1 where col1 > 'b' and col1 < 'd';
+col1
+c
+explain extended
+select * from t1 where col1 > 'b' and col1 < 'd';
+id select_type table type possible_keys key key_len ref rows filtered Extra
+1 SIMPLE t1 ALL NULL NULL NULL NULL 8 28.57 Using where
+Warnings:
+Note 1003 select `test`.`t1`.`col1` AS `col1` from `test`.`t1` where ((`test`.`t1`.`col1` > 'b') and (`test`.`t1`.`col1` < 'd'))
+select * from t2 where col1 > 'b' and col1 < 'd';
+col1
+c
+explain extended
+select * from t2 where col1 > 'b' and col1 < 'd';
+id select_type table type possible_keys key key_len ref rows filtered Extra
+1 SIMPLE t2 ALL NULL NULL NULL NULL 8 100.00 Using where
+Warnings:
+Note 1003 select `test`.`t2`.`col1` AS `col1` from `test`.`t2` where ((`test`.`t2`.`col1` > 'b') and (`test`.`t2`.`col1` < 'd'))
+select * from t2 where col1 < 'b' and col1 > 'd';
+col1
+explain extended
+select * from t2 where col1 < 'b' and col1 > 'd';
+id select_type table type possible_keys key key_len ref rows filtered Extra
+1 SIMPLE NULL NULL NULL NULL NULL NULL NULL NULL Impossible WHERE noticed after reading const tables
+Warnings:
+Note 1003 select `test`.`t2`.`col1` AS `col1` from `test`.`t2` where 0
+drop table t1,t2;
+set optimizer_use_condition_selectivity=@save_optimizer_use_condition_selectivity;
+set use_stat_tables=@save_use_stat_tables;
diff --git a/mysql-test/r/selectivity_innodb.result b/mysql-test/r/selectivity_innodb.result
index daf28073cf1..8adc16e7df7 100644
--- a/mysql-test/r/selectivity_innodb.result
+++ b/mysql-test/r/selectivity_innodb.result
@@ -802,9 +802,9 @@ insert into t2 values (2),(3);
explain extended
select * from t1 where a in ( select b from t2 ) AND ( a > 3 );
id select_type table type possible_keys key key_len ref rows filtered Extra
-1 PRIMARY t1 ALL NULL NULL NULL NULL 1 0.00 Using where
+1 PRIMARY t1 ALL NULL NULL NULL NULL 1 100.00 Using where
1 PRIMARY <subquery2> eq_ref distinct_key distinct_key 4 func 1 100.00
-2 MATERIALIZED t2 ALL NULL NULL NULL NULL 2 0.00
+2 MATERIALIZED t2 ALL NULL NULL NULL NULL 2 100.00
Warnings:
Note 1003 select `test`.`t1`.`a` AS `a` from `test`.`t1` semi join (`test`.`t2`) where ((`test`.`t1`.`a` > 3))
select * from t1 where a in ( select b from t2 ) AND ( a > 3 );
@@ -1450,6 +1450,78 @@ a b i
set optimizer_use_condition_selectivity=@save_optimizer_use_condition_selectivity;
DROP TABLE t1,t2;
set use_stat_tables=@save_use_stat_tables;
+#
+# Bug mdev-11096: range condition over column without statistical data
+#
+set use_stat_tables='preferably';
+set optimizer_use_condition_selectivity=3;
+create table t1(col1 char(32));
+insert into t1 values ('a'),('b'),('c'),('d'), ('e'),('f'),('g'),('h');
+analyze table t1 persistent for columns () indexes ();
+Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
+test.t1 analyze status OK
+explain extended
+select * from t1 where col1 > 'b' and col1 < 'e';
+id select_type table type possible_keys key key_len ref rows filtered Extra
+1 SIMPLE t1 ALL NULL NULL NULL NULL 8 100.00 Using where
+Warnings:
+Note 1003 select `test`.`t1`.`col1` AS `col1` from `test`.`t1` where ((`test`.`t1`.`col1` > 'b') and (`test`.`t1`.`col1` < 'e'))
+select * from t1 where col1 > 'b' and col1 < 'e';
+col1
+c
+d
+drop table t1;
+set optimizer_use_condition_selectivity=@save_optimizer_use_condition_selectivity;
+set use_stat_tables=@save_use_stat_tables;
+#
+# Bug mdev-9628: unindexed blob column without min-max statistics
+# with optimizer_use_condition_selectivity=3
+#
+set use_stat_tables='preferably';
+set optimizer_use_condition_selectivity=3;
+create table t1(col1 char(32));
+insert into t1 values ('a'),('b'),('c'),('d'), ('e'),('f'),('g'),('h');
+analyze table t1;
+Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
+test.t1 analyze status OK
+create table t2(col1 text);
+insert into t2 values ('a'),('b'),('c'),('d'), ('e'),('f'),('g'),('h');
+analyze table t2;
+Table Op Msg_type Msg_text
+test.t2 analyze status Engine-independent statistics collected
+test.t2 analyze Warning Engine-independent statistics are not collected for column 'col1'
+test.t2 analyze status OK
+select * from t1 where col1 > 'b' and col1 < 'd';
+col1
+c
+explain extended
+select * from t1 where col1 > 'b' and col1 < 'd';
+id select_type table type possible_keys key key_len ref rows filtered Extra
+1 SIMPLE t1 ALL NULL NULL NULL NULL 8 28.57 Using where
+Warnings:
+Note 1003 select `test`.`t1`.`col1` AS `col1` from `test`.`t1` where ((`test`.`t1`.`col1` > 'b') and (`test`.`t1`.`col1` < 'd'))
+select * from t2 where col1 > 'b' and col1 < 'd';
+col1
+c
+explain extended
+select * from t2 where col1 > 'b' and col1 < 'd';
+id select_type table type possible_keys key key_len ref rows filtered Extra
+1 SIMPLE t2 ALL NULL NULL NULL NULL 8 100.00 Using where
+Warnings:
+Note 1003 select `test`.`t2`.`col1` AS `col1` from `test`.`t2` where ((`test`.`t2`.`col1` > 'b') and (`test`.`t2`.`col1` < 'd'))
+select * from t2 where col1 < 'b' and col1 > 'd';
+col1
+explain extended
+select * from t2 where col1 < 'b' and col1 > 'd';
+id select_type table type possible_keys key key_len ref rows filtered Extra
+1 SIMPLE NULL NULL NULL NULL NULL NULL NULL NULL Impossible WHERE noticed after reading const tables
+Warnings:
+Note 1003 select `test`.`t2`.`col1` AS `col1` from `test`.`t2` where 0
+drop table t1,t2;
+set optimizer_use_condition_selectivity=@save_optimizer_use_condition_selectivity;
+set use_stat_tables=@save_use_stat_tables;
set optimizer_switch=@save_optimizer_switch_for_selectivity_test;
set @tmp_ust= @@use_stat_tables;
set @tmp_oucs= @@optimizer_use_condition_selectivity;
@@ -1536,6 +1608,45 @@ where t1.child_user_id=t3.id and t1.child_group_id is null and t2.lower_group_na
parent_id child_group_id child_user_id id lower_group_name directory_id id
drop table t1,t2,t3;
#
+# MDEV-9187: duplicate of bug mdev-9628
+#
+set use_stat_tables = preferably;
+set optimizer_use_condition_selectivity=3;
+CREATE TABLE t1 (f1 char(32)) ENGINE=InnoDB;
+INSERT INTO t1 VALUES ('foo'),('bar'),('qux');
+ANALYZE TABLE t1;
+Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
+test.t1 analyze status OK
+SELECT * FROM t1 WHERE f1 < 'm';
+f1
+foo
+bar
+EXPLAIN EXTENDED
+SELECT * FROM t1 WHERE f1 < 'm';
+id select_type table type possible_keys key key_len ref rows filtered Extra
+1 SIMPLE t1 ALL NULL NULL NULL NULL 3 72.09 Using where
+Warnings:
+Note 1003 select `test`.`t1`.`f1` AS `f1` from `test`.`t1` where (`test`.`t1`.`f1` < 'm')
+CREATE TABLE t2 (f1 TEXT) ENGINE=InnoDB;
+INSERT INTO t2 VALUES ('foo'),('bar'),('qux');
+ANALYZE TABLE t2;
+Table Op Msg_type Msg_text
+test.t2 analyze status Engine-independent statistics collected
+test.t2 analyze Warning Engine-independent statistics are not collected for column 'f1'
+test.t2 analyze status OK
+SELECT * FROM t2 WHERE f1 <> 'qux';
+f1
+foo
+bar
+EXPLAIN EXTENDED
+SELECT * FROM t2 WHERE f1 <> 'qux';
+id select_type table type possible_keys key key_len ref rows filtered Extra
+1 SIMPLE t2 ALL NULL NULL NULL NULL 3 100.00 Using where
+Warnings:
+Note 1003 select `test`.`t2`.`f1` AS `f1` from `test`.`t2` where (`test`.`t2`.`f1` <> 'qux')
+DROP TABLE t1,t2;
+#
# End of 10.0 tests
#
set use_stat_tables= @tmp_ust;
diff --git a/mysql-test/suite/galera/disabled.def b/mysql-test/suite/galera/disabled.def
index 5f8d9c6ddff..907f7931483 100644
--- a/mysql-test/suite/galera/disabled.def
+++ b/mysql-test/suite/galera/disabled.def
@@ -30,3 +30,5 @@ galera_gcs_fragment : Incorrect arguments to SET
galera_flush_local : Fails sporadically
galera_binlog_stmt_autoinc : TODO: investigate
galera_concurrent_ctas : Test times out, investigate
+galera_sst_xtrabackup-v2-options : TODO: Fix test case
+mysql-wsrep#33 : TODO: investigate
diff --git a/mysql-test/suite/galera/include/auto_increment_offset_restore.inc b/mysql-test/suite/galera/include/auto_increment_offset_restore.inc
index 6218dfd6f2c..1248ed100ca 100644
--- a/mysql-test/suite/galera/include/auto_increment_offset_restore.inc
+++ b/mysql-test/suite/galera/include/auto_increment_offset_restore.inc
@@ -32,4 +32,10 @@ if ($node_3)
--connection $node_3
--eval SET @@global.auto_increment_offset = $auto_increment_offset_node_3;
}
+
+if ($node_4)
+{
+--connection $node_4
+--eval SET @@global.auto_increment_offset = $auto_increment_offset_node_4;
+}
--enable_query_log
diff --git a/mysql-test/suite/galera/include/auto_increment_offset_save.inc b/mysql-test/suite/galera/include/auto_increment_offset_save.inc
index 3c4db3f381c..216c689ec8c 100644
--- a/mysql-test/suite/galera/include/auto_increment_offset_save.inc
+++ b/mysql-test/suite/galera/include/auto_increment_offset_save.inc
@@ -13,6 +13,8 @@
# Connection handle for 2nd node
# $node_3 (optional)
# Connection handle for 3rd node
+# $node_4 (optional)
+# Connection handle for 4th node
if (!$node_1)
{
@@ -35,3 +37,9 @@ if ($node_3)
let $auto_increment_offset_node_3 = `SELECT @@global.auto_increment_offset`;
}
+if ($node_4)
+{
+ --connection $node_4
+ let $auto_increment_offset_node_4 = `SELECT @@global.auto_increment_offset`;
+}
+
diff --git a/mysql-test/suite/galera/r/galera_sst_xtrabackup-v2.result b/mysql-test/suite/galera/r/galera_sst_xtrabackup-v2.result
index 750d73b615f..df2d9190a4b 100644
--- a/mysql-test/suite/galera/r/galera_sst_xtrabackup-v2.result
+++ b/mysql-test/suite/galera/r/galera_sst_xtrabackup-v2.result
@@ -277,7 +277,7 @@ INSERT INTO t1 VALUES ('node2_committed_before');
INSERT INTO t1 VALUES ('node2_committed_before');
INSERT INTO t1 VALUES ('node2_committed_before');
COMMIT;
-SET GLOBAL debug = 'd,sync.alter_opened_table';
+SET GLOBAL debug_dbug = 'd,sync.alter_opened_table';
ALTER TABLE t1 ADD COLUMN f2 INTEGER;
SET wsrep_sync_wait = 0;
Killing server ...
@@ -356,3 +356,4 @@ COUNT(*) = 0
DROP TABLE t1;
COMMIT;
SET AUTOCOMMIT=ON;
+SET GLOBAL debug_dbug = $debug_orig;
diff --git a/mysql-test/suite/galera/t/galera#414.test b/mysql-test/suite/galera/t/galera#414.test
index b426e6510b6..0ee6dcac700 100644
--- a/mysql-test/suite/galera/t/galera#414.test
+++ b/mysql-test/suite/galera/t/galera#414.test
@@ -3,6 +3,7 @@
#
--source include/big_test.inc
+--source include/have_innodb.inc
--source include/galera_cluster.inc
# We perform the shutdown/restart sequence in here. If there was a crash during shutdown, MTR will detect it
diff --git a/mysql-test/suite/galera/t/galera_sst_xtrabackup-v2.test b/mysql-test/suite/galera/t/galera_sst_xtrabackup-v2.test
index c6823795e59..aac6822170a 100644
--- a/mysql-test/suite/galera/t/galera_sst_xtrabackup-v2.test
+++ b/mysql-test/suite/galera/t/galera_sst_xtrabackup-v2.test
@@ -2,8 +2,18 @@
--source include/galera_cluster.inc
--source include/have_innodb.inc
+# Save original auto_increment_offset values.
+--let $node_1=node_1
+--let $node_2=node_2
+--source include/auto_increment_offset_save.inc
+
--source suite/galera/include/galera_st_shutdown_slave.inc
--source suite/galera/include/galera_st_clean_slave.inc
--source suite/galera/include/galera_st_kill_slave.inc
--source suite/galera/include/galera_st_kill_slave_ddl.inc
+
+# Restore original auto_increment_offset values.
+--source include/auto_increment_offset_restore.inc
+
+--source include/galera_end.inc
diff --git a/mysql-test/suite/galera/t/galera_wan_restart_ist.test b/mysql-test/suite/galera/t/galera_wan_restart_ist.test
index 42f63df3acc..1cf5d4c7f74 100644
--- a/mysql-test/suite/galera/t/galera_wan_restart_ist.test
+++ b/mysql-test/suite/galera/t/galera_wan_restart_ist.test
@@ -12,6 +12,16 @@
--source include/galera_cluster.inc
--source include/have_innodb.inc
+--connect node_3, 127.0.0.1, root, , test, $NODE_MYPORT_3
+--connect node_4, 127.0.0.1, root, , test, $NODE_MYPORT_4
+
+# Save original auto_increment_offset values.
+--let $node_1=node_1
+--let $node_2=node_2
+--let $node_3=node_3
+--let $node_4=node_4
+--source include/auto_increment_offset_save.inc
+
SELECT VARIABLE_VALUE = 4 FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_cluster_size';
--connection node_1
@@ -21,11 +31,9 @@ INSERT INTO t1 VALUES (1);
--connection node_2
INSERT INTO t1 VALUES (2);
---connect node_3, 127.0.0.1, root, , test, $NODE_MYPORT_3
--connection node_3
INSERT INTO t1 VALUES (3);
---connect node_4, 127.0.0.1, root, , test, $NODE_MYPORT_4
--connection node_4
INSERT INTO t1 VALUES (4);
@@ -146,3 +154,8 @@ CALL mtr.add_suppression("Action message in non-primary configuration from membe
--connection node_4
CALL mtr.add_suppression("Action message in non-primary configuration from member 0");
+
+# Restore original auto_increment_offset values.
+--source include/auto_increment_offset_restore.inc
+
+--source include/galera_end.inc
diff --git a/mysql-test/suite/galera_3nodes/disabled.def b/mysql-test/suite/galera_3nodes/disabled.def
index fb23a81bfb8..ca55c41ff72 100644
--- a/mysql-test/suite/galera_3nodes/disabled.def
+++ b/mysql-test/suite/galera_3nodes/disabled.def
@@ -3,3 +3,5 @@ galera_evs_suspect_timeout : TODO: investigate
galera_innobackupex_backup : TODO: investigate
galera_slave_options_do :MDEV-8798
galera_slave_options_ignore : MDEV-8798
+galera_pc_bootstrap : TODO: Investigate: Timeout in wait_condition.inc
+galera_pc_weight : Test times out
diff --git a/mysql-test/suite/rpl/r/rpl_stop_slave_error.result b/mysql-test/suite/rpl/r/rpl_stop_slave_error.result
new file mode 100644
index 00000000000..2bd372a9a91
--- /dev/null
+++ b/mysql-test/suite/rpl/r/rpl_stop_slave_error.result
@@ -0,0 +1,6 @@
+include/master-slave.inc
+[connection master]
+include/stop_slave.inc
+NOT FOUND /Error reading packet from server: Lost connection/ in slave_log.err
+include/start_slave.inc
+include/rpl_end.inc
diff --git a/mysql-test/suite/rpl/t/rpl_drop_db.test b/mysql-test/suite/rpl/t/rpl_drop_db.test
index dae1651dc93..f66187b12f5 100644
--- a/mysql-test/suite/rpl/t/rpl_drop_db.test
+++ b/mysql-test/suite/rpl/t/rpl_drop_db.test
@@ -13,7 +13,7 @@ insert into mysqltest1.t1 values (1);
select * from mysqltest1.t1 into outfile 'mysqltest1/f1.txt';
create table mysqltest1.t2 (n int);
create table mysqltest1.t3 (n int);
---replace_result \\ / 66 39 17 39 247 39 "File exists" "Directory not empty"
+--replace_result \\ / 66 39 93 39 17 39 247 39 "File exists" "Directory not empty"
--error 1010
drop database mysqltest1;
use mysqltest1;
@@ -30,7 +30,7 @@ while ($1)
}
--enable_query_log
---replace_result \\ / 66 39 17 39 247 39 "File exists" "Directory not empty"
+--replace_result \\ / 66 39 93 39 17 39 247 39 "File exists" "Directory not empty"
--error 1010
drop database mysqltest1;
use mysqltest1;
diff --git a/mysql-test/suite/rpl/t/rpl_stop_slave_error-slave.opt b/mysql-test/suite/rpl/t/rpl_stop_slave_error-slave.opt
new file mode 100644
index 00000000000..32c4527a915
--- /dev/null
+++ b/mysql-test/suite/rpl/t/rpl_stop_slave_error-slave.opt
@@ -0,0 +1 @@
+--log-error=$MYSQLTEST_VARDIR/tmp/slave_log.err
diff --git a/mysql-test/suite/rpl/t/rpl_stop_slave_error.test b/mysql-test/suite/rpl/t/rpl_stop_slave_error.test
new file mode 100644
index 00000000000..a88981c15c4
--- /dev/null
+++ b/mysql-test/suite/rpl/t/rpl_stop_slave_error.test
@@ -0,0 +1,17 @@
+#
+# MDEV-8345 STOP SLAVE should not cause an ERROR to be logged to the error log
+#
+source include/have_binlog_format_mixed.inc; # don't repeat the test three times
+source include/master-slave.inc;
+
+connection master;
+sync_slave_with_master;
+source include/stop_slave.inc;
+let SEARCH_FILE=$MYSQLTEST_VARDIR/tmp/slave_log.err;
+let SEARCH_PATTERN=Error reading packet from server: Lost connection;
+let SEARCH_RANGE= -50000;
+source include/search_pattern_in_file.inc;
+
+source include/start_slave.inc;
+source include/rpl_end.inc;
+
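For reference, include/search_pattern_in_file.inc scans a bounded window of
the file: a negative SEARCH_RANGE makes it search only the last |SEARCH_RANGE|
bytes of SEARCH_FILE. A minimal sketch of the same pattern, with a
hypothetical log message:

let SEARCH_FILE=$MYSQLTEST_VARDIR/tmp/slave_log.err;
let SEARCH_PATTERN=some expected log message;
let SEARCH_RANGE= -50000;
source include/search_pattern_in_file.inc;
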
diff --git a/mysql-test/suite/sys_vars/r/sysvars_innodb,32bit,xtradb.rdiff b/mysql-test/suite/sys_vars/r/sysvars_innodb,32bit,xtradb.rdiff
index cc73fc52a25..ad4d44086ef 100644
--- a/mysql-test/suite/sys_vars/r/sysvars_innodb,32bit,xtradb.rdiff
+++ b/mysql-test/suite/sys_vars/r/sysvars_innodb,32bit,xtradb.rdiff
@@ -288,7 +288,7 @@
+DEFAULT_VALUE assert
+VARIABLE_SCOPE GLOBAL
+VARIABLE_TYPE ENUM
-+VARIABLE_COMMENT Warn corruptions of user tables as 'corrupt table' instead of not crashing itself, when used with file_per_table. All file io for the datafile after detected as corrupt are disabled, except for the deletion
++VARIABLE_COMMENT Warn corruptions of user tables as 'corrupt table' instead of not crashing itself, when used with file_per_table. All file io for the datafile after detected as corrupt are disabled, except for the deletion.
+NUMERIC_MIN_VALUE NULL
+NUMERIC_MAX_VALUE NULL
+NUMERIC_BLOCK_SIZE NULL
@@ -1228,7 +1228,7 @@
VARIABLE_NAME INNODB_VERSION
SESSION_VALUE NULL
-GLOBAL_VALUE 5.6.33
-+GLOBAL_VALUE 5.6.32-78.1
++GLOBAL_VALUE 5.6.32-79.0
GLOBAL_VALUE_ORIGIN COMPILE-TIME
DEFAULT_VALUE NULL
VARIABLE_SCOPE GLOBAL
diff --git a/mysql-test/suite/sys_vars/r/sysvars_innodb,xtradb.rdiff b/mysql-test/suite/sys_vars/r/sysvars_innodb,xtradb.rdiff
index 391e3acefde..5bcb144d9a7 100644
--- a/mysql-test/suite/sys_vars/r/sysvars_innodb,xtradb.rdiff
+++ b/mysql-test/suite/sys_vars/r/sysvars_innodb,xtradb.rdiff
@@ -167,7 +167,7 @@
+DEFAULT_VALUE assert
+VARIABLE_SCOPE GLOBAL
+VARIABLE_TYPE ENUM
-+VARIABLE_COMMENT Warn corruptions of user tables as 'corrupt table' instead of not crashing itself, when used with file_per_table. All file io for the datafile after detected as corrupt are disabled, except for the deletion
++VARIABLE_COMMENT Warn corruptions of user tables as 'corrupt table' instead of not crashing itself, when used with file_per_table. All file io for the datafile after detected as corrupt are disabled, except for the deletion.
+NUMERIC_MIN_VALUE NULL
+NUMERIC_MAX_VALUE NULL
+NUMERIC_BLOCK_SIZE NULL
@@ -662,7 +662,7 @@
VARIABLE_NAME INNODB_VERSION
SESSION_VALUE NULL
-GLOBAL_VALUE 5.6.33
-+GLOBAL_VALUE 5.6.32-78.1
++GLOBAL_VALUE 5.6.32-79.0
GLOBAL_VALUE_ORIGIN COMPILE-TIME
DEFAULT_VALUE NULL
VARIABLE_SCOPE GLOBAL
diff --git a/mysql-test/t/alter_table.test b/mysql-test/t/alter_table.test
index df4958bd2bc..eb395a72a22 100644
--- a/mysql-test/t/alter_table.test
+++ b/mysql-test/t/alter_table.test
@@ -1713,6 +1713,35 @@ ALTER TABLE t1 ADD PRIMARY KEY IF NOT EXISTS event_id (event_id,market_id);
DROP TABLE t1;
--echo #
+--echo # MDEV-11126 Crash while altering persistent virtual column
+--echo #
+
+CREATE TABLE `tab1` (
+ `id` bigint(20) NOT NULL AUTO_INCREMENT,
+ `field2` set('option1','option2','option3','option4') NOT NULL,
+ `field3` set('option1','option2','option3','option4','option5') NOT NULL,
+ `field4` set('option1','option2','option3','option4') NOT NULL,
+ `field5` varchar(32) NOT NULL,
+ `field6` varchar(32) NOT NULL,
+ `field7` varchar(32) NOT NULL,
+ `field8` varchar(32) NOT NULL,
+ `field9` int(11) NOT NULL DEFAULT '1',
+ `field10` varchar(16) NOT NULL,
+ `field11` enum('option1','option2','option3') NOT NULL DEFAULT 'option1',
+ `v_col` varchar(128) AS (IF(field11='option1',CONCAT_WS(":","field1",field2,field3,field4,field5,field6,field7,field8,field9,field10), CONCAT_WS(":","field1",field11,field2,field3,field4,field5,field6,field7,field8,field9,field10))) PERSISTENT,
+ PRIMARY KEY (`id`)
+) DEFAULT CHARSET=latin1;
+
+ALTER TABLE `tab1` CHANGE COLUMN v_col `v_col` varchar(128);
+SHOW CREATE TABLE `tab1`;
+ALTER TABLE `tab1` CHANGE COLUMN v_col `v_col` varchar(128) AS (IF(field11='option1',CONCAT_WS(":","field1",field2,field3,field4,field5,field6,field7,field8,field9,field10), CONCAT_WS(":","field1",field11,field2,field3,field4,field5,field6,field7,field8,field9,field10))) PERSISTENT;
+SHOW CREATE TABLE `tab1`;
+DROP TABLE `tab1`;
+--echo #
+--echo # Start of 10.1 tests
+--echo #
+
+--echo #
--echo # MDEV-7374 : Losing connection to MySQL while running ALTER TABLE
--echo #
CREATE TABLE t1(i INT) ENGINE=INNODB;
@@ -1722,10 +1751,6 @@ ALTER TABLE t1 MODIFY i FLOAT;
DROP TABLE t1;
--echo #
---echo # Start of 10.1 tests
---echo #
-
---echo #
--echo # MDEV-7816 ALTER with DROP INDEX and ADD INDEX .. COMMENT='comment2' ignores the new comment
--echo #
CREATE TABLE t1(a INT);
@@ -1733,4 +1758,3 @@ CREATE INDEX i1 ON t1(a) COMMENT 'comment1';
ALTER TABLE t1 DROP INDEX i1, ADD INDEX i1(a) COMMENT 'comment2';
SHOW CREATE TABLE t1;
DROP TABLE t1;
-
diff --git a/mysql-test/t/create_or_replace.test b/mysql-test/t/create_or_replace.test
index 6d744679d67..abf470b62d5 100644
--- a/mysql-test/t/create_or_replace.test
+++ b/mysql-test/t/create_or_replace.test
@@ -384,3 +384,15 @@ drop table t1;
# Cleanup
#
DROP TABLE t2;
+
+--echo #
+--echo # MDEV-10824 - Crash in CREATE OR REPLACE TABLE t1 AS SELECT spfunc()
+--echo #
+CREATE TABLE t1(a INT);
+CREATE FUNCTION f1() RETURNS VARCHAR(16383) RETURN 'test';
+CREATE OR REPLACE TABLE t1 AS SELECT f1();
+LOCK TABLE t1 WRITE;
+CREATE OR REPLACE TABLE t1 AS SELECT f1();
+UNLOCK TABLES;
+DROP FUNCTION f1;
+DROP TABLE t1;
diff --git a/mysql-test/t/drop.test b/mysql-test/t/drop.test
index d9784bc819a..a3e96953bac 100644
--- a/mysql-test/t/drop.test
+++ b/mysql-test/t/drop.test
@@ -313,3 +313,12 @@ INSERT INTO table1 VALUES (1);
DROP TABLE table1,table2;
--echo # End BUG#34750
+
+--echo #
+--echo # MDEV-11105 Table named 'db' has weird side effect.
+--echo #
+
+CREATE DATABASE mysqltest;
+CREATE TABLE mysqltest.db(id INT);
+DROP DATABASE mysqltest;
+
diff --git a/mysql-test/t/information_schema.test b/mysql-test/t/information_schema.test
index f1e78441852..5b0e2910889 100644
--- a/mysql-test/t/information_schema.test
+++ b/mysql-test/t/information_schema.test
@@ -620,13 +620,13 @@ select * from information_schema.schema_privileges order by grantee;
select * from information_schema.user_privileges order by grantee;
show grants;
connection con4;
-select * from information_schema.column_privileges where grantee like '%user%'
+select * from information_schema.column_privileges where grantee like '\'user%'
order by grantee;
-select * from information_schema.table_privileges where grantee like '%user%'
+select * from information_schema.table_privileges where grantee like '\'user%'
order by grantee;
-select * from information_schema.schema_privileges where grantee like '%user%'
+select * from information_schema.schema_privileges where grantee like '\'user%'
order by grantee;
-select * from information_schema.user_privileges where grantee like '%user%'
+select * from information_schema.user_privileges where grantee like '\'user%'
order by grantee;
show grants;
connection default;
diff --git a/mysql-test/t/mysql.test b/mysql-test/t/mysql.test
index f1813dd0ca4..8df9a0c82bf 100644
--- a/mysql-test/t/mysql.test
+++ b/mysql-test/t/mysql.test
@@ -588,8 +588,16 @@ DROP DATABASE connected_db;
# USE and names with backticks
#
--write_file $MYSQLTEST_VARDIR/tmp/backticks.sql
+\u aa`bb``cc
+SELECT DATABASE();
+USE test
+SELECT DATABASE();
USE aa`bb``cc
SELECT DATABASE();
+USE test
+SELECT DATABASE();
+USE `aa``bb````cc`
+SELECT DATABASE();
EOF
create database `aa``bb````cc`;
--exec $MYSQL < $MYSQLTEST_VARDIR/tmp/backticks.sql
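The quoting rule exercised above: inside a backtick-quoted identifier, each
literal backtick is doubled, so `aa``bb````cc` names the database aa`bb``cc.
A minimal standalone sketch of the same rule (not part of the test file):

create database `aa``bb````cc`;
select schema_name from information_schema.schemata
where schema_name = 'aa`bb``cc';
drop database `aa``bb````cc`;
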
diff --git a/mysql-test/t/mysql_not_windows.test b/mysql-test/t/mysql_not_windows.test
index 66853677f7b..591de74cbbf 100644
--- a/mysql-test/t/mysql_not_windows.test
+++ b/mysql-test/t/mysql_not_windows.test
@@ -13,3 +13,12 @@
--echo
--echo End of tests
+
+# Multi-line exec
+exec $MYSQL \
+ test -e "select 1";
+exec $MYSQL test -e "select
+ 2";
+let $query = select 3
+ as X;
+exec $MYSQL test -e "$query";
diff --git a/mysql-test/t/mysqldump-nl.test b/mysql-test/t/mysqldump-nl.test
new file mode 100644
index 00000000000..311996e77c3
--- /dev/null
+++ b/mysql-test/t/mysqldump-nl.test
@@ -0,0 +1,38 @@
+#
+# Newlines in identifiers
+#
+
+# embedded server doesn't support external clients
+--source include/not_embedded.inc
+# cmd.exe doesn't like new lines on the command line
+--source include/not_windows.inc
+
+create database `mysqltest1
+1tsetlqsym`;
+use `mysqltest1
+1tsetlqsym`;
+
+create table `t1
+1t` (`foobar
+raboof` int);
+create view `v1
+1v` as select * from `t1
+1t`;
+
+create procedure sp() select * from `v1
+1v`;
+
+flush tables;
+use test;
+
+exec $MYSQL_DUMP --compact --comments --routines --add-drop-database --databases 'mysqltest1
+1tsetlqsym';
+
+exec $MYSQL_DUMP --compact --comments --routines --add-drop-database --databases 'mysqltest1
+1tsetlqsym' | $MYSQL;
+
+show tables from `mysqltest1
+1tsetlqsym`;
+
+drop database `mysqltest1
+1tsetlqsym`;
diff --git a/mysql-test/t/mysqltest.test b/mysql-test/t/mysqltest.test
index ae59c713c3d..e85d793b628 100644
--- a/mysql-test/t/mysqltest.test
+++ b/mysql-test/t/mysqltest.test
@@ -741,15 +741,6 @@ echo ;
--error 1
--exec echo "--exec " | $MYSQL_TEST 2>&1
-# Multi-line exec
-exec $MYSQL
- test -e "select 1";
-exec $MYSQL test -e "select
- 2";
-let $query = select 3
- as X;
-exec $MYSQL test -e "$query";
-
# ----------------------------------------------------------------------------
# Test let command
# ----------------------------------------------------------------------------
diff --git a/mysql-test/t/selectivity.test b/mysql-test/t/selectivity.test
index 92e38604a30..9ca3d99d39d 100644
--- a/mysql-test/t/selectivity.test
+++ b/mysql-test/t/selectivity.test
@@ -972,6 +972,58 @@ set optimizer_use_condition_selectivity=@save_optimizer_use_condition_selectivit
DROP TABLE t1,t2;
+set use_stat_tables=@save_use_stat_tables;
+
+--echo #
+--echo # Bug mdev-11096: range condition over column without statistical data
+--echo #
+
+set use_stat_tables='preferably';
+set optimizer_use_condition_selectivity=3;
+
+create table t1(col1 char(32));
+insert into t1 values ('a'),('b'),('c'),('d'), ('e'),('f'),('g'),('h');
+analyze table t1 persistent for columns () indexes ();
+
+explain extended
+select * from t1 where col1 > 'b' and col1 < 'e';
+select * from t1 where col1 > 'b' and col1 < 'e';
+
+drop table t1;
+
+set optimizer_use_condition_selectivity=@save_optimizer_use_condition_selectivity;
+set use_stat_tables=@save_use_stat_tables;
+
+--echo #
+--echo # Bug mdev-9628: unindexed blob column without min-max statistics
+--echo # with optimizer_use_condition_selectivity=3
+--echo #
+
+set use_stat_tables='preferably';
+set optimizer_use_condition_selectivity=3;
+create table t1(col1 char(32));
+insert into t1 values ('a'),('b'),('c'),('d'), ('e'),('f'),('g'),('h');
+analyze table t1;
+
+create table t2(col1 text);
+insert into t2 values ('a'),('b'),('c'),('d'), ('e'),('f'),('g'),('h');
+analyze table t2;
+
+select * from t1 where col1 > 'b' and col1 < 'd';
+explain extended
+select * from t1 where col1 > 'b' and col1 < 'd';
+
+select * from t2 where col1 > 'b' and col1 < 'd';
+explain extended
+select * from t2 where col1 > 'b' and col1 < 'd';
+
+select * from t2 where col1 < 'b' and col1 > 'd';
+explain extended
+select * from t2 where col1 < 'b' and col1 > 'd';
+
+drop table t1,t2;
+
+set optimizer_use_condition_selectivity=@save_optimizer_use_condition_selectivity;
set use_stat_tables=@save_use_stat_tables;
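A note on the statement used above: the empty lists in
"analyze table ... persistent for columns () indexes ()" collect
engine-independent table statistics while skipping every column and index,
which is what leaves t1 without min/max values. A minimal sketch for checking
that no column statistics were stored, assuming the standard EITS tables:

analyze table t1 persistent for columns () indexes ();
select count(*) from mysql.column_stats
where db_name = 'test' and table_name = 't1';
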
diff --git a/mysql-test/t/selectivity_innodb.test b/mysql-test/t/selectivity_innodb.test
index d6a77eac600..25aa0abbc3b 100644
--- a/mysql-test/t/selectivity_innodb.test
+++ b/mysql-test/t/selectivity_innodb.test
@@ -110,6 +110,31 @@ where t1.child_user_id=t3.id and t1.child_group_id is null and t2.lower_group_na
drop table t1,t2,t3;
--echo #
+--echo # MDEV-9187: duplicate of bug mdev-9628
+--echo #
+
+set use_stat_tables = preferably;
+set optimizer_use_condition_selectivity=3;
+
+CREATE TABLE t1 (f1 char(32)) ENGINE=InnoDB;
+INSERT INTO t1 VALUES ('foo'),('bar'),('qux');
+ANALYZE TABLE t1;
+
+SELECT * FROM t1 WHERE f1 < 'm';
+EXPLAIN EXTENDED
+SELECT * FROM t1 WHERE f1 < 'm';
+
+CREATE TABLE t2 (f1 TEXT) ENGINE=InnoDB;
+INSERT INTO t2 VALUES ('foo'),('bar'),('qux');
+ANALYZE TABLE t2;
+
+SELECT * FROM t2 WHERE f1 <> 'qux';
+EXPLAIN EXTENDED
+SELECT * FROM t2 WHERE f1 <> 'qux';
+
+DROP TABLE t1,t2;
+
+--echo #
--echo # End of 10.0 tests
--echo #
diff --git a/mysql-test/valgrind.supp b/mysql-test/valgrind.supp
index 8b9c671d19d..8ae2fd517e9 100644
--- a/mysql-test/valgrind.supp
+++ b/mysql-test/valgrind.supp
@@ -1213,6 +1213,125 @@
fun:dlopen@@GLIBC_2.2.5
}
+#
+# MDEV-11061: OpenSSL 0.9.8 problems
+#
+
+{
+ MDEV-11061: OpenSSL 0.9.8
+ Memcheck:Cond
+ obj:*/libz.so*
+ ...
+ obj:*/libcrypto.so.0.9.8
+ ...
+ obj:*/libssl.so.0.9.8
+ ...
+}
+
+{
+ MDEV-11061: OpenSSL 0.9.8
+ Memcheck:Value8
+ obj:*/libz.so*
+ ...
+ obj:*/libcrypto.so.0.9.8
+ ...
+ obj:*/libssl.so.0.9.8
+ ...
+}
+
+{
+ MDEV-11061: OpenSSL 0.9.8
+ Memcheck:Cond
+ obj:*/libcrypto.so.0.9.8
+ ...
+ obj:*/libssl.so.0.9.8
+ ...
+}
+
+{
+ MDEV-11061: OpenSSL 0.9.8
+ Memcheck:Value8
+ obj:*/libcrypto.so.0.9.8
+ ...
+ obj:*/libssl.so.0.9.8
+ ...
+}
+
+{
+ MDEV-11061: OpenSSL 0.9.8
+ Memcheck:Cond
+ obj:*/libssl.so.0.9.8
+ obj:*/libssl.so.0.9.8
+ ...
+}
+
+{
+ MDEV-11061: OpenSSL 0.9.8
+ Memcheck:Value8
+ obj:*/libssl.so.0.9.8
+ obj:*/libssl.so.0.9.8
+ ...
+}
+
+{
+ MDEV-11061: OpenSSL 0.9.8
+ Memcheck:Cond
+ fun:memcpy
+ obj:*/libcrypto.so.0.9.8
+ obj:*/libssl.so.0.9.8
+ ...
+}
+
+{
+ MDEV-11061: OpenSSL 0.9.8
+ Memcheck:Value8
+ fun:memcpy
+ obj:*/libcrypto.so.0.9.8
+ obj:*/libssl.so.0.9.8
+ ...
+}
+
+{
+ MDEV-11061: OpenSSL 0.9.8
+ Memcheck:Cond
+ fun:is_overlap
+ fun:memcpy
+ obj:*/libcrypto.so.0.9.8
+ obj:*/libssl.so.0.9.8
+ ...
+}
+
+{
+ MDEV-11061: OpenSSL 0.9.8
+ Memcheck:Cond
+ fun:memset
+ obj:*/libcrypto.so.0.9.8
+ ...
+ obj:*/libssl.so.0.9.8
+ ...
+}
+
+{
+ MDEV-11061: OpenSSL 0.9.8
+ Memcheck:Value8
+ fun:memset
+ obj:*/libcrypto.so.0.9.8
+ ...
+ obj:*/libssl.so.0.9.8
+ ...
+}
+
+{
+ MDEV-11061: OpenSSL 0.9.8
+ Memcheck:Param
+ write(buf)
+ obj:*/libpthread-2.9.so*
+ obj:*/libcrypto.so.0.9.8
+ ...
+ obj:*/libssl.so.0.9.8
+ ...
+}
+
{
vasprintf in OpenSuse 12.3
Memcheck:Leak
diff --git a/sql/item_subselect.cc b/sql/item_subselect.cc
index 42f17065cfe..256bcc92378 100644
--- a/sql/item_subselect.cc
+++ b/sql/item_subselect.cc
@@ -2691,8 +2691,8 @@ static bool check_equality_for_exist2in(Item_func *func,
args[0]->all_used_tables() == OUTER_REF_TABLE_BIT)
{
/* It is Item_field or Item_direct_view_ref */
- DBUG_ASSERT(args[0]->type() == Item::FIELD_ITEM ||
- args[0]->type() == Item::REF_ITEM);
+ DBUG_ASSERT(args[1]->type() == Item::FIELD_ITEM ||
+ args[1]->type() == Item::REF_ITEM);
*local_field= (Item_ident *)args[1];
*outer_exp= args[0];
return TRUE;
diff --git a/sql/opt_range.cc b/sql/opt_range.cc
index 1a0d12a8dee..bed57f3273f 100644
--- a/sql/opt_range.cc
+++ b/sql/opt_range.cc
@@ -2757,9 +2757,16 @@ bool create_key_parts_for_pseudo_indexes(RANGE_OPT_PARAM *param,
{
Field *field= *field_ptr;
uint16 store_length;
+ uint16 max_key_part_length= (uint16) table->file->max_key_part_length();
key_part->key= keys;
key_part->part= 0;
- key_part->length= (uint16) field->key_length();
+ if (field->flags & BLOB_FLAG)
+ key_part->length= max_key_part_length;
+ else
+ {
+ key_part->length= (uint16) field->key_length();
+ set_if_smaller(key_part->length, max_key_part_length);
+ }
store_length= key_part->length;
if (field->real_maybe_null())
store_length+= HA_KEY_NULL_LENGTH;
diff --git a/sql/slave.cc b/sql/slave.cc
index a55d26b1aa0..fa82cf84b5f 100644
--- a/sql/slave.cc
+++ b/sql/slave.cc
@@ -3320,8 +3320,13 @@ static ulong read_event(MYSQL* mysql, Master_info *mi, bool* suppress_warnings)
*suppress_warnings= TRUE;
}
else
- sql_print_error("Error reading packet from server: %s ( server_errno=%d)",
- mysql_error(mysql), mysql_errno(mysql));
+ {
+ if (!mi->rli.abort_slave)
+ {
+ sql_print_error("Error reading packet from server: %s (server_errno=%d)",
+ mysql_error(mysql), mysql_errno(mysql));
+ }
+ }
DBUG_RETURN(packet_error);
}
diff --git a/sql/sql_db.cc b/sql/sql_db.cc
index 2ba67cb1b00..cb50301d79d 100644
--- a/sql/sql_db.cc
+++ b/sql/sql_db.cc
@@ -815,7 +815,7 @@ static bool
mysql_rm_db_internal(THD *thd,char *db, bool if_exists, bool silent)
{
ulong deleted_tables= 0;
- bool error= true;
+ bool error= true, rm_mysql_schema;
char path[FN_REFLEN + 16];
MY_DIR *dirp;
uint length;
@@ -840,6 +840,18 @@ mysql_rm_db_internal(THD *thd,char *db, bool if_exists, bool silent)
length= build_table_filename(path, sizeof(path) - 1, db, "", "", 0);
strmov(path+length, MY_DB_OPT_FILE); // Append db option file name
del_dbopt(path); // Remove dboption hash entry
+ /*
+ Now remove the db.opt file.
+ The 'find_db_tables_and_rm_known_files' doesn't remove this file
+ if there exists a table with the name 'db', so let's just do it
+ separately. We know this file exists and needs to be deleted anyway.
+ */
+ if (my_delete_with_symlink(path, MYF(0)) && my_errno != ENOENT)
+ {
+ my_error(EE_DELETE, MYF(0), path, my_errno);
+ DBUG_RETURN(true);
+ }
+
path[length]= '\0'; // Remove file name
/* See if the directory exists */
@@ -867,7 +879,8 @@ mysql_rm_db_internal(THD *thd,char *db, bool if_exists, bool silent)
Disable drop of enabled log tables, must be done before name locking.
This check is only needed if we are dropping the "mysql" database.
*/
- if ((my_strcasecmp(system_charset_info, MYSQL_SCHEMA_NAME.str, db) == 0))
+ if ((rm_mysql_schema=
+ (my_strcasecmp(system_charset_info, MYSQL_SCHEMA_NAME.str, db) == 0)))
{
for (table= tables; table; table= table->next_local)
if (check_if_log_table(table, TRUE, "DROP"))
@@ -880,7 +893,7 @@ mysql_rm_db_internal(THD *thd,char *db, bool if_exists, bool silent)
lock_db_routines(thd, dbnorm))
goto exit;
- if (!in_bootstrap)
+ if (!in_bootstrap && !rm_mysql_schema)
{
for (table= tables; table; table= table->next_local)
{
@@ -920,10 +933,13 @@ mysql_rm_db_internal(THD *thd,char *db, bool if_exists, bool silent)
ha_drop_database(path);
tmp_disable_binlog(thd);
query_cache_invalidate1(thd, dbnorm);
- (void) sp_drop_db_routines(thd, dbnorm); /* @todo Do not ignore errors */
+ if (!rm_mysql_schema)
+ {
+ (void) sp_drop_db_routines(thd, dbnorm); /* @todo Do not ignore errors */
#ifdef HAVE_EVENT_SCHEDULER
- Events::drop_schema_events(thd, dbnorm);
+ Events::drop_schema_events(thd, dbnorm);
#endif
+ }
reenable_binlog(thd);
/*
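The comment added above can be exercised from plain SQL: when a table named
'db' exists, the table-file scan in find_db_tables_and_rm_known_files does
not remove db.opt, so the server now deletes the file explicitly before
removing the directory. A minimal sketch of the affected scenario (the same
shape as the drop.test case earlier):

create database mysqltest;
create table mysqltest.db (id int);
drop database mysqltest;
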
diff --git a/sql/sql_parse.cc b/sql/sql_parse.cc
index a15f3da8e12..e37a4b9ccc1 100644
--- a/sql/sql_parse.cc
+++ b/sql/sql_parse.cc
@@ -3263,12 +3263,6 @@ mysql_execute_command(THD *thd)
}
/*
- For CREATE TABLE we should not open the table even if it exists.
- If the table exists, we should either not create it or replace it
- */
- lex->query_tables->open_strategy= TABLE_LIST::OPEN_STUB;
-
- /*
If we are a slave, we should add OR REPLACE if we don't have
IF EXISTS. This will help a slave to recover from
CREATE TABLE OR EXISTS failures by dropping the table and
@@ -8871,12 +8865,6 @@ bool create_table_precheck(THD *thd, TABLE_LIST *tables,
if (check_fk_parent_table_access(thd, &lex->create_info, &lex->alter_info, create_table->db))
goto err;
- /*
- For CREATE TABLE we should not open the table even if it exists.
- If the table exists, we should either not create it or replace it
- */
- lex->query_tables->open_strategy= TABLE_LIST::OPEN_STUB;
-
error= FALSE;
err:
diff --git a/sql/sql_statistics.cc b/sql/sql_statistics.cc
index 4020cbc70c4..311263c39b1 100644
--- a/sql/sql_statistics.cc
+++ b/sql/sql_statistics.cc
@@ -1003,11 +1003,13 @@ public:
switch (i) {
case COLUMN_STAT_MIN_VALUE:
+ table_field->read_stats->min_value->set_notnull();
stat_field->val_str(&val);
table_field->read_stats->min_value->store(val.ptr(), val.length(),
&my_charset_bin);
break;
case COLUMN_STAT_MAX_VALUE:
+ table_field->read_stats->max_value->set_notnull();
stat_field->val_str(&val);
table_field->read_stats->max_value->store(val.ptr(), val.length(),
&my_charset_bin);
@@ -3659,17 +3661,8 @@ double get_column_range_cardinality(Field *field,
{
double avg_frequency= col_stats->get_avg_frequency();
res= avg_frequency;
- /*
- psergey-todo: what does check for min_value, max_value mean?
- min/max_value are set to NULL in alloc_statistics_for_table() and
- alloc_statistics_for_table_share(). Both functions will immediately
- call create_min_max_statistical_fields_for_table and
- create_min_max_statistical_fields_for_table_share() respectively,
- which will set min/max_value to be valid pointers, unless OOM
- occurs.
- */
if (avg_frequency > 1.0 + 0.000001 &&
- col_stats->min_value && col_stats->max_value)
+ col_stats->min_max_values_are_provided())
{
Histogram *hist= &col_stats->histogram;
if (hist->is_available())
@@ -3692,7 +3685,7 @@ double get_column_range_cardinality(Field *field,
}
else
{
- if (col_stats->min_value && col_stats->max_value)
+ if (col_stats->min_max_values_are_provided())
{
double sel, min_mp_pos, max_mp_pos;
diff --git a/sql/sql_statistics.h b/sql/sql_statistics.h
index 46e5cef22d1..8e5f8107849 100644
--- a/sql/sql_statistics.h
+++ b/sql/sql_statistics.h
@@ -388,6 +388,11 @@ public:
avg_frequency= (ulong) (val * Scale_factor_avg_frequency);
}
+ bool min_max_values_are_provided()
+ {
+ return !is_null(COLUMN_STAT_MIN_VALUE) &&
+ !is_null(COLUMN_STAT_MAX_VALUE);
+ }
};
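The predicate above maps directly onto the EITS storage: a column whose
statistics were never collected has NULL min_value/max_value in
mysql.column_stats, which is exactly the case min_max_values_are_provided()
screens out before histograms or range estimates are consulted. A minimal
sketch for inspecting those rows, assuming the standard statistics tables:

select db_name, table_name, column_name, min_value, max_value
from mysql.column_stats
where db_name = 'test' and table_name = 't2';
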
diff --git a/sql/sql_table.cc b/sql/sql_table.cc
index 2e639f3d072..c7471a85e8c 100644
--- a/sql/sql_table.cc
+++ b/sql/sql_table.cc
@@ -2440,7 +2440,8 @@ int mysql_rm_table_no_locks(THD *thd, TABLE_LIST *tables, bool if_exists,
if (table_type && table_type != view_pseudo_hton)
ha_lock_engine(thd, table_type);
- if (thd->locked_tables_mode)
+ if (thd->locked_tables_mode == LTM_LOCK_TABLES ||
+ thd->locked_tables_mode == LTM_PRELOCKED_UNDER_LOCK_TABLES)
{
if (wait_while_table_is_used(thd, table->table, HA_EXTRA_NOT_USED))
{
@@ -6294,6 +6295,7 @@ static bool fill_alter_inplace_info(THD *thd,
(field->stored_in_db || field->vcol_info->is_in_partitioning_expr()))
{
if (is_equal == IS_EQUAL_NO ||
+ !new_field->vcol_info ||
!field->vcol_info->is_equal(new_field->vcol_info))
ha_alter_info->handler_flags|= Alter_inplace_info::ALTER_COLUMN_VCOL;
else
diff --git a/sql/threadpool_common.cc b/sql/threadpool_common.cc
index ae8a81b1bcd..9d263038bc9 100644
--- a/sql/threadpool_common.cc
+++ b/sql/threadpool_common.cc
@@ -73,17 +73,16 @@ struct Worker_thread_context
void save()
{
-#ifdef HAVE_PSI_INTERFACE
- psi_thread= PSI_server?PSI_server->get_thread():0;
+#ifdef HAVE_PSI_THREAD_INTERFACE
+ psi_thread = PSI_THREAD_CALL(get_thread)();
#endif
mysys_var= (st_my_thread_var *)pthread_getspecific(THR_KEY_mysys);
}
void restore()
{
-#ifdef HAVE_PSI_INTERFACE
- if (PSI_server)
- PSI_server->set_thread(psi_thread);
+#ifdef HAVE_PSI_THREAD_INTERFACE
+ PSI_THREAD_CALL(set_thread)(psi_thread);
#endif
pthread_setspecific(THR_KEY_mysys,mysys_var);
pthread_setspecific(THR_THD, 0);
@@ -92,6 +91,41 @@ struct Worker_thread_context
};
+#ifdef HAVE_PSI_INTERFACE
+
+/*
+ The following fixes the PSI "idle" instrumentation.
+ The server assumes that a connection becomes idle
+ just before net_read_packet() and switches back to active after it.
+ In our setup, the server becomes idle when asynchronous socket I/O
+ is performed.
+*/
+
+extern void net_before_header_psi(struct st_net *net, void *user_data, size_t);
+
+static void dummy_before_header(struct st_net *, void *, size_t)
+{
+}
+
+static void re_init_net_server_extension(THD *thd)
+{
+ thd->m_net_server_extension.m_before_header = dummy_before_header;
+}
+
+#else
+
+#define re_init_net_server_extension(thd)
+
+#endif /* HAVE_PSI_INTERFACE */
+
+
+static inline void set_thd_idle(THD *thd)
+{
+ thd->net.reading_or_writing= 1;
+#ifdef HAVE_PSI_INTERFACE
+ net_before_header_psi(&thd->net, thd, 0);
+#endif
+}
+
/*
Attach/associate the connection with the OS thread.
*/
@@ -100,10 +134,10 @@ static bool thread_attach(THD* thd)
pthread_setspecific(THR_KEY_mysys,thd->mysys_var);
thd->thread_stack=(char*)&thd;
thd->store_globals();
-#ifdef HAVE_PSI_INTERFACE
- if (PSI_server)
- PSI_server->set_thread(thd->event_scheduler.m_psi);
+#ifdef HAVE_PSI_THREAD_INTERFACE
+ PSI_THREAD_CALL(set_thread)(thd->event_scheduler.m_psi);
#endif
+ mysql_socket_set_thread_owner(thd->net.vio->mysql_socket);
return 0;
}
@@ -130,39 +164,38 @@ int threadpool_add_connection(THD *thd)
}
/* Create new PSI thread for use with the THD. */
-#ifdef HAVE_PSI_INTERFACE
- if (PSI_server)
- {
- thd->event_scheduler.m_psi =
- PSI_server->new_thread(key_thread_one_connection, thd, thd->thread_id);
- }
+#ifdef HAVE_PSI_THREAD_INTERFACE
+ thd->event_scheduler.m_psi=
+ PSI_THREAD_CALL(new_thread)(key_thread_one_connection, thd, thd->thread_id);
#endif
/* Login. */
thread_attach(thd);
+ re_init_net_server_extension(thd);
ulonglong now= microsecond_interval_timer();
thd->prior_thr_create_utime= now;
thd->start_utime= now;
thd->thr_create_utime= now;
- if (!setup_connection_thread_globals(thd))
- {
- if (!thd_prepare_connection(thd))
- {
-
- /*
- Check if THD is ok, as prepare_new_connection_state()
- can fail, for example if init command failed.
- */
- if (thd_is_connection_alive(thd))
- {
- retval= 0;
- thd->net.reading_or_writing= 1;
- thd->skip_wait_timeout= true;
- }
- }
- }
+ if (setup_connection_thread_globals(thd))
+ goto end;
+
+ if (thd_prepare_connection(thd))
+ goto end;
+
+ /*
+ Check if THD is ok, as prepare_new_connection_state()
+ can fail, for example if init command failed.
+ */
+ if (!thd_is_connection_alive(thd))
+ goto end;
+
+ retval= 0;
+ thd->skip_wait_timeout= true;
+ set_thd_idle(thd);
+
+end:
worker_context.restore();
return retval;
}
@@ -244,12 +277,13 @@ int threadpool_process_request(THD *thd)
goto end;
}
+ set_thd_idle(thd);
+
vio= thd->net.vio;
if (!vio->has_data(vio))
{
/* More info on this debug sync is in sql_parse.cc*/
DEBUG_SYNC(thd, "before_do_command_net_read");
- thd->net.reading_or_writing= 1;
goto end;
}
}
diff --git a/storage/connect/JdbcInterface.java b/storage/connect/JdbcInterface.java
index f765052915d..34af8c4e013 100644
--- a/storage/connect/JdbcInterface.java
+++ b/storage/connect/JdbcInterface.java
@@ -340,6 +340,18 @@ public class JdbcInterface {
return m;
} // end of GetMaxValue
+ public String GetQuoteString() {
+ String qs = null;
+
+ try {
+ qs = dbmd.getIdentifierQuoteString();
+ } catch(SQLException se) {
+ SetErrmsg(se);
+ } // end try/catch
+
+ return qs;
+ } // end of GetQuoteString
+
public int GetColumns(String[] parms) {
int ncol = -1;
@@ -680,11 +692,11 @@ public class JdbcInterface {
return 0;
} // end of TimestampField
- public String ObjectField(int n, String name) {
+ public Object ObjectField(int n, String name) {
if (rs == null) {
System.out.println("No result set");
} else try {
- return (n > 0) ? rs.getObject(n).toString() : rs.getObject(name).toString();
+ return (n > 0) ? rs.getObject(n) : rs.getObject(name);
} catch (SQLException se) {
SetErrmsg(se);
} //end try/catch
diff --git a/storage/connect/filamdbf.cpp b/storage/connect/filamdbf.cpp
index 8afda723578..a4557facbd8 100644
--- a/storage/connect/filamdbf.cpp
+++ b/storage/connect/filamdbf.cpp
@@ -383,7 +383,7 @@ DBFBASE::DBFBASE(DBFBASE *txfp)
/* and header length. Set Records, check that Reclen is equal to lrecl and */
/* return the header length or 0 in case of error. */
/****************************************************************************/
-int DBFBASE::ScanHeader(PGLOBAL g, PSZ fname, int lrecl, char *defpath)
+int DBFBASE::ScanHeader(PGLOBAL g, PSZ fn, int lrecl, int *rln, char *defpath)
{
int rc;
char filename[_MAX_PATH];
@@ -393,7 +393,7 @@ int DBFBASE::ScanHeader(PGLOBAL g, PSZ fname, int lrecl, char *defpath)
/************************************************************************/
/* Open the input file. */
/************************************************************************/
- PlugSetPath(filename, fname, defpath);
+ PlugSetPath(filename, fn, defpath);
if (!(infile= global_fopen(g, MSGID_CANNOT_OPEN, filename, "rb")))
return 0; // Assume file does not exist
@@ -410,11 +410,7 @@ int DBFBASE::ScanHeader(PGLOBAL g, PSZ fname, int lrecl, char *defpath)
} else if (rc == RC_FX)
return -1;
- if ((int)header.Reclen() != lrecl) {
- sprintf(g->Message, MSG(BAD_LRECL), lrecl, header.Reclen());
- return -1;
- } // endif Lrecl
-
+ *rln = (int)header.Reclen();
Records = (int)header.Records();
return (int)header.Headlen();
} // end of ScanHeader
@@ -431,9 +427,27 @@ int DBFFAM::Cardinality(PGLOBAL g)
if (!g)
return 1;
- if (!Headlen)
- if ((Headlen = ScanHeader(g, To_File, Lrecl, Tdbp->GetPath())) < 0)
- return -1; // Error in ScanHeader
+ if (!Headlen) {
+ int rln = 0; // Record length in the file header
+
+ Headlen = ScanHeader(g, To_File, Lrecl, &rln, Tdbp->GetPath());
+
+ if (Headlen < 0)
+ return -1; // Error in ScanHeader
+
+ if (rln && Lrecl != rln) {
+ // This always happens on some Linux platforms
+ sprintf(g->Message, MSG(BAD_LRECL), Lrecl, rln);
+
+ if (Accept) {
+ Lrecl = rln;
+ PushWarning(g, Tdbp);
+ } else
+ return -1;
+
+ } // endif rln
+
+ } // endif Headlen
// Set number of blocks for later use
Block = (Records > 0) ? (Records + Nrec - 1) / Nrec : 0;
@@ -565,7 +579,13 @@ bool DBFFAM::AllocateBuffer(PGLOBAL g)
if (Lrecl != reclen) {
sprintf(g->Message, MSG(BAD_LRECL), Lrecl, reclen);
- return true;
+
+ if (Accept) {
+ Lrecl = reclen;
+ PushWarning(g, Tdbp);
+ } else
+ return true;
+
} // endif Lrecl
hlen = HEADLEN * (n + 1) + 2;
@@ -641,8 +661,14 @@ bool DBFFAM::AllocateBuffer(PGLOBAL g)
if ((rc = dbfhead(g, Stream, Tdbp->GetFile(g), &header)) == RC_OK) {
if (Lrecl != (int)header.Reclen()) {
sprintf(g->Message, MSG(BAD_LRECL), Lrecl, header.Reclen());
- return true;
- } // endif Lrecl
+
+ if (Accept) {
+ Lrecl = header.Reclen();
+ PushWarning(g, Tdbp);
+ } else
+ return true;
+
+ } // endif Lrecl
Records = (int)header.Records();
Headlen = (int)header.Headlen();
@@ -916,9 +942,27 @@ int DBMFAM::Cardinality(PGLOBAL g)
if (!g)
return 1;
- if (!Headlen)
- if ((Headlen = ScanHeader(g, To_File, Lrecl, Tdbp->GetPath())) < 0)
- return -1; // Error in ScanHeader
+ if (!Headlen) {
+ int rln = 0; // Record length in the file header
+
+ Headlen = ScanHeader(g, To_File, Lrecl, &rln, Tdbp->GetPath());
+
+ if (Headlen < 0)
+ return -1; // Error in ScanHeader
+
+ if (rln && Lrecl != rln) {
+ // This always happens on some Linux platforms
+ sprintf(g->Message, MSG(BAD_LRECL), Lrecl, rln);
+
+ if (Accept) {
+ Lrecl = rln;
+ PushWarning(g, Tdbp);
+ } else
+ return -1;
+
+ } // endif rln
+
+ } // endif Headlen
// Set number of blocks for later use
Block = (Records > 0) ? (Records + Nrec - 1) / Nrec : 0;
@@ -961,8 +1005,14 @@ bool DBMFAM::AllocateBuffer(PGLOBAL g)
if (Lrecl != (int)hp->Reclen()) {
sprintf(g->Message, MSG(BAD_LRECL), Lrecl, hp->Reclen());
- return true;
- } // endif Lrecl
+
+ if (Accept) {
+ Lrecl = hp->Reclen();
+ PushWarning(g, Tdbp);
+ } else
+ return true;
+
+ } // endif Lrecl
Records = (int)hp->Records();
Headlen = (int)hp->Headlen();
diff --git a/storage/connect/filamdbf.h b/storage/connect/filamdbf.h
index da84d7685a8..66458a10eaa 100644
--- a/storage/connect/filamdbf.h
+++ b/storage/connect/filamdbf.h
@@ -31,7 +31,7 @@ class DllExport DBFBASE {
DBFBASE(PDBF txfp);
// Implementation
- int ScanHeader(PGLOBAL g, PSZ fname, int lrecl, char *defpath);
+ int ScanHeader(PGLOBAL g, PSZ fname, int lrecl, int *rlen, char *defpath);
protected:
// Default constructor, not to be used
diff --git a/storage/connect/ha_connect.cc b/storage/connect/ha_connect.cc
index ea6fb1b08c1..cf945a73f46 100644
--- a/storage/connect/ha_connect.cc
+++ b/storage/connect/ha_connect.cc
@@ -224,6 +224,7 @@ uint GetWorkSize(void);
void SetWorkSize(uint);
extern "C" const char *msglang(void);
+static void PopUser(PCONNECT xp);
static PCONNECT GetUser(THD *thd, PCONNECT xp);
static PGLOBAL GetPlug(THD *thd, PCONNECT& lxp);
@@ -831,34 +832,43 @@ ha_connect::~ha_connect(void)
table ? table->s->table_name.str : "<null>",
xp, xp ? xp->count : 0);
- if (xp) {
- PCONNECT p;
+ PopUser(xp);
+} // end of ha_connect destructor
- xp->count--;
- for (p= user_connect::to_users; p; p= p->next)
- if (p == xp)
- break;
+/****************************************************************************/
+/* Check whether this user can be removed. */
+/****************************************************************************/
+static void PopUser(PCONNECT xp)
+{
+ if (xp) {
+ xp->count--;
- if (p && !p->count) {
- if (p->next)
- p->next->previous= p->previous;
+ if (!xp->count) {
+ PCONNECT p;
- if (p->previous)
- p->previous->next= p->next;
- else
- user_connect::to_users= p->next;
+ for (p= user_connect::to_users; p; p= p->next)
+ if (p == xp)
+ break;
- } // endif p
+ if (p) {
+ if (p->next)
+ p->next->previous= p->previous;
- if (!xp->count) {
- PlugCleanup(xp->g, true);
- delete xp;
- } // endif count
+ if (p->previous)
+ p->previous->next= p->next;
+ else
+ user_connect::to_users= p->next;
- } // endif xp
+ } // endif p
-} // end of ha_connect destructor
+ PlugCleanup(xp->g, true);
+ delete xp;
+ } // endif count
+
+ } // endif xp
+
+} // end of PopUser
/****************************************************************************/
@@ -866,7 +876,7 @@ ha_connect::~ha_connect(void)
/****************************************************************************/
static PCONNECT GetUser(THD *thd, PCONNECT xp)
{
- if (!thd)
+ if (!thd)
return NULL;
if (xp && thd == xp->thdp)
@@ -890,7 +900,6 @@ static PCONNECT GetUser(THD *thd, PCONNECT xp)
return xp;
} // end of GetUser
-
/****************************************************************************/
/* Get the global pointer of the user of this handler. */
/****************************************************************************/
@@ -5261,7 +5270,18 @@ static int connect_assisted_discovery(handlerton *, THD* thd,
if (!(shm= (char*)db))
db= table_s->db.str; // Default value
- // Check table type
+ // Save stack and allocation environment and prepare error return
+ if (g->jump_level == MAX_JUMP) {
+ strcpy(g->Message, MSG(TOO_MANY_JUMPS));
+ goto jer;
+ } // endif jump_level
+
+ if ((rc= setjmp(g->jumper[++g->jump_level])) != 0) {
+ my_message(ER_UNKNOWN_ERROR, g->Message, MYF(0));
+ goto err;
+ } // endif rc
+
+ // Check table type
if (ttp == TAB_UNDEF) {
topt->type= (src) ? "MYSQL" : (tab) ? "PROXY" : "DOS";
ttp= GetTypeID(topt->type);
@@ -5270,20 +5290,9 @@ static int connect_assisted_discovery(handlerton *, THD* thd,
} else if (ttp == TAB_NIY) {
sprintf(g->Message, "Unsupported table type %s", topt->type);
my_message(ER_UNKNOWN_ERROR, g->Message, MYF(0));
- return HA_ERR_INTERNAL_ERROR;
+ goto err;
} // endif ttp
- // Save stack and allocation environment and prepare error return
- if (g->jump_level == MAX_JUMP) {
- strcpy(g->Message, MSG(TOO_MANY_JUMPS));
- return HA_ERR_INTERNAL_ERROR;
- } // endif jump_level
-
- if ((rc= setjmp(g->jumper[++g->jump_level])) != 0) {
- my_message(ER_UNKNOWN_ERROR, g->Message, MYF(0));
- goto err;
- } // endif rc
-
if (!tab) {
if (ttp == TAB_TBL) {
// Make tab the first table of the list
@@ -5843,6 +5852,7 @@ static int connect_assisted_discovery(handlerton *, THD* thd,
rc= init_table_share(thd, table_s, create_info, &sql);
g->jump_level--;
+ PopUser(xp);
return rc;
} // endif ok
@@ -5850,7 +5860,9 @@ static int connect_assisted_discovery(handlerton *, THD* thd,
err:
g->jump_level--;
- return HA_ERR_INTERNAL_ERROR;
+ jer:
+ PopUser(xp);
+ return HA_ERR_INTERNAL_ERROR;
} // end of connect_assisted_discovery
/**
diff --git a/storage/connect/jdbconn.cpp b/storage/connect/jdbconn.cpp
index 3b8de3e975b..dca9bd0eac4 100644
--- a/storage/connect/jdbconn.cpp
+++ b/storage/connect/jdbconn.cpp
@@ -498,145 +498,6 @@ PQRYRES JDBCDrivers(PGLOBAL g, int maxres, bool info)
return qrp;
} // end of JDBCDrivers
-#if 0
-/*************************************************************************/
-/* JDBCDataSources: constructs the result blocks containing all JDBC */
-/* data sources available on the local host. */
-/* Called with info=true to have result column names. */
-/*************************************************************************/
-PQRYRES JDBCDataSources(PGLOBAL g, int maxres, bool info)
-{
- int buftyp[] ={ TYPE_STRING, TYPE_STRING };
- XFLD fldtyp[] ={ FLD_NAME, FLD_REM };
- unsigned int length[] ={ 0, 256 };
- bool b[] ={ false, true };
- int i, n = 0, ncol = 2;
- PCOLRES crp;
- PQRYRES qrp;
- JDBConn *jcp = NULL;
-
- /************************************************************************/
- /* Do an evaluation of the result size. */
- /************************************************************************/
- if (!info) {
- jcp = new(g)JDBConn(g, NULL);
- n = jcp->GetMaxValue(SQL_MAX_DSN_LENGTH);
- length[0] = (n) ? (n + 1) : 256;
-
- if (!maxres)
- maxres = 512; // Estimated max number of data sources
-
- } else {
- length[0] = 256;
- maxres = 0;
- } // endif info
-
- if (trace)
- htrc("JDBCDataSources: max=%d len=%d\n", maxres, length[0]);
-
- /************************************************************************/
- /* Allocate the structures used to refer to the result set. */
- /************************************************************************/
- qrp = PlgAllocResult(g, ncol, maxres, IDS_DSRC,
- buftyp, fldtyp, length, false, true);
-
- for (i = 0, crp = qrp->Colresp; crp; i++, crp = crp->Next)
- if (b[i])
- crp->Kdata->SetNullable(true);
-
- /************************************************************************/
- /* Now get the results into blocks. */
- /************************************************************************/
- if (!info && qrp && jcp->GetDataSources(qrp))
- qrp = NULL;
-
- /************************************************************************/
- /* Return the result pointer for use by GetData routines. */
- /************************************************************************/
- return qrp;
-} // end of JDBCDataSources
-
-/**************************************************************************/
-/* PrimaryKeys: constructs the result blocks containing all the */
-/* JDBC catalog information concerning primary keys. */
-/**************************************************************************/
-PQRYRES JDBCPrimaryKeys(PGLOBAL g, JDBConn *op, char *dsn, char *table)
-{
- static int buftyp[] ={ TYPE_STRING, TYPE_STRING, TYPE_STRING,
- TYPE_STRING, TYPE_SHORT, TYPE_STRING };
- static unsigned int length[] ={ 0, 0, 0, 0, 6, 128 };
- int n, ncol = 5;
- int maxres;
- PQRYRES qrp;
- JCATPARM *cap;
- JDBConn *jcp = op;
-
- if (!op) {
- /**********************************************************************/
- /* Open the connection with the JDBC data source. */
- /**********************************************************************/
- jcp = new(g)JDBConn(g, NULL);
-
- if (jcp->Open(dsn, 2) < 1) // 2 is openReadOnly
- return NULL;
-
- } // endif op
-
- /************************************************************************/
- /* Do an evaluation of the result size. */
- /************************************************************************/
- n = jcp->GetMaxValue(SQL_MAX_COLUMNS_IN_TABLE);
- maxres = (n) ? (int)n : 250;
- n = jcp->GetMaxValue(SQL_MAX_CATALOG_NAME_LEN);
- length[0] = (n) ? (n + 1) : 128;
- n = jcp->GetMaxValue(SQL_MAX_SCHEMA_NAME_LEN);
- length[1] = (n) ? (n + 1) : 128;
- n = jcp->GetMaxValue(SQL_MAX_TABLE_NAME_LEN);
- length[2] = (n) ? (n + 1) : 128;
- n = jcp->GetMaxValue(SQL_MAX_COLUMN_NAME_LEN);
- length[3] = (n) ? (n + 1) : 128;
-
- if (trace)
- htrc("JDBCPrimaryKeys: max=%d len=%d,%d,%d\n",
- maxres, length[0], length[1], length[2]);
-
- /************************************************************************/
- /* Allocate the structure used to refer to the result set. */
- /************************************************************************/
- qrp = PlgAllocResult(g, ncol, maxres, IDS_PKEY,
- buftyp, NULL, length, false, true);
-
- if (trace)
- htrc("Getting pkey results ncol=%d\n", qrp->Nbcol);
-
- cap = AllocCatInfo(g, CAT_KEY, NULL, table, qrp);
-
- /************************************************************************/
- /* Now get the results into blocks. */
- /************************************************************************/
- if ((n = jcp->GetCatInfo(cap)) >= 0) {
- qrp->Nblin = n;
- // ResetNullValues(cap);
-
- if (trace)
- htrc("PrimaryKeys: NBCOL=%d NBLIN=%d\n", qrp->Nbcol, qrp->Nblin);
-
- } else
- qrp = NULL;
-
- /************************************************************************/
- /* Close any local connection. */
- /************************************************************************/
- if (!op)
- jcp->Close();
-
- /************************************************************************/
- /* Return the result pointer for use by GetData routines. */
- /************************************************************************/
- return qrp;
-} // end of JDBCPrimaryKeys
-#endif // 0
-
/***********************************************************************/
/* JDBConn construction/destruction. */
/***********************************************************************/
@@ -651,7 +512,7 @@ JDBConn::JDBConn(PGLOBAL g, TDBJDBC *tdbp)
xqid = xuid = xid = grs = readid = fetchid = typid = errid = nullptr;
prepid = xpid = pcid = nullptr;
chrfldid = intfldid = dblfldid = fltfldid = bigfldid = nullptr;
- datfldid = timfldid = tspfldid = nullptr;
+ objfldid = datfldid = timfldid = tspfldid = nullptr;
//m_LoginTimeout = DEFAULT_LOGIN_TIMEOUT;
//m_QueryTimeout = DEFAULT_QUERY_TIMEOUT;
//m_UpdateOptions = 0;
@@ -739,60 +600,6 @@ bool JDBConn::gmID(PGLOBAL g, jmethodID& mid, const char *name, const char *sig
} // end of gmID
-#if 0
-/***********************************************************************/
-/* Utility routine. */
-/***********************************************************************/
-PSZ JDBConn::GetStringInfo(ushort infotype)
-{
- //ASSERT(m_hdbc != SQL_NULL_HDBC);
- char *p, buffer[MAX_STRING_INFO];
- SWORD result;
- RETCODE rc;
-
- rc = SQLGetInfo(m_hdbc, infotype, buffer, sizeof(buffer), &result);
-
- if (!Check(rc)) {
- ThrowDJX(rc, "SQLGetInfo"); // Temporary
- // *buffer = '\0';
- } // endif rc
-
- p = PlugDup(m_G, buffer);
- return p;
-} // end of GetStringInfo
-
-/***********************************************************************/
-/* Utility routines. */
-/***********************************************************************/
-void JDBConn::OnSetOptions(HSTMT hstmt)
-{
- RETCODE rc;
- ASSERT(m_hdbc != SQL_NULL_HDBC);
-
- if ((signed)m_QueryTimeout != -1) {
- // Attempt to set query timeout. Ignore failure
- rc = SQLSetStmtOption(hstmt, SQL_QUERY_TIMEOUT, m_QueryTimeout);
-
- if (!Check(rc))
- // don't attempt it again
- m_QueryTimeout = (DWORD)-1;
-
- } // endif m_QueryTimeout
-
- if (m_RowsetSize > 0) {
- // Attempt to set rowset size.
- // In case of failure reset it to 0 to use Fetch.
- rc = SQLSetStmtOption(hstmt, SQL_ROWSET_SIZE, m_RowsetSize);
-
- if (!Check(rc))
- // don't attempt it again
- m_RowsetSize = 0;
-
- } // endif m_RowsetSize
-
-} // end of OnSetOptions
-#endif // 0
-
/***********************************************************************/
/* Utility routine. */
/***********************************************************************/
@@ -1007,7 +814,7 @@ int JDBConn::Open(PJPARM sop)
#define N 1
#endif
- // Java source will be compiled as ajar file installed in the plugin dir
+ // Java source will be compiled as a jar file installed in the plugin dir
jpop->Append(sep);
jpop->Append(GetPluginDir());
jpop->Append("JdbcInterface.jar");
@@ -1204,6 +1011,21 @@ int JDBConn::Open(PJPARM sop)
return RC_FX;
} // endif Msg
+ jmethodID qcid = nullptr;
+
+ if (!gmID(g, qcid, "GetQuoteString", "()Ljava/lang/String;")) {
+ jstring s = (jstring)env->CallObjectMethod(job, qcid);
+
+ if (s != nullptr) {
+ char *qch = (char*)env->GetStringUTFChars(s, (jboolean)false);
+ m_IDQuoteChar[0] = *qch;
+ } else {
+ s = (jstring)env->CallObjectMethod(job, errid);
+ Msg = (char*)env->GetStringUTFChars(s, (jboolean)false);
+ } // endif s
+
+ } // endif qcid
+
if (gmID(g, typid, "ColumnType", "(ILjava/lang/String;)I"))
return RC_FX;
else
@@ -1345,9 +1167,10 @@ void JDBConn::Close()
/***********************************************************************/
void JDBConn::SetColumnValue(int rank, PSZ name, PVAL val)
{
- PGLOBAL& g = m_G;
- jint ctyp;
- jstring cn, jn = nullptr;
+ PGLOBAL& g = m_G;
+ jint ctyp;
+ jstring cn, jn = nullptr;
+ jobject jb = nullptr;
if (rank == 0)
if (!name || (jn = env->NewStringUTF(name)) == nullptr) {
@@ -1363,21 +1186,32 @@ void JDBConn::SetColumnValue(int rank, PSZ name, PVAL val)
longjmp(g->jumper[g->jump_level], TYPE_AM_JDBC);
} // endif Check
+ if (val->GetNullable())
+ if (!gmID(g, objfldid, "ObjectField", "(ILjava/lang/String;)Ljava/lang/Object;")) {
+ jb = env->CallObjectMethod(job, objfldid, (jint)rank, jn);
+
+ if (jb == nullptr) {
+ val->Reset();
+ val->SetNull(true);
+ goto chk;
+ } // endif job
+
+ } // endif objfldid
+
switch (ctyp) {
case 12: // VARCHAR
case -1: // LONGVARCHAR
case 1: // CHAR
- if (!gmID(g, chrfldid, "StringField", "(ILjava/lang/String;)Ljava/lang/String;")) {
+ if (jb)
+ cn = (jstring)jb;
+ else if (!gmID(g, chrfldid, "StringField", "(ILjava/lang/String;)Ljava/lang/String;"))
cn = (jstring)env->CallObjectMethod(job, chrfldid, (jint)rank, jn);
+ else
+ cn = nullptr;
- if (cn) {
- const char *field = env->GetStringUTFChars(cn, (jboolean)false);
- val->SetValue_psz((PSZ)field);
- } else {
- val->Reset();
- val->SetNull(true);
- } // endif cn
-
+ if (cn) {
+ const char *field = env->GetStringUTFChars(cn, (jboolean)false);
+ val->SetValue_psz((PSZ)field);
} else
val->Reset();
@@ -1449,6 +1283,7 @@ void JDBConn::SetColumnValue(int rank, PSZ name, PVAL val)
val->Reset();
} // endswitch Type
+ chk:
if (Check()) {
if (rank == 0)
env->DeleteLocalRef(jn);
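[Editor's sketch] The SetColumnValue hunks introduce a null-aware fetch: a nullable column is first read as a generic Object via the ObjectField wrapper method (names mirror the JdbcInterface class touched earlier in this diff). A Java null maps straight to SQL NULL; otherwise the returned jobject doubles as the typed value, so string columns need no second JNI call:

jobject jb = env->CallObjectMethod(job, objfldid, (jint)rank, jn);

if (jb == nullptr) {
  val->Reset();
  val->SetNull(true);                    // Java null -> SQL NULL
} else {
  jstring cn = (jstring)jb;              // e.g. VARCHAR: reuse the object
  const char *field = env->GetStringUTFChars(cn, (jboolean)false);
  val->SetValue_psz((PSZ)field);         // copy into the CONNECT value
}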
diff --git a/storage/connect/jdbconn.h b/storage/connect/jdbconn.h
index 095b1565bd2..0a1c52d4576 100644
--- a/storage/connect/jdbconn.h
+++ b/storage/connect/jdbconn.h
@@ -165,6 +165,7 @@ protected:
jmethodID xpid; // The ExecutePrep method ID
jmethodID pcid; // The ClosePrepStmt method ID
jmethodID errid; // The GetErrmsg method ID
+ jmethodID objfldid; // The ObjectField method ID
jmethodID chrfldid; // The StringField method ID
jmethodID intfldid; // The IntField method ID
jmethodID dblfldid; // The DoubleField method ID
diff --git a/storage/connect/reldef.cpp b/storage/connect/reldef.cpp
index 2c8ada52e6f..ac2327212e0 100644
--- a/storage/connect/reldef.cpp
+++ b/storage/connect/reldef.cpp
@@ -294,7 +294,7 @@ int TABDEF::GetColCatInfo(PGLOBAL g)
nlg+= nof;
case TAB_DIR:
case TAB_XML:
- poff= loff + 1;
+ poff= loff + (pcf->Flags & U_VIRTUAL ? 0 : 1);
break;
case TAB_INI:
case TAB_MAC:
@@ -440,7 +440,11 @@ int TABDEF::GetColCatInfo(PGLOBAL g)
} // endswitch tc
// lrecl must be at least recln to avoid buffer overflow
- recln= MY_MAX(recln, Hc->GetIntegerOption("Lrecl"));
+ if (trace)
+ htrc("Lrecl: Calculated=%d defined=%d\n",
+ recln, Hc->GetIntegerOption("Lrecl"));
+
+ recln = MY_MAX(recln, Hc->GetIntegerOption("Lrecl"));
Hc->SetIntegerOption("Lrecl", recln);
((PDOSDEF)this)->SetLrecl(recln);
} // endif Lrecl
diff --git a/storage/connect/tabjdbc.cpp b/storage/connect/tabjdbc.cpp
index 86fd831b262..e398523892f 100644
--- a/storage/connect/tabjdbc.cpp
+++ b/storage/connect/tabjdbc.cpp
@@ -686,6 +686,9 @@ bool TDBJDBC::MakeInsert(PGLOBAL g)
else
Prepared = true;
+ if (trace)
+ htrc("Insert=%s\n", Query->GetStr());
+
return false;
} // end of MakeInsert
@@ -733,17 +736,18 @@ bool TDBJDBC::MakeCommand(PGLOBAL g)
// If so, it must be quoted in the original query
strlwr(strcat(strcat(strcpy(name, " "), Name), " "));
- if (!strstr(" update delete low_priority ignore quick from ", name))
- strlwr(strcpy(name, Name)); // Not a keyword
- else
+ if (strstr(" update delete low_priority ignore quick from ", name)) {
strlwr(strcat(strcat(strcpy(name, qc), Name), qc));
+ k += 2;
+ } else
+ strlwr(strcpy(name, Name)); // Not a keyword
if ((p = strstr(qrystr, name))) {
for (i = 0; i < p - qrystr; i++)
stmt[i] = (Qrystr[i] == '`') ? *qc : Qrystr[i];
stmt[i] = 0;
- k = i + (int)strlen(Name);
+ k += i + (int)strlen(Name);
if (qtd && *(p-1) == ' ')
strcat(strcat(strcat(stmt, qc), TableName), qc);
@@ -765,6 +769,9 @@ bool TDBJDBC::MakeCommand(PGLOBAL g)
return NULL;
} // endif p
+ if (trace)
+ htrc("Command=%s\n", stmt);
+
Query = new(g)STRING(g, 0, stmt);
return (!Query->GetSize());
} // end of MakeCommand
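[Editor's note] A worked example of the quoting fix in MakeCommand, traced from the branches above (values invented for illustration):

//   Name   = "from"          (table name that is also an SQL keyword)
//   name   -> "`from`"       (quoted form; k += 2 for the two quote chars)
//   Qrystr = "update `from` set x=1"
//   After the prefix is copied, k = 2 + i + strlen("from") = 13, i.e. the
//   position just past "`from`" in the original statement, so the rest of
//   the query is appended from the correct offset instead of two bytes
//   too early.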
@@ -1214,6 +1221,10 @@ int TDBJDBC::WriteDB(PGLOBAL g)
} // endif oom
Query->RepLast(')');
+
+ if (trace > 1)
+ htrc("Inserting: %s\n", Query->GetStr());
+
rc = Jcp->ExecuteUpdate(Query->GetStr());
Query->Truncate(len); // Restore query
diff --git a/storage/oqgraph/graphcore.cc b/storage/oqgraph/graphcore.cc
index 4346b94805c..7c8ca53c096 100644
--- a/storage/oqgraph/graphcore.cc
+++ b/storage/oqgraph/graphcore.cc
@@ -485,7 +485,7 @@ namespace open_query
optional<Vertex>
oqgraph_share::find_vertex(VertexID id) const
{
- return ::boost::find_vertex(id, g);
+ return oqgraph3::find_vertex(id, g);
}
#if 0
diff --git a/storage/oqgraph/oqgraph_shim.h b/storage/oqgraph/oqgraph_shim.h
index af240b88ebd..004d7f0f7c5 100644
--- a/storage/oqgraph/oqgraph_shim.h
+++ b/storage/oqgraph/oqgraph_shim.h
@@ -274,6 +274,33 @@ namespace boost
};
#endif
+ template<>
+ struct property_map<oqgraph3::graph, edge_weight_t>
+ {
+ typedef void type;
+ typedef oqgraph3::edge_weight_property_map const_type;
+ };
+
+ template<>
+ struct property_map<oqgraph3::graph, vertex_index_t>
+ {
+ typedef void type;
+ typedef oqgraph3::vertex_index_property_map const_type;
+ };
+
+ template<>
+ struct property_map<oqgraph3::graph, edge_index_t>
+ {
+ typedef void type;
+ typedef oqgraph3::edge_index_property_map const_type;
+ };
+
+}
+
+namespace oqgraph3
+{
+ using namespace boost;
+
inline graph_traits<oqgraph3::graph>::vertex_descriptor
source(
const graph_traits<oqgraph3::graph>::edge_descriptor& e,
@@ -401,27 +428,6 @@ namespace boost
return count;
}
- template<>
- struct property_map<oqgraph3::graph, edge_weight_t>
- {
- typedef void type;
- typedef oqgraph3::edge_weight_property_map const_type;
- };
-
- template<>
- struct property_map<oqgraph3::graph, vertex_index_t>
- {
- typedef void type;
- typedef oqgraph3::vertex_index_property_map const_type;
- };
-
- template<>
- struct property_map<oqgraph3::graph, edge_index_t>
- {
- typedef void type;
- typedef oqgraph3::edge_index_property_map const_type;
- };
-
inline property_map<
oqgraph3::graph,
edge_weight_t>::const_type::reference
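[Editor's sketch] The oqgraph_shim.h reshuffle moves the property_map specializations ahead of their first use and relocates the graph accessors from namespace boost into namespace oqgraph3, so unqualified calls resolve through argument-dependent lookup on the oqgraph3::graph argument. A compilable toy showing both rules (all names invented):

#include <iostream>

namespace lib {
struct weight_tag {};
template <class Graph, class Tag> struct property_map;  // primary template
}

namespace impl {
struct graph {};
}

namespace lib {
// The explicit specialization must be visible before the first point of
// instantiation, which is why the diff hoists these blocks earlier.
template <> struct property_map<impl::graph, weight_tag> {
  typedef double const_type;
};
}

namespace impl {
// Free functions live in the namespace of their argument type so that
// unqualified calls resolve via argument-dependent lookup.
inline const char *source(const graph &) { return "vertex"; }
}

int main() {
  lib::property_map<impl::graph, lib::weight_tag>::const_type w = 1.5;
  impl::graph g;
  std::cout << source(g) << " weight=" << w << "\n";  // ADL finds impl::source
  return 0;
}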
diff --git a/storage/tokudb/CMakeLists.txt b/storage/tokudb/CMakeLists.txt
index 53a4a675bbf..1cc54a444f4 100644
--- a/storage/tokudb/CMakeLists.txt
+++ b/storage/tokudb/CMakeLists.txt
@@ -1,4 +1,4 @@
-SET(TOKUDB_VERSION 5.6.32-78.1)
+SET(TOKUDB_VERSION 5.6.33-79.0)
# PerconaFT only supports x86-64 and cmake-2.8.9+
IF(CMAKE_VERSION VERSION_LESS "2.8.9")
MESSAGE(STATUS "CMake 2.8.9 or higher is required by TokuDB")
diff --git a/storage/tokudb/PerconaFT/buildheader/make_tdb.cc b/storage/tokudb/PerconaFT/buildheader/make_tdb.cc
index 576f902f6ae..7ede78b3c0d 100644
--- a/storage/tokudb/PerconaFT/buildheader/make_tdb.cc
+++ b/storage/tokudb/PerconaFT/buildheader/make_tdb.cc
@@ -422,6 +422,9 @@ static void print_db_env_struct (void) {
"int (*set_checkpoint_pool_threads)(DB_ENV *, uint32_t)",
"void (*set_check_thp)(DB_ENV *, bool new_val)",
"bool (*get_check_thp)(DB_ENV *)",
+ "bool (*set_dir_per_db)(DB_ENV *, bool new_val)",
+ "bool (*get_dir_per_db)(DB_ENV *)",
+ "const char *(*get_data_dir)(DB_ENV *env)",
NULL};
sort_and_dump_fields("db_env", true, extra);
diff --git a/storage/tokudb/PerconaFT/cmake_modules/TokuFeatureDetection.cmake b/storage/tokudb/PerconaFT/cmake_modules/TokuFeatureDetection.cmake
index e3c900fbadb..2f04a33558a 100644
--- a/storage/tokudb/PerconaFT/cmake_modules/TokuFeatureDetection.cmake
+++ b/storage/tokudb/PerconaFT/cmake_modules/TokuFeatureDetection.cmake
@@ -97,7 +97,7 @@ if (NOT HAVE_BACKTRACE_WITHOUT_EXECINFO)
endif ()
endif ()
-if(HAVE_CLOCK_REALTIME)
+if(HAVE_CLOCK_REALTIME AND (NOT APPLE))
list(APPEND EXTRA_SYSTEM_LIBS rt)
else()
list(APPEND EXTRA_SYSTEM_LIBS System)
@@ -109,6 +109,8 @@ check_function_exists(pthread_rwlockattr_setkind_np HAVE_PTHREAD_RWLOCKATTR_SETK
## check for the right way to yield using pthreads
check_function_exists(pthread_yield HAVE_PTHREAD_YIELD)
check_function_exists(pthread_yield_np HAVE_PTHREAD_YIELD_NP)
+## check if we have pthread_threadid_np() (i.e. osx)
+check_function_exists(pthread_threadid_np HAVE_PTHREAD_THREADID_NP)
## check if we have pthread_getthreadid_np() (i.e. freebsd)
check_function_exists(pthread_getthreadid_np HAVE_PTHREAD_GETTHREADID_NP)
check_function_exists(sched_getcpu HAVE_SCHED_GETCPU)
diff --git a/storage/tokudb/PerconaFT/cmake_modules/TokuSetupCompiler.cmake b/storage/tokudb/PerconaFT/cmake_modules/TokuSetupCompiler.cmake
index cce12d575bf..f7e7f76e96e 100644
--- a/storage/tokudb/PerconaFT/cmake_modules/TokuSetupCompiler.cmake
+++ b/storage/tokudb/PerconaFT/cmake_modules/TokuSetupCompiler.cmake
@@ -66,9 +66,7 @@ set_cflags_if_supported(
-Wno-error=address-of-array-temporary
-Wno-error=tautological-constant-out-of-range-compare
-Wno-error=maybe-uninitialized
- -Wno-ignored-attributes
-Wno-error=extern-c-compat
- -Wno-pointer-bool-conversion
-fno-rtti
-fno-exceptions
-Wno-error=nonnull-compare
diff --git a/storage/tokudb/PerconaFT/ft/cachetable/cachetable-internal.h b/storage/tokudb/PerconaFT/ft/cachetable/cachetable-internal.h
index dc6aec9226d..05fb771de08 100644
--- a/storage/tokudb/PerconaFT/ft/cachetable/cachetable-internal.h
+++ b/storage/tokudb/PerconaFT/ft/cachetable/cachetable-internal.h
@@ -138,6 +138,8 @@ struct cachefile {
// nor attempt to open any cachefile with the same fname (dname)
// until this cachefile has been fully closed and unlinked.
bool unlink_on_close;
+ // If set then fclose will not be logged in recovery log.
+ bool skip_log_recover_on_close;
int fd; /* Bug: If a file is opened read-only, then it is stuck in read-only. If it is opened read-write, then subsequent writers can write to it too. */
CACHETABLE cachetable;
struct fileid fileid;
diff --git a/storage/tokudb/PerconaFT/ft/cachetable/cachetable.cc b/storage/tokudb/PerconaFT/ft/cachetable/cachetable.cc
index 5bba977de1a..6d753805fa9 100644
--- a/storage/tokudb/PerconaFT/ft/cachetable/cachetable.cc
+++ b/storage/tokudb/PerconaFT/ft/cachetable/cachetable.cc
@@ -467,6 +467,10 @@ toku_cachefile_fname_in_env (CACHEFILE cf) {
return cf->fname_in_env;
}
+void toku_cachefile_set_fname_in_env(CACHEFILE cf, char *new_fname_in_env) {
+ cf->fname_in_env = new_fname_in_env;
+}
+
int
toku_cachefile_get_fd (CACHEFILE cf) {
return cf->fd;
@@ -2903,6 +2907,18 @@ bool toku_cachefile_is_unlink_on_close(CACHEFILE cf) {
return cf->unlink_on_close;
}
+void toku_cachefile_skip_log_recover_on_close(CACHEFILE cf) {
+ cf->skip_log_recover_on_close = true;
+}
+
+void toku_cachefile_do_log_recover_on_close(CACHEFILE cf) {
+ cf->skip_log_recover_on_close = false;
+}
+
+bool toku_cachefile_is_skip_log_recover_on_close(CACHEFILE cf) {
+ return cf->skip_log_recover_on_close;
+}
+
uint64_t toku_cachefile_size(CACHEFILE cf) {
int64_t file_size;
int fd = toku_cachefile_get_fd(cf);
diff --git a/storage/tokudb/PerconaFT/ft/cachetable/cachetable.h b/storage/tokudb/PerconaFT/ft/cachetable/cachetable.h
index 148326562ab..3b3cb0a2d46 100644
--- a/storage/tokudb/PerconaFT/ft/cachetable/cachetable.h
+++ b/storage/tokudb/PerconaFT/ft/cachetable/cachetable.h
@@ -500,12 +500,18 @@ int toku_cachefile_get_fd (CACHEFILE);
// Return the filename
char * toku_cachefile_fname_in_env (CACHEFILE cf);
+void toku_cachefile_set_fname_in_env(CACHEFILE cf, char *new_fname_in_env);
+
// Make it so when the cachefile closes, the underlying file is unlinked
void toku_cachefile_unlink_on_close(CACHEFILE cf);
// is this cachefile marked as unlink on close?
bool toku_cachefile_is_unlink_on_close(CACHEFILE cf);
+void toku_cachefile_skip_log_recover_on_close(CACHEFILE cf);
+void toku_cachefile_do_log_recover_on_close(CACHEFILE cf);
+bool toku_cachefile_is_skip_log_recover_on_close(CACHEFILE cf);
+
// Return the logger associated with the cachefile
struct tokulogger *toku_cachefile_logger(CACHEFILE cf);
diff --git a/storage/tokudb/PerconaFT/ft/ft-ops.cc b/storage/tokudb/PerconaFT/ft/ft-ops.cc
index f131668889e..30a8710d7aa 100644
--- a/storage/tokudb/PerconaFT/ft/ft-ops.cc
+++ b/storage/tokudb/PerconaFT/ft/ft-ops.cc
@@ -149,22 +149,23 @@ basement nodes, bulk fetch, and partial fetch:
#include "ft/cachetable/checkpoint.h"
#include "ft/cursor.h"
-#include "ft/ft.h"
#include "ft/ft-cachetable-wrappers.h"
#include "ft/ft-flusher.h"
#include "ft/ft-internal.h"
-#include "ft/msg.h"
+#include "ft/ft.h"
#include "ft/leafentry.h"
#include "ft/logger/log-internal.h"
+#include "ft/msg.h"
#include "ft/node.h"
#include "ft/serialize/block_table.h"
-#include "ft/serialize/sub_block.h"
#include "ft/serialize/ft-serialize.h"
#include "ft/serialize/ft_layout_version.h"
#include "ft/serialize/ft_node-serialize.h"
+#include "ft/serialize/sub_block.h"
#include "ft/txn/txn_manager.h"
-#include "ft/ule.h"
#include "ft/txn/xids.h"
+#include "ft/ule.h"
+#include "src/ydb-internal.h"
#include <toku_race_tools.h>
@@ -179,6 +180,7 @@ basement nodes, bulk fetch, and partial fetch:
#include <stdint.h>
+#include <memory>
/* Status is intended for display to humans to help understand system behavior.
* It does not need to be perfectly thread-safe.
*/
@@ -2593,12 +2595,104 @@ static inline int ft_open_maybe_direct(const char *filename, int oflag, int mode
static const mode_t file_mode = S_IRUSR+S_IWUSR+S_IRGRP+S_IWGRP+S_IROTH+S_IWOTH;
+inline bool toku_file_is_root(const char *path, const char *last_slash) {
+ return last_slash == path;
+}
+
+static std::unique_ptr<char[], decltype(&toku_free)> toku_file_get_parent_dir(
+ const char *path) {
+ std::unique_ptr<char[], decltype(&toku_free)> result(nullptr, &toku_free);
+
+ bool has_trailing_slash = false;
+
+ /* Find the offset of the last slash */
+ const char *last_slash = strrchr(path, OS_PATH_SEPARATOR);
+
+ if (!last_slash) {
+ /* No slash in the path, return NULL */
+ return result;
+ }
+
+ /* Ok, there is a slash. Is there anything after it? */
+ if (static_cast<size_t>(last_slash - path + 1) == strlen(path)) {
+ has_trailing_slash = true;
+ }
+
+ /* Reduce repetitive slashes. */

+ while (last_slash > path && last_slash[-1] == OS_PATH_SEPARATOR) {
+ last_slash--;
+ }
+
+ /* Check for the root of a drive. */
+ if (toku_file_is_root(path, last_slash)) {
+ return result;
+ }
+
+ /* If a trailing slash prevented the first strrchr() from trimming
+ the last component of the path, trim that component now. */
+ if (has_trailing_slash) {
+ /* Back up to the previous slash. */
+ last_slash--;
+ while (last_slash > path && last_slash[0] != OS_PATH_SEPARATOR) {
+ last_slash--;
+ }
+
+ /* Reduce repetitive slashes. */
+ while (last_slash > path && last_slash[-1] == OS_PATH_SEPARATOR) {
+ last_slash--;
+ }
+ }
+
+ /* Check for the root of a drive. */
+ if (toku_file_is_root(path, last_slash)) {
+ return result;
+ }
+
+ result.reset(toku_strndup(path, last_slash - path));
+ return result;
+}
+
+static bool toku_create_subdirs_if_needed(const char *path) {
+ static const mode_t dir_mode = S_IRUSR | S_IWUSR | S_IXUSR | S_IRGRP |
+ S_IWGRP | S_IXGRP | S_IROTH | S_IXOTH;
+
+ toku_struct_stat stat;
+ bool subdir_exists = true;
+ auto subdir = toku_file_get_parent_dir(path);
+
+ if (!subdir.get())
+ return true;
+
+ if (toku_stat(subdir.get(), &stat) == -1) {
+ if (ENOENT == get_error_errno())
+ subdir_exists = false;
+ else
+ return false;
+ }
+
+ if (subdir_exists) {
+ if (!S_ISDIR(stat.st_mode))
+ return false;
+ return true;
+ }
+
+ if (!toku_create_subdirs_if_needed(subdir.get()))
+ return false;
+
+ if (toku_os_mkdir(subdir.get(), dir_mode))
+ return false;
+
+ return true;
+}
+
// open a file for use by the ft
// Requires: File does not exist.
static int ft_create_file(FT_HANDLE UU(ft_handle), const char *fname, int *fdp) {
int r;
int fd;
int er;
+ if (!toku_create_subdirs_if_needed(fname))
+ return get_error_errno();
fd = ft_open_maybe_direct(fname, O_RDWR | O_BINARY, file_mode);
assert(fd==-1);
if ((er = get_maybe_error_errno()) != ENOENT) {
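[Editor's sketch] toku_create_subdirs_if_needed above walks up the path and creates any missing parent directories before the ft file is opened. A self-contained POSIX analogue of the same recursion (plain stat/mkdir instead of the toku_* wrappers; mode and error handling simplified):

#include <errno.h>
#include <string>
#include <sys/stat.h>

// Create every missing parent directory of 'path' (the file itself is not
// created). Returns true on success or when nothing needed to be done.
static bool create_parent_dirs(const std::string &path) {
  std::string::size_type slash = path.find_last_of('/');
  if (slash == std::string::npos || slash == 0)
    return true;                        // no parent, or parent is the root
  std::string parent = path.substr(0, slash);

  struct stat st;
  if (stat(parent.c_str(), &st) == 0)
    return S_ISDIR(st.st_mode);         // exists: must be a directory
  if (errno != ENOENT)
    return false;                       // unexpected stat failure

  return create_parent_dirs(parent) &&  // recurse first, then mkdir
         mkdir(parent.c_str(), 0755) == 0;
}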
@@ -4427,6 +4521,55 @@ void toku_ft_unlink(FT_HANDLE handle) {
toku_cachefile_unlink_on_close(cf);
}
+int toku_ft_rename_iname(DB_TXN *txn,
+ const char *data_dir,
+ const char *old_iname,
+ const char *new_iname,
+ CACHETABLE ct) {
+ int r = 0;
+
+ std::unique_ptr<char[], decltype(&toku_free)> new_iname_full(nullptr,
+ &toku_free);
+ std::unique_ptr<char[], decltype(&toku_free)> old_iname_full(nullptr,
+ &toku_free);
+
+ new_iname_full.reset(toku_construct_full_name(2, data_dir, new_iname));
+ old_iname_full.reset(toku_construct_full_name(2, data_dir, old_iname));
+
+ if (txn) {
+ BYTESTRING bs_old_name = {static_cast<uint32_t>(strlen(old_iname) + 1),
+ const_cast<char *>(old_iname)};
+ BYTESTRING bs_new_name = {static_cast<uint32_t>(strlen(new_iname) + 1),
+ const_cast<char *>(new_iname)};
+ FILENUM filenum = FILENUM_NONE;
+ {
+ CACHEFILE cf;
+ r = toku_cachefile_of_iname_in_env(ct, old_iname, &cf);
+ if (r != ENOENT) {
+ char *old_fname_in_cf = toku_cachefile_fname_in_env(cf);
+ toku_cachefile_set_fname_in_env(cf, toku_xstrdup(new_iname));
+ toku_free(old_fname_in_cf);
+ filenum = toku_cachefile_filenum(cf);
+ }
+ }
+ toku_logger_save_rollback_frename(
+ db_txn_struct_i(txn)->tokutxn, &bs_old_name, &bs_new_name);
+ toku_log_frename(db_txn_struct_i(txn)->tokutxn->logger,
+ (LSN *)0,
+ 0,
+ toku_txn_get_txnid(db_txn_struct_i(txn)->tokutxn),
+ bs_old_name,
+ filenum,
+ bs_new_name);
+ }
+
+ r = toku_os_rename(old_iname_full.get(), new_iname_full.get());
+ if (r != 0)
+ return r;
+ r = toku_fsync_directory(new_iname_full.get());
+ return r;
+}
+
int toku_ft_get_fragmentation(FT_HANDLE ft_handle, TOKU_DB_FRAGMENTATION report) {
int fd = toku_cachefile_get_fd(ft_handle->ft->cf);
toku_ft_lock(ft_handle->ft);
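[Editor's sketch] toku_ft_rename_iname renames the file only after logging the intent, then fsyncs the directory so the new entry is durable. The filesystem half of that protocol is the standard rename-then-fsync-directory idiom, sketched here with plain POSIX calls:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

// rename() swaps the directory entry atomically, but the entry is only
// guaranteed durable once the containing directory itself is fsync'ed.
static int durable_rename(const char *oldp, const char *newp,
                          const char *dirp) {
  if (rename(oldp, newp) != 0)
    return -1;
  int dfd = open(dirp, O_RDONLY);       // fsync the parent directory
  if (dfd < 0)
    return -1;
  int r = fsync(dfd);
  close(dfd);
  return r;
}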
diff --git a/storage/tokudb/PerconaFT/ft/ft-ops.h b/storage/tokudb/PerconaFT/ft/ft-ops.h
index 313a74628ea..70cf045d43c 100644
--- a/storage/tokudb/PerconaFT/ft/ft-ops.h
+++ b/storage/tokudb/PerconaFT/ft/ft-ops.h
@@ -48,6 +48,8 @@ Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
#include "ft/msg.h"
#include "util/dbt.h"
+#define OS_PATH_SEPARATOR '/'
+
typedef struct ft_handle *FT_HANDLE;
int toku_open_ft_handle (const char *fname, int is_create, FT_HANDLE *, int nodesize, int basementnodesize, enum toku_compression_method compression_method, CACHETABLE, TOKUTXN, int(*)(DB *,const DBT*,const DBT*)) __attribute__ ((warn_unused_result));
diff --git a/storage/tokudb/PerconaFT/ft/ft.cc b/storage/tokudb/PerconaFT/ft/ft.cc
index 699fcc57603..7c94b4c59d3 100644
--- a/storage/tokudb/PerconaFT/ft/ft.cc
+++ b/storage/tokudb/PerconaFT/ft/ft.cc
@@ -253,7 +253,19 @@ static void ft_close(CACHEFILE cachefile, int fd, void *header_v, bool oplsn_val
char* fname_in_env = toku_cachefile_fname_in_env(cachefile);
assert(fname_in_env);
BYTESTRING bs = {.len=(uint32_t) strlen(fname_in_env), .data=fname_in_env};
- toku_log_fclose(logger, &lsn, ft->h->dirty, bs, toku_cachefile_filenum(cachefile)); // flush the log on close (if new header is being written), otherwise it might not make it out.
+ if (!toku_cachefile_is_skip_log_recover_on_close(cachefile)) {
+ toku_log_fclose(
+ logger,
+ &lsn,
+ ft->h->dirty,
+ bs,
+ toku_cachefile_filenum(cachefile)); // flush the log on
+ // close (if new header
+ // is being written),
+ // otherwise it might
+ // not make it out.
+ toku_cachefile_do_log_recover_on_close(cachefile);
+ }
}
}
if (ft->h->dirty) { // this is the only place this bit is tested (in currentheader)
diff --git a/storage/tokudb/PerconaFT/ft/ft.h b/storage/tokudb/PerconaFT/ft/ft.h
index d600e093bdc..7a3c4fa783c 100644
--- a/storage/tokudb/PerconaFT/ft/ft.h
+++ b/storage/tokudb/PerconaFT/ft/ft.h
@@ -53,6 +53,12 @@ typedef struct ft_options *FT_OPTIONS;
void toku_ft_unlink(FT_HANDLE handle);
void toku_ft_unlink_on_commit(FT_HANDLE handle, TOKUTXN txn);
+int toku_ft_rename_iname(DB_TXN *txn,
+ const char *data_dir,
+ const char *old_iname,
+ const char *new_iname,
+ CACHETABLE ct);
+
void toku_ft_init_reflock(FT ft);
void toku_ft_destroy_reflock(FT ft);
void toku_ft_grab_reflock(FT ft);
diff --git a/storage/tokudb/PerconaFT/ft/logger/logformat.cc b/storage/tokudb/PerconaFT/ft/logger/logformat.cc
index 6f3baa81c86..49b61138803 100644
--- a/storage/tokudb/PerconaFT/ft/logger/logformat.cc
+++ b/storage/tokudb/PerconaFT/ft/logger/logformat.cc
@@ -90,6 +90,10 @@ const struct logtype rollbacks[] = {
{"fcreate", 'F', FA{{"FILENUM", "filenum", 0},
{"BYTESTRING", "iname", 0},
NULLFIELD}, LOG_BEGIN_ACTION_NA},
+ //rename file
+ {"frename", 'n', FA{{"BYTESTRING", "old_iname", 0},
+ {"BYTESTRING", "new_iname", 0},
+ NULLFIELD}, LOG_BEGIN_ACTION_NA},
// cmdinsert is used to insert a key-value pair into a DB. For rollback we don't need the data.
{"cmdinsert", 'i', FA{
{"FILENUM", "filenum", 0},
@@ -195,6 +199,11 @@ const struct logtype logtypes[] = {
{"fdelete", 'U', FA{{"TXNID_PAIR", "xid", 0},
{"FILENUM", "filenum", 0},
NULLFIELD}, SHOULD_LOG_BEGIN},
+ {"frename", 'n', FA{{"TXNID_PAIR", "xid", 0},
+ {"BYTESTRING", "old_iname", 0},
+ {"FILENUM", "old_filenum", 0},
+ {"BYTESTRING", "new_iname", 0},
+ NULLFIELD}, IGNORE_LOG_BEGIN},
{"enq_insert", 'I', FA{{"FILENUM", "filenum", 0},
{"TXNID_PAIR", "xid", 0},
{"BYTESTRING", "key", 0},
diff --git a/storage/tokudb/PerconaFT/ft/logger/recover.cc b/storage/tokudb/PerconaFT/ft/logger/recover.cc
index 38f29773bd6..a9c30c0e37a 100644
--- a/storage/tokudb/PerconaFT/ft/logger/recover.cc
+++ b/storage/tokudb/PerconaFT/ft/logger/recover.cc
@@ -36,6 +36,7 @@ Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+#include <memory>
#include "ft/cachetable/cachetable.h"
#include "ft/cachetable/checkpoint.h"
#include "ft/ft.h"
@@ -935,6 +936,83 @@ static int toku_recover_backward_fdelete (struct logtype_fdelete *UU(l), RECOVER
return 0;
}
+static int toku_recover_frename(struct logtype_frename *l, RECOVER_ENV renv) {
+ assert(renv);
+ assert(renv->env);
+
+ toku_struct_stat stat;
+ const char *data_dir = renv->env->get_data_dir(renv->env);
+ bool old_exist = true;
+ bool new_exist = true;
+
+ assert(data_dir);
+
+ struct file_map_tuple *tuple;
+
+ std::unique_ptr<char[], decltype(&toku_free)> old_iname_full(
+ toku_construct_full_name(2, data_dir, l->old_iname.data), &toku_free);
+ std::unique_ptr<char[], decltype(&toku_free)> new_iname_full(
+ toku_construct_full_name(2, data_dir, l->new_iname.data), &toku_free);
+
+ if (toku_stat(old_iname_full.get(), &stat) == -1) {
+ if (ENOENT == errno)
+ old_exist = false;
+ else
+ return 1;
+ }
+
+ if (toku_stat(new_iname_full.get(), &stat) == -1) {
+ if (ENOENT == errno)
+ new_exist = false;
+ else
+ return 1;
+ }
+
+ // Both old and new files can exist if:
+ // - rename() is not completed
+ // - fcreate was replayed during recovery
+  // The 'stale cachefiles' container cachefile_list::m_stale_fileid holds
+  // cachefiles that are closed but not yet evicted; its key is the
+  // fs-dependent file id - a (device id, inode number) pair. Since the new
+  // file is not supposed to have been created yet when recovery runs, the
+  // container can only hold the cachefile of the old file. To preserve the
+  // old file's id and keep its cachefile in the container, the new file is
+  // removed and the old file is renamed.
+ if (old_exist && new_exist &&
+ (toku_os_unlink(new_iname_full.get()) == -1 ||
+ toku_os_rename(old_iname_full.get(), new_iname_full.get()) == -1 ||
+ toku_fsync_directory(old_iname_full.get()) == -1 ||
+ toku_fsync_directory(new_iname_full.get()) == -1))
+ return 1;
+
+ if (old_exist && !new_exist &&
+ (toku_os_rename(old_iname_full.get(), new_iname_full.get()) == -1 ||
+ toku_fsync_directory(old_iname_full.get()) == -1 ||
+ toku_fsync_directory(new_iname_full.get()) == -1))
+ return 1;
+
+ if (file_map_find(&renv->fmap, l->old_filenum, &tuple) != DB_NOTFOUND) {
+ if (tuple->iname)
+ toku_free(tuple->iname);
+ tuple->iname = toku_xstrdup(l->new_iname.data);
+ }
+
+ TOKUTXN txn = NULL;
+ toku_txnid2txn(renv->logger, l->xid, &txn);
+
+ if (txn)
+ toku_logger_save_rollback_frename(txn, &l->old_iname, &l->new_iname);
+
+ return 0;
+}
+
+static int toku_recover_backward_frename(struct logtype_frename *UU(l),
+ RECOVER_ENV UU(renv)) {
+ // nothing
+ return 0;
+}
+
static int toku_recover_enq_insert (struct logtype_enq_insert *l, RECOVER_ENV renv) {
int r;
TOKUTXN txn = NULL;
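[Editor's note] For reference, the four existence cases toku_recover_frename handles, read straight off the branches above:

  old  new   action on replaying frename(old -> new)
  ---  ---   ----------------------------------------------------
  yes  yes   unlink(new), rename(old, new), fsync both directories
  yes  no    rename(old, new), fsync both directories
  no   yes   no filesystem action (rename completed before crash)
  no   no    no filesystem action

In every case the in-memory file map entry for old_filenum is repointed to the new iname, and a rollback entry is saved if the transaction is live.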
diff --git a/storage/tokudb/PerconaFT/ft/serialize/rbtree_mhs.h b/storage/tokudb/PerconaFT/ft/serialize/rbtree_mhs.h
index 92f1e278e1a..eb8c953b08c 100644
--- a/storage/tokudb/PerconaFT/ft/serialize/rbtree_mhs.h
+++ b/storage/tokudb/PerconaFT/ft/serialize/rbtree_mhs.h
@@ -106,6 +106,7 @@ namespace MhsRbTree {
static const uint64_t MHS_MAX_VAL = 0xffffffffffffffff;
OUUInt64() : _value(0) {}
OUUInt64(uint64_t s) : _value(s) {}
+ OUUInt64(const OUUInt64& o) : _value(o._value) {}
bool operator<(const OUUInt64 &r) const {
invariant(!(_value == MHS_MAX_VAL && r.ToInt() == MHS_MAX_VAL));
return _value < r.ToInt();
@@ -182,15 +183,18 @@ namespace MhsRbTree {
class Node {
public:
- struct BlockPair {
+ class BlockPair {
+ public:
OUUInt64 _offset;
OUUInt64 _size;
BlockPair() : _offset(0), _size(0) {}
BlockPair(uint64_t o, uint64_t s) : _offset(o), _size(s) {}
-
BlockPair(OUUInt64 o, OUUInt64 s) : _offset(o), _size(s) {}
- int operator<(const struct BlockPair &rhs) const {
+ BlockPair(const BlockPair &o)
+ : _offset(o._offset), _size(o._size) {}
+
+ int operator<(const BlockPair &rhs) const {
return _offset < rhs._offset;
}
int operator<(const uint64_t &o) const { return _offset < o; }
@@ -203,15 +207,15 @@ namespace MhsRbTree {
};
EColor _color;
- struct BlockPair _hole;
- struct Pair _label;
+ BlockPair _hole;
+ Pair _label;
Node *_left;
Node *_right;
Node *_parent;
Node(EColor c,
Node::BlockPair h,
- struct Pair lb,
+ Pair lb,
Node *l,
Node *r,
Node *p)
diff --git a/storage/tokudb/PerconaFT/ft/tests/test-rbtree-insert-remove-without-mhs.cc b/storage/tokudb/PerconaFT/ft/tests/test-rbtree-insert-remove-without-mhs.cc
index 85f29ce9813..cefe66335a6 100644
--- a/storage/tokudb/PerconaFT/ft/tests/test-rbtree-insert-remove-without-mhs.cc
+++ b/storage/tokudb/PerconaFT/ft/tests/test-rbtree-insert-remove-without-mhs.cc
@@ -53,9 +53,10 @@ static void generate_random_input() {
std::srand(unsigned(std::time(0)));
// set some values:
- for (uint64_t i = 1; i < N; ++i) {
- input_vector.push_back({i, 0});
- old_vector[i] = {i, 0};
+ for (uint64_t i = 0; i < N; ++i) {
+ MhsRbTree::Node::BlockPair bp = {i+1, 0};
+ input_vector.push_back(bp);
+ old_vector[i] = bp;
}
// using built-in random generator:
std::random_shuffle(input_vector.begin(), input_vector.end(), myrandom);
diff --git a/storage/tokudb/PerconaFT/ft/txn/roll.cc b/storage/tokudb/PerconaFT/ft/txn/roll.cc
index 90eee1e580a..9f3977743a0 100644
--- a/storage/tokudb/PerconaFT/ft/txn/roll.cc
+++ b/storage/tokudb/PerconaFT/ft/txn/roll.cc
@@ -38,13 +38,13 @@ Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
/* rollback and rollforward routines. */
-
-#include "ft/ft.h"
+#include <memory>
#include "ft/ft-ops.h"
+#include "ft/ft.h"
#include "ft/log_header.h"
#include "ft/logger/log-internal.h"
-#include "ft/txn/xids.h"
#include "ft/txn/rollback-apply.h"
+#include "ft/txn/xids.h"
// functionality provided by roll.c is exposed by an autogenerated
// header file, logheader.h
@@ -162,10 +162,122 @@ toku_rollback_fcreate (FILENUM filenum,
// directory row lock for its dname) and we would not get this
// far if there were other live handles.
toku_cachefile_unlink_on_close(cf);
+ toku_cachefile_skip_log_recover_on_close(cf);
done:
return 0;
}
+int toku_commit_frename(BYTESTRING /* old_name */,
+ BYTESTRING /* new_iname */,
+ TOKUTXN /* txn */,
+ LSN UU(oplsn)) {
+ return 0;
+}
+
+int toku_rollback_frename(BYTESTRING old_iname,
+ BYTESTRING new_iname,
+ TOKUTXN txn,
+ LSN UU(oplsn)) {
+ assert(txn);
+ assert(txn->logger);
+ assert(txn->logger->ct);
+
+ CACHETABLE cachetable = txn->logger->ct;
+
+ toku_struct_stat stat;
+ bool old_exist = true;
+ bool new_exist = true;
+
+ std::unique_ptr<char[], decltype(&toku_free)> old_iname_full(
+ toku_cachetable_get_fname_in_cwd(cachetable, old_iname.data),
+ &toku_free);
+ std::unique_ptr<char[], decltype(&toku_free)> new_iname_full(
+ toku_cachetable_get_fname_in_cwd(cachetable, new_iname.data),
+ &toku_free);
+
+ if (toku_stat(old_iname_full.get(), &stat) == -1) {
+ if (ENOENT == errno)
+ old_exist = false;
+ else
+ return 1;
+ }
+
+ if (toku_stat(new_iname_full.get(), &stat) == -1) {
+ if (ENOENT == errno)
+ new_exist = false;
+ else
+ return 1;
+ }
+
+ // Both old and new files can exist if:
+ // - rename() is not completed
+ // - fcreate was replayed during recovery
+  // The 'stale cachefiles' container cachefile_list::m_stale_fileid holds
+  // cachefiles that are closed but not yet evicted; its key is the
+  // fs-dependent file id - a (device id, inode number) pair. To preserve
+  // the new file's id and keep its cachefile in the container, the old
+  // file is removed and the new file is renamed.
+ if (old_exist && new_exist &&
+ (toku_os_unlink(old_iname_full.get()) == -1 ||
+ toku_os_rename(new_iname_full.get(), old_iname_full.get()) == -1 ||
+ toku_fsync_directory(new_iname_full.get()) == -1 ||
+ toku_fsync_directory(old_iname_full.get()) == -1))
+ return 1;
+
+ if (!old_exist && new_exist &&
+ (toku_os_rename(new_iname_full.get(), old_iname_full.get()) == -1 ||
+ toku_fsync_directory(new_iname_full.get()) == -1 ||
+ toku_fsync_directory(old_iname_full.get()) == -1))
+ return 1;
+
+ // it's ok if both files do not exist on recovery
+ if (!old_exist && !new_exist)
+ assert(txn->for_recovery);
+
+ CACHEFILE cf;
+ int r = toku_cachefile_of_iname_in_env(cachetable, new_iname.data, &cf);
+ if (r != ENOENT) {
+ char *old_fname_in_cf = toku_cachefile_fname_in_env(cf);
+ toku_cachefile_set_fname_in_env(cf, toku_xstrdup(old_iname.data));
+ toku_free(old_fname_in_cf);
+        // There is at least one case where fclose logging causes an error:
+        // 1) start transaction
+        // 2) create ft 'a' (write "fcreate" to the recovery log)
+        // 3) rename ft 'a' to 'b' (write "frename" to the recovery log)
+        // 4) abort transaction:
+        //    a) roll back the rename (renames 'b' back to 'a')
+        //    b) roll back the create (removes 'a'):
+        //       invokes toku_cachefile_unlink_on_close - a lazy unlink on
+        //       file close that just sets the corresponding flag in the
+        //       cachefile object
+        //    c) write "unlink" for 'a' to the recovery log
+        //       (when the transaction is aborted all locks are released;
+        //       when the file lock is released, the file is closed and,
+        //       if the flag is set in the cachefile object, unlinked)
+        // 5) crash
+        //
+        // After this we have the following records in the recovery log:
+        // - create ft 'a',
+        // - rename 'a' to 'b',
+        // - unlink 'a'
+        //
+        // On recovery:
+        // - create 'a'
+        // - rename 'a' to 'b'
+        // - unlink 'a' - as file 'a' does not exist, the assert here
+        //   crashes
+        //
+        // There is no need to write "unlink" to the recovery log in (4a)
+        // because 'a' will be removed by the transaction rollback during
+        // recovery.
+ toku_cachefile_skip_log_recover_on_close(cf);
+ }
+
+ return 0;
+}
+
int find_ft_from_filenum (const FT &ft, const FILENUM &filenum);
int find_ft_from_filenum (const FT &ft, const FILENUM &filenum) {
FILENUM thisfnum = toku_cachefile_filenum(ft->cf);
diff --git a/storage/tokudb/PerconaFT/portability/file.cc b/storage/tokudb/PerconaFT/portability/file.cc
index 5332a2dff55..0e3efc1a12a 100644
--- a/storage/tokudb/PerconaFT/portability/file.cc
+++ b/storage/tokudb/PerconaFT/portability/file.cc
@@ -356,6 +356,12 @@ toku_os_close(int fd) { // if EINTR, retry until success
return r;
}
+int toku_os_rename(const char *old_name, const char *new_name) {
+ return rename(old_name, new_name);
+}
+
+int toku_os_unlink(const char *path) { return unlink(path); }
+
ssize_t
toku_os_read(int fd, void *buf, size_t count) {
ssize_t r;
diff --git a/storage/tokudb/PerconaFT/portability/memory.cc b/storage/tokudb/PerconaFT/portability/memory.cc
index 2de12699c61..5430ff84b70 100644
--- a/storage/tokudb/PerconaFT/portability/memory.cc
+++ b/storage/tokudb/PerconaFT/portability/memory.cc
@@ -313,6 +313,15 @@ toku_strdup(const char *s) {
return (char *) toku_memdup(s, strlen(s)+1);
}
+char *toku_strndup(const char *s, size_t n) {
+ size_t s_size = strlen(s);
+ size_t bytes_to_copy = n > s_size ? s_size : n;
+ ++bytes_to_copy;
+ char *result = (char *)toku_memdup(s, bytes_to_copy);
+ result[bytes_to_copy - 1] = 0;
+ return result;
+}
+
void
toku_free(void *p) {
if (p) {
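[Editor's note] A quick behavioral check of toku_strndup above (toku_memdup copies the requested bytes through toku_malloc): the copy length is capped at the source length, and the result is always NUL-terminated.

char *a = toku_strndup("hello", 3);   // "hel"
char *b = toku_strndup("hi", 100);    // "hi" - capped, not 100 bytes
toku_free(a);                         // toku_free is the matching deallocator
toku_free(b);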
diff --git a/storage/tokudb/PerconaFT/portability/memory.h b/storage/tokudb/PerconaFT/portability/memory.h
index 7780536f279..5ae652d39fc 100644
--- a/storage/tokudb/PerconaFT/portability/memory.h
+++ b/storage/tokudb/PerconaFT/portability/memory.h
@@ -125,7 +125,9 @@ size_t toku_malloc_usable_size(void *p) __attribute__((__visibility__("default")
void *toku_memdup (const void *v, size_t len);
/* Toku-version of strdup. Use this so that it calls toku_malloc() */
char *toku_strdup (const char *s) __attribute__((__visibility__("default")));
-
+/* Toku-version of strndup. Use this so that it calls toku_malloc() */
+char *toku_strndup(const char *s, size_t n)
+ __attribute__((__visibility__("default")));
/* Copy memory. Analogous to strdup() Crashes instead of returning NULL */
void *toku_xmemdup (const void *v, size_t len) __attribute__((__visibility__("default")));
/* Toku-version of strdup. Use this so that it calls toku_xmalloc() Crashes instead of returning NULL */
diff --git a/storage/tokudb/PerconaFT/portability/portability.cc b/storage/tokudb/PerconaFT/portability/portability.cc
index ba9f8d48ed5..19f445a85d7 100644
--- a/storage/tokudb/PerconaFT/portability/portability.cc
+++ b/storage/tokudb/PerconaFT/portability/portability.cc
@@ -63,6 +63,9 @@ Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
#if defined(HAVE_SYS_SYSCTL_H)
# include <sys/sysctl.h>
#endif
+#if defined(HAVE_PTHREAD_H)
+# include <pthread.h>
+#endif
#if defined(HAVE_PTHREAD_NP_H)
# include <pthread_np.h>
#endif
@@ -102,7 +105,11 @@ toku_os_getpid(void) {
int
toku_os_gettid(void) {
-#if defined(__NR_gettid)
+#if defined(HAVE_PTHREAD_THREADID_NP)
+ uint64_t result;
+ pthread_threadid_np(NULL, &result);
+ return (int) result; // Used for instrumentation so overflow is ok here.
+#elif defined(__NR_gettid)
return syscall(__NR_gettid);
#elif defined(SYS_gettid)
return syscall(SYS_gettid);
diff --git a/storage/tokudb/PerconaFT/portability/tests/test-xid.cc b/storage/tokudb/PerconaFT/portability/tests/test-xid.cc
index 9ee68906bb3..71736f898ef 100644
--- a/storage/tokudb/PerconaFT/portability/tests/test-xid.cc
+++ b/storage/tokudb/PerconaFT/portability/tests/test-xid.cc
@@ -51,11 +51,18 @@ Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
#if defined(HAVE_PTHREAD_NP_H)
# include <pthread_np.h>
#endif
+#if defined(HAVE_PTHREAD_H)
+# include <pthread.h>
+#endif
// since we implement the same thing here as in toku_os_gettid, this test
// is pretty pointless
static int gettid(void) {
-#if defined(__NR_gettid)
+#if defined(HAVE_PTHREAD_THREADID_NP)
+ uint64_t result;
+ pthread_threadid_np(NULL, &result);
+ return (int) result;
+#elif defined(__NR_gettid)
return syscall(__NR_gettid);
#elif defined(SYS_gettid)
return syscall(SYS_gettid);
diff --git a/storage/tokudb/PerconaFT/portability/toku_config.h.in b/storage/tokudb/PerconaFT/portability/toku_config.h.in
index 1a34bf1ef45..18f6779796f 100644
--- a/storage/tokudb/PerconaFT/portability/toku_config.h.in
+++ b/storage/tokudb/PerconaFT/portability/toku_config.h.in
@@ -87,6 +87,7 @@ Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
#cmakedefine HAVE_PTHREAD_RWLOCKATTR_SETKIND_NP 1
#cmakedefine HAVE_PTHREAD_YIELD 1
#cmakedefine HAVE_PTHREAD_YIELD_NP 1
+#cmakedefine HAVE_PTHREAD_THREADID_NP 1
#cmakedefine HAVE_PTHREAD_GETTHREADID_NP 1
#cmakedefine PTHREAD_YIELD_RETURNS_INT 1
diff --git a/storage/tokudb/PerconaFT/portability/toku_portability.h b/storage/tokudb/PerconaFT/portability/toku_portability.h
index 921d3a309f6..f127b0fe172 100644
--- a/storage/tokudb/PerconaFT/portability/toku_portability.h
+++ b/storage/tokudb/PerconaFT/portability/toku_portability.h
@@ -246,6 +246,8 @@ int toku_os_open(const char *path, int oflag, int mode);
int toku_os_open_direct(const char *path, int oflag, int mode);
int toku_os_close(int fd);
int toku_os_fclose(FILE * stream);
+int toku_os_rename(const char *old_name, const char *new_name);
+int toku_os_unlink(const char *path);
ssize_t toku_os_read(int fd, void *buf, size_t count);
ssize_t toku_os_pread(int fd, void *buf, size_t count, off_t offset);
void toku_os_recursive_delete(const char *path);
diff --git a/storage/tokudb/PerconaFT/src/tests/CMakeLists.txt b/storage/tokudb/PerconaFT/src/tests/CMakeLists.txt
index 47f6aa44a75..c01a8f0d628 100644
--- a/storage/tokudb/PerconaFT/src/tests/CMakeLists.txt
+++ b/storage/tokudb/PerconaFT/src/tests/CMakeLists.txt
@@ -108,11 +108,11 @@ if(BUILD_TESTING OR BUILD_SRC_TESTS)
foreach(ov c d r)
if (ov STREQUAL c)
- set(gset 0)
set(hset 0)
+ set(iset 0)
else ()
- set(gset 0 1 2 3 4 5)
- set(hset 0 1)
+ set(hset 0 1 2 3 4 5)
+ set(iset 0 1)
endif ()
foreach(av 0 1)
@@ -130,25 +130,27 @@ if(BUILD_TESTING OR BUILD_SRC_TESTS)
foreach(dv ${dset})
foreach(ev ${eset})
foreach(fv 0 1)
- foreach(gv ${gset})
+ foreach(gv 0 1)
foreach(hv ${hset})
-
- if ((NOT ov STREQUAL c) AND (NOT cv) AND ((NOT bv) OR (NOT ev) OR (dv)))
- set(iset 0 1)
- else ()
- set(iset 0)
- endif ()
-
foreach(iv ${iset})
- set(testname "ydb/recovery_fileops_unit.${ov}${av}${bv}${cv}${dv}${ev}${fv}${gv}${hv}${iv}")
- set(envdir "recovery_fileops_unit_dir/${ov}${av}${bv}${cv}${dv}${ev}${fv}${gv}${hv}${iv}")
- set(errfile "recovery_fileops_unit_dir/${ov}${av}${bv}${cv}${dv}${ev}${fv}${gv}${hv}${iv}.ctest-errors")
- add_test(NAME ${testname}
- COMMAND run_recovery_fileops_unit.sh $<TARGET_FILE:recovery_fileops_unit.tdb> ${errfile} 137
- -O ${ov} -A ${av} -B ${bv} -C ${cv} -D ${dv} -E ${ev} -F ${fv} -G ${gv} -H ${hv} -I ${iv}
- )
- setup_toku_test_properties(${testname} ${envdir})
- set_property(DIRECTORY APPEND PROPERTY ADDITIONAL_MAKE_CLEAN_FILES "${errfile}")
+
+ if ((NOT ov STREQUAL c) AND (NOT cv) AND ((NOT bv) OR (NOT ev) OR (dv)))
+ set(jset 0 1)
+ else ()
+ set(jset 0)
+ endif ()
+
+ foreach(jv ${jset})
+ set(testname "ydb/recovery_fileops_unit.${ov}${av}${bv}${cv}${dv}${ev}${fv}${gv}${hv}${iv}${jv}")
+ set(envdir "recovery_fileops_unit_dir/${ov}${av}${bv}${cv}${dv}${ev}${fv}${gv}${hv}${iv}${jv}")
+ set(errfile "recovery_fileops_unit_dir/${ov}${av}${bv}${cv}${dv}${ev}${fv}${gv}${hv}${iv}${jv}.ctest-errors")
+ add_test(NAME ${testname}
+ COMMAND run_recovery_fileops_unit.sh $<TARGET_FILE:recovery_fileops_unit.tdb> ${errfile} 137
+ -O ${ov} -A ${av} -B ${bv} -C ${cv} -D ${dv} -E ${ev} -F ${fv} -G ${gv} -H ${hv} -I ${iv} -J ${jv}
+ )
+ setup_toku_test_properties(${testname} ${envdir})
+ set_property(DIRECTORY APPEND PROPERTY ADDITIONAL_MAKE_CLEAN_FILES "${errfile}")
+ endforeach(jv)
endforeach(iv)
endforeach(hv)
endforeach(gv)
diff --git a/storage/tokudb/PerconaFT/src/tests/recovery_fileops_unit.cc b/storage/tokudb/PerconaFT/src/tests/recovery_fileops_unit.cc
index 2c905c5ff12..cc99ab560d8 100644
--- a/storage/tokudb/PerconaFT/src/tests/recovery_fileops_unit.cc
+++ b/storage/tokudb/PerconaFT/src/tests/recovery_fileops_unit.cc
@@ -36,17 +36,17 @@ Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
-#include "test.h"
-#include "toku_pthread.h"
#include <db.h>
-#include <sys/stat.h>
#include <stdlib.h>
-
+#include <sys/stat.h>
+#include "ft/logger/logger.h"
+#include "test.h"
+#include "toku_pthread.h"
static int do_recover;
static int do_crash;
static char fileop;
-static int choices['I'-'A'+1];
+static int choices['J' - 'A' + 1];
const int num_choices = sizeof(choices)/sizeof(choices[0]);
static DB_TXN *txn;
const char *oldname = "oldfoo";
@@ -58,11 +58,14 @@ static char *cmd;
static void
usage(void) {
- fprintf(stderr, "Usage:\n%s [-v|-q]* [-h] (-c|-r) -O fileop -A# -B# -C# -D# -E# -F# [-G# -H# -I#]\n"
- " fileop = c/r/d (create/rename/delete)\n"
- " Where # is a single digit number > 0.\n"
- " A-F are required for fileop=create\n"
- " A-I are required for fileop=delete, fileop=rename\n", cmd);
+ fprintf(stderr,
+ "Usage:\n%s [-v|-q]* [-h] (-c|-r) -O fileop -A# -B# -C# -D# -E# "
+ "-F# -G# [-H# -I# -J#]\n"
+ " fileop = c/r/d (create/rename/delete)\n"
+ " Where # is a single digit number > 0.\n"
+ " A-G are required for fileop=create\n"
+ " A-I are required for fileop=delete, fileop=rename\n",
+ cmd);
exit(1);
}
@@ -129,19 +132,18 @@ get_choice_flush_log_before_crash(void) {
return get_bool_choice('F');
}
-static int
-get_choice_create_type(void) {
- return get_x_choice('G', 6);
-}
+static int get_choice_dir_per_db(void) { return get_bool_choice('G'); }
+
+static int get_choice_create_type(void) { return get_x_choice('H', 6); }
static int
get_choice_txn_does_open_close_before_fileop(void) {
- return get_bool_choice('H');
+ return get_bool_choice('I');
}
static int
get_choice_lock_table_split_fcreate(void) {
- int choice = get_bool_choice('I');
+ int choice = get_bool_choice('J');
if (choice)
assert(fileop_did_commit());
return choice;
@@ -156,63 +158,65 @@ do_args(int argc, char * const argv[]) {
choices[i] = -1;
}
- int c;
- while ((c = getopt(argc, argv, "vqhcrO:A:B:C:D:E:F:G:H:I:X:")) != -1) {
- switch(c) {
- case 'v':
- verbose++;
- break;
- case 'q':
- verbose--;
- if (verbose<0) verbose=0;
- break;
- case 'h':
- case '?':
- usage();
- break;
- case 'c':
- do_crash = 1;
- break;
- case 'r':
- do_recover = 1;
- break;
- case 'O':
- if (fileop != '\0')
+ char c;
+ while ((c = getopt(argc, argv, "vqhcrO:A:B:C:D:E:F:G:H:I:J:X:")) != -1) {
+ switch (c) {
+ case 'v':
+ verbose++;
+ break;
+ case 'q':
+ verbose--;
+ if (verbose < 0)
+ verbose = 0;
+ break;
+ case 'h':
+ case '?':
usage();
- fileop = optarg[0];
- switch (fileop) {
- case 'c':
- case 'r':
- case 'd':
- break;
- default:
+ break;
+ case 'c':
+ do_crash = 1;
+ break;
+ case 'r':
+ do_recover = 1;
+ break;
+ case 'O':
+ if (fileop != '\0')
usage();
- break;
- }
- break;
- case 'A':
- case 'B':
- case 'C':
- case 'D':
- case 'E':
- case 'F':
- case 'G':
- case 'H':
- case 'I':
- if (fileop == '\0')
- usage();
- int num;
- num = atoi(optarg);
- if (num < 0 || num > 9)
- usage();
- choices[c - 'A'] = num;
- break;
- case 'X':
- if (strcmp(optarg, "novalgrind") == 0) {
- // provide a way for the shell script runner to pass an
- // arg that suppresses valgrind on this child process
+ fileop = optarg[0];
+ switch (fileop) {
+ case 'c':
+ case 'r':
+ case 'd':
+ break;
+ default:
+ usage();
+ break;
+ }
+ break;
+ case 'A':
+ case 'B':
+ case 'C':
+ case 'D':
+ case 'E':
+ case 'F':
+ case 'G':
+ case 'H':
+ case 'I':
+ case 'J':
+ if (fileop == '\0')
+ usage();
+ int num;
+ num = atoi(optarg);
+ if (num < 0 || num > 9)
+ usage();
+ choices[c - 'A'] = num;
break;
- }
+ case 'X':
+ if (strcmp(optarg, "novalgrind") == 0) {
+ // provide a way for the shell script runner to pass an
+ // arg that suppresses valgrind on this child process
+ break;
+ }
// otherwise, fall through to an error
default:
usage();
@@ -222,7 +226,7 @@ do_args(int argc, char * const argv[]) {
if (argc!=optind) { usage(); exit(1); }
for (i = 0; i < num_choices; i++) {
- if (i >= 'G' - 'A' && fileop == 'c')
+ if (i >= 'H' - 'A' && fileop == 'c')
break;
if (choices[i] == -1)
usage();
@@ -261,6 +265,8 @@ static void env_startup(void) {
int envflags = DB_INIT_LOCK | DB_INIT_LOG | DB_INIT_MPOOL | DB_INIT_TXN | DB_CREATE | DB_PRIVATE | recover_flag;
r = db_env_create(&env, 0);
CKERR(r);
+ r = env->set_dir_per_db(env, get_choice_dir_per_db());
+ CKERR(r);
env->set_errfile(env, stderr);
r = env->open(env, TOKU_TEST_FILENAME, envflags, S_IRWXU+S_IRWXG+S_IRWXO);
CKERR(r);
@@ -625,8 +631,11 @@ recover_and_verify(void) {
else if (did_create_commit_early())
expect_old_name = 1;
}
- verify_file_exists(oldname, expect_old_name);
- verify_file_exists(newname, expect_new_name);
+    // We can only verify file existence if the recovery log was flushed
+    // before the crash
+ if ((get_choice_flush_log_before_crash())) {
+ verify_file_exists(oldname, expect_old_name);
+ verify_file_exists(newname, expect_new_name);
+ }
env_shutdown();
}
diff --git a/storage/tokudb/PerconaFT/src/ydb-internal.h b/storage/tokudb/PerconaFT/src/ydb-internal.h
index 2d6c84126e1..d40f7795b0b 100644
--- a/storage/tokudb/PerconaFT/src/ydb-internal.h
+++ b/storage/tokudb/PerconaFT/src/ydb-internal.h
@@ -132,7 +132,8 @@ struct __toku_db_env_internal {
int datadir_lockfd;
int logdir_lockfd;
int tmpdir_lockfd;
- bool check_thp; // if set check if transparent huge pages are disables
+ bool check_thp; // if set check if transparent huge pages are disabled
+ bool dir_per_db;
uint64_t (*get_loader_memory_size_callback)(void);
uint64_t default_lock_timeout_msec;
uint64_t (*get_lock_timeout_callback)(uint64_t default_lock_timeout_msec);
diff --git a/storage/tokudb/PerconaFT/src/ydb.cc b/storage/tokudb/PerconaFT/src/ydb.cc
index aed271bce40..3341f6d76c6 100644
--- a/storage/tokudb/PerconaFT/src/ydb.cc
+++ b/storage/tokudb/PerconaFT/src/ydb.cc
@@ -1298,6 +1298,22 @@ env_get_check_thp(DB_ENV * env) {
return env->i->check_thp;
}
+static bool env_set_dir_per_db(DB_ENV *env, bool new_val) {
+ HANDLE_PANICKED_ENV(env);
+ bool r = env->i->dir_per_db;
+ env->i->dir_per_db = new_val;
+ return r;
+}
+
+static bool env_get_dir_per_db(DB_ENV *env) {
+ HANDLE_PANICKED_ENV(env);
+ return env->i->dir_per_db;
+}
+
+static const char *env_get_data_dir(DB_ENV *env) {
+ return env->i->real_data_dir;
+}
+
static int env_dbremove(DB_ENV * env, DB_TXN *txn, const char *fname, const char *dbname, uint32_t flags);
static int
@@ -2700,6 +2716,9 @@ toku_env_create(DB_ENV ** envp, uint32_t flags) {
USENV(do_backtrace);
USENV(set_check_thp);
USENV(get_check_thp);
+ USENV(set_dir_per_db);
+ USENV(get_dir_per_db);
+ USENV(get_data_dir);
#undef USENV
// unlocked methods
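[Editor's sketch] How the three env methods registered above are meant to be driven, based on their definitions earlier in this hunk (a sketch; real callers also open the env, and real_data_dir is only populated once it is open):

DB_ENV *env;
int r = db_env_create(&env, 0);
assert(r == 0);
bool prev = env->set_dir_per_db(env, true);   // returns the previous value
bool on = env->get_dir_per_db(env);           // true from here on
const char *dir = env->get_data_dir(env);     // environment's data directory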
@@ -3045,7 +3064,7 @@ env_dbrename(DB_ENV *env, DB_TXN *txn, const char *fname, const char *dbname, co
if (env_is_db_with_dname_open(env, newname)) {
return toku_ydb_do_error(env, EINVAL, "Cannot rename dictionary; Dictionary with target name has an open handle.\n");
}
-
+
DBT old_dname_dbt;
DBT new_dname_dbt;
DBT iname_dbt;
@@ -3065,10 +3084,35 @@ env_dbrename(DB_ENV *env, DB_TXN *txn, const char *fname, const char *dbname, co
r = EEXIST;
}
else if (r == DB_NOTFOUND) {
+ DBT new_iname_dbt;
+ // Do not rename ft file if 'dir_per_db' option is not set
+ auto new_iname =
+ env->get_dir_per_db(env)
+ ? generate_iname_for_rename_or_open(
+ env, txn, newname, false)
+ : std::unique_ptr<char[], decltype(&toku_free)>(
+ toku_strdup(iname), &toku_free);
+ toku_fill_dbt(
+ &new_iname_dbt, new_iname.get(), strlen(new_iname.get()) + 1);
+
// remove old (dname,iname) and insert (newname,iname) in directory
r = toku_db_del(env->i->directory, txn, &old_dname_dbt, DB_DELETE_ANY, true);
if (r != 0) { goto exit; }
- r = toku_db_put(env->i->directory, txn, &new_dname_dbt, &iname_dbt, 0, true);
+
+ // Do not rename ft file if 'dir_per_db' option is not set
+ if (env->get_dir_per_db(env))
+ r = toku_ft_rename_iname(txn,
+ env->get_data_dir(env),
+ iname,
+ new_iname.get(),
+ env->i->cachetable);
+
+ r = toku_db_put(env->i->directory,
+ txn,
+ &new_dname_dbt,
+ &new_iname_dbt,
+ 0,
+ true);
if (r != 0) { goto exit; }
//Now that we have writelocks on both dnames, verify that there are still no handles open. (to prevent race conditions)
@@ -3091,7 +3135,7 @@ env_dbrename(DB_ENV *env, DB_TXN *txn, const char *fname, const char *dbname, co
// otherwise, we're okay in marking this ft as remove on
// commit. no new handles can open for this dictionary
// because the txn has directory write locks on the dname
- if (txn && !can_acquire_table_lock(env, txn, iname)) {
+ if (txn && !can_acquire_table_lock(env, txn, new_iname.get())) {
r = DB_LOCK_NOTGRANTED;
}
// We don't do anything at the ft or cachetable layer for rename.
diff --git a/storage/tokudb/PerconaFT/src/ydb_db.cc b/storage/tokudb/PerconaFT/src/ydb_db.cc
index e5bd4e7d089..100d1bfa20b 100644
--- a/storage/tokudb/PerconaFT/src/ydb_db.cc
+++ b/storage/tokudb/PerconaFT/src/ydb_db.cc
@@ -83,8 +83,7 @@ ydb_db_layer_get_status(YDB_DB_LAYER_STATUS statp) {
*statp = ydb_db_layer_status;
}
-static void
-create_iname_hint(const char *dname, char *hint) {
+void create_iname_hint(const char *dname, char *hint) {
//Requires: size of hint array must be > strlen(dname)
//Copy alphanumeric characters only.
//Replace strings of non-alphanumeric characters with a single underscore.
@@ -105,11 +104,43 @@ create_iname_hint(const char *dname, char *hint) {
*hint = '\0';
}
+void create_iname_hint_for_dbdir(const char *dname, char *hint) {
+ assert(dname);
+ if (*dname == '.')
+ ++dname;
+ if (*dname == '/')
+ ++dname;
+ bool underscored = false;
+ bool dbdir_is_parsed = false;
+    // Do not change the first '/' because it is the delimiter
+    // that splits the name into the database dir and the table
+    // dir.
+ while (*dname) {
+ if (isalnum(*dname) || (*dname == '/' && !dbdir_is_parsed)) {
+ char c = *dname++;
+ *hint++ = c;
+ if (c == '/')
+ dbdir_is_parsed = true;
+ underscored = false;
+ } else {
+ if (!underscored)
+ *hint++ = '_';
+ dname++;
+ underscored = true;
+ }
+ }
+ *hint = '\0';
+}
+
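[Editor's note] A worked example of create_iname_hint_for_dbdir above, traced from the loop (inputs invented): the leading "./" is stripped, the first '/' after it is kept so the hint preserves the database-dir/table split, and every other run of non-alphanumeric characters collapses to a single '_'.

//   dname                  hint
//   "./mydb/t1-part.x"  -> "mydb/t1_part_x"
//   "./mydb/t#1"        -> "mydb/t_1"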
// n < 0 means to ignore mark and ignore n
// n >= 0 means to include mark ("_B_" or "_P_") with hex value of n in iname
// (intended for use by loader, which will create many inames using one txnid).
-static char *
-create_iname(DB_ENV *env, uint64_t id1, uint64_t id2, char *hint, const char *mark, int n) {
+char *create_iname(DB_ENV *env,
+ uint64_t id1,
+ uint64_t id2,
+ char *hint,
+ const char *mark,
+ int n) {
int bytes;
char inamebase[strlen(hint) +
8 + // hex file format version
@@ -138,6 +169,34 @@ create_iname(DB_ENV *env, uint64_t id1, uint64_t id2, char *hint, const char *ma
return rval;
}
+static uint64_t nontransactional_open_id = 0;
+
+std::unique_ptr<char[], decltype(&toku_free)> generate_iname_for_rename_or_open(
+ DB_ENV *env,
+ DB_TXN *txn,
+ const char *dname,
+ bool is_open) {
+ std::unique_ptr<char[], decltype(&toku_free)> result(nullptr, &toku_free);
+ char hint[strlen(dname) + 1];
+ uint64_t id1 = 0;
+ uint64_t id2 = 0;
+
+ if (txn) {
+ id1 = toku_txn_get_txnid(db_txn_struct_i(txn)->tokutxn).parent_id64;
+ id2 = toku_txn_get_txnid(db_txn_struct_i(txn)->tokutxn).child_id64;
+ } else if (is_open)
+ id1 = toku_sync_fetch_and_add(&nontransactional_open_id, 1);
+
+ if (env->get_dir_per_db(env) && !toku_os_is_absolute_name(dname))
+ create_iname_hint_for_dbdir(dname, hint);
+ else
+ create_iname_hint(dname, hint);
+
+ result.reset(create_iname(env, id1, id2, hint, NULL, -1));
+
+ return result;
+}
+
static int toku_db_open(DB * db, DB_TXN * txn, const char *fname, const char *dbname, DBTYPE dbtype, uint32_t flags, int mode);
// Effect: Do the work required of DB->close().
@@ -227,8 +286,6 @@ db_open_subdb(DB * db, DB_TXN * txn, const char *fname, const char *dbname, DBTY
return r;
}
-static uint64_t nontransactional_open_id = 0;
-
// inames are created here.
// algorithm:
// begin txn
@@ -286,27 +343,15 @@ toku_db_open(DB * db, DB_TXN * txn, const char *fname, const char *dbname, DBTYP
toku_fill_dbt(&dname_dbt, dname, strlen(dname)+1);
toku_init_dbt_flags(&iname_dbt, DB_DBT_REALLOC);
r = toku_db_get(db->dbenv->i->directory, txn, &dname_dbt, &iname_dbt, DB_SERIALIZABLE); // allocates memory for iname
- char *iname = (char *) iname_dbt.data;
+ std::unique_ptr<char[], decltype(&toku_free)> iname(
+ static_cast<char *>(iname_dbt.data), &toku_free);
if (r == DB_NOTFOUND && !is_db_create) {
r = ENOENT;
} else if (r==0 && is_db_excl) {
r = EEXIST;
} else if (r == DB_NOTFOUND) {
- char hint[strlen(dname) + 1];
-
- // create iname and make entry in directory
- uint64_t id1 = 0;
- uint64_t id2 = 0;
-
- if (txn) {
- id1 = toku_txn_get_txnid(db_txn_struct_i(txn)->tokutxn).parent_id64;
- id2 = toku_txn_get_txnid(db_txn_struct_i(txn)->tokutxn).child_id64;
- } else {
- id1 = toku_sync_fetch_and_add(&nontransactional_open_id, 1);
- }
- create_iname_hint(dname, hint);
- iname = create_iname(db->dbenv, id1, id2, hint, NULL, -1); // allocated memory for iname
- toku_fill_dbt(&iname_dbt, iname, strlen(iname) + 1);
+ iname = generate_iname_for_rename_or_open(db->dbenv, txn, dname, true);
+ toku_fill_dbt(&iname_dbt, iname.get(), strlen(iname.get()) + 1);
//
// put_flags will be 0 for performance only, avoid unnecessary query
// if we are creating a hot index, per #3166, we do not want the write lock in directory grabbed.
@@ -318,16 +363,13 @@ toku_db_open(DB * db, DB_TXN * txn, const char *fname, const char *dbname, DBTYP
// we now have an iname
if (r == 0) {
- r = toku_db_open_iname(db, txn, iname, flags, mode);
+ r = toku_db_open_iname(db, txn, iname.get(), flags, mode);
if (r == 0) {
db->i->dname = toku_xstrdup(dname);
env_note_db_opened(db->dbenv, db); // tell env that a new db handle is open (using dname)
}
}
- if (iname) {
- toku_free(iname);
- }
return r;
}
@@ -1181,7 +1223,10 @@ load_inames(DB_ENV * env, DB_TXN * txn, int N, DB * dbs[/*N*/], const char * new
toku_fill_dbt(&dname_dbt, dname, strlen(dname)+1);
// now create new iname
char hint[strlen(dname) + 1];
- create_iname_hint(dname, hint);
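+ // With tokudb_dir_per_db set, relative dnames keep their database
+ // directory prefix in the hint; absolute dnames fall back to the
+ // flat naming scheme.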
+ if (env->get_dir_per_db(env) && !toku_os_is_absolute_name(dname))
+ create_iname_hint_for_dbdir(dname, hint);
+ else
+ create_iname_hint(dname, hint);
const char *new_iname = create_iname(env, xid.parent_id64, xid.child_id64, hint, mark, i); // allocates memory for iname_in_env
new_inames_in_env[i] = new_iname;
toku_fill_dbt(&iname_dbt, new_iname, strlen(new_iname) + 1); // iname_in_env goes in directory
diff --git a/storage/tokudb/PerconaFT/src/ydb_db.h b/storage/tokudb/PerconaFT/src/ydb_db.h
index 8b92dd1c3cb..8be28857c14 100644
--- a/storage/tokudb/PerconaFT/src/ydb_db.h
+++ b/storage/tokudb/PerconaFT/src/ydb_db.h
@@ -43,6 +43,8 @@ Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
#include "ydb-internal.h"
#include "ydb_txn.h"
+#include <memory>
+
typedef enum {
YDB_LAYER_DIRECTORY_WRITE_LOCKS = 0, /* total directory write locks taken */
YDB_LAYER_DIRECTORY_WRITE_LOCKS_FAIL, /* total directory write locks unable to be taken */
@@ -119,3 +121,17 @@ toku_db_destruct_autotxn(DB_TXN *txn, int r, bool changed) {
}
return r;
}
+
+void create_iname_hint_for_dbdir(const char *dname, char *hint);
+void create_iname_hint(const char *dname, char *hint);
+char *create_iname(DB_ENV *env,
+ uint64_t id1,
+ uint64_t id2,
+ char *hint,
+ const char *mark,
+ int n);
+std::unique_ptr<char[], decltype(&toku_free)> generate_iname_for_rename_or_open(
+ DB_ENV *env,
+ DB_TXN *txn,
+ const char *dname,
+ bool is_open);
diff --git a/storage/tokudb/hatoku_hton.cc b/storage/tokudb/hatoku_hton.cc
index ed2c104200d..67adc480914 100644
--- a/storage/tokudb/hatoku_hton.cc
+++ b/storage/tokudb/hatoku_hton.cc
@@ -531,6 +531,7 @@ static int tokudb_init_func(void *p) {
db_env->change_fsync_log_period(db_env, tokudb::sysvars::fsync_log_period);
db_env->set_lock_timeout_callback(db_env, tokudb_lock_timeout_callback);
+ db_env->set_dir_per_db(db_env, tokudb::sysvars::dir_per_db);
db_env->set_loader_memory_size(
db_env,
diff --git a/storage/tokudb/mysql-test/tokudb/disabled.def b/storage/tokudb/mysql-test/tokudb/disabled.def
index c98a8aa622a..ddefceb432e 100644
--- a/storage/tokudb/mysql-test/tokudb/disabled.def
+++ b/storage/tokudb/mysql-test/tokudb/disabled.def
@@ -28,3 +28,4 @@ type_timestamp_explicit:
cluster_key_part: engine options on partitioned tables
i_s_tokudb_lock_waits_released: unstable, race conditions
i_s_tokudb_locks_released: unstable, race conditions
+row_format: n/a
diff --git a/storage/tokudb/mysql-test/tokudb/include/table_files_replace_pattern.inc b/storage/tokudb/mysql-test/tokudb/include/table_files_replace_pattern.inc
new file mode 100644
index 00000000000..b10ad21dd95
--- /dev/null
+++ b/storage/tokudb/mysql-test/tokudb/include/table_files_replace_pattern.inc
@@ -0,0 +1 @@
+--replace_regex /[a-z0-9]+_[a-z0-9]+_[a-z0-9]+(_[BP]_[a-z0-9]+){0,1}\./id./ /sqlx_[a-z0-9]+_[a-z0-9]+_/sqlx_nnnn_nnnn_/ /sqlx-[a-z0-9]+_[a-z0-9]+/sqlx-nnnn_nnnn/ /#p#/#P#/ /#sp#/#SP#/ /#tmp#/#TMP#/
diff --git a/storage/tokudb/mysql-test/tokudb/r/dir-per-db-with-custom-data-dir.result b/storage/tokudb/mysql-test/tokudb/r/dir-per-db-with-custom-data-dir.result
new file mode 100644
index 00000000000..a36dbcb28c0
--- /dev/null
+++ b/storage/tokudb/mysql-test/tokudb/r/dir-per-db-with-custom-data-dir.result
@@ -0,0 +1,10 @@
+SELECT @@tokudb_dir_per_db;
+@@tokudb_dir_per_db
+1
+TOKUDB_DATA_DIR_CHANGED
+1
+CREATE DATABASE tokudb_test;
+USE tokudb_test;
+CREATE TABLE t (a INT UNSIGNED AUTO_INCREMENT PRIMARY KEY) ENGINE=tokudb;
+DROP TABLE t;
+DROP DATABASE tokudb_test;
diff --git a/storage/tokudb/mysql-test/tokudb/r/dir_per_db.result b/storage/tokudb/mysql-test/tokudb/r/dir_per_db.result
new file mode 100644
index 00000000000..371f97406c8
--- /dev/null
+++ b/storage/tokudb/mysql-test/tokudb/r/dir_per_db.result
@@ -0,0 +1,180 @@
+########
+# tokudb_dir_per_db = 1
+########
+SET GLOBAL tokudb_dir_per_db= 1;
+########
+# CREATE
+########
+CREATE TABLE t1 (a INT UNSIGNED AUTO_INCREMENT PRIMARY KEY, b INT(10) UNSIGNED NOT NULL) ENGINE=tokudb;
+INSERT INTO t1 SET b = 10;
+INSERT INTO t1 SET b = 20;
+SELECT b FROM t1 ORDER BY a;
+b
+10
+20
+CREATE INDEX b ON t1 (b);
+CREATE INDEX ab ON t1 (a,b);
+## Looking for *.tokudb files in data_dir
+## Looking for *.tokudb files in data_dir/test
+t1_key_ab_id.tokudb
+t1_key_b_id.tokudb
+t1_main_id.tokudb
+t1_status_id.tokudb
+########
+# RENAME
+########
+RENAME TABLE t1 TO t2;
+SELECT b FROM t2 ORDER BY a;
+b
+10
+20
+## Looking for *.tokudb files in data_dir
+## Looking for *.tokudb files in data_dir/test
+t2_key_ab_id.tokudb
+t2_key_b_id.tokudb
+t2_main_id.tokudb
+t2_status_id.tokudb
+########
+# DROP
+########
+DROP TABLE t2;
+## Looking for *.tokudb files in data_dir
+## Looking for *.tokudb files in data_dir/test
+########
+# tokudb_dir_per_db = 0
+########
+SET GLOBAL tokudb_dir_per_db= 0;
+########
+# CREATE
+########
+CREATE TABLE t1 (a INT UNSIGNED AUTO_INCREMENT PRIMARY KEY, b INT(10) UNSIGNED NOT NULL) ENGINE=tokudb;
+INSERT INTO t1 SET b = 10;
+INSERT INTO t1 SET b = 20;
+SELECT b FROM t1 ORDER BY a;
+b
+10
+20
+CREATE INDEX b ON t1 (b);
+CREATE INDEX ab ON t1 (a,b);
+## Looking for *.tokudb files in data_dir
+_test_t1_key_ab_id.tokudb
+_test_t1_key_b_id.tokudb
+_test_t1_main_id.tokudb
+_test_t1_status_id.tokudb
+## Looking for *.tokudb files in data_dir/test
+########
+# RENAME
+########
+RENAME TABLE t1 TO t2;
+SELECT b FROM t2 ORDER BY a;
+b
+10
+20
+## Looking for *.tokudb files in data_dir
+_test_t1_key_ab_id.tokudb
+_test_t1_key_b_id.tokudb
+_test_t1_main_id.tokudb
+_test_t1_status_id.tokudb
+## Looking for *.tokudb files in data_dir/test
+########
+# DROP
+########
+DROP TABLE t2;
+## Looking for *.tokudb files in data_dir
+## Looking for *.tokudb files in data_dir/test
+########
+# CREATE on tokudb_dir_per_db = 0 and RENAME on tokudb_dir_per_db = 1 and vice versa
+########
+########
+# tokudb_dir_per_db = (1 - 1)
+########
+SET GLOBAL tokudb_dir_per_db= (1 - 1);
+########
+# CREATE
+########
+CREATE TABLE t1 (a INT UNSIGNED AUTO_INCREMENT PRIMARY KEY, b INT(10) UNSIGNED NOT NULL) ENGINE=tokudb;
+INSERT INTO t1 SET b = 10;
+INSERT INTO t1 SET b = 20;
+SELECT b FROM t1 ORDER BY a;
+b
+10
+20
+CREATE INDEX b ON t1 (b);
+CREATE INDEX ab ON t1 (a,b);
+## Looking for *.tokudb files in data_dir
+_test_t1_key_ab_id.tokudb
+_test_t1_key_b_id.tokudb
+_test_t1_main_id.tokudb
+_test_t1_status_id.tokudb
+## Looking for *.tokudb files in data_dir/test
+########
+# tokudb_dir_per_db = 1
+########
+SET GLOBAL tokudb_dir_per_db= 1;
+########
+# RENAME
+########
+RENAME TABLE t1 TO t2;
+SELECT b FROM t2 ORDER BY a;
+b
+10
+20
+## Looking for *.tokudb files in data_dir
+## Looking for *.tokudb files in data_dir/test
+t2_key_ab_id.tokudb
+t2_key_b_id.tokudb
+t2_main_id.tokudb
+t2_status_id.tokudb
+########
+# DROP
+########
+DROP TABLE t2;
+## Looking for *.tokudb files in data_dir
+## Looking for *.tokudb files in data_dir/test
+########
+# tokudb_dir_per_db = (1 - 0)
+########
+SET GLOBAL tokudb_dir_per_db= (1 - 0);
+########
+# CREATE
+########
+CREATE TABLE t1 (a INT UNSIGNED AUTO_INCREMENT PRIMARY KEY, b INT(10) UNSIGNED NOT NULL) ENGINE=tokudb;
+INSERT INTO t1 SET b = 10;
+INSERT INTO t1 SET b = 20;
+SELECT b FROM t1 ORDER BY a;
+b
+10
+20
+CREATE INDEX b ON t1 (b);
+CREATE INDEX ab ON t1 (a,b);
+## Looking for *.tokudb files in data_dir
+## Looking for *.tokudb files in data_dir/test
+t1_key_ab_id.tokudb
+t1_key_b_id.tokudb
+t1_main_id.tokudb
+t1_status_id.tokudb
+########
+# tokudb_dir_per_db = 0
+########
+SET GLOBAL tokudb_dir_per_db= 0;
+########
+# RENAME
+########
+RENAME TABLE t1 TO t2;
+SELECT b FROM t2 ORDER BY a;
+b
+10
+20
+## Looking for *.tokudb files in data_dir
+## Looking for *.tokudb files in data_dir/test
+t1_key_ab_id.tokudb
+t1_key_b_id.tokudb
+t1_main_id.tokudb
+t1_status_id.tokudb
+########
+# DROP
+########
+DROP TABLE t2;
+## Looking for *.tokudb files in data_dir
+## Looking for *.tokudb files in data_dir/test
+SET GLOBAL tokudb_dir_per_db=default;
diff --git a/storage/tokudb/mysql-test/tokudb/r/i_s_tokudb_lock_waits_released.result b/storage/tokudb/mysql-test/tokudb/r/i_s_tokudb_lock_waits_released.result
index 6f9592ddc1f..ecd4d077206 100644
--- a/storage/tokudb/mysql-test/tokudb/r/i_s_tokudb_lock_waits_released.result
+++ b/storage/tokudb/mysql-test/tokudb/r/i_s_tokudb_lock_waits_released.result
@@ -2,6 +2,7 @@ set default_storage_engine='tokudb';
set tokudb_prelock_empty=false;
drop table if exists t;
create table t (id int primary key);
+should be empty
select trx_id,trx_mysql_thread_id from information_schema.tokudb_trx;
trx_id trx_mysql_thread_id
select * from information_schema.tokudb_locks;
@@ -15,17 +16,21 @@ insert into t values (1);
set autocommit=0;
set tokudb_lock_timeout=600000;
insert into t values (1);
+should find the presence of a lock on 1st transaction
select * from information_schema.tokudb_locks;
locks_trx_id locks_mysql_thread_id locks_dname locks_key_left locks_key_right locks_table_schema locks_table_name locks_table_dictionary_name
TRX_ID MYSQL_ID ./test/t-main 0001000000 0001000000 test t main
+should find the presence of a lock_wait on the 2nd transaction
select * from information_schema.tokudb_lock_waits;
requesting_trx_id blocking_trx_id lock_waits_dname lock_waits_key_left lock_waits_key_right lock_waits_start_time lock_waits_table_schema lock_waits_table_name lock_waits_table_dictionary_name
REQUEST_TRX_ID BLOCK_TRX_ID ./test/t-main 0001000000 0001000000 LOCK_WAITS_START_TIME test t main
+should find the presence of two transactions
select trx_id,trx_mysql_thread_id from information_schema.tokudb_trx;
trx_id trx_mysql_thread_id
TRX_ID MYSQL_ID
TRX_ID MYSQL_ID
commit;
+verify that the lock on the 1st transaction is released and replaced by the lock for the 2nd transaction
select * from information_schema.tokudb_locks;
locks_trx_id locks_mysql_thread_id locks_dname locks_key_left locks_key_right locks_table_schema locks_table_name locks_table_dictionary_name
TRX_ID MYSQL_ID ./test/t-main 0001000000 0001000000 test t main
@@ -33,6 +38,8 @@ select * from information_schema.tokudb_lock_waits;
requesting_trx_id blocking_trx_id lock_waits_dname lock_waits_key_left lock_waits_key_right lock_waits_start_time lock_waits_table_schema lock_waits_table_name lock_waits_table_dictionary_name
ERROR 23000: Duplicate entry '1' for key 'PRIMARY'
commit;
+verify that txn_a replace (1) blocks txn_b replace (1) and txn_b eventually gets the lock on (1) and completes
+verify that the lock on the 2nd transaction has been released, should be empty
select trx_id,trx_mysql_thread_id from information_schema.tokudb_trx;
trx_id trx_mysql_thread_id
select * from information_schema.tokudb_locks;
@@ -46,23 +53,28 @@ replace into t values (1);
set autocommit=0;
set tokudb_lock_timeout=600000;
replace into t values (1);
+should find the presence of a lock on 1st transaction
select * from information_schema.tokudb_locks;
locks_trx_id locks_mysql_thread_id locks_dname locks_key_left locks_key_right locks_table_schema locks_table_name locks_table_dictionary_name
TRX_ID MYSQL_ID ./test/t-main 0001000000 0001000000 test t main
+should find the presence of a lock_wait on the 2nd transaction
select * from information_schema.tokudb_lock_waits;
requesting_trx_id blocking_trx_id lock_waits_dname lock_waits_key_left lock_waits_key_right lock_waits_start_time lock_waits_table_schema lock_waits_table_name lock_waits_table_dictionary_name
REQUEST_TRX_ID BLOCK_TRX_ID ./test/t-main 0001000000 0001000000 LOCK_WAITS_START_TIME test t main
+should find the presence of two transactions
select trx_id,trx_mysql_thread_id from information_schema.tokudb_trx;
trx_id trx_mysql_thread_id
TRX_ID MYSQL_ID
TRX_ID MYSQL_ID
commit;
+verify that the lock on the 1st transaction is released and replaced by the lock for the 2nd transaction
select * from information_schema.tokudb_locks;
locks_trx_id locks_mysql_thread_id locks_dname locks_key_left locks_key_right locks_table_schema locks_table_name locks_table_dictionary_name
TRX_ID MYSQL_ID ./test/t-main 0001000000 0001000000 test t main
select * from information_schema.tokudb_lock_waits;
requesting_trx_id blocking_trx_id lock_waits_dname lock_waits_key_left lock_waits_key_right lock_waits_start_time lock_waits_table_schema lock_waits_table_name lock_waits_table_dictionary_name
commit;
+verify that the lock on the 2nd transaction has been released, should be empty
select trx_id,trx_mysql_thread_id from information_schema.tokudb_trx;
trx_id trx_mysql_thread_id
select * from information_schema.tokudb_locks;
diff --git a/storage/tokudb/mysql-test/tokudb/r/row_format.result b/storage/tokudb/mysql-test/tokudb/r/row_format.result
new file mode 100644
index 00000000000..cb669148445
--- /dev/null
+++ b/storage/tokudb/mysql-test/tokudb/r/row_format.result
@@ -0,0 +1,51 @@
+CREATE TABLE tokudb_row_format_test_1 (a INT) ENGINE=TokuDB ROW_FORMAT=TOKUDB_DEFAULT;
+CREATE TABLE tokudb_row_format_test_2 (a INT) ENGINE=TokuDB ROW_FORMAT=TOKUDB_FAST;
+CREATE TABLE tokudb_row_format_test_3 (a INT) ENGINE=TokuDB ROW_FORMAT=TOKUDB_SMALL;
+CREATE TABLE tokudb_row_format_test_4 (a INT) ENGINE=TokuDB ROW_FORMAT=TOKUDB_UNCOMPRESSED;
+CREATE TABLE tokudb_row_format_test_5 (a INT) ENGINE=TokuDB ROW_FORMAT=TOKUDB_ZLIB;
+CREATE TABLE tokudb_row_format_test_6 (a INT) ENGINE=TokuDB ROW_FORMAT=TOKUDB_LZMA;
+CREATE TABLE tokudb_row_format_test_7 (a INT) ENGINE=TokuDB ROW_FORMAT=TOKUDB_QUICKLZ;
+CREATE TABLE tokudb_row_format_test_8 (a INT) ENGINE=TokuDB ROW_FORMAT=TOKUDB_SNAPPY;
+SELECT table_name, row_format, engine FROM information_schema.tables WHERE table_name like 'tokudb_row_format_test%' ORDER BY table_name;
+table_name row_format engine
+tokudb_row_format_test_1 tokudb_zlib TokuDB
+tokudb_row_format_test_2 tokudb_quicklz TokuDB
+tokudb_row_format_test_3 tokudb_lzma TokuDB
+tokudb_row_format_test_4 tokudb_uncompressed TokuDB
+tokudb_row_format_test_5 tokudb_zlib TokuDB
+tokudb_row_format_test_6 tokudb_lzma TokuDB
+tokudb_row_format_test_7 tokudb_quicklz TokuDB
+tokudb_row_format_test_8 tokudb_snappy TokuDB
+ALTER TABLE tokudb_row_format_test_1 ENGINE=TokuDB ROW_FORMAT=TOKUDB_FAST;
+SELECT table_name, row_format, engine FROM information_schema.tables WHERE table_name = 'tokudb_row_format_test_1';
+table_name row_format engine
+tokudb_row_format_test_1 tokudb_quicklz TokuDB
+ALTER TABLE tokudb_row_format_test_1 ENGINE=TokuDB ROW_FORMAT=TOKUDB_SMALL;
+SELECT table_name, row_format, engine FROM information_schema.tables WHERE table_name = 'tokudb_row_format_test_1';
+table_name row_format engine
+tokudb_row_format_test_1 tokudb_lzma TokuDB
+ALTER TABLE tokudb_row_format_test_1 ENGINE=TokuDB ROW_FORMAT=TOKUDB_UNCOMPRESSED;
+SELECT table_name, row_format, engine FROM information_schema.tables WHERE table_name = 'tokudb_row_format_test_1';
+table_name row_format engine
+tokudb_row_format_test_1 tokudb_uncompressed TokuDB
+ALTER TABLE tokudb_row_format_test_1 ENGINE=TokuDB ROW_FORMAT=TOKUDB_ZLIB;
+SELECT table_name, row_format, engine FROM information_schema.tables WHERE table_name = 'tokudb_row_format_test_1';
+table_name row_format engine
+tokudb_row_format_test_1 tokudb_zlib TokuDB
+ALTER TABLE tokudb_row_format_test_1 ENGINE=TokuDB ROW_FORMAT=TOKUDB_SNAPPY;
+SELECT table_name, row_format, engine FROM information_schema.tables WHERE table_name = 'tokudb_row_format_test_1';
+table_name row_format engine
+tokudb_row_format_test_1 tokudb_snappy TokuDB
+ALTER TABLE tokudb_row_format_test_1 ENGINE=TokuDB ROW_FORMAT=TOKUDB_QUICKLZ;
+SELECT table_name, row_format, engine FROM information_schema.tables WHERE table_name = 'tokudb_row_format_test_1';
+table_name row_format engine
+tokudb_row_format_test_1 tokudb_quicklz TokuDB
+ALTER TABLE tokudb_row_format_test_1 ENGINE=TokuDB ROW_FORMAT=TOKUDB_LZMA;
+SELECT table_name, row_format, engine FROM information_schema.tables WHERE table_name = 'tokudb_row_format_test_1';
+table_name row_format engine
+tokudb_row_format_test_1 tokudb_lzma TokuDB
+ALTER TABLE tokudb_row_format_test_1 ENGINE=TokuDB ROW_FORMAT=TOKUDB_DEFAULT;
+SELECT table_name, row_format, engine FROM information_schema.tables WHERE table_name = 'tokudb_row_format_test_1';
+table_name row_format engine
+tokudb_row_format_test_1 tokudb_zlib TokuDB
+DROP TABLE tokudb_row_format_test_1, tokudb_row_format_test_2, tokudb_row_format_test_3, tokudb_row_format_test_4, tokudb_row_format_test_5, tokudb_row_format_test_6, tokudb_row_format_test_7, tokudb_row_format_test_8;
diff --git a/storage/tokudb/mysql-test/tokudb/t/dir-per-db-with-custom-data-dir-master.opt b/storage/tokudb/mysql-test/tokudb/t/dir-per-db-with-custom-data-dir-master.opt
new file mode 100644
index 00000000000..a9090f4d115
--- /dev/null
+++ b/storage/tokudb/mysql-test/tokudb/t/dir-per-db-with-custom-data-dir-master.opt
@@ -0,0 +1 @@
+--loose-tokudb_data_dir="$MYSQL_TMP_DIR" --loose-tokudb-dir-per-db=1
diff --git a/storage/tokudb/mysql-test/tokudb/t/dir-per-db-with-custom-data-dir.test b/storage/tokudb/mysql-test/tokudb/t/dir-per-db-with-custom-data-dir.test
new file mode 100644
index 00000000000..7f415a72515
--- /dev/null
+++ b/storage/tokudb/mysql-test/tokudb/t/dir-per-db-with-custom-data-dir.test
@@ -0,0 +1,16 @@
+--source include/have_tokudb.inc
+
+SELECT @@tokudb_dir_per_db;
+
+--disable_query_log
+--eval SELECT STRCMP(@@tokudb_data_dir, '$MYSQL_TMP_DIR') = 0 AS TOKUDB_DATA_DIR_CHANGED
+--enable_query_log
+
+CREATE DATABASE tokudb_test;
+USE tokudb_test;
+CREATE TABLE t (a INT UNSIGNED AUTO_INCREMENT PRIMARY KEY) ENGINE=tokudb;
+
+--file_exists $MYSQL_TMP_DIR/tokudb_test
+
+DROP TABLE t;
+DROP DATABASE tokudb_test;
diff --git a/storage/tokudb/mysql-test/tokudb/t/dir_per_db.test b/storage/tokudb/mysql-test/tokudb/t/dir_per_db.test
new file mode 100644
index 00000000000..b638b706d87
--- /dev/null
+++ b/storage/tokudb/mysql-test/tokudb/t/dir_per_db.test
@@ -0,0 +1,76 @@
+source include/have_tokudb.inc;
+
+--let $DB= test
+--let $DATADIR= `select @@datadir`
+--let $i= 2
+
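+# Run the CREATE/RENAME/DROP cycle twice: --dec fires before the body,
+# so the first pass uses tokudb_dir_per_db=1 and the second uses 0.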
+while ($i) {
+ --dec $i
+ --echo ########
+ --echo # tokudb_dir_per_db = $i
+ --echo ########
+ --eval SET GLOBAL tokudb_dir_per_db= $i
+ --echo ########
+ --echo # CREATE
+ --echo ########
+ CREATE TABLE t1 (a INT UNSIGNED AUTO_INCREMENT PRIMARY KEY, b INT(10) UNSIGNED NOT NULL) ENGINE=tokudb;
+ INSERT INTO t1 SET b = 10;
+ INSERT INTO t1 SET b = 20;
+ SELECT b FROM t1 ORDER BY a;
+ CREATE INDEX b ON t1 (b);
+ CREATE INDEX ab ON t1 (a,b);
+ --source dir_per_db_show_table_files.inc
+ --echo ########
+ --echo # RENAME
+ --echo ########
+ RENAME TABLE t1 TO t2;
+ SELECT b FROM t2 ORDER BY a;
+ --source dir_per_db_show_table_files.inc
+ --echo ########
+ --echo # DROP
+ --echo ########
+ DROP TABLE t2;
+ --source dir_per_db_show_table_files.inc
+}
+
+--echo ########
+--echo # CREATE on tokudb_dir_per_db = 0 and RENAME on tokudb_dir_per_db = 1 and vice versa
+--echo ########
+
+--let $i= 2
+
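+# Flip the setting between CREATE and RENAME to verify that RENAME
+# honours the value in effect at rename time, not at create time.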
+while ($i) {
+ --dec $i
+ --let $inv_i= (1 - $i)
+ --echo ########
+ --echo # tokudb_dir_per_db = $inv_i
+ --echo ########
+ --eval SET GLOBAL tokudb_dir_per_db= $inv_i
+ --echo ########
+ --echo # CREATE
+ --echo ########
+ CREATE TABLE t1 (a INT UNSIGNED AUTO_INCREMENT PRIMARY KEY, b INT(10) UNSIGNED NOT NULL) ENGINE=tokudb;
+ INSERT INTO t1 SET b = 10;
+ INSERT INTO t1 SET b = 20;
+ SELECT b FROM t1 ORDER BY a;
+ CREATE INDEX b ON t1 (b);
+ CREATE INDEX ab ON t1 (a,b);
+ --source dir_per_db_show_table_files.inc
+ --echo ########
+ --echo # tokudb_dir_per_db = $i
+ --echo ########
+ --eval SET GLOBAL tokudb_dir_per_db= $i
+ --echo ########
+ --echo # RENAME
+ --echo ########
+ RENAME TABLE t1 TO t2;
+ SELECT b FROM t2 ORDER BY a;
+ --source dir_per_db_show_table_files.inc
+ --echo ########
+ --echo # DROP
+ --echo ########
+ DROP TABLE t2;
+ --source dir_per_db_show_table_files.inc
+}
+
+SET GLOBAL tokudb_dir_per_db=default;
diff --git a/storage/tokudb/mysql-test/tokudb/t/dir_per_db_show_table_files.inc b/storage/tokudb/mysql-test/tokudb/t/dir_per_db_show_table_files.inc
new file mode 100644
index 00000000000..bdf7d5b235f
--- /dev/null
+++ b/storage/tokudb/mysql-test/tokudb/t/dir_per_db_show_table_files.inc
@@ -0,0 +1,9 @@
+--sorted_result
+
+--echo ## Looking for *.tokudb files in data_dir
+--source include/table_files_replace_pattern.inc
+--list_files $DATADIR *.tokudb
+
+--echo ## Looking for *.tokudb files in data_dir/$DB
+--source include/table_files_replace_pattern.inc
+--list_files $DATADIR/$DB/ *.tokudb
diff --git a/storage/tokudb/mysql-test/tokudb/t/i_s_tokudb_lock_waits_released.test b/storage/tokudb/mysql-test/tokudb/t/i_s_tokudb_lock_waits_released.test
index d8ce18b3aa7..6534175d619 100644
--- a/storage/tokudb/mysql-test/tokudb/t/i_s_tokudb_lock_waits_released.test
+++ b/storage/tokudb/mysql-test/tokudb/t/i_s_tokudb_lock_waits_released.test
@@ -17,7 +17,7 @@ create table t (id int primary key);
# verify that txn_a insert (1) blocks txn_b insert (1) and txn_b gets a duplicate key error
-# should be empty
+--echo should be empty
select trx_id,trx_mysql_thread_id from information_schema.tokudb_trx;
select * from information_schema.tokudb_locks;
select * from information_schema.tokudb_lock_waits;
@@ -33,7 +33,7 @@ set autocommit=0;
set tokudb_lock_timeout=600000; # set lock wait timeout to 10 minutes
send insert into t values (1);
-# should find the presence of a lock on 1st transaction
+--echo should find the presence of a lock on 1st transaction
connection default;
let $wait_condition= select count(*)=1 from information_schema.processlist where info='insert into t values (1)' and state='update';
source include/wait_condition.inc;
@@ -42,17 +42,17 @@ real_sleep 1; # delay a little to shorten the update -> write row -> lock wait r
replace_column 1 TRX_ID 2 MYSQL_ID;
select * from information_schema.tokudb_locks;
-# should find the presence of a lock_wait on the 2nd transaction
+--echo should find the presence of a lock_wait on the 2nd transaction
replace_column 1 REQUEST_TRX_ID 2 BLOCK_TRX_ID 6 LOCK_WAITS_START_TIME;
select * from information_schema.tokudb_lock_waits;
-# should find the presence of two transactions
+--echo should find the presence of two transactions
replace_column 1 TRX_ID 2 MYSQL_ID;
select trx_id,trx_mysql_thread_id from information_schema.tokudb_trx;
connection conn_a;
commit;
-# verify that the lock on the 1st transaction is released and replaced by the lock for the 2nd transaction
+--echo verify that the lock on the 1st transaction is released and replaced by the lock for the 2nd transaction
let $wait_condition= select count(*)=1 from information_schema.tokudb_locks where locks_dname='./test/t-main';
source include/wait_condition.inc;
@@ -69,10 +69,8 @@ connection default;
disconnect conn_a;
disconnect conn_b;
-# verify that txn_a replace (1) blocks txn_b replace (1) and txn_b eventually gets the lock on (1) and completes
-
-# verify that the lock on the 2nd transaction has been released
-# should be be empty
+--echo verify that txn_a replace (1) blocks txn_b replace (1) and txn_b eventually gets the lock on (1) and completes
+--echo verify that the lock on the 2nd transaction has been released, should be empty
select trx_id,trx_mysql_thread_id from information_schema.tokudb_trx;
select * from information_schema.tokudb_locks;
select * from information_schema.tokudb_lock_waits;
@@ -88,7 +86,7 @@ set autocommit=0;
set tokudb_lock_timeout=600000; # set lock wait timeout to 10 minutes
send replace into t values (1);
-# should find the presence of a lock on 1st transaction
+--echo should find the presence of a lock on 1st transaction
connection default;
let $wait_condition= select count(*)=1 from information_schema.processlist where info='replace into t values (1)' and state='update';
source include/wait_condition.inc;
@@ -97,17 +95,19 @@ real_sleep 1; # delay a little to shorten the update -> write row -> lock wait r
replace_column 1 TRX_ID 2 MYSQL_ID;
select * from information_schema.tokudb_locks;
-# should find the presence of a lock_wait on the 2nd transaction
+--echo should find the presence of a lock_wait on the 2nd transaction
replace_column 1 REQUEST_TRX_ID 2 BLOCK_TRX_ID 6 LOCK_WAITS_START_TIME;
select * from information_schema.tokudb_lock_waits;
-# should find the presence of two transactions
+--echo should find the presence of two transactions
replace_column 1 TRX_ID 2 MYSQL_ID;
select trx_id,trx_mysql_thread_id from information_schema.tokudb_trx;
connection conn_a;
commit;
-# verify that the lock on the 1st transaction is released and replaced by the lock for the 2nd transaction
+--echo verify that the lock on the 1st transaction is released and replaced by the lock for the 2nd transaction
+let $wait_condition= select count(*)=1 from information_schema.tokudb_locks where locks_dname='./test/t-main';
+source include/wait_condition.inc;
replace_column 1 TRX_ID 2 MYSQL_ID;
select * from information_schema.tokudb_locks;
select * from information_schema.tokudb_lock_waits;
@@ -120,8 +120,7 @@ connection default;
disconnect conn_a;
disconnect conn_b;
-# verify that the lock on the 2nd transaction has been released
-# should be be empty
+--echo verify that the lock on the 2nd transaction has been released, should be empty
select trx_id,trx_mysql_thread_id from information_schema.tokudb_trx;
select * from information_schema.tokudb_locks;
select * from information_schema.tokudb_lock_waits;
diff --git a/storage/tokudb/mysql-test/tokudb/t/row_format.test b/storage/tokudb/mysql-test/tokudb/t/row_format.test
new file mode 100644
index 00000000000..6533f8c06be
--- /dev/null
+++ b/storage/tokudb/mysql-test/tokudb/t/row_format.test
@@ -0,0 +1,41 @@
+#
+# Test TokuDB compression option additions to row_format
+#
+--source include/have_tokudb.inc
+
+CREATE TABLE tokudb_row_format_test_1 (a INT) ENGINE=TokuDB ROW_FORMAT=TOKUDB_DEFAULT;
+CREATE TABLE tokudb_row_format_test_2 (a INT) ENGINE=TokuDB ROW_FORMAT=TOKUDB_FAST;
+CREATE TABLE tokudb_row_format_test_3 (a INT) ENGINE=TokuDB ROW_FORMAT=TOKUDB_SMALL;
+CREATE TABLE tokudb_row_format_test_4 (a INT) ENGINE=TokuDB ROW_FORMAT=TOKUDB_UNCOMPRESSED;
+CREATE TABLE tokudb_row_format_test_5 (a INT) ENGINE=TokuDB ROW_FORMAT=TOKUDB_ZLIB;
+CREATE TABLE tokudb_row_format_test_6 (a INT) ENGINE=TokuDB ROW_FORMAT=TOKUDB_LZMA;
+CREATE TABLE tokudb_row_format_test_7 (a INT) ENGINE=TokuDB ROW_FORMAT=TOKUDB_QUICKLZ;
+CREATE TABLE tokudb_row_format_test_8 (a INT) ENGINE=TokuDB ROW_FORMAT=TOKUDB_SNAPPY;
+
+SELECT table_name, row_format, engine FROM information_schema.tables WHERE table_name like 'tokudb_row_format_test%' ORDER BY table_name;
+
+ALTER TABLE tokudb_row_format_test_1 ENGINE=TokuDB ROW_FORMAT=TOKUDB_FAST;
+SELECT table_name, row_format, engine FROM information_schema.tables WHERE table_name = 'tokudb_row_format_test_1';
+
+ALTER TABLE tokudb_row_format_test_1 ENGINE=TokuDB ROW_FORMAT=TOKUDB_SMALL;
+SELECT table_name, row_format, engine FROM information_schema.tables WHERE table_name = 'tokudb_row_format_test_1';
+
+ALTER TABLE tokudb_row_format_test_1 ENGINE=TokuDB ROW_FORMAT=TOKUDB_UNCOMPRESSED;
+SELECT table_name, row_format, engine FROM information_schema.tables WHERE table_name = 'tokudb_row_format_test_1';
+
+ALTER TABLE tokudb_row_format_test_1 ENGINE=TokuDB ROW_FORMAT=TOKUDB_ZLIB;
+SELECT table_name, row_format, engine FROM information_schema.tables WHERE table_name = 'tokudb_row_format_test_1';
+
+ALTER TABLE tokudb_row_format_test_1 ENGINE=TokuDB ROW_FORMAT=TOKUDB_SNAPPY;
+SELECT table_name, row_format, engine FROM information_schema.tables WHERE table_name = 'tokudb_row_format_test_1';
+
+ALTER TABLE tokudb_row_format_test_1 ENGINE=TokuDB ROW_FORMAT=TOKUDB_QUICKLZ;
+SELECT table_name, row_format, engine FROM information_schema.tables WHERE table_name = 'tokudb_row_format_test_1';
+
+ALTER TABLE tokudb_row_format_test_1 ENGINE=TokuDB ROW_FORMAT=TOKUDB_LZMA;
+SELECT table_name, row_format, engine FROM information_schema.tables WHERE table_name = 'tokudb_row_format_test_1';
+
+ALTER TABLE tokudb_row_format_test_1 ENGINE=TokuDB ROW_FORMAT=TOKUDB_DEFAULT;
+SELECT table_name, row_format, engine FROM information_schema.tables WHERE table_name = 'tokudb_row_format_test_1';
+
+DROP TABLE tokudb_row_format_test_1, tokudb_row_format_test_2, tokudb_row_format_test_3, tokudb_row_format_test_4, tokudb_row_format_test_5, tokudb_row_format_test_6, tokudb_row_format_test_7, tokudb_row_format_test_8;
diff --git a/storage/tokudb/mysql-test/tokudb_bugs/r/db938.result b/storage/tokudb/mysql-test/tokudb_bugs/r/db938.result
index 779d458221b..30e0bdbebd7 100644
--- a/storage/tokudb/mysql-test/tokudb_bugs/r/db938.result
+++ b/storage/tokudb/mysql-test/tokudb_bugs/r/db938.result
@@ -23,6 +23,7 @@ set DEBUG_SYNC = 'tokudb_after_truncate_all_dictionarys SIGNAL closed WAIT_FOR d
TRUNCATE TABLE t1;
set global tokudb_debug_pause_background_job_manager = FALSE;
set DEBUG_SYNC = 'now SIGNAL done';
+set DEBUG_SYNC = 'RESET';
drop table t1;
set session tokudb_auto_analyze = @orig_auto_analyze;
set session tokudb_analyze_in_background = @orig_in_background;
@@ -32,4 +33,3 @@ set session tokudb_analyze_time = @orig_time;
set global tokudb_cardinality_scale_percent = @orig_scale_percent;
set session default_storage_engine = @orig_default_storage_engine;
set global tokudb_debug_pause_background_job_manager = @orig_pause_background_job_manager;
-set DEBUG_SYNC='reset';
diff --git a/storage/tokudb/mysql-test/tokudb_bugs/t/db938.test b/storage/tokudb/mysql-test/tokudb_bugs/t/db938.test
index f56f93d1492..50434a79a00 100644
--- a/storage/tokudb/mysql-test/tokudb_bugs/t/db938.test
+++ b/storage/tokudb/mysql-test/tokudb_bugs/t/db938.test
@@ -40,6 +40,7 @@ insert into t1(b,c) values(0,0), (1,1), (2,2), (3,3);
select database_name, table_name, job_type, job_params, scheduler from information_schema.tokudb_background_job_status;
# lets flip to another connection
+--source include/count_sessions.inc
connect(conn1, localhost, root);
# set up the DEBUG_SYNC point
@@ -64,6 +65,7 @@ connection conn1;
reap;
connection default;
disconnect conn1;
+set DEBUG_SYNC = 'RESET';
drop table t1;
set session tokudb_auto_analyze = @orig_auto_analyze;
@@ -74,4 +76,4 @@ set session tokudb_analyze_time = @orig_time;
set global tokudb_cardinality_scale_percent = @orig_scale_percent;
set session default_storage_engine = @orig_default_storage_engine;
set global tokudb_debug_pause_background_job_manager = @orig_pause_background_job_manager;
-set DEBUG_SYNC='reset';
+--source include/wait_until_count_sessions.inc
diff --git a/storage/tokudb/mysql-test/tokudb_parts/include/table_files_replace_pattern.inc b/storage/tokudb/mysql-test/tokudb_parts/include/table_files_replace_pattern.inc
new file mode 100644
index 00000000000..b10ad21dd95
--- /dev/null
+++ b/storage/tokudb/mysql-test/tokudb_parts/include/table_files_replace_pattern.inc
@@ -0,0 +1 @@
+--replace_regex /[a-z0-9]+_[a-z0-9]+_[a-z0-9]+(_[BP]_[a-z0-9]+){0,1}\./id./ /sqlx_[a-z0-9]+_[a-z0-9]+_/sqlx_nnnn_nnnn_/ /sqlx-[a-z0-9]+_[a-z0-9]+/sqlx-nnnn_nnnn/ /#p#/#P#/ /#sp#/#SP#/ /#tmp#/#TMP#/
diff --git a/storage/tokudb/mysql-test/tokudb_parts/t/partition_debug_sync_tokudb.test b/storage/tokudb/mysql-test/tokudb_parts/t/partition_debug_sync_tokudb.test
index be14d8814f0..f97235a0a2d 100644
--- a/storage/tokudb/mysql-test/tokudb_parts/t/partition_debug_sync_tokudb.test
+++ b/storage/tokudb/mysql-test/tokudb_parts/t/partition_debug_sync_tokudb.test
@@ -56,7 +56,7 @@ partition by range (a)
insert into t1 values (1), (11), (21), (33);
SELECT * FROM t1;
SHOW CREATE TABLE t1;
---replace_result #p# #P# #sp# #SP#
+--source include/table_files_replace_pattern.inc
--list_files $MYSQLD_DATADIR/test
SET DEBUG_SYNC='before_open_in_get_all_tables SIGNAL parked WAIT_FOR open';
@@ -82,7 +82,7 @@ ALTER TABLE t1 REORGANIZE PARTITION p0 INTO
disconnect con1;
connection default;
--reap
---replace_result #p# #P# #sp# #SP#
+--source include/table_files_replace_pattern.inc
--list_files $MYSQLD_DATADIR/test
SHOW CREATE TABLE t1;
SELECT * FROM t1;
diff --git a/storage/tokudb/tokudb_sysvars.cc b/storage/tokudb/tokudb_sysvars.cc
index 7cea749b4fb..b758929c10e 100644
--- a/storage/tokudb/tokudb_sysvars.cc
+++ b/storage/tokudb/tokudb_sysvars.cc
@@ -66,6 +66,7 @@ uint read_status_frequency = 0;
my_bool strip_frm_data = FALSE;
char* tmp_dir = NULL;
uint write_status_frequency = 0;
+my_bool dir_per_db = FALSE;
char* version = (char*) TOKUDB_VERSION_STR;
// file system reserve as a percentage of total disk space
@@ -394,6 +395,18 @@ static MYSQL_SYSVAR_UINT(
~0U,
0);
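+// Update hook for @@tokudb_dir_per_db: store the new value and push it
+// down to the ft layer immediately so it affects subsequent CREATE and
+// RENAME operations.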
+static void tokudb_dir_per_db_update(THD* thd,
+ struct st_mysql_sys_var* sys_var,
+ void* var, const void* save) {
+ my_bool *value = (my_bool *) var;
+ *value = *(const my_bool *) save;
+ db_env->set_dir_per_db(db_env, *value);
+}
+
+static MYSQL_SYSVAR_BOOL(dir_per_db, dir_per_db,
+ 0, "TokuDB store ft files in db directories",
+ NULL, tokudb_dir_per_db_update, FALSE);
+
#if TOKU_INCLUDE_HANDLERTON_HANDLE_FATAL_SIGNAL
static MYSQL_SYSVAR_STR(
gdb_path,
@@ -935,6 +948,7 @@ st_mysql_sys_var* system_variables[] = {
MYSQL_SYSVAR(tmp_dir),
MYSQL_SYSVAR(version),
MYSQL_SYSVAR(write_status_frequency),
+ MYSQL_SYSVAR(dir_per_db),
#if TOKU_INCLUDE_HANDLERTON_HANDLE_FATAL_SIGNAL
MYSQL_SYSVAR(gdb_path),
diff --git a/storage/tokudb/tokudb_sysvars.h b/storage/tokudb/tokudb_sysvars.h
index 3bd96f7c68d..7701f211729 100644
--- a/storage/tokudb/tokudb_sysvars.h
+++ b/storage/tokudb/tokudb_sysvars.h
@@ -101,6 +101,7 @@ extern uint read_status_frequency;
extern my_bool strip_frm_data;
extern char* tmp_dir;
extern uint write_status_frequency;
+extern my_bool dir_per_db;
extern char* version;
#if TOKU_INCLUDE_HANDLERTON_HANDLE_FATAL_SIGNAL
diff --git a/storage/xtradb/buf/buf0buf.cc b/storage/xtradb/buf/buf0buf.cc
index 0d159fa496b..8cf8ab3975e 100644
--- a/storage/xtradb/buf/buf0buf.cc
+++ b/storage/xtradb/buf/buf0buf.cc
@@ -4906,7 +4906,9 @@ database_corrupted:
recv_recover_page(TRUE, (buf_block_t*) bpage);
}
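+ /* Change buffer merges only ever apply to leaf pages of
+ B-tree indexes, so skip the ibuf bitmap work for any
+ other page type. */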
- if (uncompressed && !recv_no_ibuf_operations) {
+ if (uncompressed && !recv_no_ibuf_operations
+ && fil_page_get_type(frame) == FIL_PAGE_INDEX
+ && page_is_leaf(frame)) {
buf_block_t* block;
ibool update_ibuf_bitmap;
diff --git a/storage/xtradb/buf/buf0dblwr.cc b/storage/xtradb/buf/buf0dblwr.cc
index ff334003524..d8d85c25289 100644
--- a/storage/xtradb/buf/buf0dblwr.cc
+++ b/storage/xtradb/buf/buf0dblwr.cc
@@ -550,7 +550,7 @@ buf_dblwr_process()
} else if (buf_page_is_corrupted(true, read_buf, zip_size)) {
fprintf(stderr,
- "InnoDB: Warning: database page"
+ "InnoDB: Database page"
" corruption or a failed\n"
"InnoDB: file read of"
" space %lu page %lu.\n"
diff --git a/storage/xtradb/buf/buf0flu.cc b/storage/xtradb/buf/buf0flu.cc
index 76358b8f8f4..7f5829c0620 100644
--- a/storage/xtradb/buf/buf0flu.cc
+++ b/storage/xtradb/buf/buf0flu.cc
@@ -2629,6 +2629,11 @@ page_cleaner_sleep_if_needed(
ulint next_loop_time) /*!< in: time when next loop iteration
should start */
{
+ /* No sleep if we are cleaning the buffer pool during the shutdown
+ with everything else finished */
+ if (srv_shutdown_state == SRV_SHUTDOWN_FLUSH_PHASE)
+ return;
+
ulint cur_time = ut_time_ms();
if (next_loop_time > cur_time) {
diff --git a/storage/xtradb/dict/dict0boot.cc b/storage/xtradb/dict/dict0boot.cc
index 138d3131e09..7162a7f4c87 100644
--- a/storage/xtradb/dict/dict0boot.cc
+++ b/storage/xtradb/dict/dict0boot.cc
@@ -273,6 +273,10 @@ dict_boot(void)
ut_ad(DICT_NUM_FIELDS__SYS_FOREIGN_FOR_NAME == 2);
ut_ad(DICT_NUM_COLS__SYS_FOREIGN_COLS == 4);
ut_ad(DICT_NUM_FIELDS__SYS_FOREIGN_COLS == 6);
+ ut_ad(DICT_NUM_COLS__SYS_ZIP_DICT == 3);
+ ut_ad(DICT_NUM_FIELDS__SYS_ZIP_DICT == 5);
+ ut_ad(DICT_NUM_COLS__SYS_ZIP_DICT_COLS == 3);
+ ut_ad(DICT_NUM_FIELDS__SYS_ZIP_DICT_COLS == 5);
mtr_start(&mtr);
diff --git a/storage/xtradb/dict/dict0crea.cc b/storage/xtradb/dict/dict0crea.cc
index 870e452e5f2..4a7bd2e8a4e 100644
--- a/storage/xtradb/dict/dict0crea.cc
+++ b/storage/xtradb/dict/dict0crea.cc
@@ -38,6 +38,7 @@ Created 1/8/1996 Heikki Tuuri
#include "que0que.h"
#include "row0ins.h"
#include "row0mysql.h"
+#include "row0sel.h"
#include "pars0pars.h"
#include "trx0roll.h"
#include "usr0sess.h"
@@ -1935,6 +1936,135 @@ dict_create_or_check_sys_tablespace(void)
return(err);
}
+/** Creates the zip_dict system table inside InnoDB
+at server bootstrap or server start if it is not found or is
+not of the right form.
+@return DB_SUCCESS or error code */
+UNIV_INTERN
+dberr_t
+dict_create_or_check_sys_zip_dict(void)
+{
+ trx_t* trx;
+ my_bool srv_file_per_table_backup;
+ dberr_t err;
+ dberr_t sys_zip_dict_err;
+ dberr_t sys_zip_dict_cols_err;
+
+ ut_a(srv_get_active_thread_type() == SRV_NONE);
+
+ /* Note: The master thread has not been started at this point. */
+
+ sys_zip_dict_err = dict_check_if_system_table_exists(
+ "SYS_ZIP_DICT", DICT_NUM_FIELDS__SYS_ZIP_DICT + 1, 2);
+ sys_zip_dict_cols_err = dict_check_if_system_table_exists(
+ "SYS_ZIP_DICT_COLS", DICT_NUM_FIELDS__SYS_ZIP_DICT_COLS + 1,
+ 1);
+
+ if (sys_zip_dict_err == DB_SUCCESS &&
+ sys_zip_dict_cols_err == DB_SUCCESS)
+ return (DB_SUCCESS);
+
+ trx = trx_allocate_for_mysql();
+
+ trx_set_dict_operation(trx, TRX_DICT_OP_TABLE);
+
+ trx->op_info = "creating zip_dict and zip_dict_cols sys tables";
+
+ row_mysql_lock_data_dictionary(trx);
+
+ /* Check which incomplete table definition to drop. */
+
+ if (sys_zip_dict_err == DB_CORRUPTION) {
+ ib_logf(IB_LOG_LEVEL_WARN,
+ "Dropping incompletely created "
+ "SYS_ZIP_DICT table.");
+ row_drop_table_for_mysql("SYS_ZIP_DICT", trx, TRUE, TRUE);
+ }
+ if (sys_zip_dict_cols_err == DB_CORRUPTION) {
+ ib_logf(IB_LOG_LEVEL_WARN,
+ "Dropping incompletely created "
+ "SYS_ZIP_DICT_COLS table.");
+ row_drop_table_for_mysql("SYS_ZIP_DICT_COLS", trx, TRUE, TRUE);
+ }
+
+ ib_logf(IB_LOG_LEVEL_INFO,
+ "Creating zip_dict and zip_dict_cols system tables.");
+
+ /* We always want SYSTEM tables to be created inside the system
+ tablespace. */
+ srv_file_per_table_backup = srv_file_per_table;
+ srv_file_per_table = 0;
+
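+ /* SYS_ZIP_DICT stores the dictionaries themselves (unique ID,
+ unique NAME, BLOB DATA); SYS_ZIP_DICT_COLS maps a
+ (TABLE_ID, COLUMN_POS) pair to the DICT_ID it uses. */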
+ err = que_eval_sql(
+ NULL,
+ "PROCEDURE CREATE_SYS_ZIP_DICT_PROC () IS\n"
+ "BEGIN\n"
+ "CREATE TABLE SYS_ZIP_DICT(\n"
+ " ID INT UNSIGNED NOT NULL,\n"
+ " NAME CHAR("
+ STRINGIFY_ARG(ZIP_DICT_MAX_NAME_LENGTH)
+ ") NOT NULL,\n"
+ " DATA BLOB NOT NULL\n"
+ ");\n"
+ "CREATE UNIQUE CLUSTERED INDEX SYS_ZIP_DICT_ID"
+ " ON SYS_ZIP_DICT (ID);\n"
+ "CREATE UNIQUE INDEX SYS_ZIP_DICT_NAME"
+ " ON SYS_ZIP_DICT (NAME);\n"
+ "CREATE TABLE SYS_ZIP_DICT_COLS(\n"
+ " TABLE_ID INT UNSIGNED NOT NULL,\n"
+ " COLUMN_POS INT UNSIGNED NOT NULL,\n"
+ " DICT_ID INT UNSIGNED NOT NULL\n"
+ ");\n"
+ "CREATE UNIQUE CLUSTERED INDEX SYS_ZIP_DICT_COLS_COMPOSITE"
+ " ON SYS_ZIP_DICT_COLS (TABLE_ID, COLUMN_POS);\n"
+ "END;\n",
+ FALSE, trx);
+
+ if (err != DB_SUCCESS) {
+ ib_logf(IB_LOG_LEVEL_ERROR,
+ "Creation of SYS_ZIP_DICT and SYS_ZIP_DICT_COLS"
+ "has failed with error %lu. Tablespace is full. "
+ "Dropping incompletely created tables.",
+ (ulong) err);
+
+ ut_a(err == DB_OUT_OF_FILE_SPACE
+ || err == DB_TOO_MANY_CONCURRENT_TRXS);
+
+ row_drop_table_for_mysql("SYS_ZIP_DICT", trx, TRUE, TRUE);
+ row_drop_table_for_mysql("SYS_ZIP_DICT_COLS", trx, TRUE, TRUE);
+
+ if (err == DB_OUT_OF_FILE_SPACE) {
+ err = DB_MUST_GET_MORE_FILE_SPACE;
+ }
+ }
+
+ trx_commit_for_mysql(trx);
+
+ row_mysql_unlock_data_dictionary(trx);
+
+ trx_free_for_mysql(trx);
+
+ srv_file_per_table = srv_file_per_table_backup;
+
+ if (err == DB_SUCCESS) {
+ ib_logf(IB_LOG_LEVEL_INFO,
+ "zip_dict and zip_dict_cols system tables created.");
+ }
+
+ /* Note: The master thread has not been started at this point. */
+ /* Confirm and move to the non-LRU part of the table LRU list. */
+
+ sys_zip_dict_err = dict_check_if_system_table_exists(
+ "SYS_ZIP_DICT", DICT_NUM_FIELDS__SYS_ZIP_DICT + 1, 2);
+ ut_a(sys_zip_dict_err == DB_SUCCESS);
+ sys_zip_dict_cols_err = dict_check_if_system_table_exists(
+ "SYS_ZIP_DICT_COLS",
+ DICT_NUM_FIELDS__SYS_ZIP_DICT_COLS + 1, 1);
+ ut_a(sys_zip_dict_cols_err == DB_SUCCESS);
+
+ return(err);
+}
+
/********************************************************************//**
Add a single tablespace definition to the data dictionary tables in the
database.
@@ -1988,3 +2118,456 @@ dict_create_add_tablespace_to_dictionary(
return(error);
}
+
+/** Add a single compression dictionary definition to the SYS_ZIP_DICT
+InnoDB system table.
+@return error code or DB_SUCCESS */
+UNIV_INTERN
+dberr_t
+dict_create_add_zip_dict(
+ const char* name, /*!< in: dict name */
+ ulint name_len, /*!< in: dict name length */
+ const char* data, /*!< in: dict data */
+ ulint data_len, /*!< in: dict data length */
+ trx_t* trx) /*!< in/out: transaction */
+{
+ ut_ad(name);
+ ut_ad(data);
+
+ pars_info_t* info = pars_info_create();
+
+ pars_info_add_literal(info, "name", name, name_len,
+ DATA_VARCHAR, DATA_ENGLISH);
+ pars_info_add_literal(info, "data", data, data_len,
+ DATA_BLOB, DATA_BINARY_TYPE | DATA_NOT_NULL);
+
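+ /* The cursor walks SYS_ZIP_DICT in descending ID order, so the
+ first fetched row carries the current maximum; the new entry is
+ inserted with max_id + 1 (1 when the table is empty). */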
+ dberr_t error = que_eval_sql(info,
+ "PROCEDURE P () IS\n"
+ " max_id INT;\n"
+ "DECLARE CURSOR cur IS\n"
+ " SELECT ID FROM SYS_ZIP_DICT\n"
+ " ORDER BY ID DESC;\n"
+ "BEGIN\n"
+ " max_id := 0;\n"
+ " OPEN cur;\n"
+ " FETCH cur INTO max_id;\n"
+ " IF (cur % NOTFOUND) THEN\n"
+ " max_id := 0;\n"
+ " END IF;\n"
+ " CLOSE cur;\n"
+ " INSERT INTO SYS_ZIP_DICT VALUES"
+ " (max_id + 1, :name, :data);\n"
+ "END;\n",
+ FALSE, trx);
+
+ return error;
+}
+
+/** Fetch callback, just stores extracted zip_dict id in the external
+variable.
+@return TRUE if all OK */
+static
+ibool
+dict_create_extract_int_aux(
+ void* row, /*!< in: sel_node_t* */
+ void* user_arg) /*!< in: int32 id */
+{
+ sel_node_t* node = static_cast<sel_node_t*>(row);
+ dfield_t* dfield = que_node_get_val(node->select_list);
+ dtype_t* type = dfield_get_type(dfield);
+ ulint len = dfield_get_len(dfield);
+
+ ut_a(dtype_get_mtype(type) == DATA_INT);
+ ut_a(len == sizeof(ib_uint32_t));
+
+ memcpy(user_arg, dfield_get_data(dfield), sizeof(ib_uint32_t));
+
+ return(TRUE);
+}
+
+/** Add a single compression dictionary reference to the SYS_ZIP_DICT_COLS
+InnoDB system table.
+@return error code or DB_SUCCESS */
+UNIV_INTERN
+dberr_t
+dict_create_add_zip_dict_reference(
+ ulint table_id, /*!< in: table id */
+ ulint column_pos, /*!< in: column position */
+ ulint dict_id, /*!< in: dict id */
+ trx_t* trx) /*!< in/out: transaction */
+{
+ pars_info_t* info = pars_info_create();
+
+ pars_info_add_int4_literal(info, "table_id", table_id);
+ pars_info_add_int4_literal(info, "column_pos", column_pos);
+ pars_info_add_int4_literal(info, "dict_id", dict_id);
+
+ dberr_t error = que_eval_sql(info,
+ "PROCEDURE P () IS\n"
+ "BEGIN\n"
+ " INSERT INTO SYS_ZIP_DICT_COLS VALUES"
+ " (:table_id, :column_pos, :dict_id);\n"
+ "END;\n",
+ FALSE, trx);
+ return error;
+}
+
+/** Get a single compression dictionary id for the given
+(table id, column pos) pair.
+@return error code or DB_SUCCESS */
+UNIV_INTERN
+dberr_t
+dict_create_get_zip_dict_id_by_reference(
+ ulint table_id, /*!< in: table id */
+ ulint column_pos, /*!< in: column position */
+ ulint* dict_id, /*!< out: dict id */
+ trx_t* trx) /*!< in/out: transaction */
+{
+ ut_ad(dict_id);
+
+ pars_info_t* info = pars_info_create();
+
+ ib_uint32_t dict_id_buf;
+ mach_write_to_4(reinterpret_cast<byte*>(&dict_id_buf),
+ ULINT32_UNDEFINED);
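+ /* ULINT32_UNDEFINED acts as a "no row fetched" sentinel: if the
+ cursor finds nothing the buffer keeps this value and the caller
+ reports DB_RECORD_NOT_FOUND. */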
+
+ pars_info_add_int4_literal(info, "table_id", table_id);
+ pars_info_add_int4_literal(info, "column_pos", column_pos);
+ pars_info_bind_function(
+ info, "my_func", dict_create_extract_int_aux, &dict_id_buf);
+
+ dberr_t error = que_eval_sql(info,
+ "PROCEDURE P () IS\n"
+ "DECLARE FUNCTION my_func;\n"
+ "DECLARE CURSOR cur IS\n"
+ " SELECT DICT_ID FROM SYS_ZIP_DICT_COLS\n"
+ " WHERE TABLE_ID = :table_id AND\n"
+ " COLUMN_POS = :column_pos;\n"
+ "BEGIN\n"
+ " OPEN cur;\n"
+ " FETCH cur INTO my_func();\n"
+ " CLOSE cur;\n"
+ "END;\n",
+ FALSE, trx);
+ if (error == DB_SUCCESS) {
+ ib_uint32_t local_dict_id = mach_read_from_4(
+ reinterpret_cast<const byte*>(&dict_id_buf));
+ if (local_dict_id == ULINT32_UNDEFINED)
+ error = DB_RECORD_NOT_FOUND;
+ else
+ *dict_id = local_dict_id;
+ }
+ return error;
+}
+
+/** Get compression dictionary id for the given name.
+@return error code or DB_SUCCESS */
+UNIV_INTERN
+dberr_t
+dict_create_get_zip_dict_id_by_name(
+ const char* dict_name, /*!< in: dict name */
+ ulint dict_name_len, /*!< in: dict name length */
+ ulint* dict_id, /*!< out: dict id */
+ trx_t* trx) /*!< in/out: transaction */
+{
+ ut_ad(dict_name);
+ ut_ad(dict_name_len);
+ ut_ad(dict_id);
+
+ pars_info_t* info = pars_info_create();
+
+ pars_info_add_literal(info, "dict_name", dict_name, dict_name_len,
+ DATA_VARCHAR, DATA_ENGLISH);
+
+ ib_uint32_t dict_id_buf;
+ mach_write_to_4(reinterpret_cast<byte*>(&dict_id_buf),
+ ULINT32_UNDEFINED);
+ pars_info_bind_function(
+ info, "my_func", dict_create_extract_int_aux, &dict_id_buf);
+
+ dberr_t error = que_eval_sql(info,
+ "PROCEDURE P () IS\n"
+ "DECLARE FUNCTION my_func;\n"
+ "DECLARE CURSOR cur IS\n"
+ " SELECT ID FROM SYS_ZIP_DICT\n"
+ " WHERE NAME = :dict_name;\n"
+ "BEGIN\n"
+ " OPEN cur;\n"
+ " FETCH cur INTO my_func();\n"
+ " CLOSE cur;\n"
+ "END;\n",
+ FALSE, trx);
+ if (error == DB_SUCCESS) {
+ ib_uint32_t local_dict_id = mach_read_from_4(
+ reinterpret_cast<const byte*>(&dict_id_buf));
+ if (local_dict_id == ULINT32_UNDEFINED)
+ error = DB_RECORD_NOT_FOUND;
+ else
+ *dict_id = local_dict_id;
+ }
+ return error;
+}
+
+/** Auxiliary enum used to indicate zip dict data extraction result code */
+enum zip_dict_info_aux_code {
+ zip_dict_info_success, /*!< success */
+ zip_dict_info_not_found, /*!< zip dict record not found */
+ zip_dict_info_oom, /*!< out of memory */
+ zip_dict_info_corrupted_name, /*!< corrupted zip dict name */
+ zip_dict_info_corrupted_data /*!< corrupted zip dict data */
+};
+
+/** Auxiliary struct used to return zip dict info along with result code */
+struct zip_dict_info_aux {
+ LEX_STRING name; /*!< zip dict name */
+ LEX_STRING data; /*!< zip dict data */
+ int code; /*!< result code (0 - success) */
+};
+
+/** Fetch callback, just stores extracted zip_dict data in the external
+variable.
+@return always returns TRUE */
+static
+ibool
+dict_create_get_zip_dict_info_by_id_aux(
+ void* row, /*!< in: sel_node_t* */
+ void* user_arg) /*!< in: pointer to zip_dict_info_aux* */
+{
+ sel_node_t* node = static_cast<sel_node_t*>(row);
+ zip_dict_info_aux* result =
+ static_cast<zip_dict_info_aux*>(user_arg);
+
+ result->code = zip_dict_info_success;
+ result->name.str = 0;
+ result->name.length = 0;
+ result->data.str = 0;
+ result->data.length = 0;
+
+ /* NAME field */
+ que_node_t* exp = node->select_list;
+ ut_a(exp != 0);
+
+ dfield_t* dfield = que_node_get_val(exp);
+ dtype_t* type = dfield_get_type(dfield);
+ ut_a(dtype_get_mtype(type) == DATA_VARCHAR);
+
+ ulint len = dfield_get_len(dfield);
+ void* data = dfield_get_data(dfield);
+
+ if (len == UNIV_SQL_NULL) {
+ result->code = zip_dict_info_corrupted_name;
+ }
+ else {
+ result->name.str =
+ static_cast<char*>(my_malloc(len + 1, MYF(0)));
+ if (result->name.str == 0) {
+ result->code = zip_dict_info_oom;
+ }
+ else {
+ memcpy(result->name.str, data, len);
+ result->name.str[len] = '\0';
+ result->name.length = len;
+ }
+ }
+
+ /* DATA field */
+ exp = que_node_get_next(exp);
+ ut_a(exp != 0);
+
+ dfield = que_node_get_val(exp);
+ type = dfield_get_type(dfield);
+ ut_a(dtype_get_mtype(type) == DATA_BLOB);
+
+ len = dfield_get_len(dfield);
+ data = dfield_get_data(dfield);
+
+ if (len == UNIV_SQL_NULL) {
+ result->code = zip_dict_info_corrupted_data;
+ }
+ else {
+ result->data.str =
+ static_cast<char*>(my_malloc(
+ len == 0 ? 1 : len, MYF(0)));
+ if (result->data.str == 0) {
+ result->code = zip_dict_info_oom;
+ }
+ else {
+ memcpy(result->data.str, data, len);
+ result->data.length = len;
+ }
+ }
+
+ ut_ad(que_node_get_next(exp) == 0);
+
+ if (result->code != zip_dict_info_success) {
+ /* On failure release whatever was allocated so far. */
+ if (result->name.str != 0) {
+ mem_free(result->name.str);
+ result->name.str = 0;
+ result->name.length = 0;
+ }
+ if (result->data.str != 0) {
+ mem_free(result->data.str);
+ result->data.str = 0;
+ result->data.length = 0;
+ }
+ }
+
+ return TRUE;
+}
+
+/** Get compression dictionary info (name and data) for the given id.
+Allocates memory for name and data on success.
+Must be freed with mem_free().
+@return error code or DB_SUCCESS */
+UNIV_INTERN
+dberr_t
+dict_create_get_zip_dict_info_by_id(
+ ulint dict_id, /*!< in: dict id */
+ char** name, /*!< out: dict name */
+ ulint* name_len, /*!< out: dict name length*/
+ char** data, /*!< out: dict data */
+ ulint* data_len, /*!< out: dict data length*/
+ trx_t* trx) /*!< in/out: transaction */
+{
+ ut_ad(name);
+ ut_ad(data);
+
+ zip_dict_info_aux rec;
+ rec.code = zip_dict_info_not_found;
+ pars_info_t* info = pars_info_create();
+
+ pars_info_add_int4_literal(info, "id", dict_id);
+ pars_info_bind_function(
+ info, "my_func", dict_create_get_zip_dict_info_by_id_aux,
+ &rec);
+
+ dberr_t error = que_eval_sql(info,
+ "PROCEDURE P () IS\n"
+ "DECLARE FUNCTION my_func;\n"
+ "DECLARE CURSOR cur IS\n"
+ " SELECT NAME, DATA FROM SYS_ZIP_DICT\n"
+ " WHERE ID = :id;\n"
+ "BEGIN\n"
+ " OPEN cur;\n"
+ " FETCH cur INTO my_func();\n"
+ " CLOSE cur;\n"
+ "END;\n",
+ FALSE, trx);
+ if (error == DB_SUCCESS) {
+ switch (rec.code) {
+ case zip_dict_info_success:
+ *name = rec.name.str;
+ *name_len = rec.name.length;
+ *data = rec.data.str;
+ *data_len = rec.data.length;
+ break;
+ case zip_dict_info_not_found:
+ error = DB_RECORD_NOT_FOUND;
+ break;
+ case zip_dict_info_oom:
+ error = DB_OUT_OF_MEMORY;
+ break;
+ case zip_dict_info_corrupted_name:
+ case zip_dict_info_corrupted_data:
+ error = DB_INVALID_NULL;
+ break;
+ default:
+ ut_error;
+ }
+ }
+ return error;
+}
+
+/** Remove a single compression dictionary from the data dictionary
+tables in the database.
+@return error code or DB_SUCCESS */
+UNIV_INTERN
+dberr_t
+dict_create_remove_zip_dict(
+ const char* name, /*!< in: dict name */
+ ulint name_len, /*!< in: dict name length */
+ trx_t* trx) /*!< in/out: transaction */
+{
+ ut_ad(name);
+
+ pars_info_t* info = pars_info_create();
+
+ ib_uint32_t dict_id_buf;
+ mach_write_to_4(reinterpret_cast<byte*>(&dict_id_buf),
+ ULINT32_UNDEFINED);
+ ib_uint32_t counter_buf;
+ mach_write_to_4(reinterpret_cast<byte*>(&counter_buf),
+ ULINT32_UNDEFINED);
+
+ pars_info_add_literal(info, "name", name, name_len,
+ DATA_VARCHAR, DATA_ENGLISH);
+ pars_info_bind_int4_literal(info, "dict_id", &dict_id_buf);
+ pars_info_bind_function(info, "find_dict_func",
+ dict_create_extract_int_aux, &dict_id_buf);
+ pars_info_bind_function(info, "count_func",
+ dict_create_extract_int_aux, &counter_buf);
+
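+ /* Look up the dictionary id by name (find_dict_func), then probe
+ SYS_ZIP_DICT_COLS for any column still referencing it
+ (count_func); the row is deleted only when no reference
+ exists. */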
+ dberr_t error = que_eval_sql(info,
+ "PROCEDURE P () IS\n"
+ "DECLARE FUNCTION find_dict_func;\n"
+ "DECLARE FUNCTION count_func;\n"
+ "DECLARE CURSOR dict_cur IS\n"
+ " SELECT ID FROM SYS_ZIP_DICT\n"
+ " WHERE NAME = :name\n"
+ " FOR UPDATE;\n"
+ "DECLARE CURSOR ref_cur IS\n"
+ " SELECT 1 FROM SYS_ZIP_DICT_COLS\n"
+ " WHERE DICT_ID = :dict_id;\n"
+ "BEGIN\n"
+ " OPEN dict_cur;\n"
+ " FETCH dict_cur INTO find_dict_func();\n"
+ " IF NOT (SQL % NOTFOUND) THEN\n"
+ " OPEN ref_cur;\n"
+ " FETCH ref_cur INTO count_func();\n"
+ " IF SQL % NOTFOUND THEN\n"
+ " DELETE FROM SYS_ZIP_DICT WHERE CURRENT OF dict_cur;\n"
+ " END IF;\n"
+ " CLOSE ref_cur;\n"
+ " END IF;\n"
+ " CLOSE dict_cur;\n"
+ "END;\n",
+ FALSE, trx);
+ if (error == DB_SUCCESS) {
+ ib_uint32_t local_dict_id = mach_read_from_4(
+ reinterpret_cast<const byte*>(&dict_id_buf));
+ if (local_dict_id == ULINT32_UNDEFINED) {
+ error = DB_RECORD_NOT_FOUND;
+ }
+ else {
+ ib_uint32_t local_counter = mach_read_from_4(
+ reinterpret_cast<const byte*>(&counter_buf));
+ if (local_counter != ULINT32_UNDEFINED)
+ error = DB_ROW_IS_REFERENCED;
+ }
+ }
+ return error;
+}
+
+/** Remove all compression dictionary references for the given table ID from
+the data dictionary tables in the database.
+@return error code or DB_SUCCESS */
+UNIV_INTERN
+dberr_t
+dict_create_remove_zip_dict_references_for_table(
+ ulint table_id, /*!< in: table id */
+ trx_t* trx) /*!< in/out: transaction */
+{
+ pars_info_t* info = pars_info_create();
+
+ pars_info_add_int4_literal(info, "table_id", table_id);
+
+ dberr_t error = que_eval_sql(info,
+ "PROCEDURE P () IS\n"
+ "BEGIN\n"
+ " DELETE FROM SYS_ZIP_DICT_COLS\n"
+ " WHERE TABLE_ID = :table_id;\n"
+ "END;\n",
+ FALSE, trx);
+ return error;
+}
diff --git a/storage/xtradb/dict/dict0dict.cc b/storage/xtradb/dict/dict0dict.cc
index 86ec720d24e..2f0b09ec419 100644
--- a/storage/xtradb/dict/dict0dict.cc
+++ b/storage/xtradb/dict/dict0dict.cc
@@ -7346,3 +7346,161 @@ dict_tf_to_row_format_string(
return(0);
}
#endif /* !UNIV_HOTBACKUP */
+
+/** Insert a record into SYS_ZIP_DICT.
+@retval DB_SUCCESS if OK
+@retval dberr_t error code if the insert failed */
+UNIV_INTERN
+dberr_t
+dict_create_zip_dict(
+ const char* name, /*!< in: zip_dict name */
+ ulint name_len, /*!< in: zip_dict name length*/
+ const char* data, /*!< in: zip_dict data */
+ ulint data_len) /*!< in: zip_dict data length */
+{
+ dberr_t err = DB_SUCCESS;
+ trx_t* trx;
+
+ ut_ad(name);
+ ut_ad(data);
+
+ rw_lock_x_lock(&dict_operation_lock);
+ dict_mutex_enter_for_mysql();
+
+ trx = trx_allocate_for_background();
+ trx->op_info = "insert zip_dict";
+ trx->dict_operation_lock_mode = RW_X_LATCH;
+ trx_start_if_not_started(trx);
+
+ err = dict_create_add_zip_dict(name, name_len, data, data_len, trx);
+
+ if (err == DB_SUCCESS) {
+ trx_commit_for_mysql(trx);
+ }
+ else {
+ trx->op_info = "rollback of internal trx on zip_dict table";
+ trx_rollback_to_savepoint(trx, NULL);
+ ut_a(trx->error_state == DB_SUCCESS);
+ }
+ trx->op_info = "";
+ trx->dict_operation_lock_mode = 0;
+ trx_free_for_background(trx);
+
+ dict_mutex_exit_for_mysql();
+ rw_lock_x_unlock(&dict_operation_lock);
+
+ return err;
+}
+/** Get a single compression dictionary id for the given
+(table id, column pos) pair.
+@retval DB_SUCCESS if OK
+@retval DB_RECORD_NOT_FOUND if not found */
+UNIV_INTERN
+dberr_t
+dict_get_dictionary_id_by_key(
+ ulint table_id, /*!< in: table id */
+ ulint column_pos, /*!< in: column position */
+ ulint* dict_id) /*!< out: zip_dict id */
+{
+ dberr_t err = DB_SUCCESS;
+ trx_t* trx;
+
+ rw_lock_s_lock(&dict_operation_lock);
+ dict_mutex_enter_for_mysql();
+
+ trx = trx_allocate_for_background();
+ trx->op_info = "get zip dict id by composite key";
+ trx->dict_operation_lock_mode = RW_S_LATCH;
+ trx_start_if_not_started(trx);
+
+ err = dict_create_get_zip_dict_id_by_reference(table_id, column_pos,
+ dict_id, trx);
+
+ trx_commit_for_mysql(trx);
+ trx->dict_operation_lock_mode = 0;
+ trx_free_for_background(trx);
+
+ dict_mutex_exit_for_mysql();
+ rw_lock_s_unlock(&dict_operation_lock);
+
+ return err;
+}
+/** Get compression dictionary info (name and data) for the given id.
+Allocates memory for name and data on success.
+Must be freed with mem_free().
+@retval DB_SUCCESS if OK
+@retval DB_RECORD_NOT_FOUND if not found */
+UNIV_INTERN
+dberr_t
+dict_get_dictionary_info_by_id(
+ ulint dict_id, /*!< in: dict id */
+ char** name, /*!< out: dictionary name */
+ ulint* name_len, /*!< out: dictionary name length*/
+ char** data, /*!< out: dictionary data */
+ ulint* data_len) /*!< out: dictionary data length*/
+{
+ dberr_t err = DB_SUCCESS;
+ trx_t* trx;
+
+ rw_lock_s_lock(&dict_operation_lock);
+ dict_mutex_enter_for_mysql();
+
+ trx = trx_allocate_for_background();
+ trx->op_info = "get zip dict name and data by id";
+ trx->dict_operation_lock_mode = RW_S_LATCH;
+ trx_start_if_not_started(trx);
+
+ err = dict_create_get_zip_dict_info_by_id(dict_id, name, name_len,
+ data, data_len, trx);
+
+ trx_commit_for_mysql(trx);
+ trx->dict_operation_lock_mode = 0;
+ trx_free_for_background(trx);
+
+ dict_mutex_exit_for_mysql();
+ rw_lock_s_unlock(&dict_operation_lock);
+
+ return err;
+}
+/** Delete a record in SYS_ZIP_DICT with the given name.
+@retval DB_SUCCESS if OK
+@retval DB_RECORD_NOT_FOUND if not found
+@retval DB_ROW_IS_REFERENCED if in use */
+UNIV_INTERN
+dberr_t
+dict_drop_zip_dict(
+ const char* name, /*!< in: zip_dict name */
+ ulint name_len) /*!< in: zip_dict name length*/
+{
+ dberr_t err = DB_SUCCESS;
+ trx_t* trx;
+
+ ut_ad(name);
+
+ rw_lock_x_lock(&dict_operation_lock);
+ dict_mutex_enter_for_mysql();
+
+ trx = trx_allocate_for_background();
+ trx->op_info = "delete zip_dict";
+ trx->dict_operation_lock_mode = RW_X_LATCH;
+ trx_start_if_not_started(trx);
+
+ err = dict_create_remove_zip_dict(name, name_len, trx);
+
+ if (err == DB_SUCCESS) {
+ trx_commit_for_mysql(trx);
+ }
+ else {
+ trx->op_info = "rollback of internal trx on zip_dict table";
+ trx_rollback_to_savepoint(trx, NULL);
+ ut_a(trx->error_state == DB_SUCCESS);
+ }
+ trx->op_info = "";
+ trx->dict_operation_lock_mode = 0;
+ trx_free_for_background(trx);
+
+ dict_mutex_exit_for_mysql();
+ rw_lock_x_unlock(&dict_operation_lock);
+
+ return err;
+}
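
All four wrappers above share one locking discipline: take dict_operation_lock (X-latch for the two writers, S-latch for the two readers), enter the dictionary mutex, run the low-level dict_create_* operation inside a background transaction, then commit on success or roll back to the start savepoint on failure. In outline (condensed from the functions above, not a new API):

    rw_lock_x_lock(&dict_operation_lock);  /* or rw_lock_s_lock() for reads */
    dict_mutex_enter_for_mysql();
    trx_t* trx = trx_allocate_for_background();
    trx->dict_operation_lock_mode = RW_X_LATCH;  /* or RW_S_LATCH */
    trx_start_if_not_started(trx);
    dberr_t err = dict_create_remove_zip_dict(name, name_len, trx);
    if (err == DB_SUCCESS) {
        trx_commit_for_mysql(trx);
    } else {
        trx_rollback_to_savepoint(trx, NULL);
    }
    trx->dict_operation_lock_mode = 0;
    trx_free_for_background(trx);
    dict_mutex_exit_for_mysql();
    rw_lock_x_unlock(&dict_operation_lock);

Note that the two read-only wrappers commit unconditionally, since a failed lookup leaves nothing to roll back.
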
diff --git a/storage/xtradb/dict/dict0load.cc b/storage/xtradb/dict/dict0load.cc
index 8b7034e5eba..a21a5d5cddc 100644
--- a/storage/xtradb/dict/dict0load.cc
+++ b/storage/xtradb/dict/dict0load.cc
@@ -57,7 +57,9 @@ static const char* SYSTEM_TABLE_NAME[] = {
"SYS_FOREIGN",
"SYS_FOREIGN_COLS",
"SYS_TABLESPACES",
- "SYS_DATAFILES"
+ "SYS_DATAFILES",
+ "SYS_ZIP_DICT",
+ "SYS_ZIP_DICT_COLS"
};
/* If this flag is TRUE, then we will load the cluster index's (and tables')
@@ -729,6 +731,161 @@ err_len:
return(NULL);
}
+/** This function parses a SYS_ZIP_DICT record, extracts necessary
+information from the record and returns it to the caller.
+@return error message, or NULL on success */
+UNIV_INTERN
+const char*
+dict_process_sys_zip_dict(
+ mem_heap_t* heap, /*!< in/out: heap memory */
+ ulint zip_size, /*!< in: nonzero=compressed BLOB page size */
+ const rec_t* rec, /*!< in: current SYS_ZIP_DICT rec */
+ ulint* id, /*!< out: dict id */
+ const char** name, /*!< out: dict name */
+ const char** data, /*!< out: dict data */
+ ulint* data_len) /*!< out: dict data length */
+{
+ ulint len;
+ const byte* field;
+
+ /* Initialize the output values */
+ *id = ULINT_UNDEFINED;
+ *name = NULL;
+ *data = NULL;
+ *data_len = 0;
+
+ if (UNIV_UNLIKELY(rec_get_deleted_flag(rec, 0))) {
+ return("delete-marked record in SYS_ZIP_DICT");
+ }
+
+ if (UNIV_UNLIKELY(
+ rec_get_n_fields_old(rec) != DICT_NUM_FIELDS__SYS_ZIP_DICT)) {
+ return("wrong number of columns in SYS_ZIP_DICT record");
+ }
+
+ field = rec_get_nth_field_old(
+ rec, DICT_FLD__SYS_ZIP_DICT__ID, &len);
+ if (UNIV_UNLIKELY(len != DICT_FLD_LEN_SPACE)) {
+ goto err_len;
+ }
+ *id = mach_read_from_4(field);
+
+ rec_get_nth_field_offs_old(
+ rec, DICT_FLD__SYS_ZIP_DICT__DB_TRX_ID, &len);
+ if (UNIV_UNLIKELY(len != DATA_TRX_ID_LEN && len != UNIV_SQL_NULL)) {
+ goto err_len;
+ }
+
+ rec_get_nth_field_offs_old(
+ rec, DICT_FLD__SYS_ZIP_DICT__DB_ROLL_PTR, &len);
+ if (UNIV_UNLIKELY(len != DATA_ROLL_PTR_LEN && len != UNIV_SQL_NULL)) {
+ goto err_len;
+ }
+
+ field = rec_get_nth_field_old(
+ rec, DICT_FLD__SYS_ZIP_DICT__NAME, &len);
+ if (UNIV_UNLIKELY(len == 0 || len == UNIV_SQL_NULL)) {
+ goto err_len;
+ }
+ *name = mem_heap_strdupl(heap, (char*) field, len);
+
+ field = rec_get_nth_field_old(
+ rec, DICT_FLD__SYS_ZIP_DICT__DATA, &len);
+ if (UNIV_UNLIKELY(len == UNIV_SQL_NULL)) {
+ goto err_len;
+ }
+
+ if (rec_get_1byte_offs_flag(rec) == 0 &&
+ rec_2_is_field_extern(rec, DICT_FLD__SYS_ZIP_DICT__DATA)) {
+ ut_a(len >= BTR_EXTERN_FIELD_REF_SIZE);
+
+ if (UNIV_UNLIKELY
+ (!memcmp(field + len - BTR_EXTERN_FIELD_REF_SIZE,
+ field_ref_zero,
+ BTR_EXTERN_FIELD_REF_SIZE))) {
+ goto err_len;
+ }
+ *data = reinterpret_cast<char*>(
+ btr_copy_externally_stored_field(data_len, field,
+ zip_size, len, heap, 0));
+ }
+ else {
+ *data_len = len;
+ *data = static_cast<char*>(mem_heap_dup(heap, field, len));
+ }
+
+ return(NULL);
+
+err_len:
+ return("incorrect column length in SYS_ZIP_DICT");
+}
+
+/** This function parses a SYS_ZIP_DICT_COLS record, extracts necessary
+information from the record and returns it to the caller.
+@return error message, or NULL on success */
+UNIV_INTERN
+const char*
+dict_process_sys_zip_dict_cols(
+ mem_heap_t* heap, /*!< in/out: heap memory */
+ const rec_t* rec, /*!< in: current SYS_ZIP_DICT_COLS rec */
+ ulint* table_id, /*!< out: table id */
+ ulint* column_pos, /*!< out: column position */
+ ulint* dict_id) /*!< out: dict id */
+{
+ ulint len;
+ const byte* field;
+
+ /* Initialize the output values */
+ *table_id = ULINT_UNDEFINED;
+ *column_pos = ULINT_UNDEFINED;
+ *dict_id = ULINT_UNDEFINED;
+
+ if (UNIV_UNLIKELY(rec_get_deleted_flag(rec, 0))) {
+ return("delete-marked record in SYS_ZIP_DICT_COLS");
+ }
+
+ if (UNIV_UNLIKELY(rec_get_n_fields_old(rec) !=
+ DICT_NUM_FIELDS__SYS_ZIP_DICT_COLS)) {
+ return("wrong number of columns in SYS_ZIP_DICT_COLS"
+ " record");
+ }
+
+ field = rec_get_nth_field_old(
+ rec, DICT_FLD__SYS_ZIP_DICT_COLS__TABLE_ID, &len);
+ if (UNIV_UNLIKELY(len != DICT_FLD_LEN_SPACE)) {
+err_len:
+ return("incorrect column length in SYS_ZIP_DICT_COLS");
+ }
+ *table_id = mach_read_from_4(field);
+
+ field = rec_get_nth_field_old(
+ rec, DICT_FLD__SYS_ZIP_DICT_COLS__COLUMN_POS, &len);
+ if (UNIV_UNLIKELY(len != DICT_FLD_LEN_SPACE)) {
+ goto err_len;
+ }
+ *column_pos = mach_read_from_4(field);
+
+ rec_get_nth_field_offs_old(
+ rec, DICT_FLD__SYS_ZIP_DICT_COLS__DB_TRX_ID, &len);
+ if (UNIV_UNLIKELY(len != DATA_TRX_ID_LEN && len != UNIV_SQL_NULL)) {
+ goto err_len;
+ }
+
+ rec_get_nth_field_offs_old(
+ rec, DICT_FLD__SYS_ZIP_DICT_COLS__DB_ROLL_PTR, &len);
+ if (UNIV_UNLIKELY(len != DATA_ROLL_PTR_LEN && len != UNIV_SQL_NULL)) {
+ goto err_len;
+ }
+
+ field = rec_get_nth_field_old(
+ rec, DICT_FLD__SYS_ZIP_DICT_COLS__DICT_ID, &len);
+ if (UNIV_UNLIKELY(len != DICT_FLD_LEN_SPACE)) {
+ goto err_len;
+ }
+ *dict_id = mach_read_from_4(field);
+
+ return(NULL);
+}
/********************************************************************//**
Determine the flags of a table as stored in SYS_TABLES.TYPE and N_COLS.
@return ULINT_UNDEFINED if error, else a valid dict_table_t::flags. */
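
Both parsers above decode the 4-byte SYS_* columns with mach_read_from_4(), which reads InnoDB's on-disk big-endian byte order. A self-contained equivalent of that step, for illustration only:

    #include <stdint.h>

    /* big-endian 32-bit read, the byte order mach_read_from_4() assumes */
    static uint32_t read_be32(const unsigned char* b)
    {
        return ((uint32_t) b[0] << 24) | ((uint32_t) b[1] << 16)
             | ((uint32_t) b[2] << 8)  |  (uint32_t) b[3];
    }

This is why the decoded ids compare correctly against host integers such as ULINT_UNDEFINED regardless of platform endianness.
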
diff --git a/storage/xtradb/fil/fil0fil.cc b/storage/xtradb/fil/fil0fil.cc
index 803f3a81cbd..0604e1ee55f 100644
--- a/storage/xtradb/fil/fil0fil.cc
+++ b/storage/xtradb/fil/fil0fil.cc
@@ -341,6 +341,8 @@ fil_space_get_by_id(
ut_ad(space->magic_n == FIL_SPACE_MAGIC_N),
space->id == id);
+ /* The system tablespace must always be found */
+ ut_ad(space || id != 0 || srv_is_being_started);
return(space);
}
diff --git a/storage/xtradb/fts/fts0fts.cc b/storage/xtradb/fts/fts0fts.cc
index 0507be04412..d1cde3edf75 100644
--- a/storage/xtradb/fts/fts0fts.cc
+++ b/storage/xtradb/fts/fts0fts.cc
@@ -108,6 +108,7 @@ UNIV_INTERN mysql_pfs_key_t fts_pll_tokenize_mutex_key;
/** variable to record innodb_fts_internal_tbl_name for information
schema table INNODB_FTS_INSERTED etc. */
UNIV_INTERN char* fts_internal_tbl_name = NULL;
+UNIV_INTERN char* fts_internal_tbl_name2 = NULL;
/** InnoDB default stopword list:
There are different versions of stopwords, the stop words listed
@@ -6570,6 +6571,36 @@ fts_check_corrupt_index(
return(0);
}
+/* Get the parent table name if the given name is an FTS aux table
+@param[in] aux_table_name aux table name
+@param[in] aux_table_len aux table name length
+@return parent table name, or NULL */
+char*
+fts_get_parent_table_name(
+ const char* aux_table_name,
+ ulint aux_table_len)
+{
+ fts_aux_table_t aux_table;
+ char* parent_table_name = NULL;
+
+ if (fts_is_aux_table_name(&aux_table, aux_table_name, aux_table_len)) {
+ dict_table_t* parent_table;
+
+ parent_table = dict_table_open_on_id(
+ aux_table.parent_id, TRUE, DICT_TABLE_OP_NORMAL);
+
+ if (parent_table != NULL) {
+ parent_table_name = mem_strdupl(
+ parent_table->name,
+ strlen(parent_table->name));
+
+ dict_table_close(parent_table, TRUE, FALSE);
+ }
+ }
+
+ return(parent_table_name);
+}
+
/** Check the validity of the parent table.
@param[in] aux_table auxiliary table
@return true if it is a valid table or false if it is not */
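
A hedged usage sketch for the helper above; the caller owns the returned copy and releases it with mem_free() (the aux table name variable is illustrative):

    char* parent = fts_get_parent_table_name(aux_name, strlen(aux_name));
    if (parent != NULL) {
        /* e.g. report the user table that owns the FTS_..._CONFIG table */
        ib_logf(IB_LOG_LEVEL_INFO, "FTS aux table belongs to %s", parent);
        mem_free(parent);
    }
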
diff --git a/storage/xtradb/handler/ha_innodb.cc b/storage/xtradb/handler/ha_innodb.cc
index 0223e58edac..01014a553ec 100644
--- a/storage/xtradb/handler/ha_innodb.cc
+++ b/storage/xtradb/handler/ha_innodb.cc
@@ -113,6 +113,14 @@ this program; if not, write to the Free Software Foundation, Inc.,
#define thd_get_trx_isolation(X) ((enum_tx_isolation)thd_tx_isolation(X))
+#ifndef HAVE_PERCONA_COMPRESSED_COLUMNS
+#define COLUMN_FORMAT_TYPE_COMPRESSED 0xBADF00D
+#define SQLCOM_CREATE_COMPRESSION_DICTIONARY 0xDECAF
+#define SQLCOM_DROP_COMPRESSION_DICTIONARY 0xC0FFEE
+#define ER_COMPRESSION_DICTIONARY_DOES_NOT_EXIST 0xDEADFACE
+const static LEX_CSTRING null_lex_cstr={0,0};
+#endif
+
#ifdef MYSQL_DYNAMIC_PLUGIN
#define tc_size 400
#define tdc_size 400
@@ -893,7 +901,7 @@ int
innobase_get_parent_fk_list(
THD* thd,
const char* path,
- List<FOREIGN_KEY_INFO>* f_key_list);
+ List<FOREIGN_KEY_INFO>* f_key_list) __attribute__((unused));
/******************************************************************//**
Maps a MySQL trx isolation level code to the InnoDB isolation level code
@@ -1691,6 +1699,30 @@ normalize_table_name_low(
ibool set_lower_case); /* in: TRUE if we want to set
name to lower case */
+#ifdef HAVE_PERCONA_COMPRESSED_COLUMNS
+/** Creates a new compression dictionary. */
+static
+handler_create_zip_dict_result
+innobase_create_zip_dict(
+ handlerton* hton, /*!< in: innobase handlerton */
+ THD* thd, /*!< in: handle to the MySQL thread */
+ const char* name, /*!< in: zip dictionary name */
+ ulint* name_len,
+ /*!< in/out: zip dictionary name length */
+ const char* data, /*!< in: zip dictionary data */
+ ulint* data_len);
+ /*!< in/out: zip dictionary data length */
+
+/** Drops an existing compression dictionary. */
+static
+handler_drop_zip_dict_result
+innobase_drop_zip_dict(
+ handlerton* hton, /*!< in: innobase handlerton */
+ THD* thd, /*!< in: handle to the MySQL thread */
+ const char* name, /*!< in: zip dictionary name */
+ ulint* name_len);
+ /*!< in/out: zip dictionary name length */
+#endif
/*************************************************************//**
Checks if buffer pool is big enough to enable backoff algorithm.
InnoDB empty free list algorithm backoff requires free pages
@@ -3813,6 +3845,10 @@ innobase_init(
innodb_remember_check_sysvar_funcs();
+#ifdef HAVE_PERCONA_COMPRESSED_COLUMNS
+ innobase_hton->create_zip_dict = innobase_create_zip_dict;
+ innobase_hton->drop_zip_dict = innobase_drop_zip_dict;
+#endif
ut_a(DATA_MYSQL_TRUE_VARCHAR == (ulint)MYSQL_TYPE_VARCHAR);
#ifndef DBUG_OFF
@@ -4577,6 +4613,90 @@ innobase_purge_changed_page_bitmaps(
return (my_bool)log_online_purge_changed_page_bitmaps(lsn);
}
+#ifdef HAVE_PERCONA_COMPRESSED_COLUMNS
+/** Creates a new compression dictionary. */
+static
+handler_create_zip_dict_result
+innobase_create_zip_dict(
+ handlerton* hton, /*!< in: innobase handlerton */
+ THD* thd, /*!< in: handle to the MySQL thread */
+ const char* name, /*!< in: zip dictionary name */
+ ulint* name_len,
+ /*!< in/out: zip dictionary name length */
+ const char* data, /*!< in: zip dictionary data */
+ ulint* data_len)
+ /*!< in/out: zip dictionary data length */
+{
+ handler_create_zip_dict_result result =
+ HA_CREATE_ZIP_DICT_UNKNOWN_ERROR;
+
+ DBUG_ENTER("innobase_create_zip_dict");
+ DBUG_ASSERT(hton == innodb_hton_ptr);
+
+ if (UNIV_UNLIKELY(high_level_read_only)) {
+ DBUG_RETURN(HA_CREATE_ZIP_DICT_READ_ONLY);
+ }
+
+ if (UNIV_UNLIKELY(*name_len > ZIP_DICT_MAX_NAME_LENGTH)) {
+ *name_len = ZIP_DICT_MAX_NAME_LENGTH;
+ DBUG_RETURN(HA_CREATE_ZIP_DICT_NAME_TOO_LONG);
+ }
+
+ if (UNIV_UNLIKELY(*data_len > ZIP_DICT_MAX_DATA_LENGTH)) {
+ *data_len = ZIP_DICT_MAX_DATA_LENGTH;
+ DBUG_RETURN(HA_CREATE_ZIP_DICT_DATA_TOO_LONG);
+ }
+
+ switch (dict_create_zip_dict(name, *name_len, data, *data_len)) {
+ case DB_SUCCESS:
+ result = HA_CREATE_ZIP_DICT_OK;
+ break;
+ case DB_DUPLICATE_KEY:
+ result = HA_CREATE_ZIP_DICT_ALREADY_EXISTS;
+ break;
+ default:
+ ut_ad(0);
+ result = HA_CREATE_ZIP_DICT_UNKNOWN_ERROR;
+ }
+ DBUG_RETURN(result);
+}
+
+/** Drops an existing compression dictionary. */
+static
+handler_drop_zip_dict_result
+innobase_drop_zip_dict(
+ handlerton* hton, /*!< in: innobase handlerton */
+ THD* thd, /*!< in: handle to the MySQL thread */
+ const char* name, /*!< in: zip dictionary name */
+ ulint* name_len)
+ /*!< in/out: zip dictionary name length */
+{
+ handler_drop_zip_dict_result result = HA_DROP_ZIP_DICT_UNKNOWN_ERROR;
+
+ DBUG_ENTER("innobase_drop_zip_dict");
+ DBUG_ASSERT(hton == innodb_hton_ptr);
+
+ if (UNIV_UNLIKELY(high_level_read_only)) {
+ DBUG_RETURN(HA_DROP_ZIP_DICT_READ_ONLY);
+ }
+
+ switch (dict_drop_zip_dict(name, *name_len)) {
+ case DB_SUCCESS:
+ result = HA_DROP_ZIP_DICT_OK;
+ break;
+ case DB_RECORD_NOT_FOUND:
+ result = HA_DROP_ZIP_DICT_DOES_NOT_EXIST;
+ break;
+ case DB_ROW_IS_REFERENCED:
+ result = HA_DROP_ZIP_DICT_IS_REFERENCED;
+ break;
+ default:
+ ut_ad(0);
+ result = HA_DROP_ZIP_DICT_UNKNOWN_ERROR;
+ }
+ DBUG_RETURN(result);
+}
+#endif
/*****************************************************************//**
Check whether this is a fake change transaction.
@return TRUE if a fake change transaction */
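
For orientation: when the server is built with HAVE_PERCONA_COMPRESSED_COLUMNS, the two handlerton hooks above back the Percona Server compressed-columns grammar, which (Percona Server 5.6 syntax, assumed here and not defined in this diff) looks roughly like:

    CREATE COMPRESSION_DICTIONARY numbers ('0123456789');

    CREATE TABLE t1 (
      id INT PRIMARY KEY,
      payload BLOB COLUMN_FORMAT COMPRESSED
              WITH COMPRESSION_DICTIONARY numbers
    ) ENGINE=InnoDB;

    DROP COMPRESSION_DICTIONARY numbers;

The hooks clamp *name_len / *data_len to the maximum before returning the _TOO_LONG errors, presumably so the SQL layer can cite the limit in its error message.
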
@@ -6114,6 +6234,88 @@ func_exit:
DBUG_RETURN(ret);
}
+/** This function checks if all the compression dictionaries referenced
+in table->fields exist in SYS_ZIP_DICT InnoDB system table.
+@return true if all referenced dictionaries exist */
+UNIV_INTERN
+bool
+innobase_check_zip_dicts(
+ const TABLE* table, /*!< in: table in MySQL data
+ dictionary */
+ ulint* dict_ids, /*!< out: identified zip dict ids
+ (at least n_fields long) */
+ trx_t* trx, /*!< in: transaction */
+ const char** err_dict_name) /*!< out: the name of the
+ zip_dict which does not exist. */
+{
+ DBUG_ENTER("innobase_check_zip_dicts");
+
+ bool res = true;
+#ifdef HAVE_PERCONA_COMPRESSED_COLUMNS
+ dberr_t err = DB_SUCCESS;
+ const size_t n_fields = table->s->fields;
+
+ Field* field_ptr;
+ for (size_t field_idx = 0; err == DB_SUCCESS && field_idx < n_fields;
+ ++field_idx)
+ {
+ field_ptr = table->field[field_idx];
+ if (field_ptr->has_associated_compression_dictionary()) {
+ err = dict_create_get_zip_dict_id_by_name(
+ field_ptr->zip_dict_name.str,
+ field_ptr->zip_dict_name.length,
+ &dict_ids[field_idx],
+ trx);
+ ut_a(err == DB_SUCCESS || err == DB_RECORD_NOT_FOUND);
+ }
+ else {
+ dict_ids[field_idx] = ULINT_UNDEFINED;
+ }
+
+ }
+
+ if (err != DB_SUCCESS) {
+ res = false;
+ *err_dict_name = field_ptr->zip_dict_name.str;
+ }
+
+#endif
+ DBUG_RETURN(res);
+}
+
+/** This function creates compression dictionary references in
+SYS_ZIP_DICT_COLS InnoDB system table for table_id based on info
+in table->fields and provided zip dict ids. */
+UNIV_INTERN
+void
+innobase_create_zip_dict_references(
+ const TABLE* table, /*!< in: table in MySQL data
+ dictionary */
+ table_id_t ib_table_id, /*!< in: table ID in Innodb data
+ dictionary */
+ ulint* zip_dict_ids, /*!< in: zip dict ids
+ (at least n_fields long) */
+ trx_t* trx) /*!< in: transaction */
+{
+ DBUG_ENTER("innobase_create_zip_dict_references");
+
+ dberr_t err = DB_SUCCESS;
+ const size_t n_fields = table->s->fields;
+
+ for (size_t field_idx = 0; err == DB_SUCCESS && field_idx < n_fields;
+ ++field_idx)
+ {
+ if (zip_dict_ids[field_idx] != ULINT_UNDEFINED) {
+ err = dict_create_add_zip_dict_reference(ib_table_id,
+ table->field[field_idx]->field_index,
+ zip_dict_ids[field_idx], trx);
+ ut_a(err == DB_SUCCESS);
+ }
+ }
+
+ DBUG_VOID_RETURN;
+}
+
/*******************************************************************//**
This function uses index translation table to quickly locate the
requested index structure.
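
The contract between the two helpers above: innobase_check_zip_dicts() fills dict_ids with one slot per TABLE field, using ULINT_UNDEFINED for columns without an associated dictionary, and innobase_create_zip_dict_references() later inserts one SYS_ZIP_DICT_COLS row per defined slot. For a hypothetical three-column table whose third column is the only compressed one:

    dict_ids[0] == ULINT_UNDEFINED  /* plain column, no reference row */
    dict_ids[1] == ULINT_UNDEFINED  /* plain column, no reference row */
    dict_ids[2] == 7                /* hypothetical dict id: one
                                       SYS_ZIP_DICT_COLS row (table_id, 2, 7) */
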
@@ -7387,6 +7589,7 @@ wsrep_store_key_val_for_row(
format) */
uint buff_len,/*!< in: buffer length */
const uchar* record,
+ row_prebuilt_t* prebuilt, /*!< in: InnoDB prebuilt struct */
ibool* key_is_null)/*!< out: full key was null */
{
KEY* key_info = table->key_info + keynr;
@@ -7543,8 +7746,17 @@ wsrep_store_key_val_for_row(
blob_data = row_mysql_read_blob_ref(&blob_len,
(byte*) (record
- + (ulint)get_field_offset(table, field)),
- (ulint) field->pack_length());
+ + (ulint) get_field_offset(table, field)),
+ (ulint) field->pack_length(),
+#ifdef HAVE_PERCONA_COMPRESSED_COLUMNS
+ field->column_format() ==
+ COLUMN_FORMAT_TYPE_COMPRESSED,
+ reinterpret_cast<const byte*>(
+ field->zip_dict_data.str),
+ field->zip_dict_data.length, prebuilt);
+#else
+ 0, 0, 0, prebuilt);
+#endif
true_len = blob_len;
@@ -7839,7 +8051,16 @@ ha_innobase::store_key_val_for_row(
blob_data = row_mysql_read_blob_ref(&blob_len,
(byte*) (record
+ (ulint) get_field_offset(table, field)),
- (ulint) field->pack_length());
+ (ulint) field->pack_length(),
+#ifdef HAVE_PERCONA_COMPRESSED_COLUMNS
+ field->column_format() ==
+ COLUMN_FORMAT_TYPE_COMPRESSED,
+ reinterpret_cast<const byte*>(
+ field->zip_dict_data.str),
+ field->zip_dict_data.length, prebuilt);
+#else
+ 0, 0, 0, prebuilt);
+#endif
true_len = blob_len;
@@ -8104,6 +8325,14 @@ build_template_field(
templ->mbminlen = dict_col_get_mbminlen(col);
templ->mbmaxlen = dict_col_get_mbmaxlen(col);
templ->is_unsigned = col->prtype & DATA_UNSIGNED;
+#ifdef HAVE_PERCONA_COMPRESSED_COLUMNS
+ templ->compressed = (field->column_format()
+ == COLUMN_FORMAT_TYPE_COMPRESSED);
+ templ->zip_dict_data = field->zip_dict_data;
+#else
+ templ->compressed = 0;
+ templ->zip_dict_data = null_lex_cstr;
+#endif
if (!dict_index_is_clust(index)
&& templ->rec_field_no == ULINT_UNDEFINED) {
@@ -9019,8 +9248,11 @@ calc_row_difference(
switch (col_type) {
case DATA_BLOB:
- o_ptr = row_mysql_read_blob_ref(&o_len, o_ptr, o_len);
- n_ptr = row_mysql_read_blob_ref(&n_len, n_ptr, n_len);
+ /* Do not compress the BLOB columns while comparing */
+ o_ptr = row_mysql_read_blob_ref(&o_len, o_ptr, o_len,
+ false, 0, 0, prebuilt);
+ n_ptr = row_mysql_read_blob_ref(&n_len, n_ptr, n_len,
+ false, 0, 0, prebuilt);
break;
@@ -9090,7 +9322,17 @@ calc_row_difference(
TRUE,
new_mysql_row_col,
col_pack_len,
- dict_table_is_comp(prebuilt->table));
+ dict_table_is_comp(prebuilt->table),
+#ifdef HAVE_PERCONA_COMPRESSED_COLUMNS
+ field->column_format() ==
+ COLUMN_FORMAT_TYPE_COMPRESSED,
+ reinterpret_cast<const byte*>(
+ field->zip_dict_data.str),
+ field->zip_dict_data.length,
+#else
+ 0, 0, 0,
+#endif
+ prebuilt);
dfield_copy(&ufield->new_val, &dfield);
} else {
dfield_set_null(&ufield->new_val);
@@ -9262,7 +9504,8 @@ wsrep_calc_row_hash(
switch (col_type) {
case DATA_BLOB:
- ptr = row_mysql_read_blob_ref(&len, ptr, len);
+ ptr = row_mysql_read_blob_ref(&len, ptr, len,
+ false, 0, 0, prebuilt);
break;
@@ -11074,7 +11317,7 @@ ha_innobase::wsrep_append_keys(
len = wsrep_store_key_val_for_row(
thd, table, 0, key, WSREP_MAX_SUPPORTED_KEY_LENGTH,
- record0, &is_null);
+ record0, prebuilt, &is_null);
if (!is_null) {
rcode = wsrep_append_key(
@@ -11128,7 +11371,7 @@ ha_innobase::wsrep_append_keys(
len = wsrep_store_key_val_for_row(
thd, table, i, key0,
WSREP_MAX_SUPPORTED_KEY_LENGTH,
- record0, &is_null);
+ record0, prebuilt, &is_null);
if (!is_null) {
rcode = wsrep_append_key(
thd, trx, table_share, table,
@@ -11147,7 +11390,7 @@ ha_innobase::wsrep_append_keys(
len = wsrep_store_key_val_for_row(
thd, table, i, key1,
WSREP_MAX_SUPPORTED_KEY_LENGTH,
- record1, &is_null);
+ record1, prebuilt, &is_null);
if (!is_null && memcmp(key0, key1, len)) {
rcode = wsrep_append_key(
thd, trx, table_share,
@@ -11324,6 +11567,7 @@ create_table_def(
ulint unsigned_type;
ulint binary_type;
ulint long_true_varchar;
+ ulint compressed;
ulint charset_no;
ulint i;
ulint doc_id_col = 0;
@@ -11473,6 +11717,13 @@ create_table_def(
}
}
+ /* Check if the field has the COMPRESSED attribute */
+ compressed = 0;
+ if (field->column_format() ==
+ COLUMN_FORMAT_TYPE_COMPRESSED) {
+ compressed = DATA_COMPRESSED;
+ }
+
/* First check whether the column to be added has a
system reserved name. */
if (dict_col_name_is_reserved(field->field_name)){
@@ -11493,7 +11744,8 @@ err_col:
dtype_form_prtype(
(ulint) field->type()
| nulls_allowed | unsigned_type
- | binary_type | long_true_varchar,
+ | binary_type | long_true_varchar
+ | compressed,
charset_no),
col_len);
}
@@ -12543,6 +12795,9 @@ ha_innobase::create(
fil_encryption_t encrypt = (fil_encryption_t)options->encryption;
uint key_id = (uint)options->encryption_key_id;
+ mem_heap_t* heap = 0;
+ ulint* zip_dict_ids = 0;
+
DBUG_ENTER("ha_innobase::create");
DBUG_ASSERT(thd != NULL);
@@ -12639,6 +12894,19 @@ ha_innobase::create(
row_mysql_lock_data_dictionary(trx);
+ heap = mem_heap_create(form->s->fields * sizeof(ulint));
+ zip_dict_ids = static_cast<ulint*>(
+ mem_heap_alloc(heap, form->s->fields * sizeof(ulint)));
+
+ const char* err_zip_dict_name = 0;
+ if (!innobase_check_zip_dicts(form, zip_dict_ids,
+ trx, &err_zip_dict_name)) {
+ error = -1;
+ my_error(ER_COMPRESSION_DICTIONARY_DOES_NOT_EXIST,
+ MYF(0), err_zip_dict_name);
+ goto cleanup;
+ }
+
error = create_table_def(trx, form, norm_name, temp_path,
remote_path, flags, flags2, encrypt, key_id);
if (error) {
@@ -12746,6 +13014,22 @@ ha_innobase::create(
dict_table_get_all_fts_indexes(innobase_table, fts->indexes);
}
+ /*
+ Adding compression dictionary <-> compressed table column links
+ to the SYS_ZIP_DICT_COLS table.
+ */
+ ut_a(zip_dict_ids != 0);
+ {
+ dict_table_t* local_table = dict_table_open_on_name(
+ norm_name, TRUE, FALSE, DICT_ERR_IGNORE_NONE);
+
+ ut_a(local_table);
+ table_id_t table_id = local_table->id;
+ dict_table_close(local_table, TRUE, FALSE);
+ innobase_create_zip_dict_references(form,
+ table_id, zip_dict_ids, trx);
+ }
+
stmt = innobase_get_stmt(thd, &stmt_len);
if (stmt) {
@@ -12862,6 +13146,9 @@ ha_innobase::create(
trx_free_for_mysql(trx);
+ if (heap != 0)
+ mem_heap_free(heap);
+
DBUG_RETURN(0);
cleanup:
@@ -12871,6 +13158,9 @@ cleanup:
trx_free_for_mysql(trx);
+ if (heap != 0)
+ mem_heap_free(heap);
+
DBUG_RETURN(error);
}
@@ -14054,6 +14344,14 @@ ha_innobase::info_low(
if (dict_stats_is_persistent_enabled(ib_table)) {
if (is_analyze) {
+
+ /* If this table is already queued for
+ background analyze, remove it from the
+ queue as we are about to do the same */
+ dict_mutex_enter_for_mysql();
+ dict_stats_recalc_pool_del(ib_table);
+ dict_mutex_exit_for_mysql();
+
opt = DICT_STATS_RECALC_PERSISTENT;
} else {
/* This is e.g. 'SHOW INDEXES', fetch
@@ -15274,6 +15572,11 @@ ha_innobase::extra(
if (prebuilt->blob_heap) {
row_mysql_prebuilt_free_blob_heap(prebuilt);
}
+
+ if (prebuilt->compress_heap) {
+ row_mysql_prebuilt_free_compress_heap(prebuilt);
+ }
+
break;
case HA_EXTRA_RESET_STATE:
reset_template();
@@ -15325,6 +15628,10 @@ ha_innobase::reset()
row_mysql_prebuilt_free_blob_heap(prebuilt);
}
+ if (prebuilt->compress_heap) {
+ row_mysql_prebuilt_free_compress_heap(prebuilt);
+ }
+
reset_template();
ds_mrr.dsmrr_close();
@@ -15531,7 +15838,11 @@ ha_innobase::external_lock(
&& lock_type == F_WRLCK)
|| thd_sql_command(thd) == SQLCOM_CREATE_INDEX
|| thd_sql_command(thd) == SQLCOM_DROP_INDEX
- || thd_sql_command(thd) == SQLCOM_DELETE)) {
+ || thd_sql_command(thd) == SQLCOM_DELETE
+ || thd_sql_command(thd) ==
+ SQLCOM_CREATE_COMPRESSION_DICTIONARY
+ || thd_sql_command(thd) ==
+ SQLCOM_DROP_COMPRESSION_DICTIONARY)) {
if (thd_sql_command(thd) == SQLCOM_CREATE_TABLE)
{
@@ -16299,7 +16610,9 @@ ha_innobase::store_lock(
&& lock_type <= TL_WRITE))
|| sql_command == SQLCOM_CREATE_INDEX
|| sql_command == SQLCOM_DROP_INDEX
- || sql_command == SQLCOM_DELETE)) {
+ || sql_command == SQLCOM_DELETE
+ || sql_command == SQLCOM_CREATE_COMPRESSION_DICTIONARY
+ || sql_command == SQLCOM_DROP_COMPRESSION_DICTIONARY)) {
ib_senderrf(trx->mysql_thd,
IB_LOG_LEVEL_WARN, ER_READ_ONLY_MODE);
@@ -17270,6 +17583,84 @@ ha_innobase::check_if_incompatible_data(
return(COMPATIBLE_DATA_YES);
}
+/** This function reads zip dict-related info from SYS_ZIP_DICT
+and SYS_ZIP_DICT_COLS for all columns marked with
+COLUMN_FORMAT_TYPE_COMPRESSED flag and updates
+zip_dict_name / zip_dict_data for those which have associated
+compression dictionaries.
+*/
+UNIV_INTERN
+void
+ha_innobase::update_field_defs_with_zip_dict_info()
+{
+ DBUG_ENTER("update_field_defs_with_zip_dict_info");
+ ut_ad(!mutex_own(&dict_sys->mutex));
+
+ char norm_name[FN_REFLEN];
+ normalize_table_name(norm_name, table_share->normalized_path.str);
+
+ dict_table_t* ib_table = dict_table_open_on_name(
+ norm_name, FALSE, FALSE, DICT_ERR_IGNORE_NONE);
+
+ /* if dict_table_open_on_name() returns NULL, then it means that
+ TABLE_SHARE is populated for a table being created and we can
+ skip filling zip dict info here */
+ if (ib_table == 0)
+ DBUG_VOID_RETURN;
+
+#ifdef HAVE_PERCONA_COMPRESSED_COLUMNS
+ table_id_t ib_table_id = ib_table->id;
+ dict_table_close(ib_table, FALSE, FALSE);
+ Field* field;
+ for (uint i = 0; i < table_share->fields; ++i) {
+ field = table_share->field[i];
+ if (field->column_format() ==
+ COLUMN_FORMAT_TYPE_COMPRESSED) {
+ bool reference_found = false;
+ ulint dict_id = 0;
+ switch (dict_get_dictionary_id_by_key(ib_table_id, i,
+ &dict_id)) {
+ case DB_SUCCESS:
+ reference_found = true;
+ break;
+ case DB_RECORD_NOT_FOUND:
+ reference_found = false;
+ break;
+ default:
+ ut_error;
+ }
+ if (reference_found) {
+ char* local_name = 0;
+ ulint local_name_len = 0;
+ char* local_data = 0;
+ ulint local_data_len = 0;
+ if (dict_get_dictionary_info_by_id(dict_id,
+ &local_name, &local_name_len,
+ &local_data, &local_data_len) !=
+ DB_SUCCESS) {
+ ut_error;
+ }
+ else {
+ field->zip_dict_name.str =
+ local_name;
+ field->zip_dict_name.length =
+ local_name_len;
+ field->zip_dict_data.str =
+ local_data;
+ field->zip_dict_data.length =
+ local_data_len;
+ }
+ }
+ else {
+ field->zip_dict_name = null_lex_cstr;
+ field->zip_dict_data = null_lex_cstr;
+ }
+ }
+ }
+#endif
+ DBUG_VOID_RETURN;
+}
+
/****************************************************************//**
Update the system variable innodb_io_capacity_max using the "saved"
value. This function is registered as a callback with MySQL. */
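
A hedged sketch of the lookup pair that update_field_defs_with_zip_dict_info() drives, matching the declarations added to dict0dict.h below (the variables are illustrative; on success the name/data strings are allocated by the dictionary layer and must eventually be released with mem_free()):

    ulint dict_id;
    if (dict_get_dictionary_id_by_key(table_id, col_pos, &dict_id)
        == DB_SUCCESS) {
        char* name; ulint name_len;
        char* data; ulint data_len;
        if (dict_get_dictionary_info_by_id(dict_id, &name, &name_len,
                                           &data, &data_len) == DB_SUCCESS) {
            /* ... attach name/data to the field definition ... */
            mem_free(name);
            mem_free(data);
        }
    }
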
@@ -17838,7 +18229,12 @@ innodb_internal_table_update(
my_free(old);
}
- fts_internal_tbl_name = *(char**) var_ptr;
+ fts_internal_tbl_name2 = *(char**) var_ptr;
+ if (fts_internal_tbl_name2 == NULL) {
+ fts_internal_tbl_name = const_cast<char*>("default");
+ } else {
+ fts_internal_tbl_name = fts_internal_tbl_name2;
+ }
}
/****************************************************************//**
@@ -20652,7 +21048,7 @@ static MYSQL_SYSVAR_BOOL(disable_sort_file_cache, srv_disable_sort_file_cache,
"Whether to disable OS system file cache for sort I/O",
NULL, NULL, FALSE);
-static MYSQL_SYSVAR_STR(ft_aux_table, fts_internal_tbl_name,
+static MYSQL_SYSVAR_STR(ft_aux_table, fts_internal_tbl_name2,
PLUGIN_VAR_NOCMDARG,
"FTS internal auxiliary table to be checked",
innodb_internal_table_validate,
@@ -21152,7 +21548,7 @@ static MYSQL_SYSVAR_ENUM(corrupt_table_action, srv_pass_corrupt_table,
"Warn corruptions of user tables as 'corrupt table' instead of not crashing itself, "
"when used with file_per_table. "
"All file io for the datafile after detected as corrupt are disabled, "
- "except for the deletion",
+ "except for the deletion.",
NULL, NULL, 0, &corrupt_table_action_typelib);
static MYSQL_SYSVAR_BOOL(locking_fake_changes, srv_fake_changes_locks,
@@ -21167,6 +21563,21 @@ static MYSQL_SYSVAR_BOOL(use_stacktrace, srv_use_stacktrace,
"Print stacktrace on long semaphore wait (off by default supported only on linux)",
NULL, NULL, FALSE);
+#ifdef HAVE_PERCONA_COMPRESSED_COLUMNS
+static MYSQL_SYSVAR_UINT(compressed_columns_zip_level,
+ srv_compressed_columns_zip_level,
+ PLUGIN_VAR_RQCMDARG,
+ "Compression level used for compressed columns. 0 is no compression"
+ ", 1 is fastest and 9 is best compression. Default is 6.",
+ NULL, NULL, DEFAULT_COMPRESSION_LEVEL, 0, 9, 0);
+
+static MYSQL_SYSVAR_ULONG(compressed_columns_threshold,
+ srv_compressed_columns_threshold,
+ PLUGIN_VAR_RQCMDARG,
+ "Compress column data if its length exceeds this value. Default is 96",
+ NULL, NULL, 96, 1, ~0UL, 0);
+#endif
+
static MYSQL_SYSVAR_UINT(compression_level, page_zip_level,
PLUGIN_VAR_RQCMDARG,
"Compression level used for zlib compression. 0 is no compression"
@@ -21544,6 +21955,10 @@ static struct st_mysql_sys_var* innobase_system_variables[]= {
MYSQL_SYSVAR(locking_fake_changes),
MYSQL_SYSVAR(tmpdir),
MYSQL_SYSVAR(use_stacktrace),
+#ifdef HAVE_PERCONA_COMPRESSED_COLUMNS
+ MYSQL_SYSVAR(compressed_columns_zip_level),
+ MYSQL_SYSVAR(compressed_columns_threshold),
+#endif
MYSQL_SYSVAR(force_primary_key),
MYSQL_SYSVAR(fatal_semaphore_wait_threshold),
/* Table page compression feature */
@@ -21593,6 +22008,10 @@ maria_declare_plugin(xtradb)
i_s_xtradb_read_view,
i_s_xtradb_internal_hash_tables,
i_s_xtradb_rseg,
+#ifdef HAVE_PERCONA_COMPRESSED_COLUMNS
+i_s_xtradb_zip_dict,
+i_s_xtradb_zip_dict_cols,
+#endif
i_s_innodb_trx,
i_s_innodb_locks,
i_s_innodb_lock_waits,
diff --git a/storage/xtradb/handler/ha_innodb.h b/storage/xtradb/handler/ha_innodb.h
index 7b0d20aec72..e6026f81c99 100644
--- a/storage/xtradb/handler/ha_innodb.h
+++ b/storage/xtradb/handler/ha_innodb.h
@@ -315,8 +315,17 @@ class ha_innobase: public handler
void set_partition_owner_stats(ha_statistics *stats);
bool check_if_incompatible_data(HA_CREATE_INFO *info,
uint table_changes);
+
bool check_if_supported_virtual_columns(void) { return TRUE; }
+ /** This function reads zip dict-related info from SYS_ZIP_DICT
+ and SYS_ZIP_DICT_COLS for all columns marked with
+ COLUMN_FORMAT_TYPE_COMPRESSED flag and updates
+ zip_dict_name / zip_dict_data for those which have associated
+ compression dictionaries.
+ */
+ virtual void update_field_defs_with_zip_dict_info();
+
private:
/** Builds a 'template' to the prebuilt struct.
@@ -732,3 +741,31 @@ ib_push_frm_error(
TABLE* table, /*!< in: MySQL table */
ulint n_keys, /*!< in: InnoDB #keys */
bool push_warning); /*!< in: print warning ? */
+
+/** This function checks if all the compression dictionaries referenced
+in table->fields exist in SYS_ZIP_DICT InnoDB system table.
+@return true if all referenced dictionaries exist */
+UNIV_INTERN
+bool
+innobase_check_zip_dicts(
+ const TABLE* table, /*!< in: table in MySQL data
+ dictionary */
+ ulint* dict_ids, /*!< out: identified zip dict ids
+ (at least n_fields long) */
+ trx_t* trx, /*!< in: transaction */
+ const char** err_dict_name); /*!< out: the name of the
+ zip_dict which does not exist. */
+
+/** This function creates compression dictionary references in
+SYS_ZIP_DICT_COLS InnoDB system table for table_id based on info
+in table->fields and provided zip dict ids. */
+UNIV_INTERN
+void
+innobase_create_zip_dict_references(
+ const TABLE* table, /*!< in: table in MySQL data
+ dictionary */
+ table_id_t ib_table_id, /*!< in: table ID in Innodb data
+ dictionary */
+ ulint* zip_dict_ids, /*!< in: zip dict ids
+ (at least n_fields long) */
+ trx_t* trx); /*!< in: transaction */
diff --git a/storage/xtradb/handler/handler0alter.cc b/storage/xtradb/handler/handler0alter.cc
index 71bcfd31831..6c394cba59b 100644
--- a/storage/xtradb/handler/handler0alter.cc
+++ b/storage/xtradb/handler/handler0alter.cc
@@ -22,6 +22,11 @@ this program; if not, write to the Free Software Foundation, Inc.,
Smart ALTER TABLE
*******************************************************/
+#ifndef HAVE_PERCONA_COMPRESSED_COLUMNS
+#define COLUMN_FORMAT_TYPE_COMPRESSED 0xBADF00D
+#define ER_COMPRESSION_DICTIONARY_DOES_NOT_EXIST 0xDEADFACE
+#endif
+
#include <my_global.h>
#include <unireg.h>
#include <mysqld_error.h>
@@ -214,7 +219,10 @@ innobase_need_rebuild(
const Alter_inplace_info* ha_alter_info,
const TABLE* altered_table)
{
- if (ha_alter_info->handler_flags
+ Alter_inplace_info::HA_ALTER_FLAGS alter_inplace_flags =
+ ha_alter_info->handler_flags & ~(INNOBASE_INPLACE_IGNORE);
+
+ if (alter_inplace_flags
== Alter_inplace_info::CHANGE_CREATE_OPTION
&& !(ha_alter_info->create_info->used_fields
& (HA_CREATE_USED_ROW_FORMAT
@@ -1205,6 +1213,15 @@ innobase_col_to_mysql(
field->reset();
if (field->type() == MYSQL_TYPE_VARCHAR) {
+ if (field->column_format() ==
+ COLUMN_FORMAT_TYPE_COMPRESSED) {
+ /* Skip compressed varchar column when
+ reporting an erroneous row
+ during index creation or table rebuild. */
+ field->set_null();
+ break;
+ }
+
/* This is a >= 5.0.3 type true VARCHAR. Store the
length of the data to the first byte or the first
two bytes of dest. */
@@ -2492,7 +2509,8 @@ innobase_build_col_map_add(
mem_heap_t* heap,
dfield_t* dfield,
const Field* field,
- ulint comp)
+ ulint comp,
+ row_prebuilt_t* prebuilt)
{
if (field->is_real_null()) {
dfield_set_null(dfield);
@@ -2504,7 +2522,14 @@ innobase_build_col_map_add(
byte* buf = static_cast<byte*>(mem_heap_alloc(heap, size));
row_mysql_store_col_in_innobase_format(
- dfield, buf, TRUE, field->ptr, size, comp);
+ dfield, buf, TRUE, field->ptr, size, comp,
+#ifdef HAVE_PERCONA_COMPRESSED_COLUMNS
+ field->column_format() == COLUMN_FORMAT_TYPE_COMPRESSED,
+ reinterpret_cast<const byte*>(field->zip_dict_data.str),
+ field->zip_dict_data.length, prebuilt);
+#else
+ 0, 0, 0, prebuilt);
+#endif
}
/** Construct the translation table for reordering, dropping or
@@ -2529,7 +2554,8 @@ innobase_build_col_map(
const dict_table_t* new_table,
const dict_table_t* old_table,
dtuple_t* add_cols,
- mem_heap_t* heap)
+ mem_heap_t* heap,
+ row_prebuilt_t* prebuilt)
{
uint old_i, old_innobase_i;
DBUG_ENTER("innobase_build_col_map");
@@ -2580,7 +2606,7 @@ innobase_build_col_map(
innobase_build_col_map_add(
heap, dtuple_get_nth_field(add_cols, i),
altered_table->field[sql_idx],
- dict_table_is_comp(new_table));
+ dict_table_is_comp(new_table), prebuilt);
found_col:
i++;
sql_idx++;
@@ -2744,7 +2770,8 @@ prepare_inplace_alter_table_dict(
ulint flags2,
ulint fts_doc_id_col,
bool add_fts_doc_id,
- bool add_fts_doc_id_idx)
+ bool add_fts_doc_id_idx,
+ row_prebuilt_t* prebuilt)
{
bool dict_locked = false;
ulint* add_key_nums; /* MySQL key numbers */
@@ -2756,6 +2783,7 @@ prepare_inplace_alter_table_dict(
ulint num_fts_index;
ha_innobase_inplace_ctx*ctx;
uint sql_idx;
+ ulint* zip_dict_ids = 0;
DBUG_ENTER("prepare_inplace_alter_table_dict");
@@ -2902,6 +2930,18 @@ prepare_inplace_alter_table_dict(
mode = crypt_data->encryption;
}
+ zip_dict_ids = static_cast<ulint*>(
+ mem_heap_alloc(ctx->heap,
+ altered_table->s->fields * sizeof(ulint)));
+
+ const char* err_zip_dict_name = 0;
+ if (!innobase_check_zip_dicts(altered_table, zip_dict_ids,
+ ctx->trx, &err_zip_dict_name)) {
+ my_error(ER_COMPRESSION_DICTIONARY_DOES_NOT_EXIST,
+ MYF(0), err_zip_dict_name);
+ goto new_clustered_failed;
+ }
+
if (innobase_check_foreigns(
ha_alter_info, altered_table, old_table,
user_table, ctx->drop_fk, ctx->num_to_drop_fk)) {
@@ -3008,6 +3048,12 @@ prepare_inplace_alter_table_dict(
}
}
+ if (field->column_format() ==
+ COLUMN_FORMAT_TYPE_COMPRESSED) {
+ field_type |= DATA_COMPRESSED;
+ }
+
if (dict_col_name_is_reserved(field->field_name)) {
dict_mem_table_free(ctx->new_table);
my_error(ER_WRONG_COLUMN_NAME, MYF(0),
@@ -3087,7 +3133,7 @@ prepare_inplace_alter_table_dict(
ctx->col_map = innobase_build_col_map(
ha_alter_info, altered_table, old_table,
ctx->new_table, user_table,
- add_cols, ctx->heap);
+ add_cols, ctx->heap, prebuilt);
ctx->add_cols = add_cols;
} else {
DBUG_ASSERT(!innobase_need_rebuild(ha_alter_info, old_table));
@@ -3265,6 +3311,15 @@ op_ok:
DBUG_ASSERT(error == DB_SUCCESS);
+ /*
+ Adding compression dictionary <-> compressed table column links
+ to the SYS_ZIP_DICT_COLS table.
+ */
+ if (zip_dict_ids != 0) {
+ innobase_create_zip_dict_references(altered_table,
+ ctx->trx->table_id, zip_dict_ids, ctx->trx);
+ }
+
/* Commit the data dictionary transaction in order to release
the table locks on the system tables. This means that if
MySQL crashes while creating a new primary key inside
@@ -3999,7 +4054,7 @@ err_exit:
}
if (!(ha_alter_info->handler_flags & INNOBASE_ALTER_DATA)
- || (ha_alter_info->handler_flags
+ || ((ha_alter_info->handler_flags & ~INNOBASE_INPLACE_IGNORE)
== Alter_inplace_info::CHANGE_CREATE_OPTION
&& !innobase_need_rebuild(ha_alter_info, table))) {
@@ -4133,7 +4188,7 @@ found_col:
table_share->table_name.str,
flags, flags2,
fts_doc_col_no, add_fts_doc_id,
- add_fts_doc_id_idx));
+ add_fts_doc_id_idx, prebuilt));
}
/** Alter the table structure in-place with operations
@@ -4173,7 +4228,7 @@ ok_exit:
DBUG_RETURN(false);
}
- if (ha_alter_info->handler_flags
+ if ((ha_alter_info->handler_flags & ~INNOBASE_INPLACE_IGNORE)
== Alter_inplace_info::CHANGE_CREATE_OPTION
&& !innobase_need_rebuild(ha_alter_info, table)) {
goto ok_exit;
diff --git a/storage/xtradb/handler/i_s.cc b/storage/xtradb/handler/i_s.cc
index d96ff377b4a..420dff83a40 100644
--- a/storage/xtradb/handler/i_s.cc
+++ b/storage/xtradb/handler/i_s.cc
@@ -3949,6 +3949,8 @@ i_s_fts_config_fill(
DBUG_RETURN(0);
}
+ DEBUG_SYNC_C("i_s_fts_config_fille_check");
+
fields = table->field;
/* Prevent DDL to drop fts aux tables. */
diff --git a/storage/xtradb/handler/xtradb_i_s.cc b/storage/xtradb/handler/xtradb_i_s.cc
index 96e31b94470..39f2efb90db 100644
--- a/storage/xtradb/handler/xtradb_i_s.cc
+++ b/storage/xtradb/handler/xtradb_i_s.cc
@@ -33,9 +33,11 @@ this program; if not, write to the Free Software Foundation, Inc.,
#include <read0i_s.h>
#include <trx0i_s.h>
#include "srv0start.h" /* for srv_was_started */
+#include <btr0pcur.h> /* btr_pcur_t */
#include <btr0sea.h> /* btr_search_sys */
#include <log0recv.h> /* recv_sys */
#include <fil0fil.h>
+#include <dict0crea.h> /* for ZIP_DICT_MAX_* constants */
/* for XTRADB_RSEG table */
#include "trx0trx.h" /* for TRX_QUE_STATE_STR_MAX_LEN */
@@ -44,6 +46,30 @@ this program; if not, write to the Free Software Foundation, Inc.,
#define PLUGIN_AUTHOR "Percona Inc."
+static int field_store_blob(Field*, const char*, uint) __attribute__((unused));
+/** Auxiliary function to store (char*, len) value in MYSQL_TYPE_BLOB
+field.
+@return 0 on success */
+static
+int
+field_store_blob(
+ Field* field, /*!< in/out: target field for storage */
+ const char* data, /*!< in: pointer to data, or NULL */
+ uint data_len) /*!< in: data length */
+{
+ int ret;
+
+ if (data != NULL) {
+ ret = field->store(data, data_len, system_charset_info);
+ field->set_notnull();
+ } else {
+ ret = 0; /* success */
+ field->set_null();
+ }
+
+ return(ret);
+}
+
static
int
i_s_common_deinit(
@@ -516,3 +542,331 @@ UNIV_INTERN struct st_mysql_plugin i_s_xtradb_rseg =
STRUCT_FLD(version_info, INNODB_VERSION_STR),
STRUCT_FLD(maturity, MariaDB_PLUGIN_MATURITY_STABLE),
};
+
+
+#ifdef HAVE_PERCONA_COMPRESSED_COLUMNS
+/************************************************************************/
+enum zip_dict_field_type
+{
+ zip_dict_field_id,
+ zip_dict_field_name,
+ zip_dict_field_zip_dict
+};
+
+static ST_FIELD_INFO xtradb_sys_zip_dict_fields_info[] =
+{
+ { STRUCT_FLD(field_name, "id"),
+ STRUCT_FLD(field_length, MY_INT64_NUM_DECIMAL_DIGITS),
+ STRUCT_FLD(field_type, MYSQL_TYPE_LONGLONG),
+ STRUCT_FLD(value, 0),
+ STRUCT_FLD(field_flags, MY_I_S_UNSIGNED),
+ STRUCT_FLD(old_name, ""),
+ STRUCT_FLD(open_method, SKIP_OPEN_TABLE) },
+
+ { STRUCT_FLD(field_name, "name"),
+ STRUCT_FLD(field_length, ZIP_DICT_MAX_NAME_LENGTH),
+ STRUCT_FLD(field_type, MYSQL_TYPE_STRING),
+ STRUCT_FLD(value, 0),
+ STRUCT_FLD(field_flags, 0),
+ STRUCT_FLD(old_name, ""),
+ STRUCT_FLD(open_method, SKIP_OPEN_TABLE) },
+
+ { STRUCT_FLD(field_name, "zip_dict"),
+ STRUCT_FLD(field_length, ZIP_DICT_MAX_DATA_LENGTH),
+ STRUCT_FLD(field_type, MYSQL_TYPE_BLOB),
+ STRUCT_FLD(value, 0),
+ STRUCT_FLD(field_flags, 0),
+ STRUCT_FLD(old_name, ""),
+ STRUCT_FLD(open_method, SKIP_OPEN_TABLE) },
+
+ END_OF_ST_FIELD_INFO
+};
+
+/** Function to fill INFORMATION_SCHEMA.XTRADB_ZIP_DICT with information
+collected by scanning SYS_ZIP_DICT table.
+@return 0 on success */
+static
+int
+xtradb_i_s_dict_fill_sys_zip_dict(
+ THD* thd, /*!< in: thread */
+ ulint id, /*!< in: dict ID */
+ const char* name, /*!< in: dict name */
+ const char* data, /*!< in: dict data */
+ ulint data_len, /*!< in: dict data length */
+ TABLE* table_to_fill) /*!< in/out: fill this table */
+{
+ DBUG_ENTER("xtradb_i_s_dict_fill_sys_zip_dict");
+
+ Field** fields = table_to_fill->field;
+
+ OK(field_store_ulint(fields[zip_dict_field_id], id));
+ OK(field_store_string(fields[zip_dict_field_name], name));
+ OK(field_store_blob(fields[zip_dict_field_zip_dict], data,
+ data_len));
+
+ OK(schema_table_store_record(thd, table_to_fill));
+
+ DBUG_RETURN(0);
+}
+
+/** Function to populate INFORMATION_SCHEMA.XTRADB_ZIP_DICT table.
+Loop through each record in SYS_ZIP_DICT, and extract the column
+information and fill the INFORMATION_SCHEMA.XTRADB_ZIP_DICT table.
+@return 0 on success */
+static
+int
+xtradb_i_s_sys_zip_dict_fill_table(
+ THD* thd, /*!< in: thread */
+ TABLE_LIST* tables, /*!< in/out: tables to fill */
+ Item* ) /*!< in: condition (not used) */
+{
+ btr_pcur_t pcur;
+ const rec_t* rec;
+ mem_heap_t* heap;
+ mtr_t mtr;
+
+ DBUG_ENTER("xtradb_i_s_sys_zip_dict_fill_table");
+ RETURN_IF_INNODB_NOT_STARTED(tables->schema_table_name);
+
+ /* deny access to user without SUPER_ACL privilege */
+ if (check_global_access(thd, SUPER_ACL)) {
+ DBUG_RETURN(0);
+ }
+
+ heap = mem_heap_create(1000);
+ mutex_enter(&dict_sys->mutex);
+ mtr_start(&mtr);
+
+ rec = dict_startscan_system(&pcur, &mtr, SYS_ZIP_DICT);
+ ulint zip_size = dict_table_zip_size(pcur.btr_cur.index->table);
+
+ while (rec) {
+ const char* err_msg;
+ ulint id;
+ const char* name;
+ const char* data;
+ ulint data_len;
+
+ /* Extract necessary information from a SYS_ZIP_DICT row */
+ err_msg = dict_process_sys_zip_dict(
+ heap, zip_size, rec, &id, &name, &data, &data_len);
+
+ mtr_commit(&mtr);
+ mutex_exit(&dict_sys->mutex);
+
+ if (!err_msg) {
+ xtradb_i_s_dict_fill_sys_zip_dict(
+ thd, id, name, data, data_len,
+ tables->table);
+ } else {
+ push_warning_printf(thd,
+ Sql_condition::WARN_LEVEL_WARN,
+ ER_CANT_FIND_SYSTEM_REC, "%s", err_msg);
+ }
+
+ mem_heap_empty(heap);
+
+ /* Get the next record */
+ mutex_enter(&dict_sys->mutex);
+ mtr_start(&mtr);
+ rec = dict_getnext_system(&pcur, &mtr);
+ }
+
+ mtr_commit(&mtr);
+ mutex_exit(&dict_sys->mutex);
+ mem_heap_free(heap);
+
+ DBUG_RETURN(0);
+}
+
+static int i_s_xtradb_zip_dict_init(void* p)
+{
+ DBUG_ENTER("i_s_xtradb_zip_dict_init");
+
+ ST_SCHEMA_TABLE* schema = static_cast<ST_SCHEMA_TABLE*>(p);
+
+ schema->fields_info = xtradb_sys_zip_dict_fields_info;
+ schema->fill_table = xtradb_i_s_sys_zip_dict_fill_table;
+
+ DBUG_RETURN(0);
+}
+
+UNIV_INTERN struct st_mysql_plugin i_s_xtradb_zip_dict =
+{
+ STRUCT_FLD(type, MYSQL_INFORMATION_SCHEMA_PLUGIN),
+ STRUCT_FLD(info, &i_s_info),
+ STRUCT_FLD(name, "XTRADB_ZIP_DICT"),
+ STRUCT_FLD(author, PLUGIN_AUTHOR),
+ STRUCT_FLD(descr, "InnoDB compression dictionaries information"),
+ STRUCT_FLD(license, PLUGIN_LICENSE_GPL),
+ STRUCT_FLD(init, i_s_xtradb_zip_dict_init),
+ STRUCT_FLD(deinit, i_s_common_deinit),
+ STRUCT_FLD(version, INNODB_VERSION_SHORT),
+ STRUCT_FLD(status_vars, NULL),
+ STRUCT_FLD(system_vars, NULL),
+ STRUCT_FLD(__reserved1, NULL),
+ STRUCT_FLD(flags, 0UL),
+};
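
With the plugin registered, the dictionary list becomes queryable (the fill function above gates access behind SUPER via check_global_access()); an illustrative query:

    SELECT id, name, zip_dict
    FROM INFORMATION_SCHEMA.XTRADB_ZIP_DICT;
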
+
+enum zip_dict_cols_field_type
+{
+ zip_dict_cols_field_table_id,
+ zip_dict_cols_field_column_pos,
+ zip_dict_cols_field_dict_id
+};
+
+static ST_FIELD_INFO xtradb_sys_zip_dict_cols_fields_info[] =
+{
+ { STRUCT_FLD(field_name, "table_id"),
+ STRUCT_FLD(field_length, MY_INT64_NUM_DECIMAL_DIGITS),
+ STRUCT_FLD(field_type, MYSQL_TYPE_LONGLONG),
+ STRUCT_FLD(value, 0),
+ STRUCT_FLD(field_flags, MY_I_S_UNSIGNED),
+ STRUCT_FLD(old_name, ""),
+ STRUCT_FLD(open_method, SKIP_OPEN_TABLE) },
+
+ { STRUCT_FLD(field_name, "column_pos"),
+ STRUCT_FLD(field_length, MY_INT64_NUM_DECIMAL_DIGITS),
+ STRUCT_FLD(field_type, MYSQL_TYPE_LONGLONG),
+ STRUCT_FLD(value, 0),
+ STRUCT_FLD(field_flags, MY_I_S_UNSIGNED),
+ STRUCT_FLD(old_name, ""),
+ STRUCT_FLD(open_method, SKIP_OPEN_TABLE) },
+
+ { STRUCT_FLD(field_name, "dict_id"),
+ STRUCT_FLD(field_length, MY_INT64_NUM_DECIMAL_DIGITS),
+ STRUCT_FLD(field_type, MYSQL_TYPE_LONGLONG),
+ STRUCT_FLD(value, 0),
+ STRUCT_FLD(field_flags, MY_I_S_UNSIGNED),
+ STRUCT_FLD(old_name, ""),
+ STRUCT_FLD(open_method, SKIP_OPEN_TABLE) },
+
+ END_OF_ST_FIELD_INFO
+};
+
+/** Function to fill INFORMATION_SCHEMA.XTRADB_ZIP_DICT_COLS with information
+collected by scanning SYS_ZIP_DICT_COLS table.
+@return 0 on success */
+static
+int
+xtradb_i_s_dict_fill_sys_zip_dict_cols(
+ THD* thd, /*!< in: thread */
+ ulint table_id, /*!< in: table ID */
+ ulint column_pos, /*!< in: column position */
+ ulint dict_id, /*!< in: dict ID */
+ TABLE* table_to_fill) /*!< in/out: fill this table */
+{
+ DBUG_ENTER("xtradb_i_s_dict_fill_sys_zip_dict_cols");
+
+ Field** fields = table_to_fill->field;
+
+ OK(field_store_ulint(fields[zip_dict_cols_field_table_id],
+ table_id));
+ OK(field_store_ulint(fields[zip_dict_cols_field_column_pos],
+ column_pos));
+ OK(field_store_ulint(fields[zip_dict_cols_field_dict_id],
+ dict_id));
+
+ OK(schema_table_store_record(thd, table_to_fill));
+
+ DBUG_RETURN(0);
+}
+
+/** Function to populate INFORMATION_SCHEMA.XTRADB_ZIP_DICT_COLS table.
+Loop through each record in SYS_ZIP_DICT_COLS, and extract the column
+information and fill the INFORMATION_SCHEMA.XTRADB_ZIP_DICT_COLS table.
+@return 0 on success */
+static
+int
+xtradb_i_s_sys_zip_dict_cols_fill_table(
+ THD* thd, /*!< in: thread */
+ TABLE_LIST* tables, /*!< in/out: tables to fill */
+ Item* ) /*!< in: condition (not used) */
+{
+ btr_pcur_t pcur;
+ const rec_t* rec;
+ mem_heap_t* heap;
+ mtr_t mtr;
+
+ DBUG_ENTER("xtradb_i_s_sys_zip_dict_cols_fill_table");
+ RETURN_IF_INNODB_NOT_STARTED(tables->schema_table_name);
+
+ /* deny access to user without SUPER_ACL privilege */
+ if (check_global_access(thd, SUPER_ACL)) {
+ DBUG_RETURN(0);
+ }
+
+ heap = mem_heap_create(1000);
+ mutex_enter(&dict_sys->mutex);
+ mtr_start(&mtr);
+
+ rec = dict_startscan_system(&pcur, &mtr, SYS_ZIP_DICT_COLS);
+
+ while (rec) {
+ const char* err_msg;
+ ulint table_id;
+ ulint column_pos;
+ ulint dict_id;
+
+ /* Extract necessary information from a SYS_ZIP_DICT_COLS
+ row */
+ err_msg = dict_process_sys_zip_dict_cols(
+ heap, rec, &table_id, &column_pos, &dict_id);
+
+ mtr_commit(&mtr);
+ mutex_exit(&dict_sys->mutex);
+
+ if (!err_msg) {
+ xtradb_i_s_dict_fill_sys_zip_dict_cols(
+ thd, table_id, column_pos, dict_id,
+ tables->table);
+ } else {
+ push_warning_printf(thd,
+ Sql_condition::WARN_LEVEL_WARN,
+ ER_CANT_FIND_SYSTEM_REC, "%s", err_msg);
+ }
+
+ mem_heap_empty(heap);
+
+ /* Get the next record */
+ mutex_enter(&dict_sys->mutex);
+ mtr_start(&mtr);
+ rec = dict_getnext_system(&pcur, &mtr);
+ }
+
+ mtr_commit(&mtr);
+ mutex_exit(&dict_sys->mutex);
+ mem_heap_free(heap);
+
+ DBUG_RETURN(0);
+}
+
+static int i_s_xtradb_zip_dict_cols_init(void* p)
+{
+ DBUG_ENTER("i_s_xtradb_zip_dict_cols_init");
+
+ ST_SCHEMA_TABLE* schema = static_cast<ST_SCHEMA_TABLE*>(p);
+
+ schema->fields_info = xtradb_sys_zip_dict_cols_fields_info;
+ schema->fill_table = xtradb_i_s_sys_zip_dict_cols_fill_table;
+
+ DBUG_RETURN(0);
+}
+
+UNIV_INTERN struct st_mysql_plugin i_s_xtradb_zip_dict_cols =
+{
+ STRUCT_FLD(type, MYSQL_INFORMATION_SCHEMA_PLUGIN),
+ STRUCT_FLD(info, &i_s_info),
+ STRUCT_FLD(name, "XTRADB_ZIP_DICT_COLS"),
+ STRUCT_FLD(author, PLUGIN_AUTHOR),
+ STRUCT_FLD(descr, "InnoDB compressed columns information"),
+ STRUCT_FLD(license, PLUGIN_LICENSE_GPL),
+ STRUCT_FLD(init, i_s_xtradb_zip_dict_cols_init),
+ STRUCT_FLD(deinit, i_s_common_deinit),
+ STRUCT_FLD(version, INNODB_VERSION_SHORT),
+ STRUCT_FLD(status_vars, NULL),
+ STRUCT_FLD(system_vars, NULL),
+ STRUCT_FLD(__reserved1, NULL),
+ STRUCT_FLD(flags, 0UL),
+};
+#endif
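
Joining the two INFORMATION_SCHEMA tables added above reconstructs the column-to-dictionary mapping; an illustrative query:

    SELECT c.table_id, c.column_pos, d.name AS dict_name
    FROM INFORMATION_SCHEMA.XTRADB_ZIP_DICT_COLS c
    JOIN INFORMATION_SCHEMA.XTRADB_ZIP_DICT d ON d.id = c.dict_id;
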
diff --git a/storage/xtradb/handler/xtradb_i_s.h b/storage/xtradb/handler/xtradb_i_s.h
index 2f7552c565a..905d84587af 100644
--- a/storage/xtradb/handler/xtradb_i_s.h
+++ b/storage/xtradb/handler/xtradb_i_s.h
@@ -22,5 +22,7 @@ this program; if not, write to the Free Software Foundation, Inc.,
extern struct st_mysql_plugin i_s_xtradb_read_view;
extern struct st_mysql_plugin i_s_xtradb_internal_hash_tables;
extern struct st_mysql_plugin i_s_xtradb_rseg;
+extern struct st_mysql_plugin i_s_xtradb_zip_dict;
+extern struct st_mysql_plugin i_s_xtradb_zip_dict_cols;
#endif /* XTRADB_I_S_H */
diff --git a/storage/xtradb/include/data0type.h b/storage/xtradb/include/data0type.h
index 111664b0b52..f269c266efb 100644
--- a/storage/xtradb/include/data0type.h
+++ b/storage/xtradb/include/data0type.h
@@ -170,6 +170,9 @@ be less than 256 */
type when the column is true VARCHAR where
MySQL uses 2 bytes to store the data len;
for shorter VARCHARs MySQL uses only 1 byte */
+#define DATA_COMPRESSED 16384 /* this is ORed to the precise data
+ type when the column has the COLUMN_FORMAT =
+ COMPRESSED attribute */
/*-------------------------------------------*/
/* This many bytes we need to store the type information affecting the
@@ -500,6 +503,17 @@ dtype_print(
/*========*/
const dtype_t* type); /*!< in: type */
+/**
+Calculates the number of extra bytes needed for the compression header
+depending on the precise column type.
+@retval 0 if prtype does not include the DATA_COMPRESSED flag
+@retval ZIP_COLUMN_HEADER_LENGTH if prtype includes the DATA_COMPRESSED flag
+*/
+UNIV_INLINE
+ulint
+prtype_get_compression_extra(
+ ulint prtype); /*!< in: precise type */
+
/* Structure for an SQL data type.
If you add fields to this structure, be sure to initialize them everywhere.
This structure is initialized in the following functions:
diff --git a/storage/xtradb/include/data0type.ic b/storage/xtradb/include/data0type.ic
index d489bef89a8..29dc480a19c 100644
--- a/storage/xtradb/include/data0type.ic
+++ b/storage/xtradb/include/data0type.ic
@@ -26,6 +26,7 @@ Created 1/16/1996 Heikki Tuuri
#include <string.h> /* strlen() */
#include "mach0data.h"
+#include "rem0types.h" /* ZIP_COLUMN_HEADER_LENGTH */
#ifndef UNIV_HOTBACKUP
# include "ha_prototypes.h"
@@ -709,3 +710,18 @@ dtype_get_sql_null_size(
0, 0));
#endif /* !UNIV_HOTBACKUP */
}
+
+/**
+Calculates the number of extra bytes needed for the compression header
+depending on the precise column type.
+@retval 0 if prtype does not include the DATA_COMPRESSED flag
+@retval ZIP_COLUMN_HEADER_LENGTH if prtype includes the DATA_COMPRESSED flag
+*/
+UNIV_INLINE
+ulint
+prtype_get_compression_extra(
+ ulint prtype) /*!< in: precise type */
+{
+ return (prtype & DATA_COMPRESSED) != 0 ?
+ ZIP_COLUMN_HEADER_LENGTH : 0;
+}
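
Since DATA_COMPRESSED occupies a single prtype bit (16384, per data0type.h above), the function reduces to a plain bit test; a worked example:

    /* any precise type with the flag set reserves the header bytes */
    ut_ad(prtype_get_compression_extra(DATA_COMPRESSED)
          == ZIP_COLUMN_HEADER_LENGTH);
    ut_ad(prtype_get_compression_extra(0) == 0);
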
diff --git a/storage/xtradb/include/dict0boot.h b/storage/xtradb/include/dict0boot.h
index 477e1150f43..d5bee886cbf 100644
--- a/storage/xtradb/include/dict0boot.h
+++ b/storage/xtradb/include/dict0boot.h
@@ -324,6 +324,38 @@ enum dict_fld_sys_datafiles_enum {
DICT_FLD__SYS_DATAFILES__PATH = 3,
DICT_NUM_FIELDS__SYS_DATAFILES = 4
};
+/* The columns in SYS_ZIP_DICT */
+enum dict_col_sys_zip_dict_enum {
+ DICT_COL__SYS_ZIP_DICT__ID = 0,
+ DICT_COL__SYS_ZIP_DICT__NAME = 1,
+ DICT_COL__SYS_ZIP_DICT__DATA = 2,
+ DICT_NUM_COLS__SYS_ZIP_DICT = 3
+};
+/* The field numbers in the SYS_ZIP_DICT clustered index */
+enum dict_fld_sys_zip_dict_enum {
+ DICT_FLD__SYS_ZIP_DICT__ID = 0,
+ DICT_FLD__SYS_ZIP_DICT__DB_TRX_ID = 1,
+ DICT_FLD__SYS_ZIP_DICT__DB_ROLL_PTR = 2,
+ DICT_FLD__SYS_ZIP_DICT__NAME = 3,
+ DICT_FLD__SYS_ZIP_DICT__DATA = 4,
+ DICT_NUM_FIELDS__SYS_ZIP_DICT = 5
+};
+/* The columns in SYS_ZIP_DICT_COLS */
+enum dict_col_sys_zip_dict_cols_enum {
+ DICT_COL__SYS_ZIP_DICT_COLS__TABLE_ID = 0,
+ DICT_COL__SYS_ZIP_DICT_COLS__COLUMN_POS = 1,
+ DICT_COL__SYS_ZIP_DICT_COLS__DICT_ID = 2,
+ DICT_NUM_COLS__SYS_ZIP_DICT_COLS = 3
+};
+/* The field numbers in the SYS_ZIP_DICT_COLS clustered index */
+enum dict_fld_sys_zip_dict_cols_enum {
+ DICT_FLD__SYS_ZIP_DICT_COLS__TABLE_ID = 0,
+ DICT_FLD__SYS_ZIP_DICT_COLS__COLUMN_POS = 1,
+ DICT_FLD__SYS_ZIP_DICT_COLS__DB_TRX_ID = 2,
+ DICT_FLD__SYS_ZIP_DICT_COLS__DB_ROLL_PTR = 3,
+ DICT_FLD__SYS_ZIP_DICT_COLS__DICT_ID = 4,
+ DICT_NUM_FIELDS__SYS_ZIP_DICT_COLS = 5
+};
/* A number of the columns above occur in multiple tables. These are the
lengths of those fields. */
diff --git a/storage/xtradb/include/dict0crea.h b/storage/xtradb/include/dict0crea.h
index 77627c9bf67..6ea71ada83e 100644
--- a/storage/xtradb/include/dict0crea.h
+++ b/storage/xtradb/include/dict0crea.h
@@ -166,6 +166,19 @@ UNIV_INTERN
dberr_t
dict_create_or_check_sys_tablespace(void);
/*=====================================*/
+
+#define ZIP_DICT_MAX_NAME_LENGTH 64
+/* Max window size (2^15 = 32768) minus the 262 bytes zlib reserves, i.e. 32506 */
+#define ZIP_DICT_MAX_DATA_LENGTH 32506
+
+/** Creates the zip_dict system table inside InnoDB
+at server bootstrap or server start if it is not found or is
+not of the right form.
+@return DB_SUCCESS or error code */
+UNIV_INTERN
+dberr_t
+dict_create_or_check_sys_zip_dict(void);
+
/********************************************************************//**
Add a single tablespace definition to the data dictionary tables in the
database.
@@ -181,6 +194,84 @@ dict_create_add_tablespace_to_dictionary(
trx_t* trx, /*!< in: transaction */
bool commit); /*!< in: if true then commit the
transaction */
+
+/** Add a single compression dictionary definition to the SYS_ZIP_DICT
+InnoDB system table.
+@return error code or DB_SUCCESS */
+UNIV_INTERN
+dberr_t
+dict_create_add_zip_dict(
+ const char* name, /*!< in: dict name */
+ ulint name_len, /*!< in: dict name length */
+ const char* data, /*!< in: dict data */
+ ulint data_len, /*!< in: dict data length */
+ trx_t* trx); /*!< in/out: transaction */
+
+/** Add a single compression dictionary reference to the SYS_ZIP_DICT_COLS
+InnoDB system table.
+@return error code or DB_SUCCESS */
+UNIV_INTERN
+dberr_t
+dict_create_add_zip_dict_reference(
+ ulint table_id, /*!< in: table id */
+ ulint column_pos, /*!< in: column position */
+ ulint dict_id, /*!< in: dict id */
+ trx_t* trx); /*!< in/out: transaction */
+
+/** Get a single compression dictionary id for the given
+(table id, column pos) pair.
+@return error code or DB_SUCCESS */
+UNIV_INTERN
+dberr_t
+dict_create_get_zip_dict_id_by_reference(
+ ulint table_id, /*!< in: table id */
+ ulint column_pos, /*!< in: column position */
+ ulint* dict_id, /*!< out: dict id */
+ trx_t* trx); /*!< in/out: transaction */
+
+/** Get compression dictionary id for the given name.
+@return error code or DB_SUCCESS */
+UNIV_INTERN
+dberr_t
+dict_create_get_zip_dict_id_by_name(
+ const char* dict_name, /*!< in: dict name */
+ ulint dict_name_len, /*!< in: dict name length */
+ ulint* dict_id, /*!< out: dict id */
+ trx_t* trx); /*!< in/out: transaction */
+
+/** Get compression dictionary info (name and data) for the given id.
+Allocates memory for name and data on success.
+Must be freed with mem_free().
+@return error code or DB_SUCCESS */
+UNIV_INTERN
+dberr_t
+dict_create_get_zip_dict_info_by_id(
+ ulint dict_id, /*!< in: dict id */
+ char** name, /*!< out: dict name */
+ ulint* name_len, /*!< out: dict name length */
+ char** data, /*!< out: dict data */
+ ulint* data_len, /*!< out: dict data length */
+ trx_t* trx); /*!< in/out: transaction */
+
+/** Remove a single compression dictionary from the data dictionary
+tables in the database.
+@return error code or DB_SUCCESS */
+UNIV_INTERN
+dberr_t
+dict_create_remove_zip_dict(
+ const char* name, /*!< in: dict name */
+ ulint name_len, /*!< in: dict name length */
+ trx_t* trx); /*!< in/out: transaction */
+
+/** Remove all compression dictionary references for the given table ID from
+the data dictionary tables in the database.
+@return error code or DB_SUCCESS */
+UNIV_INTERN
+dberr_t
+dict_create_remove_zip_dict_references_for_table(
+ ulint table_id, /*!< in: table id */
+ trx_t* trx); /*!< in/out: transaction */
+
/********************************************************************//**
Add a foreign key definition to the data dictionary tables.
@return error code or DB_SUCCESS */
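
For illustration only: a minimal caller sketch, not part of the patch, showing how the limits above gate an insert into SYS_ZIP_DICT (the wrapper name and the DB_ERROR choice are illustrative; a started dictionary transaction is assumed):

/* Hypothetical helper: validate a user-supplied dictionary against the
limits introduced above before inserting it into SYS_ZIP_DICT. */
static
dberr_t
create_zip_dict_checked(
	const char*	name,		/*!< in: dictionary name */
	ulint		name_len,	/*!< in: name length */
	const char*	data,		/*!< in: dictionary payload */
	ulint		data_len,	/*!< in: payload length */
	trx_t*		trx)		/*!< in/out: dictionary transaction */
{
	if (name_len == 0 || name_len > ZIP_DICT_MAX_NAME_LENGTH) {
		return(DB_ERROR);	/* name does not fit the column */
	}

	if (data_len > ZIP_DICT_MAX_DATA_LENGTH) {
		return(DB_ERROR);	/* exceeds 2^15 - 262 */
	}

	return(dict_create_add_zip_dict(name, name_len, data, data_len, trx));
}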
diff --git a/storage/xtradb/include/dict0dict.h b/storage/xtradb/include/dict0dict.h
index e843d63e717..5aab595302a 100644
--- a/storage/xtradb/include/dict0dict.h
+++ b/storage/xtradb/include/dict0dict.h
@@ -1910,6 +1910,52 @@ dict_table_set_corrupt_by_space(
ulint space_id,
ibool need_mutex);
+/** Insert a record into SYS_ZIP_DICT.
+@retval DB_SUCCESS if OK
+@retval other dberr_t value if the insert failed */
+UNIV_INTERN
+dberr_t
+dict_create_zip_dict(
+ const char* name, /*!< in: zip_dict name */
+ ulint name_len, /*!< in: zip_dict name length*/
+ const char* data, /*!< in: zip_dict data */
+ ulint data_len); /*!< in: zip_dict data length */
+
+/** Get a single compression dictionary id for the given
+(table id, column pos) pair.
+@retval DB_SUCCESS if OK
+@retval DB_RECORD_NOT_FOUND if not found */
+UNIV_INTERN
+dberr_t
+dict_get_dictionary_id_by_key(
+ ulint table_id, /*!< in: table id */
+ ulint column_pos, /*!< in: column position */
+ ulint* dict_id); /*!< out: zip_dict id */
+
+/** Get compression dictionary info (name and data) for the given id.
+Allocates memory for name and data on success.
+Must be freed with mem_free().
+@retval DB_SUCCESS if OK
+@retval DB_RECORD_NOT_FOUND if not found */
+UNIV_INTERN
+dberr_t
+dict_get_dictionary_info_by_id(
+ ulint dict_id, /*!< in: dict id */
+ char** name, /*!< out: dictionary name */
+ ulint* name_len, /*!< out: dictionary name length*/
+ char** data, /*!< out: dictionary data */
+ ulint* data_len); /*!< out: dictionary data length*/
+
+/** Delete a record in SYS_ZIP_DICT with the given name.
+@retval DB_SUCCESS if OK
+@retval DB_RECORD_NOT_FOUND if not found
+@retval DB_ROW_IS_REFERENCED if in use */
+UNIV_INTERN
+dberr_t
+dict_drop_zip_dict(
+ const char* name, /*!< in: zip_dict name */
+ ulint name_len); /*!< in: zip_dict name length*/
+
#ifndef UNIV_NONINL
#include "dict0dict.ic"
#endif
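
A usage sketch for the lookup above (the helper name is illustrative, not part of the patch): both output buffers are allocated for the caller, which must release them with mem_free().

/* Illustrative: fetch and release one dictionary entry by id. */
static
dberr_t
fetch_zip_dict_entry(
	ulint	dict_id)	/*!< in: dictionary id to look up */
{
	char*	name;
	ulint	name_len;
	char*	data;
	ulint	data_len;

	dberr_t	err = dict_get_dictionary_info_by_id(
		dict_id, &name, &name_len, &data, &data_len);

	if (err == DB_SUCCESS) {
		/* ... consume name[0..name_len) and data[0..data_len) ... */
		mem_free(name);
		mem_free(data);
	}

	return(err);
}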
diff --git a/storage/xtradb/include/dict0load.h b/storage/xtradb/include/dict0load.h
index dcbc3de8e94..85e3e565637 100644
--- a/storage/xtradb/include/dict0load.h
+++ b/storage/xtradb/include/dict0load.h
@@ -44,6 +44,8 @@ enum dict_system_id_t {
SYS_FOREIGN_COLS,
SYS_TABLESPACES,
SYS_DATAFILES,
+ SYS_ZIP_DICT,
+ SYS_ZIP_DICT_COLS,
/* This must be last item. Defines the number of system tables. */
SYS_NUM_SYSTEM_TABLES
@@ -386,6 +388,33 @@ dict_process_sys_datafiles(
const rec_t* rec, /*!< in: current SYS_DATAFILES rec */
 ulint* space, /*!< out: space id */
const char** path); /*!< out: datafile path */
+
+/** This function parses a SYS_ZIP_DICT record, extracts necessary
+information from the record and returns to caller.
+@return error message, or NULL on success */
+UNIV_INTERN
+const char*
+dict_process_sys_zip_dict(
+ mem_heap_t* heap, /*!< in/out: heap memory */
+ ulint zip_size, /*!< in: nonzero=compressed BLOB page size */
+ const rec_t* rec, /*!< in: current SYS_ZIP_DICT rec */
+ ulint* id, /*!< out: dict id */
+ const char** name, /*!< out: dict name */
+ const char** data, /*!< out: dict data */
+ ulint* data_len); /*!< out: dict data length */
+
+/** This function parses a SYS_ZIP_DICT_COLS record, extracts necessary
+information from the record and returns to caller.
+@return error message, or NULL on success */
+UNIV_INTERN
+const char*
+dict_process_sys_zip_dict_cols(
+ mem_heap_t* heap, /*!< in/out: heap memory */
+ const rec_t* rec, /*!< in: current SYS_ZIP_DICT rec */
+ ulint* table_id, /*!< out: table id */
+ ulint* column_pos, /*!< out: column position */
+ ulint* dict_id); /*!< out: dict id */
+
/********************************************************************//**
Get the filepath for a spaceid from SYS_DATAFILES. This function provides
a temporary heap which is used for the table lookup, but not for the path.
diff --git a/storage/xtradb/include/fts0fts.h b/storage/xtradb/include/fts0fts.h
index 87b5787d416..3e2f359bbeb 100644
--- a/storage/xtradb/include/fts0fts.h
+++ b/storage/xtradb/include/fts0fts.h
@@ -375,6 +375,7 @@ extern bool fts_need_sync;
/** Variable specifying the table that has Fulltext index to display its
content through information schema table */
extern char* fts_internal_tbl_name;
+extern char* fts_internal_tbl_name2;
#define fts_que_graph_free(graph) \
do { \
@@ -823,6 +824,15 @@ void
fts_drop_orphaned_tables(void);
/*==========================*/
+/** Get parent table name if it's an FTS aux table
+@param[in] aux_table_name aux table name
+@param[in] aux_table_len aux table length
+@return parent table name, or NULL */
+char*
+fts_get_parent_table_name(
+ const char* aux_table_name,
+ ulint aux_table_len);
+
/******************************************************************//**
Since we do a horizontal split on the index table, we need to drop
all the split tables.
diff --git a/storage/xtradb/include/os0thread.h b/storage/xtradb/include/os0thread.h
index 815faf97319..671b9b7dc3f 100644
--- a/storage/xtradb/include/os0thread.h
+++ b/storage/xtradb/include/os0thread.h
@@ -131,14 +131,27 @@ os_thread_create_func(
os_thread_id_t* thread_id); /*!< out: id of the created
thread, or NULL */
+/**
+Waits until the specified thread completes and joins it. The thread's exit
+value is ignored.
+
+@param thread thread to join */
+UNIV_INTERN
+void
+os_thread_join(
+ os_thread_t thread);
+
/*****************************************************************//**
Exits the current thread. */
UNIV_INTERN
void
os_thread_exit(
/*===========*/
- void* exit_value) /*!< in: exit value; in Windows this void*
+ void* exit_value, /*!< in: exit value; in Windows this void*
is cast as a DWORD */
+ bool detach = true) /*!< in: if true, the thread will be detached
+ right before exiting. If false, another thread
+ is responsible for joining this thread. */
UNIV_COLD MY_ATTRIBUTE((noreturn));
/*****************************************************************//**
Returns the thread identifier of current thread.
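
The new detach parameter pairs with os_thread_join(): a worker that will be joined must exit without detaching itself. A sketch of the intended pairing (thread body and caller are illustrative, not from the patch):

/* Illustrative worker: exits without detaching so the creator can join. */
extern "C"
os_thread_ret_t
joinable_worker(
	void*	arg MY_ATTRIBUTE((unused)))
{
	/* ... do work ... */

	os_thread_exit(NULL, false);	/* leave joining to the creator */

	OS_THREAD_DUMMY_RETURN;
}

/* Illustrative creator: reclaims the worker instead of leaking it. */
void
run_joinable_worker(void)
{
	os_thread_id_t	id;
	os_thread_t	handle = os_thread_create(joinable_worker, NULL, &id);

	os_thread_join(handle);	/* exit value is ignored */
}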
diff --git a/storage/xtradb/include/rem0types.h b/storage/xtradb/include/rem0types.h
index f8133f77466..5da96066f88 100644
--- a/storage/xtradb/include/rem0types.h
+++ b/storage/xtradb/include/rem0types.h
@@ -71,4 +71,7 @@ enum rec_format_enum {
};
typedef enum rec_format_enum rec_format_t;
+/** Compressed field header size in bytes */
+#define ZIP_COLUMN_HEADER_LENGTH 2
+
#endif
diff --git a/storage/xtradb/include/row0mysql.h b/storage/xtradb/include/row0mysql.h
index 52e82da668d..b818791cf72 100644
--- a/storage/xtradb/include/row0mysql.h
+++ b/storage/xtradb/include/row0mysql.h
@@ -42,6 +42,9 @@ struct SysIndexCallback;
extern ibool row_rollback_on_timeout;
+extern uint srv_compressed_columns_zip_level;
+extern ulong srv_compressed_columns_threshold;
+
struct row_prebuilt_t;
/*******************************************************************//**
@@ -52,6 +55,49 @@ row_mysql_prebuilt_free_blob_heap(
/*==============================*/
row_prebuilt_t* prebuilt); /*!< in: prebuilt struct of a
ha_innobase:: table handle */
+
+/** Frees the compress heap in prebuilt when no longer needed. */
+UNIV_INTERN
+void
+row_mysql_prebuilt_free_compress_heap(
+ row_prebuilt_t* prebuilt); /*!< in: prebuilt struct of a
+ ha_innobase:: table handle */
+
+/** Uncompress blob/text/varchar column using zlib
+@return pointer to the uncompressed data */
+const byte*
+row_decompress_column(
+ const byte* data, /*!< in: data in innodb(compressed) format */
+ ulint *len, /*!< in: data length; out: length of
+ decompressed data*/
+ const byte* dict_data,
+ /*!< in: optional dictionary data used for
+ decompression */
+ ulint dict_data_len,
+ /*!< in: optional dictionary data length */
+ row_prebuilt_t* prebuilt);
+ /*!< in: use prebuilt->compress_heap only
+ here*/
+
+/** Compress blob/text/varchar column using zlib
+@return pointer to the compressed data */
+byte*
+row_compress_column(
+ const byte* data, /*!< in: data in mysql(uncompressed)
+ format */
+ ulint *len, /*!< in: data length; out: length of
+ compressed data*/
+ ulint lenlen, /*!< in: bytes used to store the length of
+ data */
+ const byte* dict_data,
+ /*!< in: optional dictionary data used for
+ compression */
+ ulint dict_data_len,
+ /*!< in: optional dictionary data length */
+ row_prebuilt_t* prebuilt);
+ /*!< in: use prebuilt->compress_heap only
+ here*/
+
/*******************************************************************//**
Stores a >= 5.0.3 format true VARCHAR length to dest, in the MySQL row
format.
@@ -90,10 +136,21 @@ row_mysql_store_blob_ref(
to 4 bytes */
const void* data, /*!< in: BLOB data; if the value to store
is SQL NULL this should be NULL pointer */
- ulint len); /*!< in: BLOB length; if the value to store
+ ulint len, /*!< in: BLOB length; if the value to store
is SQL NULL this should be 0; remember
also to set the NULL bit in the MySQL record
header! */
+ bool need_decompression,
+ /*!< in: if the data needs to be decompressed */
+ const byte* dict_data,
+ /*!< in: optional compression dictionary
+ data */
+ ulint dict_data_len,
+ /*!< in: optional compression dictionary data
+ length */
+ row_prebuilt_t* prebuilt);
+ /*!< in: use prebuilt->compress_heap only
+ here */
/*******************************************************************//**
Reads a reference to a BLOB in the MySQL format.
@return pointer to BLOB data */
@@ -104,8 +161,17 @@ row_mysql_read_blob_ref(
ulint* len, /*!< out: BLOB length */
const byte* ref, /*!< in: BLOB reference in the
MySQL format */
- ulint col_len); /*!< in: BLOB reference length
+ ulint col_len, /*!< in: BLOB reference length
(not BLOB length) */
+ bool need_compression,
+ /*!< in: if the data needs to be
+ compressed */
+ const byte* dict_data, /*!< in: optional compression
+ dictionary data */
+ ulint dict_data_len, /*!< in: optional compression
+ dictionary data length */
+ row_prebuilt_t* prebuilt); /*!< in: use prebuilt->compress_heap
+ only here */
/**************************************************************//**
Pad a column with spaces. */
UNIV_INTERN
@@ -153,7 +219,16 @@ row_mysql_store_col_in_innobase_format(
necessarily the length of the actual
payload data; if the column is a true
VARCHAR then this is irrelevant */
- ulint comp); /*!< in: nonzero=compact format */
+ ulint comp, /*!< in: nonzero=compact format */
+ bool need_compression,
+ /*!< in: if the data needs to be
+ compressed */
+ const byte* dict_data, /*!< in: optional compression
+ dictionary data */
+ ulint dict_data_len, /*!< in: optional compression
+ dictionary data length */
+ row_prebuilt_t* prebuilt); /*!< in: use prebuilt->compress_heap
+ only here */
/****************************************************************//**
Handles user errors and lock waits detected by the database engine.
@return true if it was a lock wait and we should continue running the
@@ -655,6 +730,8 @@ struct mysql_row_templ_t {
ulint is_unsigned; /*!< if a column type is an integer
type and this field is != 0, then
it is an unsigned integer type */
+ bool compressed; /*!< if column format is compressed */
+ LEX_CSTRING zip_dict_data; /*!< associated compression dictionary */
};
#define MYSQL_FETCH_CACHE_SIZE 8
@@ -854,6 +931,8 @@ struct row_prebuilt_t {
in fetch_cache */
mem_heap_t* blob_heap; /*!< in SELECTS BLOB fields are copied
to this heap */
+ mem_heap_t* compress_heap; /*!< memory heap used to
+ compress/decompress blob columns */
mem_heap_t* old_vers_heap; /*!< memory heap where a previous
version is built in consistent read */
bool in_fts_query; /*!< Whether we are in a FTS query */
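
A round-trip sketch for row_compress_column()/row_decompress_column() above (helper name and the two-length-byte assumption are illustrative; prebuilt is assumed to come from an open table handle):

/* Illustrative: mysql format -> compressed innodb format -> mysql format.
Assumes my_len < 65536 so that two length bytes (lenlen == 2) suffice. */
void
roundtrip_compressed_column(
	row_prebuilt_t*	prebuilt,
	const byte*	my_data,
	ulint		my_len)
{
	ulint	len = my_len;

	/* no compression dictionary in this sketch */
	byte*	comp = row_compress_column(my_data, &len, 2,
					   NULL, 0, prebuilt);

	/* len now covers ZIP_COLUMN_HEADER_LENGTH plus, when the data
	was actually compressed, the stored original-length bytes */

	const byte*	orig = row_decompress_column(comp, &len,
						     NULL, 0, prebuilt);

	ut_a(len == my_len);
	ut_a(!memcmp(orig, my_data, len));

	/* both intermediate buffers live in prebuilt->compress_heap */
	row_mysql_prebuilt_free_compress_heap(prebuilt);
}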
diff --git a/storage/xtradb/include/srv0srv.h b/storage/xtradb/include/srv0srv.h
index f60cfde1264..454d9294a1c 100644
--- a/storage/xtradb/include/srv0srv.h
+++ b/storage/xtradb/include/srv0srv.h
@@ -597,6 +597,9 @@ extern ibool srv_priority_boost;
extern ulint srv_truncated_status_writes;
extern ulint srv_available_undo_logs;
+extern ulint srv_column_compressed;
+extern ulint srv_column_decompressed;
+
extern ulint srv_mem_pool_size;
extern ulint srv_lock_table_size;
@@ -1225,6 +1228,8 @@ struct export_var_t{
ulint innodb_purge_view_trx_id_age; /*!< rw_max_trx_id
- purged view's min trx_id */
#endif /* UNIV_DEBUG */
+ ulint innodb_column_compressed; /*!< srv_column_compressed */
+ ulint innodb_column_decompressed; /*!< srv_column_decompressed */
ib_int64_t innodb_page_compression_saved;/*!< Number of bytes saved
by page compression */
diff --git a/storage/xtradb/include/univ.i b/storage/xtradb/include/univ.i
index a42b8b8bc25..4d91a3029a7 100644
--- a/storage/xtradb/include/univ.i
+++ b/storage/xtradb/include/univ.i
@@ -48,7 +48,7 @@ Created 1/20/1994 Heikki Tuuri
#define INNODB_VERSION_BUGFIX 32
#ifndef PERCONA_INNODB_VERSION
-#define PERCONA_INNODB_VERSION 78.1
+#define PERCONA_INNODB_VERSION 79.0
#endif
/* Enable UNIV_LOG_ARCHIVE in XtraDB */
diff --git a/storage/xtradb/log/log0log.cc b/storage/xtradb/log/log0log.cc
index 411fed91ac5..a01a2ed9570 100644
--- a/storage/xtradb/log/log0log.cc
+++ b/storage/xtradb/log/log0log.cc
@@ -1011,6 +1011,7 @@ log_init(void)
log_sys->next_checkpoint_no = 0;
log_sys->last_checkpoint_lsn = log_sys->lsn;
+ log_sys->next_checkpoint_lsn = log_sys->lsn;
log_sys->n_pending_checkpoint_writes = 0;
@@ -1944,6 +1945,7 @@ log_complete_checkpoint(void)
log_sys->next_checkpoint_no++;
+ ut_ad(log_sys->next_checkpoint_lsn >= log_sys->last_checkpoint_lsn);
log_sys->last_checkpoint_lsn = log_sys->next_checkpoint_lsn;
MONITOR_SET(MONITOR_LSN_CHECKPOINT_AGE,
log_sys->lsn - log_sys->last_checkpoint_lsn);
@@ -2031,11 +2033,17 @@ log_group_checkpoint(
ulint i;
ut_ad(!srv_read_only_mode);
+ ut_ad(srv_shutdown_state != SRV_SHUTDOWN_LAST_PHASE);
ut_ad(mutex_own(&(log_sys->mutex)));
ut_a(LOG_CHECKPOINT_SIZE <= OS_FILE_LOG_BLOCK_SIZE);
buf = group->checkpoint_buf;
+#ifdef UNIV_DEBUG
+ lsn_t old_next_checkpoint_lsn
+ = mach_read_from_8(buf + LOG_CHECKPOINT_LSN);
+ ut_ad(old_next_checkpoint_lsn <= log_sys->next_checkpoint_lsn);
+#endif /* UNIV_DEBUG */
mach_write_to_8(buf + LOG_CHECKPOINT_NO, log_sys->next_checkpoint_no);
mach_write_to_8(buf + LOG_CHECKPOINT_LSN, log_sys->next_checkpoint_lsn);
@@ -2314,6 +2322,7 @@ log_checkpoint(
return(FALSE);
}
+ ut_ad(oldest_lsn >= log_sys->next_checkpoint_lsn);
log_sys->next_checkpoint_lsn = oldest_lsn;
#ifdef UNIV_DEBUG
if (log_debug_writes) {
@@ -3670,13 +3679,15 @@ loop:
before proceeding further. */
srv_shutdown_state = SRV_SHUTDOWN_FLUSH_PHASE;
count = 0;
- while (buf_page_cleaner_is_active) {
- ++count;
- os_thread_sleep(100000);
- if (srv_print_verbose_log && count > 600) {
+ while (buf_page_cleaner_is_active || buf_lru_manager_is_active) {
+ if (srv_print_verbose_log && count == 0) {
ib_logf(IB_LOG_LEVEL_INFO,
"Waiting for page_cleaner to "
"finish flushing of buffer pool");
+ }
+ ++count;
+ os_thread_sleep(100000);
+ if (count > 600) {
count = 0;
}
}
@@ -3844,6 +3855,7 @@ loop:
ut_a(freed);
ut_a(lsn == log_sys->lsn);
+ ut_ad(lsn == log_sys->last_checkpoint_lsn);
if (lsn < srv_start_lsn) {
ib_logf(IB_LOG_LEVEL_ERROR,
diff --git a/storage/xtradb/log/log0online.cc b/storage/xtradb/log/log0online.cc
index 167d46e2ae8..2a1ac63dc5b 100644
--- a/storage/xtradb/log/log0online.cc
+++ b/storage/xtradb/log/log0online.cc
@@ -433,6 +433,7 @@ log_online_track_missing_on_startup(
current server startup */
{
ut_ad(last_tracked_lsn != tracking_start_lsn);
+ ut_ad(srv_track_changed_pages);
ib_logf(IB_LOG_LEVEL_WARN, "last tracked LSN in \'%s\' is " LSN_PF
", but the last checkpoint LSN is " LSN_PF ". This might be "
@@ -615,6 +616,8 @@ log_online_read_init(void)
compile_time_assert(MODIFIED_PAGE_BLOCK_BITMAP % 8 == 0);
compile_time_assert(MODIFIED_PAGE_BLOCK_BITMAP_LEN % 8 == 0);
+ ut_ad(srv_track_changed_pages);
+
log_bmp_sys = static_cast<log_bitmap_struct *>
(ut_malloc(sizeof(*log_bmp_sys)));
log_bmp_sys->read_buf_ptr = static_cast<byte *>
@@ -1089,10 +1092,15 @@ log_online_write_bitmap_page(
{
ibool success;
+ ut_ad(srv_track_changed_pages);
ut_ad(mutex_own(&log_bmp_sys->mutex));
/* Simulate a write error */
- DBUG_EXECUTE_IF("bitmap_page_write_error", return FALSE;);
+ DBUG_EXECUTE_IF("bitmap_page_write_error",
+ ib_logf(IB_LOG_LEVEL_ERROR,
+ "simulating bitmap write error in "
+ "log_online_write_bitmap_page");
+ return FALSE;);
success = os_file_write(log_bmp_sys->out.name, log_bmp_sys->out.file,
block, log_bmp_sys->out.offset,
@@ -1182,7 +1190,9 @@ log_online_write_bitmap(void)
rbt_next(log_bmp_sys->modified_pages, bmp_tree_node);
DBUG_EXECUTE_IF("bitmap_page_2_write_error",
- DBUG_SET("+d,bitmap_page_write_error"););
+ ut_ad(bmp_tree_node); /* 2nd page must exist */
+ DBUG_SET("+d,bitmap_page_write_error");
+ DBUG_SET("-d,bitmap_page_2_write_error"););
}
rbt_reset(log_bmp_sys->modified_pages);
@@ -1203,15 +1213,11 @@ log_online_follow_redo_log(void)
log_group_t* group;
ibool result;
- mutex_enter(&log_bmp_sys->mutex);
-
- if (!srv_track_changed_pages) {
- mutex_exit(&log_bmp_sys->mutex);
- return FALSE;
- }
-
+ ut_ad(srv_track_changed_pages);
ut_ad(!srv_read_only_mode);
+ mutex_enter(&log_bmp_sys->mutex);
+
/* Grab the LSN of the last checkpoint, we will parse up to it */
mutex_enter(&(log_sys->mutex));
log_bmp_sys->end_lsn = log_sys->last_checkpoint_lsn;
@@ -1554,9 +1560,12 @@ log_online_diagnose_bitmap_eof(
/* It's a "Warning" here because it's not a fatal error
for the whole server */
ib_logf(IB_LOG_LEVEL_WARN,
- "changed page bitmap file \'%s\' does not "
- "contain a complete run at the end.",
- bitmap_file->name);
+ "changed page bitmap file \'%s\', size "
+ UINT64PF " bytes, does not "
+ "contain a complete run at the next read "
+ "offset " UINT64PF,
+ bitmap_file->name, bitmap_file->size,
+ bitmap_file->offset);
return FALSE;
}
}
diff --git a/storage/xtradb/mach/mach0data.cc b/storage/xtradb/mach/mach0data.cc
index df68aab8a18..206434dc5ab 100644
--- a/storage/xtradb/mach/mach0data.cc
+++ b/storage/xtradb/mach/mach0data.cc
@@ -56,7 +56,18 @@ mach_parse_compressed(
*val = flag;
return(ptr + 1);
- } else if (flag < 0xC0UL) {
+ }
+
+ /* Workaround GCC bug
+ https://gcc.gnu.org/bugzilla/show_bug.cgi?id=77673:
+ the compiler moves mach_read_from_4 right to the beginning of the
+ function, causing an out-of-bounds read if we are reading a short
+ integer close to the end of buffer. */
+#if defined(__GNUC__) && (__GNUC__ >= 5) && !defined(__clang__)
+ asm volatile("": : :"memory");
+#endif
+
+ if (flag < 0xC0UL) {
if (end_ptr < ptr + 2) {
return(NULL);
}
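
The empty asm statement is a compiler-only barrier (it emits no instruction and is not a CPU fence); it forbids GCC from hoisting the subsequent wide read above the range check. A reduced, standalone illustration of the same pattern (not code from the patch):

/* Illustrative: read a big-endian u16 only after the bounds check.
The barrier keeps the compiler from speculating the read upward. */
static unsigned
read_u16_checked(
	const unsigned char*	ptr,
	const unsigned char*	end_ptr)
{
	if (end_ptr < ptr + 2) {
		return(0);	/* caller treats 0 as "too short" */
	}

#if defined(__GNUC__) && !defined(__clang__)
	asm volatile("" : : : "memory");	/* compiler barrier */
#endif

	return(((unsigned) ptr[0] << 8) | ptr[1]);
}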
diff --git a/storage/xtradb/os/os0thread.cc b/storage/xtradb/os/os0thread.cc
index aabdd06d76b..af826027efc 100644
--- a/storage/xtradb/os/os0thread.cc
+++ b/storage/xtradb/os/os0thread.cc
@@ -210,14 +210,42 @@ os_thread_create_func(
#endif
}
+/**
+Waits until the specified thread completes and joins it. The thread's exit
+value is ignored.
+
+@param thread thread to join */
+UNIV_INTERN
+void
+os_thread_join(
+ os_thread_t thread)
+{
+ /* This function is currently only used to work around the glibc
+ bug described in http://bugs.mysql.com/bug.php?id=82886.
+
+ On Windows no workaround is necessary: all threads are
+ "detached" upon thread exit (their handle is closed), so we do
+ nothing.
+ */
+#ifndef _WIN32
+ int ret MY_ATTRIBUTE((unused)) = pthread_join(thread, NULL);
+
+ /* Waiting on already-quit threads is allowed */
+ ut_ad(ret == 0 || ret == ESRCH);
+#endif
+}
+
/*****************************************************************//**
Exits the current thread. */
UNIV_INTERN
void
os_thread_exit(
/*===========*/
- void* exit_value) /*!< in: exit value; in Windows this void*
+ void* exit_value, /*!< in: exit value; in Windows this void*
is cast as a DWORD */
+ bool detach) /*!< in: if true, the thread will be detached
+ right before exiting. If false, another thread
+ is responsible for joining this thread. */
{
#ifdef UNIV_DEBUG_THREAD_CREATION
fprintf(stderr, "Thread exits, id %lu\n",
@@ -233,7 +261,8 @@ os_thread_exit(
#ifdef __WIN__
ExitThread((DWORD) exit_value);
#else
- pthread_detach(pthread_self());
+ if (detach)
+ pthread_detach(pthread_self());
pthread_exit(exit_value);
#endif
}
diff --git a/storage/xtradb/rem/rem0rec.cc b/storage/xtradb/rem/rem0rec.cc
index 80dc7557ec7..d1205608d47 100644
--- a/storage/xtradb/rem/rem0rec.cc
+++ b/storage/xtradb/rem/rem0rec.cc
@@ -323,7 +323,8 @@ rec_init_offsets_comp_ordinary(
stored in one byte for 0..127. The length
will be encoded in two bytes when it is 128 or
more, or when the field is stored externally. */
- if (UNIV_UNLIKELY(col->len > 255)
+ if (UNIV_UNLIKELY(col->len > 255 -
+ prtype_get_compression_extra(col->prtype))
|| UNIV_UNLIKELY(col->mtype
== DATA_BLOB)) {
if (len & 0x80) {
@@ -844,8 +845,12 @@ rec_get_converted_size_comp_prefix_low(
continue;
}
- ut_ad(len <= col->len || col->mtype == DATA_BLOB
- || (col->len == 0 && col->mtype == DATA_VARCHAR));
+ ut_ad(len <= col->len || col->mtype == DATA_BLOB ||
+ ((col->mtype == DATA_VARCHAR || col->mtype == DATA_BINARY
+ || col->mtype == DATA_VARMYSQL)
+ && (col->len == 0
+ || len <= col->len +
+ prtype_get_compression_extra(col->prtype))));
fixed_len = field->fixed_len;
if (temp && fixed_len
@@ -877,7 +882,9 @@ rec_get_converted_size_comp_prefix_low(
ut_ad(col->len >= 256 || col->mtype == DATA_BLOB);
extra_size += 2;
} else if (len < 128
- || (col->len < 256 && col->mtype != DATA_BLOB)) {
+ || (col->len < 256 -
+ prtype_get_compression_extra(col->prtype)
+ && col->mtype != DATA_BLOB)) {
extra_size++;
} else {
/* For variable-length columns, we look up the
@@ -1272,12 +1279,16 @@ rec_convert_dtuple_to_rec_comp(
*lens-- = (byte) (len >> 8) | 0xc0;
*lens-- = (byte) len;
} else {
- ut_ad(len <= dtype_get_len(type)
+ ut_ad(len <= dtype_get_len(type) +
+ prtype_get_compression_extra(
+ dtype_get_prtype(type))
|| dtype_get_mtype(type) == DATA_BLOB
|| !strcmp(index->name,
FTS_INDEX_TABLE_IND_NAME));
if (len < 128
- || (dtype_get_len(type) < 256
+ || (dtype_get_len(type) < 256 -
+ prtype_get_compression_extra(
+ dtype_get_prtype(type))
&& dtype_get_mtype(type) != DATA_BLOB)) {
*lens-- = (byte) len;
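
The recurring bound 256 - prtype_get_compression_extra(col->prtype) accounts for a compressed column exceeding its declared length by the compressed-header overhead, which shrinks the window in which a single length byte is enough. An illustrative restatement of the predicate (the helper is not in the patch):

/* Illustrative: does this non-BLOB field need two length bytes? */
static bool
needs_two_length_bytes(
	ulint	len,		/*!< in: actual field length */
	ulint	col_len,	/*!< in: declared column length */
	ulint	mtype,		/*!< in: main type */
	ulint	prtype)		/*!< in: precise type */
{
	const ulint	extra = prtype_get_compression_extra(prtype);

	return(!(len < 128
		 || (col_len < 256 - extra && mtype != DATA_BLOB)));
}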
diff --git a/storage/xtradb/row/row0ftsort.cc b/storage/xtradb/row/row0ftsort.cc
index a3a00620a6d..43cbdecb1c1 100644
--- a/storage/xtradb/row/row0ftsort.cc
+++ b/storage/xtradb/row/row0ftsort.cc
@@ -1026,7 +1026,7 @@ fts_parallel_merge(
CloseHandle(psort_info->thread_hdl);
#endif /*__WIN__ */
- os_thread_exit(NULL);
+ os_thread_exit(NULL, false);
OS_THREAD_DUMMY_RETURN;
}
diff --git a/storage/xtradb/row/row0log.cc b/storage/xtradb/row/row0log.cc
index 53e375023fb..a17ee405720 100644
--- a/storage/xtradb/row/row0log.cc
+++ b/storage/xtradb/row/row0log.cc
@@ -621,7 +621,7 @@ row_log_table_delete(
&old_pk_extra_size);
ut_ad(old_pk_extra_size < 0x100);
- mrec_size = 4 + old_pk_size;
+ mrec_size = 6 + old_pk_size;
/* Log enough prefix of the BLOB unless both the
old and new table are in COMPACT or REDUNDANT format,
@@ -651,8 +651,8 @@ row_log_table_delete(
*b++ = static_cast<byte>(old_pk_extra_size);
/* Log the size of external prefix we saved */
- mach_write_to_2(b, ext_size);
- b += 2;
+ mach_write_to_4(b, ext_size);
+ b += 4;
rec_convert_dtuple_to_temp(
b + old_pk_extra_size, new_index,
@@ -2276,14 +2276,14 @@ row_log_table_apply_op(
break;
case ROW_T_DELETE:
- /* 1 (extra_size) + 2 (ext_size) + at least 1 (payload) */
- if (mrec + 4 >= mrec_end) {
+ /* 1 (extra_size) + 4 (ext_size) + at least 1 (payload) */
+ if (mrec + 6 >= mrec_end) {
return(NULL);
}
extra_size = *mrec++;
- ext_size = mach_read_from_2(mrec);
- mrec += 2;
+ ext_size = mach_read_from_4(mrec);
+ mrec += 4;
ut_ad(mrec < mrec_end);
/* We assume extra_size < 0x100 for the PRIMARY KEY prefix.
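
The ROW_T_DELETE log record now reserves four bytes for the external-prefix size instead of two (hence mrec_size growing from 4 + old_pk_size to 6 + old_pk_size). A self-contained sketch of the matching encode/decode pair (function name illustrative):

/* Illustrative: write and re-read the widened ROW_T_DELETE prefix. */
static void
roundtrip_delete_prefix(
	ulint	extra_size,	/*!< in: PK extra size, < 0x100 */
	ulint	ext_size)	/*!< in: logged BLOB prefix size */
{
	byte	hdr[1 + 4];	/* extra_size byte + 4-byte ext_size */
	byte*	b = hdr;

	ut_ad(extra_size < 0x100);
	*b++ = static_cast<byte>(extra_size);
	mach_write_to_4(b, ext_size);

	const byte*	mrec = hdr;
	ulint		read_extra = *mrec++;

	ut_ad(read_extra == extra_size);
	ut_ad(mach_read_from_4(mrec) == ext_size);
}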
diff --git a/storage/xtradb/row/row0merge.cc b/storage/xtradb/row/row0merge.cc
index 3d7a5d2ef5d..9f154a712f4 100644
--- a/storage/xtradb/row/row0merge.cc
+++ b/storage/xtradb/row/row0merge.cc
@@ -619,7 +619,12 @@ row_merge_buf_add(
dfield_set_len(field, len);
}
- ut_ad(len <= col->len || col->mtype == DATA_BLOB);
+ ut_ad(len <= col->len || col->mtype == DATA_BLOB ||
+ ((col->mtype == DATA_VARCHAR || col->mtype == DATA_BINARY
+ || col->mtype == DATA_VARMYSQL)
+ && (col->len == 0
+ || len <= col->len +
+ prtype_get_compression_extra(col->prtype))));
fixed_len = ifield->fixed_len;
if (fixed_len && !dict_table_is_comp(index->table)
@@ -648,7 +653,9 @@ row_merge_buf_add(
} else if (dfield_is_ext(field)) {
extra_size += 2;
} else if (len < 128
- || (col->len < 256 && col->mtype != DATA_BLOB)) {
+ || (col->len < 256 -
+ prtype_get_compression_extra(col->prtype)
+ && col->mtype != DATA_BLOB)) {
extra_size++;
} else {
/* For variable-length columns, we look up the
@@ -4165,6 +4172,13 @@ wait_again:
" exited when creating FTS"
" index '%s'",
indexes[i]->name);
+ } else {
+ for (j = 0; j < FTS_NUM_AUX_INDEX;
+ j++) {
+
+ os_thread_join(merge_info[j]
+ .thread_hdl);
+ }
}
} else {
/* This cannot report duplicates; an
diff --git a/storage/xtradb/row/row0mysql.cc b/storage/xtradb/row/row0mysql.cc
index 059254a3c49..0bdee1282f8 100644
--- a/storage/xtradb/row/row0mysql.cc
+++ b/storage/xtradb/row/row0mysql.cc
@@ -65,11 +65,54 @@ Created 9/17/2000 Heikki Tuuri
#include "row0import.h"
#include "m_string.h"
#include "my_sys.h"
+#include "zlib.h"
#include <algorithm>
/** Provide optional 4.x backwards compatibility for 5.0 and above */
UNIV_INTERN ibool row_rollback_on_timeout = FALSE;
+/**
+Z_NO_COMPRESSION = 0
+Z_BEST_SPEED = 1
+Z_BEST_COMPRESSION = 9
+Z_DEFAULT_COMPRESSION = -1
+Compression level to be used by zlib for compressed-blob columns.
+Settable by user.
+*/
+UNIV_INTERN uint srv_compressed_columns_zip_level = DEFAULT_COMPRESSION_LEVEL;
+/**
+(Z_FILTERED | Z_HUFFMAN_ONLY | Z_RLE | Z_FIXED | Z_DEFAULT_STRATEGY)
+
+The strategy parameter is used to tune the compression algorithm. Use the
+value Z_DEFAULT_STRATEGY for normal data, Z_FILTERED for data produced by a
+filter (or predictor), Z_HUFFMAN_ONLY to force Huffman encoding only
+(no string match), or Z_RLE to limit match distances to one
+(run-length encoding). Filtered data consists mostly of small values with a
+somewhat random distribution. In this case, the compression algorithm is
+tuned to compress them better.
+The effect of Z_FILTERED is to force more Huffman coding and less string
+matching; it is somewhat intermediate between Z_DEFAULT_STRATEGY and
+Z_HUFFMAN_ONLY. Z_RLE is designed to be almost as fast as Z_HUFFMAN_ONLY,
+but give better compression for PNG image data. The strategy parameter only
+affects the compression ratio but not the correctness of the compressed
+output even if it is not set appropriately. Z_FIXED prevents the use of
+dynamic Huffman codes, allowing for a simpler decoder for special
+applications.
+*/
+const uint srv_compressed_columns_zlib_strategy = Z_DEFAULT_STRATEGY;
+/** Compress the column if the data length exceeds this value. */
+UNIV_INTERN ulong srv_compressed_columns_threshold = 96;
+/**
+Determine if zlib needs to compute adler32 value for the compressed data.
+This variable is similar to page_zip_zlib_wrap, but only used by
+compressed blob columns.
+*/
+const bool srv_compressed_columns_zlib_wrap = true;
+/**
+Determine if zlib will use custom memory allocation functions based on
+InnoDB memory heap routines (mem_heap_t*).
+*/
+const bool srv_compressed_columns_zlib_use_heap = false;
/** Chain node of the list of tables to drop in the background. */
struct row_mysql_drop_t{
char* table_name; /*!< table name */
@@ -173,6 +216,17 @@ row_mysql_prebuilt_free_blob_heap(
prebuilt->blob_heap = NULL;
}
+/** Frees the compress heap in prebuilt when no longer needed. */
+UNIV_INTERN
+void
+row_mysql_prebuilt_free_compress_heap(
+ row_prebuilt_t* prebuilt) /*!< in: prebuilt struct of a
+ ha_innobase:: table handle */
+{
+ mem_heap_free(prebuilt->compress_heap);
+ prebuilt->compress_heap = NULL;
+}
+
/*******************************************************************//**
Stores a >= 5.0.3 format true VARCHAR length to dest, in the MySQL row
format.
@@ -229,6 +283,425 @@ row_mysql_read_true_varchar(
return(field + 1);
}
+/**
+ Compressed BLOB header format:
+ ---------------------------------------------------------------
+ | reserved | wrap | algorithm | len-len | compressed | unused |
+ | [1] | [1] | [5] | [3] | [1] | [5] |
+ ---------------------------------------------------------------
+ | 0 0 | 1 1 | 2 6 | 7 9 | 10 10 | 11 15 |
+ ---------------------------------------------------------------
+ * 'reserved' bit is planned to be used in future versions of the BLOB
+ header. In this version it must always be
+ 'default_zip_column_reserved_value' (0).
+ * 'wrap' identifies if compression algorithm calculated a checksum
+ (adler32 in case of zlib) and appended it to the compressed data.
+ * 'algorithm' identifies which algorithm was used to compress this BLOB.
+ Currently, the only value 'default_zip_column_algorithm_value' (0) is
+ supported.
+ * 'len-len' field identifies the length of the column length data portion
+ followed by this header (see below).
+ * If 'compressed' bit is set to 1, then this header is immediately followed
+ by 1..8 bytes (depending on the value of the 'len-len' bitfield) which
+ encode the original (uncompressed) data length. These 'len-len' bytes are
+ followed by compressed representation of the original data.
+ * If 'compressed' bit is set to 0, every other bitfield ('wrap',
+ 'algorithm' and 'len-len') must be ignored. In this case the header is
+ immediately followed by uncompressed (original) data.
+*/
+
+/**
+ Currently the only supported value for the 'reserved' field is
+ false (0).
+*/
+static const bool default_zip_column_reserved_value = false;
+
+/**
+ Currently the only supported value for the 'algorithm' field is 0, which
+ means 'zlib'.
+*/
+static const uint default_zip_column_algorithm_value = 0;
+
+static const size_t zip_column_prefix_max_length =
+ ZIP_COLUMN_HEADER_LENGTH + 8;
+static const size_t zip_column_header_length = ZIP_COLUMN_HEADER_LENGTH;
+
+/* 'reserved', bit 0 */
+static const uint zip_column_reserved = 0;
+/* 0000 0000 0000 0001 */
+static const uint zip_column_reserved_mask = 0x0001;
+
+/* 'wrap', bit 1 */
+static const uint zip_column_wrap = 1;
+/* 0000 0000 0000 0010 */
+static const uint zip_column_wrap_mask = 0x0002;
+
+/* 'algorithm', bit 2,3,4,5,6 */
+static const uint zip_column_algorithm = 2;
+/* 0000 0000 0111 1100 */
+static const uint zip_column_algorithm_mask = 0x007C;
+
+/* 'len-len', bit 7,8,9 */
+static const uint zip_column_data_length = 7;
+/* 0000 0011 1000 0000 */
+static const uint zip_column_data_length_mask = 0x0380;
+
+/* 'compressed', bit 10 */
+static const uint zip_column_compressed = 10;
+/* 0000 0100 0000 0000 */
+static const uint zip_column_compressed_mask = 0x0400;
+
+/** Updates compressed block header with the given components */
+static void
+column_set_compress_header(
+ byte* data,
+ bool compressed,
+ ulint lenlen,
+ uint alg,
+ bool wrap,
+ bool reserved)
+{
+ ulint header = 0;
+ header |= (compressed << zip_column_compressed);
+ header |= (lenlen << zip_column_data_length);
+ header |= (alg << zip_column_algorithm);
+ header |= (wrap << zip_column_wrap);
+ header |= (reserved << zip_column_reserved);
+ mach_write_to_2(data, header);
+}
+
+/** Parse compressed block header into components */
+static void
+column_get_compress_header(
+ const byte* data,
+ bool* compressed,
+ ulint* lenlen,
+ uint* alg,
+ bool* wrap,
+ bool* reserved
+)
+{
+ ulint header = mach_read_from_2(data);
+ *compressed = ((header & zip_column_compressed_mask) >>
+ zip_column_compressed);
+ *lenlen = ((header & zip_column_data_length_mask) >>
+ zip_column_data_length);
+ *alg = ((header & zip_column_algorithm_mask) >>
+ zip_column_algorithm);
+ *wrap = ((header & zip_column_wrap_mask) >>
+ zip_column_wrap);
+ *reserved = ((header & zip_column_reserved_mask) >>
+ zip_column_reserved);
+}
+
+/** Allocate memory for zlib. */
+static
+void*
+column_zip_zalloc(
+ void* opaque, /*!< in/out: memory heap */
+ uInt items, /*!< in: number of items to allocate */
+ uInt size) /*!< in: size of an item in bytes */
+{
+ return(mem_heap_zalloc(static_cast<mem_heap_t*>(opaque),
+ items * size));
+}
+
+/** Deallocate memory for zlib. */
+static
+void
+column_zip_free(
+ void* opaque MY_ATTRIBUTE((unused)), /*!< in: memory heap */
+ void* address MY_ATTRIBUTE((unused))) /*!< in: object to free */
+{
+}
+
+/** Configure the zlib allocator to use the given memory heap. */
+UNIV_INTERN
+void
+column_zip_set_alloc(
+ void* stream, /*!< in/out: zlib stream */
+ mem_heap_t* heap) /*!< in: memory heap to use */
+{
+ z_stream* strm = static_cast<z_stream*>(stream);
+
+ if (srv_compressed_columns_zlib_use_heap) {
+ strm->zalloc = column_zip_zalloc;
+ strm->zfree = column_zip_free;
+ strm->opaque = heap;
+ } else {
+ strm->zalloc = (alloc_func)0;
+ strm->zfree = (free_func)0;
+ strm->opaque = (voidpf)0;
+ }
+}
+
+/** Compress blob/text/varchar column using zlib
+@return pointer to the compressed data */
+byte*
+row_compress_column(
+ const byte* data, /*!< in: data in mysql(uncompressed)
+ format */
+ ulint *len, /*!< in: data length; out: length of
+ compressed data*/
+ ulint lenlen, /*!< in: bytes used to store the length of
+ data */
+ const byte* dict_data,
+ /*!< in: optional dictionary data used for
+ compression */
+ ulint dict_data_len,
+ /*!< in: optional dictionary data length */
+ row_prebuilt_t* prebuilt)
+ /*!< in: use prebuilt->compress_heap only
+ here*/
+{
+ int err = 0;
+ ulint comp_len = *len;
+ ulint buf_len = *len + zip_column_prefix_max_length;
+ byte* buf;
+ byte* ptr;
+ z_stream c_stream;
+ bool wrap = srv_compressed_columns_zlib_wrap;
+
+ int window_bits = wrap ? MAX_WBITS : -MAX_WBITS;
+
+ if (!prebuilt->compress_heap) {
+ prebuilt->compress_heap =
+ mem_heap_create(max(UNIV_PAGE_SIZE, buf_len));
+ }
+
+ buf = static_cast<byte*>(mem_heap_zalloc(
+ prebuilt->compress_heap,buf_len));
+
+ if (*len < srv_compressed_columns_threshold ||
+ srv_compressed_columns_zip_level == Z_NO_COMPRESSION)
+ goto do_not_compress;
+
+ ptr = buf + zip_column_header_length + lenlen;
+
+ /* init deflate object */
+ c_stream.next_in = const_cast<Bytef*>(data);
+ c_stream.avail_in = *len;
+ c_stream.next_out = ptr;
+ c_stream.avail_out = comp_len;
+
+ column_zip_set_alloc(&c_stream, prebuilt->compress_heap);
+
+ err = deflateInit2(&c_stream, srv_compressed_columns_zip_level,
+ Z_DEFLATED, window_bits, MAX_MEM_LEVEL,
+ srv_compressed_columns_zlib_strategy);
+ ut_a(err == Z_OK);
+
+ if (dict_data != 0 && dict_data_len != 0) {
+ err = deflateSetDictionary(&c_stream, dict_data,
+ dict_data_len);
+ ut_a(err == Z_OK);
+ }
+
+ err = deflate(&c_stream, Z_FINISH);
+ if (err != Z_STREAM_END) {
+ deflateEnd(&c_stream);
+ if (err == Z_OK)
+ err = Z_BUF_ERROR;
+ } else {
+ comp_len = c_stream.total_out;
+ err = deflateEnd(&c_stream);
+ }
+
+ switch (err) {
+ case Z_OK:
+ break;
+ case Z_BUF_ERROR:
+ /* compressed data is larger than the uncompressed data */
+ break;
+ default:
+ ib_logf(IB_LOG_LEVEL_ERROR,
+ "failed to compress the column, error: %d\n", err);
+ }
+
+ /* make sure the compressed data size is smaller than
+ uncompressed data */
+ if (err == Z_OK &&
+ *len > (comp_len + zip_column_header_length + lenlen)) {
+ column_set_compress_header(buf, true, lenlen - 1,
+ default_zip_column_algorithm_value, wrap,
+ default_zip_column_reserved_value);
+ ptr = buf + zip_column_header_length;
+ /* store the uncompressed data length */
+ switch (lenlen) {
+ case 1:
+ mach_write_to_1(ptr, *len);
+ break;
+ case 2:
+ mach_write_to_2(ptr, *len);
+ break;
+ case 3:
+ mach_write_to_3(ptr, *len);
+ break;
+ case 4:
+ mach_write_to_4(ptr, *len);
+ break;
+ default:
+ ut_error;
+ }
+
+ *len = comp_len + zip_column_header_length + lenlen;
+ return buf;
+ }
+
+do_not_compress:
+ ptr = buf;
+ column_set_compress_header(ptr, false, 0,
+ default_zip_column_algorithm_value, false,
+ default_zip_column_reserved_value);
+ ptr += zip_column_header_length;
+ memcpy(ptr, data, *len);
+ *len += zip_column_header_length;
+ return buf;
+}
+
+/** Uncompress blob/text/varchar column using zlib
+@return pointer to the uncompressed data */
+const byte*
+row_decompress_column(
+ const byte* data, /*!< in: data in innodb(compressed) format */
+ ulint *len, /*!< in: data length; out: length of
+ decompressed data*/
+ const byte* dict_data,
+ /*!< in: optional dictionary data used for
+ decompression */
+ ulint dict_data_len,
+ /*!< in: optional dictionary data length */
+ row_prebuilt_t* prebuilt)
+ /*!< in: use prebuilt->compress_heap only
+ here*/
+{
+ ulint buf_len = 0;
+ byte* buf;
+ int err = 0;
+ int window_bits = 0;
+ z_stream d_stream;
+ bool is_compressed = false;
+ bool wrap = false;
+ bool reserved = false;
+ ulint lenlen = 0;
+ uint alg = 0;
+
+ ut_ad(*len != ULINT_UNDEFINED);
+ ut_ad(*len >= zip_column_header_length);
+
+ column_get_compress_header(data, &is_compressed, &lenlen, &alg,
+ &wrap, &reserved);
+
+ if (reserved != default_zip_column_reserved_value) {
+ ib_logf(IB_LOG_LEVEL_FATAL,
+ "unsupported compressed BLOB header format\n");
+ }
+
+ if (alg != default_zip_column_algorithm_value) {
+ ib_logf(IB_LOG_LEVEL_FATAL,
+ "unsupported 'algorithm' value in the"
+ " compressed BLOB header\n");
+ }
+
+ ut_a(lenlen < 4);
+
+ data += zip_column_header_length;
+ if (!is_compressed) { /* column not compressed */
+ *len -= zip_column_header_length;
+ return data;
+ }
+
+ lenlen++;
+
+ ulint comp_len = *len - zip_column_header_length - lenlen;
+
+ ulint uncomp_len = 0;
+ switch (lenlen) {
+ case 1:
+ uncomp_len = mach_read_from_1(data);
+ break;
+ case 2:
+ uncomp_len = mach_read_from_2(data);
+ break;
+ case 3:
+ uncomp_len = mach_read_from_3(data);
+ break;
+ case 4:
+ uncomp_len = mach_read_from_4(data);
+ break;
+ default:
+ ut_error;
+ }
+
+ data += lenlen;
+
+ /* data is compressed, decompress it */
+ if (!prebuilt->compress_heap) {
+ prebuilt->compress_heap =
+ mem_heap_create(max(UNIV_PAGE_SIZE, uncomp_len));
+ }
+
+ buf_len = uncomp_len;
+ buf = static_cast<byte*>(mem_heap_zalloc(
+ prebuilt->compress_heap, buf_len));
+
+ /* init d_stream */
+ d_stream.next_in = const_cast<Bytef*>(data);
+ d_stream.avail_in = comp_len;
+ d_stream.next_out = buf;
+ d_stream.avail_out = buf_len;
+
+ column_zip_set_alloc(&d_stream, prebuilt->compress_heap);
+
+ window_bits = wrap ? MAX_WBITS : -MAX_WBITS;
+ err = inflateInit2(&d_stream, window_bits);
+ ut_a(err == Z_OK);
+
+ err = inflate(&d_stream, Z_FINISH);
+ if (err == Z_NEED_DICT) {
+ ut_a(dict_data != 0 && dict_data_len != 0);
+ err = inflateSetDictionary(&d_stream, dict_data,
+ dict_data_len);
+ ut_a(err == Z_OK);
+ err = inflate(&d_stream, Z_FINISH);
+ }
+
+ if (err != Z_STREAM_END) {
+ inflateEnd(&d_stream);
+ if (err == Z_BUF_ERROR && d_stream.avail_in == 0)
+ err = Z_DATA_ERROR;
+ } else {
+ buf_len = d_stream.total_out;
+ err = inflateEnd(&d_stream);
+ }
+
+ switch (err) {
+ case Z_OK:
+ break;
+ case Z_BUF_ERROR:
+ ib_logf(IB_LOG_LEVEL_FATAL,
+ "zlib buf error, this shouldn't happen\n");
+ break;
+ default:
+ ib_logf(IB_LOG_LEVEL_FATAL,
+ "failed to decompress column, error: %d\n", err);
+ }
+
+ if (err == Z_OK) {
+ if (buf_len != uncomp_len) {
+ ib_logf(IB_LOG_LEVEL_FATAL,
+ "failed to decompress blob column, may"
+ " be corrupted\n");
+ }
+ *len = buf_len;
+ return buf;
+ }
+
+ *len -= (zip_column_header_length + lenlen);
+ return data;
+}
+
+
/*******************************************************************//**
Stores a reference to a BLOB in the MySQL format. */
UNIV_INTERN
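
A sketch exercising the two header helpers defined above to make the bit layout concrete (the function itself is illustrative, not part of the patch); note that the 'lenlen' bitfield stores one less than the number of length bytes, so 1 below means two length bytes follow:

/* Illustrative: pack and unpack one compressed-column header. */
static void
roundtrip_compress_header(void)
{
	byte	hdr[ZIP_COLUMN_HEADER_LENGTH];
	bool	compressed;
	bool	wrap;
	bool	reserved;
	ulint	lenlen;
	uint	alg;

	column_set_compress_header(hdr, true, 1,
		default_zip_column_algorithm_value, true,
		default_zip_column_reserved_value);

	column_get_compress_header(hdr, &compressed, &lenlen, &alg,
		&wrap, &reserved);

	ut_ad(compressed && wrap && !reserved);
	ut_ad(lenlen == 1);
	ut_ad(alg == default_zip_column_algorithm_value);
}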
@@ -242,10 +715,21 @@ row_mysql_store_blob_ref(
to 4 bytes */
const void* data, /*!< in: BLOB data; if the value to store
is SQL NULL this should be NULL pointer */
- ulint len) /*!< in: BLOB length; if the value to store
+ ulint len, /*!< in: BLOB length; if the value to store
is SQL NULL this should be 0; remember
also to set the NULL bit in the MySQL record
header! */
+ bool need_decompression,
+ /*!< in: if the data needs to be decompressed */
+ const byte* dict_data,
+ /*!< in: optional compression dictionary
+ data */
+ ulint dict_data_len,
+ /*!< in: optional compression dictionary data
+ length */
+ row_prebuilt_t* prebuilt)
+ /*!< in: use prebuilt->compress_heap only
+ here */
{
/* MySQL might assume the field is set to zero except the length and
the pointer fields */
@@ -257,13 +741,28 @@ row_mysql_store_blob_ref(
In 32-bit architectures we only use the first 4 bytes of the pointer
slot. */
- ut_a(col_len - 8 > 1 || len < 256);
- ut_a(col_len - 8 > 2 || len < 256 * 256);
- ut_a(col_len - 8 > 3 || len < 256 * 256 * 256);
+ ut_a(col_len - 8 > 1 ||
+ len < 256 +
+ (need_decompression ? ZIP_COLUMN_HEADER_LENGTH : 0));
+ ut_a(col_len - 8 > 2 ||
+ len < 256 * 256 +
+ (need_decompression ? ZIP_COLUMN_HEADER_LENGTH : 0));
+ ut_a(col_len - 8 > 3 ||
+ len < 256 * 256 * 256 +
+ (need_decompression ? ZIP_COLUMN_HEADER_LENGTH : 0));
- mach_write_to_n_little_endian(dest, col_len - 8, len);
+ const byte *ptr = NULL;
- memcpy(dest + col_len - 8, &data, sizeof data);
+ if (need_decompression)
+ ptr = row_decompress_column((const byte*)data, &len,
+ dict_data, dict_data_len, prebuilt);
+
+ if (ptr)
+ memcpy(dest + col_len - 8, &ptr, sizeof ptr);
+ else
+ memcpy(dest + col_len - 8, &data, sizeof data);
+
+ mach_write_to_n_little_endian(dest, col_len - 8, len);
}
/*******************************************************************//**
@@ -276,15 +775,32 @@ row_mysql_read_blob_ref(
ulint* len, /*!< out: BLOB length */
const byte* ref, /*!< in: BLOB reference in the
MySQL format */
- ulint col_len) /*!< in: BLOB reference length
+ ulint col_len, /*!< in: BLOB reference length
(not BLOB length) */
+ bool need_compression,
+ /*!< in: if the data needs to be
+ compressed */
+ const byte* dict_data, /*!< in: optional compression
+ dictionary data */
+ ulint dict_data_len, /*!< in: optional compression
+ dictionary data length */
+ row_prebuilt_t* prebuilt) /*!< in: use prebuilt->compress_heap
+ only here */
{
- byte* data;
+ byte* data = NULL;
+ byte* ptr = NULL;
*len = mach_read_from_n_little_endian(ref, col_len - 8);
memcpy(&data, ref + col_len - 8, sizeof data);
+ if (need_compression) {
+ ptr = row_compress_column(data, len, col_len - 8, dict_data,
+ dict_data_len, prebuilt);
+ if (ptr)
+ data = ptr;
+ }
+
return(data);
}
@@ -367,7 +883,16 @@ row_mysql_store_col_in_innobase_format(
necessarily the length of the actual
payload data; if the column is a true
VARCHAR then this is irrelevant */
- ulint comp) /*!< in: nonzero=compact format */
+ ulint comp, /*!< in: nonzero=compact format */
+ bool need_compression,
+ /*!< in: if the data needs to be
+ compressed */
+ const byte* dict_data, /*!< in: optional compression
+ dictionary data */
+ ulint dict_data_len, /*!< in: optional compression
+ dictionary data length */
+ row_prebuilt_t* prebuilt) /*!< in: use prebuilt->compress_heap
+ only here */
{
const byte* ptr = mysql_data;
const dtype_t* dtype;
@@ -420,8 +945,14 @@ row_mysql_store_col_in_innobase_format(
lenlen = 2;
}
- ptr = row_mysql_read_true_varchar(&col_len, mysql_data,
- lenlen);
+ const byte* tmp_ptr = row_mysql_read_true_varchar(
+ &col_len, mysql_data, lenlen);
+ if (need_compression)
+ ptr = row_compress_column(tmp_ptr, &col_len,
+ lenlen, dict_data, dict_data_len,
+ prebuilt);
+ else
+ ptr = tmp_ptr;
} else {
/* Remove trailing spaces from old style VARCHAR
columns. */
@@ -503,7 +1034,9 @@ row_mysql_store_col_in_innobase_format(
}
} else if (type == DATA_BLOB && row_format_col) {
- ptr = row_mysql_read_blob_ref(&col_len, mysql_data, col_len);
+ ptr = row_mysql_read_blob_ref(&col_len, mysql_data, col_len,
+ need_compression, dict_data, dict_data_len,
+ prebuilt);
}
dfield_set_data(dfield, ptr, col_len);
@@ -561,7 +1094,11 @@ row_mysql_convert_row_to_innobase(
TRUE, /* MySQL row format data */
mysql_rec + templ->mysql_col_offset,
templ->mysql_col_len,
- dict_table_is_comp(prebuilt->table));
+ dict_table_is_comp(prebuilt->table),
+ templ->compressed,
+ reinterpret_cast<const byte*>(
+ templ->zip_dict_data.str),
+ templ->zip_dict_data.length, prebuilt);
next_column:
;
}
@@ -909,6 +1446,10 @@ row_prebuilt_free(
mem_heap_free(prebuilt->blob_heap);
}
+ if (prebuilt->compress_heap) {
+ mem_heap_free(prebuilt->compress_heap);
+ }
+
if (prebuilt->old_vers_heap) {
mem_heap_free(prebuilt->old_vers_heap);
}
@@ -1344,6 +1885,9 @@ row_insert_for_mysql(
return(DB_READ_ONLY);
}
+ if (UNIV_LIKELY_NULL(prebuilt->compress_heap))
+ mem_heap_empty(prebuilt->compress_heap);
+
trx->op_info = "inserting";
row_mysql_delay_if_needed();
@@ -2748,6 +3292,10 @@ loop:
return(n_tables + n_tables_dropped);
}
+ DBUG_EXECUTE_IF("row_drop_tables_in_background_sleep",
+ os_thread_sleep(5000000);
+ );
+
table = dict_table_open_on_name(drop->table_name, FALSE, FALSE,
DICT_ERR_IGNORE_NONE);
@@ -2758,6 +3306,16 @@ loop:
goto already_dropped;
}
+ if (!table->to_be_dropped) {
+ /* There is a scenario: the old table is dropped
+ just after it's added into drop list, and new
+ table with the same name is created, then we try
+ to drop the new table in background. */
+ dict_table_close(table, FALSE, FALSE);
+
+ goto already_dropped;
+ }
+
ut_a(!table->can_be_evicted);
dict_table_close(table, FALSE, FALSE);
@@ -2888,6 +3446,12 @@ row_mysql_table_id_reassign(
pars_info_add_ull_literal(info, "old_id", table->id);
pars_info_add_ull_literal(info, "new_id", *new_id);
+ /* As micro-SQL does not support int4 == int8 comparisons,
+ old and new IDs are added again under different names as
+ int4 values */
+ pars_info_add_int4_literal(info, "old_id_narrow", table->id);
+ pars_info_add_int4_literal(info, "new_id_narrow", *new_id);
+
err = que_eval_sql(
info,
"PROCEDURE RENUMBER_TABLE_PROC () IS\n"
@@ -2898,6 +3462,8 @@ row_mysql_table_id_reassign(
" WHERE TABLE_ID = :old_id;\n"
"UPDATE SYS_INDEXES SET TABLE_ID = :new_id\n"
" WHERE TABLE_ID = :old_id;\n"
+ "UPDATE SYS_ZIP_DICT_COLS SET TABLE_ID = :new_id_narrow\n"
+ " WHERE TABLE_ID = :old_id_narrow;\n"
"END;\n", FALSE, trx);
return(err);
@@ -3713,6 +4279,12 @@ next_rec:
pars_info_add_ull_literal(info, "old_id", table->id);
pars_info_add_ull_literal(info, "new_id", new_id);
+ /* As micro-SQL does not support int4 == int8 comparisons,
+ old and new IDs are added again under different names as
+ int4 values */
+ pars_info_add_int4_literal(info, "old_id_narrow", table->id);
+ pars_info_add_int4_literal(info, "new_id_narrow", new_id);
+
err = que_eval_sql(info,
"PROCEDURE RENUMBER_TABLE_ID_PROC () IS\n"
"BEGIN\n"
@@ -3724,6 +4296,9 @@ next_rec:
"UPDATE SYS_INDEXES"
" SET TABLE_ID = :new_id, SPACE = :new_space\n"
" WHERE TABLE_ID = :old_id;\n"
+ "UPDATE SYS_ZIP_DICT_COLS\n"
+ " SET TABLE_ID = :new_id_narrow\n"
+ " WHERE TABLE_ID = :old_id_narrow;\n"
"END;\n"
, FALSE, trx);
@@ -4089,6 +4664,13 @@ row_drop_table_for_mysql(
}
}
+
+ DBUG_EXECUTE_IF("row_drop_table_add_to_background",
+ row_add_table_to_background_drop_list(table->name);
+ err = DB_SUCCESS;
+ goto funct_exit;
+ );
+
/* TODO: could we replace the counter n_foreign_key_checks_running
with lock checks on the table? Acquire here an exclusive lock on the
table, and rewrite lock0lock.cc and the lock wait in srv0srv.cc so that
@@ -4368,6 +4950,19 @@ row_drop_table_for_mysql(
filepath = fil_make_ibd_name(tablename, false);
}
+ /* Remove all compression dictionary references for the
+ table */
+ err = dict_create_remove_zip_dict_references_for_table(
+ table->id, trx);
+ if (err != DB_SUCCESS) {
+ ib_logf(IB_LOG_LEVEL_ERROR, "Error: (%s) not "
+ "able to remove compression dictionary "
+ "references for table %s", ut_strerr(err),
+ tablename);
+
+ goto funct_exit;
+ }
+
if (dict_table_has_fts_index(table)
|| DICT_TF2_FLAG_IS_SET(table, DICT_TF2_FTS_HAS_DOC_ID)) {
ut_ad(table->n_ref_count == 0);
@@ -4715,6 +5310,19 @@ loop:
row_mysql_lock_data_dictionary(trx);
while ((table_name = dict_get_first_table_name_in_db(name))) {
+ /* Drop the parent table if this is an FTS aux table, to
+ avoid accessing dropped FTS aux tables in the information
+ schema while the parent table still exists.
+ Note: dropping the parent table drops its FTS aux tables. */
+ char* parent_table_name;
+ parent_table_name = fts_get_parent_table_name(
+ table_name, strlen(table_name));
+
+ if (parent_table_name != NULL) {
+ mem_free(table_name);
+ table_name = parent_table_name;
+ }
+
ut_a(memcmp(table_name, name, namelen) == 0);
table = dict_table_open_on_name(
diff --git a/storage/xtradb/row/row0sel.cc b/storage/xtradb/row/row0sel.cc
index 9732a5f9400..9bdb8d1bb98 100644
--- a/storage/xtradb/row/row0sel.cc
+++ b/storage/xtradb/row/row0sel.cc
@@ -2458,9 +2458,11 @@ row_sel_convert_mysql_key_to_innobase(
if (UNIV_LIKELY(!is_null)) {
buf = row_mysql_store_col_in_innobase_format(
dfield, buf,
- FALSE, /* MySQL key value format col */
+ /* MySQL key value format col */
+ FALSE,
key_ptr + data_offset, data_len,
- dict_table_is_comp(index->table));
+ dict_table_is_comp(index->table),
+ false, 0, 0, 0);
ut_a(buf <= original_buf + buf_len);
}
@@ -2553,12 +2555,16 @@ row_sel_store_row_id_to_prebuilt(
#ifdef UNIV_DEBUG
/** Convert a non-SQL-NULL field from Innobase format to MySQL format. */
-# define row_sel_field_store_in_mysql_format(dest,templ,idx,field,src,len) \
- row_sel_field_store_in_mysql_format_func(dest,templ,idx,field,src,len)
+# define row_sel_field_store_in_mysql_format( \
+ dest,templ,idx,field,src,len,prebuilt) \
+ row_sel_field_store_in_mysql_format_func \
+ (dest,templ,idx,field,src,len,prebuilt)
#else /* UNIV_DEBUG */
/** Convert a non-SQL-NULL field from Innobase format to MySQL format. */
-# define row_sel_field_store_in_mysql_format(dest,templ,idx,field,src,len) \
- row_sel_field_store_in_mysql_format_func(dest,templ,src,len)
+# define row_sel_field_store_in_mysql_format( \
+ dest,templ,idx,field,src,len,prebuilt) \
+ row_sel_field_store_in_mysql_format_func \
+ (dest,templ,src,len,prebuilt)
#endif /* UNIV_DEBUG */
/**************************************************************//**
@@ -2588,7 +2594,10 @@ row_sel_field_store_in_mysql_format_func(
templ->icp_rec_field_no */
#endif /* UNIV_DEBUG */
const byte* data, /*!< in: data to store */
- ulint len) /*!< in: length of the data */
+ ulint len, /*!< in: length of the data */
+ row_prebuilt_t* prebuilt)
+ /*!< in: use prebuilt->compress_heap
+ only here */
{
byte* ptr;
#ifdef UNIV_DEBUG
@@ -2632,6 +2641,15 @@ row_sel_field_store_in_mysql_format_func(
field_end = dest + templ->mysql_col_len;
if (templ->mysql_type == DATA_MYSQL_TRUE_VARCHAR) {
+ /* If this is a compressed column,
+ decompress it first */
+ if (templ->compressed)
+ data = row_decompress_column(data, &len,
+ reinterpret_cast<const byte*>(
+ templ->zip_dict_data.str),
+ templ->zip_dict_data.length,
+ prebuilt);
+
/* This is a >= 5.0.3 type true VARCHAR. Store the
length of the data to the first byte or the first
two bytes of dest. */
@@ -2682,7 +2700,11 @@ row_sel_field_store_in_mysql_format_func(
already copied to the buffer in row_sel_store_mysql_rec */
row_mysql_store_blob_ref(dest, templ->mysql_col_len, data,
- len);
+ len, templ->compressed,
+ reinterpret_cast<const byte*>(
+ templ->zip_dict_data.str),
+ templ->zip_dict_data.length,
+ prebuilt);
break;
case DATA_MYSQL:
@@ -2835,7 +2857,7 @@ row_sel_store_mysql_field_func(
row_sel_field_store_in_mysql_format(
mysql_rec + templ->mysql_col_offset,
- templ, index, field_no, data, len);
+ templ, index, field_no, data, len, prebuilt);
if (heap != prebuilt->blob_heap) {
mem_heap_free(heap);
@@ -2885,7 +2907,7 @@ row_sel_store_mysql_field_func(
row_sel_field_store_in_mysql_format(
mysql_rec + templ->mysql_col_offset,
- templ, index, field_no, data, len);
+ templ, index, field_no, data, len, prebuilt);
}
ut_ad(len != UNIV_SQL_NULL);
@@ -2933,6 +2955,9 @@ row_sel_store_mysql_rec(
prebuilt->blob_heap = NULL;
}
+ if (UNIV_LIKELY_NULL(prebuilt->compress_heap))
+ mem_heap_empty(prebuilt->compress_heap);
+
for (i = 0; i < prebuilt->n_template; i++) {
const mysql_row_templ_t*templ = &prebuilt->mysql_template[i];
const ulint field_no
diff --git a/storage/xtradb/srv/srv0start.cc b/storage/xtradb/srv/srv0start.cc
index 6b1e8c39b38..52bbd61e2b2 100644
--- a/storage/xtradb/srv/srv0start.cc
+++ b/storage/xtradb/srv/srv0start.cc
@@ -2809,6 +2809,12 @@ files_checked:
}
}
+ /* Create the SYS_ZIP_DICT system table */
+ err = dict_create_or_check_sys_zip_dict();
+ if (err != DB_SUCCESS) {
+ return(err);
+ }
+
srv_is_being_started = FALSE;
ut_a(trx_purge_state() == PURGE_STATE_INIT);